/*
 * Try to grab the accept mutex shared by all worker processes; only one
 * worker can hold it at a time.  Acquisition never blocks — the call
 * returns immediately, and on success ngx_accept_mutex_held is set to 1.
 * Holding the lock means the listening handles are placed into this
 * process's epoll; losing it means they are removed from epoll.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {  /* file lock or spinlock */

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /* We already held the lock on the previous round and the accept
         * events are still registered — nothing to do.  Note the
         * negation on the RTSIG flag at the end. */
        if (ngx_accept_mutex_held
            && ngx_accept_events == 0
            && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
        {
            return NGX_OK;
        }

        /* Add the listening sockets' read events to epoll: we own the
         * lock now, so this process may accept new connections. */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;  /* lock acquired; safe to return */

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    if (ngx_accept_mutex_held) {
        /*
         * ngx_accept_mutex_held is not changed outside this function, so
         * reaching here means we held the lock last round but lost it this
         * time: the epoll registrations must be removed.
         * This check also avoids redundant work: at startup nothing has
         * been added to epoll yet, so if the very first trylock fails (or
         * several in a row fail) there is nothing to delete repeatedly.
         */
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
/*
 * Walk every node of the shared map, invoking func(node, args) for each.
 * The shared-pool mutex is taken non-blockingly: returns -1 without
 * iterating when the lock is busy, 0 after a complete pass.
 */
int ngx_shmap_foreach(ngx_shm_zone_t* zone, foreach_pt func, void* args)
{
    ngx_queue_t       *node, *end;
    ngx_shmap_node_t  *entry;
    ngx_shmap_ctx_t   *ctx;

    assert(zone != NULL);

    ctx = zone->data;

    /* Non-blocking lock attempt; bail out if another holder is active. */
    if (!ngx_shmtx_trylock(&ctx->shpool->mutex)) {
        return -1;
    }

    end = ngx_queue_sentinel(&ctx->sh->queue);

    for (node = ngx_queue_head(&ctx->sh->queue);
         node != end;
         node = ngx_queue_next(node))
    {
        entry = ngx_queue_data(node, ngx_shmap_node_t, queue);
        func(entry, args);
    }

    ngx_shmtx_unlock(&ctx->shpool->mutex);

    return 0;
}
// 尝试获取负载均衡锁,监听端口 // 如未获取则不监听端口 // 锁标志ngx_accept_mutex_held // 内部调用ngx_enable_accept_events/ngx_disable_accept_events ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle) { // 尝试锁定共享内存锁 // 非阻塞,会立即返回 if (ngx_shmtx_trylock(&ngx_accept_mutex)) { ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "accept mutex locked"); // 锁成功 // 之前已经持有了锁,那么就直接返回,继续监听端口 // ngx_accept_events在epoll里不使用 // rtsig在nginx 1.9.x已经删除 if (ngx_accept_mutex_held && ngx_accept_events == 0) { return NGX_OK; } // 之前没有持有锁,需要注册epoll事件监听端口 // 遍历监听端口列表,加入epoll连接事件,开始接受请求 if (ngx_enable_accept_events(cycle) == NGX_ERROR) { // 如果监听失败就需要立即解锁,函数结束 ngx_shmtx_unlock(&ngx_accept_mutex); return NGX_ERROR; } // 已经成功将监听事件加入epoll // 设置已经获得锁的标志 ngx_accept_events = 0; ngx_accept_mutex_held = 1; return NGX_OK; } // try失败,未获得锁,极小的消耗 ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "accept mutex lock failed: %ui", ngx_accept_mutex_held); // 未获得锁 // 但之前持有锁,也就是说之前在监听端口 if (ngx_accept_mutex_held) { // 遍历监听端口列表,删除epoll监听连接事件,不接受请求 if (ngx_disable_accept_events(cycle, 0) == NGX_ERROR) { return NGX_ERROR; } // 设置未获得锁的标志 ngx_accept_mutex_held = 0; } return NGX_OK; }
/*
 * Try to acquire the shared accept mutex and bring this worker's
 * listening-socket registrations in line with the outcome.
 * Non-blocking; returns NGX_OK whether or not the lock was obtained,
 * NGX_ERROR only when updating the event registrations fails.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (!ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex lock failed: %ui", ngx_accept_mutex_held);

        /* The lock was lost this round while the listening events may
         * still be registered from the last round: deregister them. */
        if (ngx_accept_mutex_held) {

            if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ngx_accept_mutex_held = 0;
        }

        return NGX_OK;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex locked");

    /* Held the lock previously and the accept events are still in the
     * event module (skipped when the rtsig module is in use): done. */
    if (ngx_accept_mutex_held
        && ngx_accept_events == 0
        && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
    {
        return NGX_OK;
    }

    /* Freshly acquired: add the read events of all listening sockets;
     * on failure the mutex must be released before reporting the error. */
    if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
        return NGX_ERROR;
    }

    /* Record that this process now owns ngx_accept_mutex. */
    ngx_accept_events = 0;
    ngx_accept_mutex_held = 1;

    return NGX_OK;
}
/*
 * Timer handler intended to expire stale torrent entries from the shared
 * cache.  The whole eviction body is currently compiled out with #if 0;
 * the only live work is the debug log and re-arming the timer.
 */
static void
ngx_btt_expire(ngx_event_t *ev)
{
#if 0
    time_t              now;
    ngx_uint_t          i;
    ngx_queue_t        *q;
#endif
    ngx_btt_conf_t     *bcf;
#if 0
    ngx_btt_torrent_t  *t;
#endif

    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ev->log, 0, "btt expire");

    bcf = ev->data;

#if 0
    /* NOTE(review): this disabled code uses an identifier "code" that is
     * never declared (the declaration above is "t"); it would not compile
     * if re-enabled — fix before removing the #if 0. */
    if (!ngx_shmtx_trylock(&bcf->pool->mutex)) {
        goto done;
    }

    now = ngx_time();

    /* Evict at most two expired entries per tick, oldest first. */
    for (i = 0; i < 2; i++) {

        if (ngx_queue_empty(&bcf->cache->queue)) {
            break;
        }

        q = ngx_queue_last(&bcf->cache->queue);

        code = ngx_queue_data(q, ngx_btt_torrent_t, queue);

        if (code->expire >= now) {
            break;
        }

        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0,
                       "lua cache expire node \"%V\"", &code->path);

        ngx_queue_remove(&code->queue);
        ngx_rbtree_delete(&bcf->cache->rbtree, &code->node);
        ngx_slab_free_locked(bcf->pool, code);
    }

    ngx_shmtx_unlock(&bcf->pool->mutex);

done:

#endif

    /* Re-arm: fire again after one tenth of the expire interval
     * (bcf->expire is presumably in seconds, timers in ms — TODO confirm). */
    ngx_add_timer(&bcf->event, bcf->expire * 1000 / 10);
}
//在打开accept_mutex锁的情况下,只有调用ngx_trylock_accept_mutex方法后, //当前的worker进程才会去试着监听web端口 ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle) { /* * 使用进程间的同步锁,试图获取accept_mutex锁. * ngx_shmtx_trylock返回1表示成功拿到锁,返回0表示获取锁失败. * 这个获取锁的过程是非阻塞的,此时一旦锁被其它的worker子进程占用 * ngx_shmtx_trylock方法会立刻返回 * */ if (ngx_shmtx_trylock(&ngx_accept_mutex)) { ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "accept mutex locked"); /* 如果获取到accept_mutex锁,但ngx_accept_mutex_held为1,则立刻返回. * ngx_accept_mutex_held是一个标志位,当它为1时,表示当前进程已经获取到锁了 * */ if (ngx_accept_mutex_held && ngx_accept_events == 0) { return NGX_OK; } //将所有监听连接的读事件添加到当前的epoll等事件驱动模块中 if (ngx_enable_accept_events(cycle) == NGX_ERROR) { //如果将监听句柄添加到事件驱动模块失败,就必须释放ngx_accept_mutex锁 ngx_shmtx_unlock(&ngx_accept_mutex); return NGX_ERROR; } /* 经过ngx_enable_accept_events方法的调用,当前进程的事件驱动模块已经开始监听所有的端口, * 这时需要把ngx_accept_mutex_held标志位置为1,方便本进程的其它模块了解它目前已经获取到了锁 * */ ngx_accept_events = 0; ngx_accept_mutex_held = 1; return NGX_OK; } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "accept mutex lock failed: %ui", ngx_accept_mutex_held); /* 如果ngx_shmtx_trylock返回0,则表明获取ngx_accept_mutex锁失败,这时如果ngx_accept_mutex_held * 标志位还为1,即当前进程还在获取到锁的状态,这当然是不正确的,需要处理 * */ if (ngx_accept_mutex_held) { //ngx_disable_accept_events会将所有监听连接的读事件从事件驱动模块中移除 if (ngx_disable_accept_events(cycle) == NGX_ERROR) { return NGX_ERROR; } //在没有获取到ngx_accept_mutex锁时,必须把标志位置为0 ngx_accept_mutex_held = 0; } return NGX_OK; }
/*
 * Grab the shared accept mutex without blocking and synchronise this
 * worker's listening-fd event registrations with the result.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    ngx_uint_t  got_lock;

    got_lock = ngx_shmtx_trylock(&ngx_accept_mutex);

    if (got_lock) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /* Already listening from a previous round — skip re-registration
         * (not applicable when the rtsig event module is in use). */
        if (ngx_accept_mutex_held
            && ngx_accept_events == 0
            && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
        {
            return NGX_OK;
        }

        /* Add the listening fds' read events to the epoll event set. */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    /* Lock not obtained: if the listening fds are still registered from
     * a previous round, remove them from the epoll event set. */
    if (ngx_accept_mutex_held) {

        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
/*
 * Non-blocking attempt to take the shared accept mutex.  On success the
 * listening sockets are (re)registered with this worker's event module
 * and ngx_accept_mutex_held is raised; on failure any registrations left
 * over from a previous round are removed and the flag is cleared.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (!ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex lock failed: %ui", ngx_accept_mutex_held);

        /* We held the lock last time but lost it now: take the listening
         * ports out of this worker's event module. */
        if (ngx_accept_mutex_held) {

            if (ngx_disable_accept_events(cycle, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ngx_accept_mutex_held = 0;
        }

        return NGX_OK;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex locked");

    /* Already held the lock and still registered: nothing more to do. */
    if (ngx_accept_mutex_held && ngx_accept_events == 0) {
        return NGX_OK;
    }

    /* Newly (re)acquired: register the listening ports with the event
     * module; the mutex must be released if registration fails. */
    if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
        return NGX_ERROR;
    }

    /* Mark this process as the current lock holder. */
    ngx_accept_events = 0;
    ngx_accept_mutex_held = 1;

    return NGX_OK;
}
/*
 * Attempt to obtain the accept mutex without blocking and keep the
 * accept-event registrations consistent with whether we hold it.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    ngx_uint_t  acquired;

    acquired = ngx_shmtx_trylock(&ngx_accept_mutex);

    if (acquired) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /* Held last round and still registered — fast path out. */
        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        /* Register accept events; drop the lock if that fails. */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    /* Lost the lock while still registered: deregister accept events. */
    if (ngx_accept_mutex_held) {

        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
/*
 * Keepalive timer handler: under the kp mutex, builds a fake HTTP request
 * on a fresh pool and starts a TFS RC-keepalive action via
 * ngx_http_tfs_init().
 *
 * Ownership: on success the pool and the kp mutex are presumably released
 * later by the installed finalize handler
 * (ngx_http_tfs_timers_finalize_request_handler) — TODO confirm.  On any
 * setup failure the pool is destroyed and the mutex released here.
 */
static void
ngx_http_tfs_timeout_handler(ngx_event_t *event)
{
    ngx_int_t                    rc;
    ngx_pool_t                  *pool;
    ngx_http_tfs_t              *t;
    ngx_connection_t            *dummy;
    ngx_http_request_t          *r;
    ngx_http_tfs_timers_data_t  *data;

    dummy = event->data;
    data = dummy->data;

    if (!ngx_shmtx_trylock(&data->lock->ngx_http_tfs_kp_mutex)) {
        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, event->log, 0,
                       "tfs kp mutex lock failed");
        return;
    }

    if (ngx_queue_empty(&data->upstream->rc_ctx->sh->kp_queue)) {
        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, event->log, 0,
                       "empty rc keepalive queue");
        ngx_shmtx_unlock(&data->lock->ngx_http_tfs_kp_mutex);
        ngx_add_timer(event, data->upstream->rcs_interval);
        return;
    }

    pool = ngx_create_pool(8192, event->log);
    if (pool == NULL) {
        ngx_shmtx_unlock(&data->lock->ngx_http_tfs_kp_mutex);
        return;
    }

    /* fake ngx_http_request_t */
    r = ngx_pcalloc(pool, sizeof(ngx_http_request_t));
    if (r == NULL) {
        /* BUGFIX: the original returned here without destroying the
         * pool, leaking it on every such allocation failure. */
        goto failed;
    }

    r->pool = pool;

    r->connection = ngx_pcalloc(pool, sizeof(ngx_connection_t));
    if (r->connection == NULL) {
        goto failed;
    }

    r->connection->log = event->log;
    /* in order to return from ngx_http_run_posted_requests() */
    r->connection->destroyed = 1;

    t = ngx_pcalloc(pool, sizeof(ngx_http_tfs_t));
    if (t == NULL) {
        goto failed;
    }

    t->pool = pool;
    t->data = r;
    t->log = event->log;
    t->finalize_request = ngx_http_tfs_timers_finalize_request_handler;
    t->finalize_data = event;

    t->r_ctx.action.code = NGX_HTTP_TFS_ACTION_KEEPALIVE;
    t->r_ctx.version = 1;

    t->loc_conf = ngx_pcalloc(pool, sizeof(ngx_http_tfs_loc_conf_t));
    if (t->loc_conf == NULL) {
        goto failed;
    }

    t->loc_conf->upstream = data->upstream;
    t->main_conf = data->main_conf;

    rc = ngx_http_tfs_init(t);
    if (rc == NGX_ERROR) {
        goto failed;
    }

    /* Success: the keepalive request is in flight; do not release
     * the pool or the mutex here. */
    return;

failed:

    /* Single cleanup path for all setup failures (CERT MEM12-C). */
    ngx_destroy_pool(pool);
    ngx_shmtx_unlock(&data->lock->ngx_http_tfs_kp_mutex);
}