/* Completion callback for the stat() request issued by the poll timer.
 * Reports errors (once per distinct errno) or stat-data changes to the
 * user's callback, then reschedules the timer for the next poll.
 *
 * req is embedded in a struct poll_ctx; ctx owns the timer handle and the
 * cached statbuf used for change detection.
 */
static void poll_cb(uv_fs_t* req) {
  uv_statbuf_t* statbuf;
  struct poll_ctx* ctx;
  uint64_t interval;
  /* Per-thread slot into zero_statbuf.  NOTE(review): the +1 skips slot 0
   * and reads one past the end of a 0-based table if thread key ids run
   * 0..N-1 -- confirm against the declaration of zero_statbuf. */
  const int tid = uv_getThreadKeyId() + 1;

  ctx = container_of(req, struct poll_ctx, fs_req);

  if (ctx->parent_handle == NULL) {
    /* Handle has been stopped or closed while the stat was in flight.
     * Close the timer and release the request unconditionally: the old
     * guard on req->path/req->ptr leaked the timer handle (keeping the
     * loop alive) and the request's resources whenever the completed
     * stat carried neither a path nor a result buffer. */
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
    uv_fs_req_cleanup(req);
    return;
  }

  if (req->result != 0) {
    /* Report each distinct error once; suppress repeats of the same errno. */
    if (ctx->busy_polling != -req->errorno) {
      uv__set_artificial_error(ctx->loop, req->errorno);
      ctx->poll_cb(ctx->parent_handle, -1, &ctx->statbuf, &zero_statbuf[tid]);
      ctx->busy_polling = -req->errorno;
    }
    goto out;
  }

  statbuf = &req->statbuf;

  /* busy_polling == 0 means this is the very first poll: just cache the
   * result silently.  Afterwards, fire the callback on recovery from an
   * error (busy_polling < 0) or when the stat data changed. */
  if (ctx->busy_polling != 0)
    if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
      ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);

  ctx->statbuf = *statbuf;
  ctx->busy_polling = 1;

out:
  uv_fs_req_cleanup(req);

  if (ctx->parent_handle == NULL) {
    /* The user's callback stopped the handle; shut the timer down. */
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
    return;
  }

  /* Reschedule the timer, subtracting the time spent in the stat() so the
   * poll period stays constant. */
  interval = ctx->interval;
  interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;

  if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
    abort();
}
/* Completion callback for the stat() issued by the poll timer.  Notifies
 * the user's callback of errors (once per distinct code) or of changes in
 * the stat data, then rearms the timer for the next poll. */
static void poll_cb(uv_fs_t* req) {
  struct poll_ctx* ctx;
  uv_stat_t* now;
  uint64_t next_timeout;

  ctx = container_of(req, struct poll_ctx, fs_req);

  /* The watcher was stopped or closed while the stat was in flight. */
  if (ctx->parent_handle == NULL) {
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
    uv_fs_req_cleanup(req);
    return;
  }

  if (req->result == 0) {
    now = &req->statbuf;

    /* The very first poll (busy_polling == 0) caches silently; after that,
     * fire on recovery from an error or when the stat data changed. */
    if (ctx->busy_polling != 0 &&
        (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, now)))
      ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, now);

    ctx->statbuf = *now;
    ctx->busy_polling = 1;
  } else if (ctx->busy_polling != req->result) {
    /* New error code: report it once, then suppress identical repeats. */
    ctx->poll_cb(ctx->parent_handle, req->result, &ctx->statbuf, &zero_statbuf);
    ctx->busy_polling = req->result;
  }

  uv_fs_req_cleanup(req);

  /* The user's callback may have stopped the handle. */
  if (ctx->parent_handle == NULL) {
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
    return;
  }

  /* Rearm the timer, compensating for the time the stat() itself took. */
  next_timeout = ctx->interval;
  next_timeout -= (uv_now(ctx->loop) - ctx->start_time) % next_timeout;

  if (uv_timer_start(&ctx->timer_handle, timer_cb, next_timeout, 0))
    abort();
}
/* stat() completion for the fs-poll watcher (req->data handshake variant).
 * req->data points back at the handle while the request is live; a NULL
 * data pointer is the stop/close signal from uv_fs_poll_stop(). */
static void poll_cb(uv_fs_t* req) {
  uv_fs_poll_t* handle;
  uv_statbuf_t* st;
  uint64_t timeout;

  handle = req->data;

  /* Stopped or closed while the stat was in flight: just clean up. */
  if (handle == NULL)
    goto out;

  assert(req == handle->fs_req);

  if (req->result != 0) {
    /* Only report an error once per distinct errno; repeats are silent. */
    if (handle->busy_polling != -req->errorno) {
      uv__set_artificial_error(handle->loop, req->errorno);
      handle->poll_cb(handle, -1, NULL, NULL);
      handle->busy_polling = -req->errorno;
    }
    goto out;
  }

  st = req->ptr;

  /* First poll (busy_polling == 0) caches silently; afterwards fire on
   * recovery from an error or on a change in the stat data. */
  if (handle->busy_polling != 0)
    if (handle->busy_polling < 0 || !statbuf_eq(&handle->statbuf, st))
      handle->poll_cb(handle, 0, &handle->statbuf, st);

  handle->statbuf = *st;
  handle->busy_polling = 1;

out:
  uv_fs_req_cleanup(req);

  if (req->data == NULL) {
    /* Stopped or closed: uv_fs_poll_stop() left the req for us to free. */
    free(req);
    return;
  }

  /* Tell uv_fs_poll_stop() it is now safe to free the req. */
  req->data = NULL;

  /* Rearm the timer, compensating for the time spent in the stat(). */
  timeout = handle->interval;
  timeout -= (uv_now(handle->loop) - handle->start_time) % timeout;

  if (uv_timer_start(&handle->timer_handle, timer_cb, timeout, 0))
    abort();
}