Example #1
void internal::Peer::LoopData::walkAndCountHandlesCb(
        uv_handle_t* h,
        void* opaqueCountHandle) {

    CountHandle* countHandle = static_cast<CountHandle*>(opaqueCountHandle);

    if (reinterpret_cast<uv_handle_t const*>(&countHandle->loopData->prepare_) == h) {
        return;
    }

    ++countHandle->counter;

    LOG(INFO)
        << countHandle->loopData
        << " still waiting on pending handle of type "
        << getUvHandleType(h)
        << " @"
        << reinterpret_cast<void const *>(h)
        << " which is "
        << (uv_is_closing(h) ? "" : "not ")
        << "closing"
        ;

    /* We assert() here, but everything will be cleaned up in production code. */
    assert(uv_is_closing(h));

}
Example #2
static void
Handle_tp_dealloc(Handle *self)
{
    ASSERT(self->uv_handle);
    if (self->initialized && !uv_is_closing(self->uv_handle)) {
        uv_close(self->uv_handle, on_handle_dealloc_close);
        ASSERT(uv_is_closing(self->uv_handle));
        /* resurrect the Python object until the close callback is called */
        Py_INCREF(self);
        resurrect_object((PyObject *)self);
        return;
    } else {
        /* There are a few reasons why the code will take this path:
         *   - A subclass of a handle didn't call its parent's __init__.
         *   - A closed handle is deallocated. The refcount is increased in
         *     close(), so it's guaranteed that if we arrived here and the
         *     user had called close(), the callback was already executed.
         *   - A handle goes out of scope, is closed here in tp_dealloc and
         *     resurrected; once it's deallocated again it takes this path
         *     because the handle is now closed.
         */
        ;
    }
    if (self->weakreflist != NULL) {
        PyObject_ClearWeakRefs((PyObject *)self);
    }
    Py_TYPE(self)->tp_clear((PyObject *)self);
    Py_TYPE(self)->tp_free((PyObject *)self);
}
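The resurrection trick above is the Python-binding flavor of a general libuv rule: a handle's memory must stay valid until its close callback has run. A minimal C sketch of the same idea (dispose_timer and on_closed are hypothetical names):

#include <stdlib.h>
#include <uv.h>

static void on_closed(uv_handle_t* handle) {
    free(handle);  /* libuv is done with the handle only at this point */
}

static void dispose_timer(uv_timer_t* timer) {
    /* uv_close() may be issued at most once per handle */
    if (!uv_is_closing((uv_handle_t*) timer))
        uv_close((uv_handle_t*) timer, on_closed);
}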
Example #3
int uv_custom_close(uv_poll_t *req) {
	struct uv_custom_poll_t *custom_poll_data = req->data;
	struct iobuf_t *send_io = NULL;

	if(uv_is_closing((uv_handle_t *)req)) {
		return -1;
	}

	/* custom_poll_data is dereferenced unconditionally below, so bail
	 * out early when the data pointer is NULL. */
	if(custom_poll_data == NULL) {
		return -1;
	}

	custom_poll_data->doclose = 1;

	send_io = &custom_poll_data->send_iobuf;

	if(custom_poll_data->doclose == 1 && send_io->len == 0) {
		custom_poll_data->doclose = 2;
		if(custom_poll_data->close_cb != NULL) {
			custom_poll_data->close_cb(req);
		}
		if(!uv_is_closing((uv_handle_t *)req)) {
			uv_poll_stop(req);
		}
	} else if(send_io->len > 0) {
		uv_custom_write(req);
	}

	return 0;
}
Example #4
static void connect_cb(uv_connect_t* req, int status) {
  int r;

  ASSERT(req == &connect_req);
  ASSERT(status == 0);

  r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
  ASSERT(r == 0);
  ASSERT(!uv_is_closing((uv_handle_t*) req->handle));
  uv_close((uv_handle_t*) req->handle, close_cb);
  ASSERT(uv_is_closing((uv_handle_t*) req->handle));

  connect_cb_called++;
}
Example #5
static void idle_1_cb(uv_idle_t* handle, int status) {
  int r;

  LOG("IDLE_1_CB\n");

  ASSERT(handle != NULL);
  ASSERT(status == 0);

  ASSERT(idles_1_active > 0);

  /* Init idle_2 and make it active */
  if (!idle_2_is_active && !uv_is_closing((uv_handle_t*)&idle_2_handle)) {
    r = uv_idle_init(uv_default_loop(), &idle_2_handle);
    ASSERT(r == 0);
    r = uv_idle_start(&idle_2_handle, idle_2_cb);
    ASSERT(r == 0);
    idle_2_is_active = 1;
    idle_2_cb_started++;
  }

  idle_1_cb_called++;

  if (idle_1_cb_called % 5 == 0) {
    r = uv_idle_stop((uv_idle_t*)handle);
    ASSERT(r == 0);
    idles_1_active--;
  }
}
Example #6
static void
as_uv_connected(uv_connect_t* req, int status)
{
	if (uv_is_closing((uv_handle_t*)req->handle)) {
		return;
	}

	as_event_command* cmd = req->data;

	if (status == 0) {
		if (cmd->cluster->user) {
			as_uv_auth_write_start(cmd, req->handle);
		}
		else {
			as_uv_command_write_start(cmd, req->handle);
		}
	}
	else if (status != UV_ECANCELED) {
		as_node* node = cmd->node;
		as_address* primary = as_vector_get(&node->addresses, node->address_index);
		
		as_error err;
		as_error_update(&err, AEROSPIKE_ERR_ASYNC_CONNECTION, "Failed to connect: %s %s:%d",
						node->name, primary->name, (int)cf_swap_from_be16(primary->addr.sin_port));
		as_uv_connect_error(cmd, &err);
	}
}
Example #7
static void
as_uv_auth_write_complete(uv_write_t* req, int status)
{
	if (uv_is_closing((uv_handle_t*)req->handle)) {
		return;
	}

	as_event_command* cmd = req->data;
	
	if (status == 0) {
		as_event_set_auth_read_header(cmd);
		status = uv_read_start(req->handle, as_uv_auth_command_buffer, as_uv_auth_read);
		
		if (status) {
			as_error err;
			as_error_update(&err, AEROSPIKE_ERR_ASYNC_CONNECTION, "Authenticate uv_read_start failed: %s", uv_strerror(status));
			as_event_socket_error(cmd, &err);
		}
	}
	else if (status != UV_ECANCELED) {
		as_error err;
		as_error_update(&err, AEROSPIKE_ERR_ASYNC_CONNECTION, "Authenticate socket write failed: %s", uv_strerror(status));
		as_event_socket_error(cmd, &err);
	}
}
Example #8
static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
{
	uv_loop_t *loop = handle->loop;
	struct worker_ctx *worker = loop->data;

	/* Check for originator connection close. */
	if (nread <= 0) {
		if (handle->data) {
			worker_exec(worker, (uv_handle_t *)handle, NULL, NULL);
		}
		if (!uv_is_closing((uv_handle_t *)handle)) {
			uv_close((uv_handle_t *)handle, handle_free);
		}
		return;
	}
	
	int ret = worker_process_tcp(worker, (uv_handle_t *)handle, (const uint8_t *)buf->base, nread);
	if (ret == 0) {
		/* Push - pull: stop reading from this handle until
		 * the task is finished. Since the handle keeps no record of its
		 * pending tasks, it might otherwise be freed before the task
		 * finishes, leading to various errors. */
		uv_unref((uv_handle_t *)handle);
		io_stop_read((uv_handle_t *)handle);
	}
	mp_flush(worker->pkt_pool.ctx);
}
Example #9
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  uv_os_fd_t fd_out;

  switch (handle->type) {
  case UV_TCP:
    fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket;
    break;

  case UV_NAMED_PIPE:
    fd_out = ((uv_pipe_t*) handle)->handle;
    break;

  case UV_TTY:
    fd_out = ((uv_tty_t*) handle)->handle;
    break;

  case UV_UDP:
    fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket;
    break;

  case UV_POLL:
    fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}
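A practical consequence of the ordering above is that uv_fileno() starts reporting UV_EBADF as soon as the handle begins closing. A hedged usage sketch (log_fd_state is a hypothetical helper):

#include <stdio.h>
#include <uv.h>

static void log_fd_state(uv_handle_t* handle) {
    uv_os_fd_t fd;
    int rc = uv_fileno(handle, &fd);
    if (rc == 0)
        printf("descriptor still available\n");
    else
        printf("uv_fileno: %s\n", uv_strerror(rc));  /* UV_EBADF once closing */
}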
Example #10
static int luv_is_closing(lua_State* L) {
  uv_handle_t* handle = luv_check_handle(L, 1);
  int ret = uv_is_closing(handle);
  if (ret < 0) return luv_error(L, ret);
  lua_pushboolean(L, ret);
  return 1;
}
Example #11
DLLEXPORT void jl_close_uv(uv_handle_t *handle)
{
    if (handle->type==UV_TTY)
        uv_tty_set_mode((uv_tty_t*)handle,0);

    if ((handle->type == UV_NAMED_PIPE || handle->type == UV_TCP) &&
        uv_is_writable((uv_stream_t*)handle)) {
        uv_shutdown_t *req = (uv_shutdown_t*)malloc(sizeof(uv_shutdown_t));
        req->data = 0;
        /*
         * We are explicitly ignoring the error here for the following reason:
         * There are only two scenarios in which this returns an error:
         * a) In case the stream is already shut down, in which case we're likely
         *    in the process of closing this stream (since there's no other call to
         *    uv_shutdown).
         * b) In case the stream is already closed, in which case uv_close would
         *    cause an assertion failure.
         */
        uv_shutdown(req, (uv_stream_t*)handle, &jl_uv_shutdownCallback);
    }
    else if (handle->type == UV_FILE) {
        uv_fs_t req;
        jl_uv_file_t *fd = (jl_uv_file_t*)handle;
        if (fd->file != -1) {
            uv_fs_close(handle->loop, &req, fd->file, NULL);
            fd->file = -1;
        }
    }
    else if (!uv_is_closing((uv_handle_t*)handle)) {
        uv_close(handle,&jl_uv_closeHandle);
    }
}
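The comment above relies on an ordering rule: a writable stream is flushed with uv_shutdown() and only closed from the shutdown callback. A minimal sketch of that pattern, assuming hypothetical callback names:

#include <stdlib.h>
#include <uv.h>

static void on_closed(uv_handle_t* handle) {
    (void) handle;  /* safe point to reclaim handle memory */
}

static void on_shutdown(uv_shutdown_t* req, int status) {
    (void) status;  /* even on error, closing is the only follow-up */
    if (!uv_is_closing((uv_handle_t*) req->handle))
        uv_close((uv_handle_t*) req->handle, on_closed);
    free(req);
}

static void graceful_close(uv_stream_t* stream) {
    uv_shutdown_t* req = malloc(sizeof(*req));
    if (req == NULL || uv_shutdown(req, stream, on_shutdown) != 0) {
        free(req);  /* the request was never queued, so it is ours to release */
        if (!uv_is_closing((uv_handle_t*) stream))
            uv_close((uv_handle_t*) stream, on_closed);
    }
}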
Example #12
DLLEXPORT void jl_close_uv(uv_handle_t *handle)
{
    if (!handle || uv_is_closing(handle))
       return;
    if (handle->type==UV_TTY)
        uv_tty_set_mode((uv_tty_t*)handle,0);

    if ((handle->type == UV_NAMED_PIPE || handle->type == UV_TCP) && uv_is_writable((uv_stream_t*)handle)) {
        // Make sure that the stream has not already been marked closed in Julia.
        // A double shutdown would cause the process to hang on exit.
        JULIA_CB(isopen, handle->data, 0);
        if (!jl_is_int32(ret)) {
            jl_error("jl_close_uv: _uv_hook_isopen must return an int32.");
        }
        if (!jl_unbox_int32(ret)){
            return;
        }

        uv_shutdown_t *req = malloc(sizeof(uv_shutdown_t));
        int err = uv_shutdown(req, (uv_stream_t*)handle, &shutdownCallback);
        if (err != 0) {
            printf("shutdown err: %s\n", uv_strerror(uv_last_error(jl_global_event_loop())));
            uv_close(handle, &closeHandle);
        }
    }
    else {
        uv_close(handle,&closeHandle);
    }
}
Example #13
static void after_write_cb(uv_write_t* req, int status)
{
	server_ctx *ctx = (server_ctx *)req->handle->data;
	if (status) {
		if (uv_last_error(req->handle->loop).code != UV_ECANCELED) {
			if ((uv_tcp_t *)req->handle == &ctx->client) {
				HANDLE_CLOSE((uv_handle_t *)req->handle, client_established_close_cb);
			} else {
				HANDLE_CLOSE((uv_handle_t *)req->handle, remote_established_close_cb);
			}
		}
		free(req->data); // Free buffer
		free(req);
		return;
	}

	if ((uv_tcp_t *)req->handle == &ctx->client && !uv_is_closing((uv_handle_t *)(void *)&ctx->remote)) {
		if (ctx->buffer_len <= MAX_PENDING_PER_CONN) {
			int n = uv_read_start((uv_stream_t *)(void *)&ctx->remote, established_alloc_cb, remote_established_read_cb);
			if (n) {
				SHOW_UV_ERROR(ctx->client.loop);
				free(req->data); // Free buffer
				free(req);
				return;
			}
		}
		ctx->buffer_len--;
	}

	free(req->data); // Free buffer
	free(req);
}
Example #14
void uv_udp_handle_t::write(request_udp_write_t& request)
{
	int32_t err = UV_UNKNOWN;
	if (request.m_shared_write) {
		uv_udp_handle_t* handle = (uv_udp_handle_t*)singleton_ref(network_t).get_shared_write_socket(request.m_socket_fd);
		if (handle != NULL) {
			uint32_t length = request.m_length > 0 ? request.m_length : (uint32_t)buffer_data_length(request.m_buffer);
			if (uv_is_closing((uv_handle_t*)handle)) {
				err = NL_EUDPSCLOSED;
			} else {
				err = handle->write_handle(request);
			}
		} else {
			err = NL_EUDPNOWSHARED;
		}
	} else {
		err = request.m_socket_handle->write_handle(request);
	}
	if (err != UV_OK) { /* a write error occurred */
		if (request.m_length > 0) {
			nl_free((void*)request.m_string);
		} else {
			buffer_release(request.m_buffer);
		}
		if (request.m_session != LUA_REFNIL) {
			singleton_ref(node_lua_t).context_send(request.m_source, 0, request.m_session, RESPONSE_UDP_WRITE, (nl_err_code)err);
		}
	}
}
Example #15
/* Fully close a loop */
void TCPClient::CloseWalkCB(uv_handle_t* handle, void* arg)
{
    TCPClient* theclass = (TCPClient*)arg;
    if (!uv_is_closing(handle)) {
        uv_close(handle, AfterClientClose);
    }
}
Example #16
static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
{
	uv_loop_t *loop = handle->loop;
	struct session *s = handle->data;
	struct worker_ctx *worker = loop->data;
	/* TCP pipelining is rather complicated and requires cooperation from the worker
	 * so the whole message reassembly and demuxing logic is inside worker */
	int ret = 0;
	if (s->has_tls) {
		ret = tls_process(worker, handle, (const uint8_t *)buf->base, nread);
	} else {
		ret = worker_process_tcp(worker, handle, (const uint8_t *)buf->base, nread);
	}
	if (ret < 0) {
		worker_end_tcp(worker, (uv_handle_t *)handle);
		/* Exceeded per-connection quota for outstanding requests:
		 * stop reading from the stream and close after the last message is processed. */
		if (!s->outgoing && !uv_is_closing((uv_handle_t *)&s->timeout)) {
			uv_timer_stop(&s->timeout);
			if (s->tasks.len == 0) {
				uv_close((uv_handle_t *)&s->timeout, tcp_timeout);
			} else { /* If there are tasks running, defer until they finish. */
				uv_timer_start(&s->timeout, tcp_timeout_trigger, 1, KR_CONN_RTT_MAX/2);
			}
		}
	/* Connection spawned more than one request, reset its deadline for next query. */
	} else if (ret > 0 && !s->outgoing) {
		uv_timer_again(&s->timeout);
	}
	mp_flush(worker->pkt_pool.ctx);
}
Example #17
static int couv_is_closing(lua_State *L) {
  uv_handle_t *handle;

  handle = couvL_checkudataclass(L, 1, COUV_HANDLE_MTBL_NAME);
  lua_pushboolean(L, uv_is_closing(handle));
  return 1;
}
Example #18
void ListeningPeer::destroy(bool now) {

    VLOG(1)
        << this
    ;
 // Peer::destroy();

    assert(!destroying_);
    if (destroying_) {
        LOG(WARNING)
            << this
            << " Double destroy() detected"
        ;

        return;
    }

    destroying_ = 1;

    if (down()) {
        return;
    }

    if (connected_) {
        connected_ = 0;
        if (!uv_is_closing((uv_handle_t*)getHandle())) {
            uv_close((uv_handle_t*)getHandle(), on_close);
        }
    }
}
Example #19
	// shutdown shuts down the Node's event loop and cleans up resources.
	void
	shutdown()
	{
		uv_async_init(m_uv_loop.get(), &m_async, [](uv_async_t* handle) {
			auto self = (Node*)(handle->data);
			auto timer = self->m_timer.get();
			uv_timer_stop(timer);

			uv_close((uv_handle_t*)timer, [](uv_handle_t* handle) {
				auto self = (Node*)(handle->data);
				auto tcp = self->m_tcp.get();
				auto loop = self->m_uv_loop.get();

				self->m_peer_registry = nullptr;

				uv_close((uv_handle_t*)tcp, [](uv_handle_t* handle) {
					auto self = (Node*)(handle->data);
					auto loop = self->m_uv_loop.get();

					uv_walk(loop, [](uv_handle_t* handle, void* arg) {
						if (uv_is_closing(handle) == 0) {
							uv_close(handle, [](uv_handle_t* h){});
						}
					}, nullptr);
				});
			});
		});
		m_async.data = this;
		uv_async_send(&m_async);
	}
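The nested closes above end in the standard loop-teardown idiom: walk every remaining handle, close whatever is not already closing, then let the loop run so the close callbacks can fire. A standalone sketch of that idiom (drain_loop is a hypothetical name):

#include <uv.h>

static void close_walk_cb(uv_handle_t* handle, void* arg) {
    (void) arg;
    if (!uv_is_closing(handle))
        uv_close(handle, NULL);  /* a NULL close callback is permitted */
}

static void drain_loop(uv_loop_t* loop) {
    uv_walk(loop, close_walk_cb, NULL);
    uv_run(loop, UV_RUN_DEFAULT);  /* runs until the close callbacks complete */
    uv_loop_close(loop);           /* succeeds once no handles remain */
}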
Example #20
static void countCallback(uv_handle_t* event, void* vEventCount)
{
    int* eventCount = (int*) vEventCount;
    if (!uv_is_closing(event)) {
        *eventCount = *eventCount + 1;
    }
}
Example #21
static void connect_to_remote_cb(uv_connect_t* req, int status)
{
	server_ctx *ctx = (server_ctx *)req->data;
	if (status) {
		if (uv_last_error(req->handle->loop).code != UV_ECANCELED) {
			SHOW_UV_ERROR(ctx->client.loop);
			HANDLE_CLOSE((uv_handle_t*)(void *)&ctx->remote, remote_established_close_cb);
			free(ctx->handshake_buffer);
			free(req);
		}
		return;
	}

	free(req);

	LOGCONN(&ctx->remote, "Connected to %s");

	uv_buf_t buf;
	buf.base = (char *)ctx->handshake_buffer;
	buf.len = HANDSHAKE_BUFFER_SIZE;

	shadow_encrypt((uint8_t *)buf.base, encrypt_table, ctx->buffer_len);

	client_established_read_cb((uv_stream_t *)(void *)&ctx->client, ctx->buffer_len, buf); // Deal with remaining data, only once
	ctx->handshake_buffer = NULL;
	ctx->buffer_len = 0;

	if (uv_is_closing((uv_handle_t *)(void *)&ctx->remote) || uv_is_closing((uv_handle_t *)(void *)&ctx->client)) {
		LOGE("Connection failed, remote or client already closed");
		return;
	}
	
	int n = uv_read_start((uv_stream_t *)(void *)&ctx->client, established_alloc_cb, client_established_read_cb);
	if (n) {
		SHOW_UV_ERROR(ctx->client.loop);
		HANDLE_CLOSE((uv_handle_t*)(void *)&ctx->remote, remote_established_close_cb);
		return;
	}
	n = uv_read_start((uv_stream_t *)(void *)&ctx->remote, established_alloc_cb, remote_established_read_cb);
	if (n) {
		SHOW_UV_ERROR(ctx->client.loop);
		HANDLE_CLOSE((uv_handle_t*)(void *)&ctx->remote, remote_established_close_cb);
		return;
	}
}
Example #22
static void
close_client(struct client_context *client) {
    uv_close((uv_handle_t *)client->timer, timer_close_cb);
    if (!uv_is_closing((uv_handle_t *)&client->server_handle)) {
        uv_close((uv_handle_t *)&client->server_handle, client_close_cb);
    } else {
        free(client);
    }
}
Example #23
static void send_recv_start() {
  int r;
  ASSERT(1 == uv_is_readable((uv_stream_t*)&ctx2.channel));
  ASSERT(1 == uv_is_writable((uv_stream_t*)&ctx2.channel));
  ASSERT(0 == uv_is_closing((uv_handle_t*)&ctx2.channel));

  r = uv_read_start((uv_stream_t*)&ctx2.channel, alloc_cb, read_cb);
  ASSERT(r == 0);
}
Example #24
static PyObject *
Handle_closed_get(Handle *self, void *closure)
{
    UNUSED_ARG(closure);

    RAISE_IF_HANDLE_NOT_INITIALIZED(self, NULL);

    return PyBool_FromLong((long)uv_is_closing(self->uv_handle));
}
Example #25
void on_close(evt_tls_t *tls, int status)
{
    assert(1 == status);
    uv_tls_t *ut = (uv_tls_t*)tls->data;
    assert( ut->tls_cls_cb != NULL);

    evt_tls_free(tls);
    if ( !uv_is_closing((uv_handle_t*)&(ut->skt)))
        uv_close( (uv_handle_t*)&(ut->skt), ut->tls_cls_cb);
}
Example #26
void on_write(uv_write_t *req, int status) {
  guava_response_t *resp = (guava_response_t *)req->data;
  guava_conn_t *conn = resp->conn;
  guava_response_free(resp);
  if (!uv_is_closing((uv_handle_t *)&conn->stream)) {
    if (!conn->keep_alive) {
      uv_close((uv_handle_t *)&conn->stream, guava_server_on_close);
    }
  }
}
Example #27
static void
as_uv_auth_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf)
{
	if (uv_is_closing((uv_handle_t*)stream)) {
		return;
	}

	as_event_command* cmd = as_uv_auth_get_command(stream->data);
		
	if (nread < 0) {
		uv_read_stop(stream);
		as_error err;
		as_error_update(&err, AEROSPIKE_ERR_ASYNC_CONNECTION, "Authenticate socket read failed: %zd", nread);
		as_event_socket_error(cmd, &err);
		return;
	}
	
	cmd->pos += nread;
	
	if (cmd->pos < cmd->len) {
		// Read not finished.
		return;
	}
	
	if (cmd->state == AS_ASYNC_STATE_AUTH_READ_HEADER) {
		as_event_set_auth_parse_header(cmd);
		
		if (cmd->len > cmd->capacity) {
			uv_read_stop(stream);
			as_error err;
			as_error_update(&err, AEROSPIKE_ERR_CLIENT, "Authenticate response size is corrupt: %u", cmd->auth_len);
			as_event_socket_error(cmd, &err);
			return;
		}
		return;
	}
	
	// Done reading authentication data.
	uv_read_stop(stream);
	
	// Parse authentication response.
	cmd->len -= cmd->auth_len;
	uint8_t code = cmd->buf[cmd->len + AS_ASYNC_AUTH_RETURN_CODE];
	
	if (code) {
		// Can't authenticate socket, so must close it.
		as_error err;
		as_error_update(&err, code, "Authentication failed: %s", as_error_string(code));
		as_event_socket_error(cmd, &err);
		return;
	}
	
	cmd->pos = 0;
	as_uv_command_write_start(cmd, stream);
}
Example #28
void uv_udp_handle_t::set_udp_wshared(bool enable)
{
	if (!uv_is_closing((uv_handle_t*)(m_handle))) {
		int64_t fd = SOCKET_MAKE_FD(m_lua_ref, m_source);
		if (enable) {
			singleton_ref(network_t).put_shared_write_socket(fd, this);
		} else {
			singleton_ref(network_t).pop_shared_write_socket(fd);
		}
	}
}
Example #29
int luv_close (lua_State* L) {
    uv_handle_t* handle = luv_checkudata(L, 1, "handle");
    /*  printf("close   \tlhandle=%p handle=%p\n", handle->data, handle);*/
    if (uv_is_closing(handle)) {
        fprintf(stderr, "WARNING: Handle already closing \tlhandle=%p handle=%p\n", handle->data, handle);
        return 0;
    }
    uv_close(handle, luv_on_close);
    luv_handle_ref(L, handle->data, 1);
    return 0;
}
Example #30
static int luv_close(lua_State* L) {
  uv_handle_t* handle = luv_check_handle(L, 1);
  if (uv_is_closing(handle)) {
    luaL_error(L, "handle %p is already closing", handle);
  }
  if (!lua_isnoneornil(L, 2)) {
    luv_check_callback(L, handle->data, LUV_CLOSED, 2);
  }
  uv_close(handle, luv_close_cb);
  return 0;
}
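Both Lua bindings guard close() the same way because libuv aborts the process if uv_close() is called on a handle that is already closing; the C idiom they wrap reduces to something like this (close_once is a hypothetical name):

#include <uv.h>

/* Issue uv_close() exactly once; a second call becomes a harmless no-op
 * instead of an abort inside libuv. Returns 0 if the close was issued. */
static int close_once(uv_handle_t* handle, uv_close_cb cb) {
    if (uv_is_closing(handle))
        return -1;  /* already closing or closed */
    uv_close(handle, cb);
    return 0;
}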