// Complete one pending AcceptEx on listener ld and hand the connected socket
// to the caller.  On success returns the accepted socket and (optionally)
// copies the peer address into *addr/*addrlen.  Returns INVALID_SOCKET with
// *would_block set when no accept completion is queued, or with *error /
// ld->error set on failure.  The consumed accept_result_t is recycled by
// re-posting a fresh AcceptEx before returning.
pn_socket_t pni_iocp_end_accept(iocpdesc_t *ld, sockaddr *addr, socklen_t *addrlen, bool *would_block, pn_error_t *error)
{
  // Only valid on a listening descriptor that is still open for accepts.
  if (!is_listener(ld)) {
    set_iocp_error_status(error, PN_ERR, WSAEOPNOTSUPP);
    return INVALID_SOCKET;
  }
  if (ld->read_closed) {
    set_iocp_error_status(error, PN_ERR, WSAENOTSOCK);
    return INVALID_SOCKET;
  }
  // No completed AcceptEx queued yet: caller must wait for the next event.
  if (pn_list_size(ld->acceptor->accepts) == 0) {
    if (ld->events & PN_READABLE && ld->iocp->iocp_trace)
      iocp_log("listen socket readable with no available accept completions\n");
    *would_block = true;
    return INVALID_SOCKET;
  }

  // Pop the oldest completed accept from the queue.
  accept_result_t *result = (accept_result_t *) pn_list_get(ld->acceptor->accepts, 0);
  pn_list_del(ld->acceptor->accepts, 0, 1);
  if (!pn_list_size(ld->acceptor->accepts))
    pni_events_update(ld, ld->events & ~PN_READABLE); // No pending accepts

  pn_socket_t accept_sock;
  if (result->base.status) {
    // The overlapped accept itself failed; record the error on the listener.
    accept_sock = INVALID_SOCKET;
    pni_win32_error(ld->error, "accept failure", result->base.status);
    if (ld->iocp->iocp_trace)
      iocp_log("%s\n", pn_error_text(ld->error));
    // App never sees this socket so close it here.
    pni_iocp_begin_close(result->new_sock);
  } else {
    accept_sock = result->new_sock->socket;
    // AcceptEx special setsockopt: the accepted socket inherits listener
    // properties only after SO_UPDATE_ACCEPT_CONTEXT is applied.
    setsockopt(accept_sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, (char*)&ld->socket,
               sizeof (SOCKET));
    if (addr && addrlen && *addrlen > 0) {
      // Extract the peer address from the AcceptEx address buffer via the
      // dynamically loaded GetAcceptExSockaddrs function pointer.
      sockaddr_storage *local_addr = NULL;
      sockaddr_storage *remote_addr = NULL;
      int local_addrlen, remote_addrlen;
      LPFN_GETACCEPTEXSOCKADDRS fn = ld->acceptor->fn_get_accept_ex_sockaddrs;
      fn(result->address_buffer, 0, IOCP_SOCKADDRMAXLEN, IOCP_SOCKADDRMAXLEN,
         (SOCKADDR **) &local_addr, &local_addrlen,
         (SOCKADDR **) &remote_addr, &remote_addrlen);
      // Copy at most the caller-provided capacity; report actual size copied.
      *addrlen = pn_min(*addrlen, remote_addrlen);
      memmove(addr, remote_addr, *addrlen);
    }
  }

  if (accept_sock != INVALID_SOCKET) {
    // Connected.
    result->new_sock->read_closed = false;
    result->new_sock->write_closed = false;
  }

  // Done with the completion result, so reuse it
  result->new_sock = NULL;
  begin_accept(ld->acceptor, result);
  return accept_sock;
}
ssize_t pni_iocp_begin_write(iocpdesc_t *iocpd, const void *buf, size_t len, bool *would_block, pn_error_t *error) { if (len == 0) return 0; *would_block = false; if (is_listener(iocpd)) { set_iocp_error_status(error, PN_ERR, WSAEOPNOTSUPP); return INVALID_SOCKET; } if (iocpd->closing) { set_iocp_error_status(error, PN_ERR, WSAESHUTDOWN); return SOCKET_ERROR; } if (iocpd->write_closed) { assert(pn_error_code(iocpd->error)); pn_error_copy(error, iocpd->error); if (iocpd->iocp->iocp_trace) iocp_log("write error: %s\n", pn_error_text(error)); return SOCKET_ERROR; } if (len == 0) return 0; if (!(iocpd->events & PN_WRITABLE)) { *would_block = true; return SOCKET_ERROR; } size_t written = 0; size_t requested = len; const char *outgoing = (const char *) buf; size_t available = pni_write_pipeline_reserve(iocpd->pipeline, len); if (!available) { *would_block = true; return SOCKET_ERROR; } for (size_t wr_count = 0; wr_count < available; wr_count++) { write_result_t *result = pni_write_pipeline_next(iocpd->pipeline); assert(result); result->base.iocpd = iocpd; ssize_t actual_len = pn_min(len, result->buffer.size); result->requested = actual_len; memmove((void *)result->buffer.start, outgoing, actual_len); outgoing += actual_len; written += actual_len; len -= actual_len; int werror = submit_write(result, result->buffer.start, actual_len); if (werror && WSAGetLastError() != ERROR_IO_PENDING) { pni_write_pipeline_return(iocpd->pipeline, result); iocpdesc_fail(iocpd, WSAGetLastError(), "overlapped send"); return SOCKET_ERROR; } iocpd->ops_in_progress++; } if (!pni_write_pipeline_writable(iocpd->pipeline)) pni_events_update(iocpd, iocpd->events & ~PN_WRITABLE); return written; }
// Attach iocpd to the completion port (idempotent) and kick off its
// initial async activity: AcceptEx for listeners, otherwise the first
// zero-byte read plus an immediate writable event.
void pni_iocpdesc_start(iocpdesc_t *iocpd)
{
  if (iocpd->bound)
    return;  // already started
  bind_to_completion_port(iocpd);
  if (!is_listener(iocpd)) {
    // Connected socket: trim the kernel send buffer, mark writable,
    // and post the initial read.
    release_sys_sendbuf(iocpd->socket);
    pni_events_update(iocpd, PN_WRITABLE);
    start_reading(iocpd);
    return;
  }
  // Listener: post the initial accept operation(s).
  begin_accept(iocpd->acceptor, NULL);
}
// Record a fatal error on iocpd and move it to a fully-errored state:
// both directions closed, poll_error raised, events cleared.
static void iocpdesc_fail(iocpdesc_t *iocpd, HRESULT status, const char* text)
{
  pni_win32_error(iocpd->error, text, status);
  if (iocpd->iocp->iocp_trace)
    iocp_log("connection terminated: %s\n", pn_error_text(iocpd->error));

  // A connected socket whose write side is still open and idle gets a
  // graceful shutdown before the closed flags are set below.
  bool want_shutdown = !is_listener(iocpd) && !iocpd->write_closed
                       && !pni_write_pipeline_size(iocpd->pipeline);
  if (want_shutdown)
    iocp_shutdown(iocpd);

  iocpd->write_closed = true;
  iocpd->read_closed = true;
  iocpd->poll_error = true;
  pni_events_update(iocpd, iocpd->events & ~(PN_READABLE | PN_WRITABLE));
}
// Close every descriptor still registered in the iocp map at teardown time.
// Internal descriptors become zombies (pni_iocp_begin_close); externally
// owned descriptors are removed from the map and, if they still have
// outstanding overlapped ops, collected into the returned list so their
// iocp_result_t structs stay alive until completion.  Caller owns the
// returned list.
// NOTE(review): this deletes map entries inside pn_hash_head/pn_hash_next
// loops — presumably pn_hash iteration tolerates deletion of the current
// entry; verify against the pn_hash implementation.
static pn_list_t *iocp_map_close_all(iocp_t *iocp)
{
  // Zombify stragglers, i.e. no pn_close() from the application.
  pn_list_t *externals = pn_list(PN_OBJECT, 0);

  // Pass 1: listeners only, so their pending accepts can complete/cancel
  // before the connected sockets are handled.
  for (pn_handle_t entry = pn_hash_head(iocp->iocpdesc_map); entry;
       entry = pn_hash_next(iocp->iocpdesc_map, entry)) {
    iocpdesc_t *iocpd = (iocpdesc_t *) pn_hash_value(iocp->iocpdesc_map, entry);
    // Just listeners first.
    if (is_listener(iocpd)) {
      if (iocpd->external) {
        // Owned by application, just keep a temporary reference to it.
        // iocp_result_t structs must not be free'd until completed or
        // the completion port is closed.
        if (iocpd->ops_in_progress)
          pn_list_add(externals, iocpd);
        pni_iocpdesc_map_del(iocp, iocpd->socket);
      } else {
        // Make it a zombie.
        pni_iocp_begin_close(iocpd);
      }
    }
  }
  // Let any completions triggered by the listener closes drain first.
  pni_iocp_drain_completions(iocp);

  // Pass 2: everything that remains (connected sockets).
  for (pn_handle_t entry = pn_hash_head(iocp->iocpdesc_map); entry;
       entry = pn_hash_next(iocp->iocpdesc_map, entry)) {
    iocpdesc_t *iocpd = (iocpdesc_t *) pn_hash_value(iocp->iocpdesc_map, entry);
    if (iocpd->external) {
      iocpd->read_closed = true;   // Do not consume from read side
      iocpd->write_closed = true;  // Do not shutdown write side
      if (iocpd->ops_in_progress)
        pn_list_add(externals, iocpd);
      pni_iocpdesc_map_del(iocp, iocpd->socket);
    } else {
      // Make it a zombie.
      pni_iocp_begin_close(iocpd);
    }
  }
  return externals;
}
ssize_t pni_iocp_recv(iocpdesc_t *iocpd, void *buf, size_t size, bool *would_block, pn_error_t *error) { if (size == 0) return 0; *would_block = false; if (is_listener(iocpd)) { set_iocp_error_status(error, PN_ERR, WSAEOPNOTSUPP); return SOCKET_ERROR; } if (iocpd->closing) { // Previous call to pn_close() set_iocp_error_status(error, PN_ERR, WSAESHUTDOWN); return SOCKET_ERROR; } if (iocpd->read_closed) { if (pn_error_code(iocpd->error)) pn_error_copy(error, iocpd->error); else set_iocp_error_status(error, PN_ERR, WSAENOTCONN); return SOCKET_ERROR; } int count = recv(iocpd->socket, (char *) buf, size, 0); if (count > 0) { pni_events_update(iocpd, iocpd->events & ~PN_READABLE); begin_zero_byte_read(iocpd); return (ssize_t) count; } else if (count == 0) { iocpd->read_closed = true; return 0; } if (WSAGetLastError() == WSAEWOULDBLOCK) *would_block = true; else { set_iocp_error_status(error, PN_ERR, WSAGetLastError()); iocpd->read_closed = true; } return SOCKET_ERROR; }
// Start closing iocpd.  Listeners are closed immediately (which cancels
// their pending async accepts); connected sockets continue async activity
// looking for graceful close confirmation or timeout.  In both cases the
// descriptor joins the zombie list and leaves the map — which may free it.
void pni_iocp_begin_close(iocpdesc_t *iocpd)
{
  assert (!iocpd->closing);
  const pn_socket_t old_sock = iocpd->socket;
  iocpd->closing = true;

  if (is_listener(iocpd)) {
    // Close the socket now; outstanding accepts will complete with errors
    // and the zombie can die once they are all consumed.
    iocpd->socket = INVALID_SOCKET;
    iocpd->read_closed = true;
    iocpd->write_closed = true;
    closesocket(old_sock);
  } else if (!iocpd->write_closed && !write_in_progress(iocpd)) {
    // Idle write side: begin the graceful shutdown handshake.
    iocp_shutdown(iocpd);
  }

  zombie_list_add(iocpd);
  pni_iocpdesc_map_del(iocpd->iocp, old_sock); // may pn_free *iocpd
}