@@ -236,12 +236,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
236236 if (handle -> flags & UV_HANDLE_CLOSING &&
237237 handle -> reqs_pending == 0 ) {
238238 assert (!(handle -> flags & UV_HANDLE_CLOSED ));
239-
240- if (!(handle -> flags & UV_HANDLE_TCP_SOCKET_CLOSED )) {
241- closesocket (handle -> socket );
242- handle -> socket = INVALID_SOCKET ;
243- handle -> flags |= UV_HANDLE_TCP_SOCKET_CLOSED ;
244- }
239+ assert (handle -> socket == INVALID_SOCKET );
245240
246241 if (!(handle -> flags & UV_HANDLE_CONNECTION ) && handle -> tcp .serv .accept_reqs ) {
247242 if (handle -> flags & UV_HANDLE_EMULATE_IOCP ) {
@@ -599,6 +594,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
599594 }
600595 }
601596
597+ /* If this flag is set, we already made this listen call in xfer. */
602598 if (!(handle -> flags & UV_HANDLE_SHARED_TCP_SOCKET ) &&
603599 listen (handle -> socket , backlog ) == SOCKET_ERROR ) {
604600 return WSAGetLastError ();
@@ -769,7 +765,7 @@ static int uv__is_loopback(const struct sockaddr_storage* storage) {
769765}
770766
771767// Check if Windows version is 10.0.16299 or later
772- static int uv__is_fast_loopback_fail_supported () {
768+ static int uv__is_fast_loopback_fail_supported (void ) {
773769 OSVERSIONINFOW os_info ;
774770 if (!pRtlGetVersion )
775771 return 0 ;
@@ -1160,9 +1156,14 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
11601156 }
11611157
11621158 handle -> stream .conn .write_reqs_pending -- ;
1163- if (handle -> stream .conn .shutdown_req != NULL &&
1164- handle -> stream .conn .write_reqs_pending == 0 ) {
1165- uv_want_endgame (loop , (uv_handle_t * )handle );
1159+ if (handle -> stream .conn .write_reqs_pending == 0 ) {
1160+ if (handle -> flags & UV_HANDLE_CLOSING ) {
1161+ closesocket (handle -> socket );
1162+ handle -> socket = INVALID_SOCKET ;
1163+ }
1164+ if (handle -> stream .conn .shutdown_req != NULL ) {
1165+ uv_want_endgame (loop , (uv_handle_t * )handle );
1166+ }
11661167 }
11671168
11681169 DECREASE_PENDING_REQ_COUNT (handle );
@@ -1404,9 +1405,24 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
14041405}
14051406
14061407
1407- static int uv_tcp_try_cancel_io (uv_tcp_t * tcp ) {
1408- SOCKET socket = tcp -> socket ;
1408+ static void uv_tcp_try_cancel_reqs (uv_tcp_t * tcp ) {
1409+ SOCKET socket ;
14091410 int non_ifs_lsp ;
1411+ int reading ;
1412+ int writing ;
1413+
1414+ socket = tcp -> socket ;
1415+ reading = tcp -> flags & UV_HANDLE_READING ;
1416+ writing = tcp -> stream .conn .write_reqs_pending > 0 ;
1417+ if (!reading && !writing )
1418+ return ;
1419+
1420+ /* TODO: in libuv v2, keep explicit track of write_reqs, so we can cancel
1421+ * them each explicitly with CancelIoEx (like unix). */
1422+ if (reading )
1423+ CancelIoEx ((HANDLE ) socket , & tcp -> read_req .u .io .overlapped );
1424+ if (writing )
1425+ CancelIo ((HANDLE ) socket );
14101426
14111427 /* Check if we have any non-IFS LSPs stacked on top of TCP */
14121428 non_ifs_lsp = (tcp -> flags & UV_HANDLE_IPV6 ) ? uv_tcp_non_ifs_lsp_ipv6 :
@@ -1426,82 +1442,57 @@ static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
14261442 NULL ,
14271443 NULL ) != 0 ) {
14281444 /* Failed. We can't do CancelIo. */
1429- return -1 ;
1445+ return ;
14301446 }
14311447 }
14321448
14331449 assert (socket != 0 && socket != INVALID_SOCKET );
14341450
1435- if (!CancelIo ((HANDLE ) socket )) {
1436- return GetLastError ();
1451+ if (socket != tcp -> socket ) {
1452+ if (reading )
1453+ CancelIoEx ((HANDLE ) socket , & tcp -> read_req .u .io .overlapped );
1454+ if (writing )
1455+ CancelIo ((HANDLE ) socket );
14371456 }
1438-
1439- /* It worked. */
1440- return 0 ;
14411457}
14421458
14431459
14441460void uv_tcp_close (uv_loop_t * loop , uv_tcp_t * tcp ) {
1445- int close_socket = 1 ;
1446-
1447- if (tcp -> flags & UV_HANDLE_READ_PENDING ) {
1448- /* In order for winsock to do a graceful close there must not be any any
1449- * pending reads, or the socket must be shut down for writing */
1450- if (!(tcp -> flags & UV_HANDLE_SHARED_TCP_SOCKET )) {
1451- /* Just do shutdown on non-shared sockets, which ensures graceful close. */
1452- shutdown (tcp -> socket , SD_SEND );
1453-
1454- } else if (uv_tcp_try_cancel_io (tcp ) == 0 ) {
1455- /* In case of a shared socket, we try to cancel all outstanding I/O,. If
1456- * that works, don't close the socket yet - wait for the read req to
1457- * return and close the socket in uv_tcp_endgame. */
1458- close_socket = 0 ;
1459-
1460- } else {
1461- /* When cancelling isn't possible - which could happen when an LSP is
1462- * present on an old Windows version, we will have to close the socket
1463- * with a read pending. That is not nice because trailing sent bytes may
1464- * not make it to the other side. */
1461+ if (tcp -> flags & UV_HANDLE_CONNECTION ) {
1462+ uv_tcp_try_cancel_reqs (tcp );
1463+ if (tcp -> flags & UV_HANDLE_READING ) {
1464+ uv_read_stop ((uv_stream_t * ) tcp );
14651465 }
1466-
1467- } else if ((tcp -> flags & UV_HANDLE_SHARED_TCP_SOCKET ) &&
1468- tcp -> tcp .serv .accept_reqs != NULL ) {
1469- /* Under normal circumstances closesocket() will ensure that all pending
1470- * accept reqs are canceled. However, when the socket is shared the
1471- * presence of another reference to the socket in another process will keep
1472- * the accept reqs going, so we have to ensure that these are canceled. */
1473- if (uv_tcp_try_cancel_io (tcp ) != 0 ) {
1474- /* When cancellation is not possible, there is another option: we can
1475- * close the incoming sockets, which will also cancel the accept
1476- * operations. However this is not cool because we might inadvertently
1477- * close a socket that just accepted a new connection, which will cause
1478- * the connection to be aborted. */
1466+ } else {
1467+ if (tcp -> tcp .serv .accept_reqs != NULL ) {
1468+ /* First close the incoming sockets to cancel the accept operations before
1469+ * we free their resources. */
14791470 unsigned int i ;
14801471 for (i = 0 ; i < uv_simultaneous_server_accepts ; i ++ ) {
14811472 uv_tcp_accept_t * req = & tcp -> tcp .serv .accept_reqs [i ];
1482- if (req -> accept_socket != INVALID_SOCKET &&
1483- !HasOverlappedIoCompleted (& req -> u .io .overlapped )) {
1473+ if (req -> accept_socket != INVALID_SOCKET ) {
14841474 closesocket (req -> accept_socket );
14851475 req -> accept_socket = INVALID_SOCKET ;
14861476 }
14871477 }
14881478 }
1489- }
1490-
1491- if (tcp -> flags & UV_HANDLE_READING ) {
1492- tcp -> flags &= ~UV_HANDLE_READING ;
1493- DECREASE_ACTIVE_COUNT (loop , tcp );
1479+ assert (!(tcp -> flags & UV_HANDLE_READING ));
14941480 }
14951481
14961482 if (tcp -> flags & UV_HANDLE_LISTENING ) {
14971483 tcp -> flags &= ~UV_HANDLE_LISTENING ;
14981484 DECREASE_ACTIVE_COUNT (loop , tcp );
14991485 }
15001486
1501- if (close_socket ) {
1487+ /* If any overlapped req failed to cancel, calling `closesocket` now would
1488+ * cause Win32 to send an RST packet. Try to avoid that for writes, if
1489+ * possibly applicable, by waiting to process the completion notifications
1490+ * first (which typically should be cancellations). There's not much we can
1491+ * do about canceled reads, which also will generate an RST packet. */
1492+ if (!(tcp -> flags & UV_HANDLE_CONNECTION ) ||
1493+ tcp -> stream .conn .write_reqs_pending == 0 ) {
15021494 closesocket (tcp -> socket );
15031495 tcp -> socket = INVALID_SOCKET ;
1504- tcp -> flags |= UV_HANDLE_TCP_SOCKET_CLOSED ;
15051496 }
15061497
15071498 tcp -> flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE );