/****************************************************************************
 * net/tcp/tcp_close.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#ifdef CONFIG_NET_TCP

#include <errno.h>
#include <debug.h>
#include <assert.h>

#include <nuttx/semaphore.h>
#include <nuttx/net/net.h>
#include <nuttx/net/netdev.h>
#include <nuttx/net/tcp.h>

#include "netdev/netdev.h"
#include "devif/devif.h"
#include "tcp/tcp.h"
#include "socket/socket.h"

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: tcp_close_work
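 *
 * Description:
 *   Deferred work callback that finalizes a closed connection:  it stops
 *   the network monitor and frees the connection structure once no
 *   references to it remain.
 *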
 ****************************************************************************/

static void tcp_close_work(FAR void *param)
{
  FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)param;

  net_lock();

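  /* Only free the connection if no socket still holds a reference to it */
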
  if (conn && conn->crefs == 0)
    {
      /* Stop the network monitor for all sockets */

      tcp_stop_monitor(conn, TCP_CLOSE);
      tcp_free(conn);
    }

  net_unlock();
}

/****************************************************************************
 * Name: tcp_close_eventhandler
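 *
 * Description:
 *   Event handler installed on the connection while a close is in
 *   progress.  It waits until all outstanding TX data has been ACKed,
 *   reports TCP_CLOSE when the close may proceed, and on a disconnect
 *   event frees the callback and queues tcp_close_work() to release the
 *   connection.
 *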
 ****************************************************************************/

static uint16_t tcp_close_eventhandler(FAR struct net_driver_s *dev,
                                       FAR void *pvpriv, uint16_t flags)
{
  FAR struct tcp_conn_s *conn = pvpriv;

  ninfo("flags: %04x\n", flags);

  /* TCP_DISCONN_EVENTS:
   *   TCP_CLOSE:    The remote host has closed the connection
   *   TCP_ABORT:    The remote host has aborted the connection
   *   TCP_TIMEDOUT: The remote did not respond, the connection timed out
   *   NETDEV_DOWN:  The network device went down
   */

  if ((flags & TCP_DISCONN_EVENTS) != 0)
    {
      /* The disconnection is complete. Wake up the waiting thread with an
       * appropriate result. Success is returned in these cases:
       *
       * * TCP_CLOSE indicates normal successful closure. The TCP_CLOSE
       *   event is sent when the remote ACKs the outgoing FIN in the
       *   FIN_WAIT_1 state. That is the appropriate time for the
       *   application to close the socket.
       *
       *   NOTE: The underlying connection, however, will persist, waiting
       *   for the FIN to be returned by the remote in the TIME_WAIT state.
       *
       * * TCP_ABORT is less likely but still means that the socket was
       *   closed, albeit abnormally due to a RST from the remote.
       *
       * * TCP_TIMEDOUT would be reported in this context if there is no
       *   ACK response to the FIN in the FIN_WAIT_2 state. The socket will
       *   again be closed abnormally.
       *
       *   This is the only true error case.
       *
       * * NETDEV_DOWN would indicate that the network went down before the
       *   close completed. A non-standard ENODEV error will be returned
       *   in this case. The socket will be left in a limbo state if the
       *   network is taken down but should recover later when the
       *   NETWORK_DOWN event is processed further.
       */

      goto end_wait;
    }

  /* Check if all outstanding bytes have been ACKed.
   *
   * Note: in case of passive close, this ensures our FIN is acked.
   */

  else if (conn->tx_unacked != 0
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
           || !sq_empty(&conn->write_q)
#endif /* CONFIG_NET_TCP_WRITE_BUFFERS */
          )
    {
      /* No... we are still waiting for ACKs. Drop any received data, but
       * do not yet report TCP_CLOSE in the response.
       */

      dev->d_len = 0;
      flags &= ~TCP_NEWDATA;
      ninfo("waiting for ack\n");
    }
  else
    {
      /* Note: the following states shouldn't reach here because
       *
       * FIN_WAIT_1, CLOSING, LAST_ACK
       *   should have tx_unacked != 0, already handled above
       *
       * CLOSED, TIME_WAIT
       *   a TCP_CLOSE callback should have already cleared this callback
       *   when transitioning to these states.
       *
       * FIN_WAIT_2
       *   new data is dropped by tcp_input without invoking tcp_callback.
       *   the timer is handled by tcp_timer without invoking tcp_callback.
       *   TCP_CLOSE is handled above.
       */

      DEBUGASSERT(conn->tcpstateflags == TCP_ESTABLISHED);

      /* Drop data received in this state and make sure that TCP_CLOSE
       * is set in the response.
       */

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
      /* We don't need the send callback anymore. */

      if (conn->sndcb != NULL)
        {
          conn->sndcb->flags = 0;
          conn->sndcb->event = NULL;

          /* The callback will be freed by tcp_free. */

          conn->sndcb = NULL;
        }
#endif

      dev->d_len = 0;
      flags = (flags & ~TCP_NEWDATA) | TCP_CLOSE;
    }

  UNUSED(conn); /* May not be used */
  return flags;

end_wait:
  if (conn->clscb != NULL)
    {
      tcp_callback_free(conn, conn->clscb);
      conn->clscb = NULL;
    }

  /* Free network resources */

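  /* The actual teardown is deferred to the low-priority work queue, using
   * the connection's dedicated work structure, so that it happens outside
   * of this event callback.
   */
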
  work_queue(LPWORK, &conn->clswork, tcp_close_work, conn, 0);

  return flags;
}

/****************************************************************************
 * Name: tcp_close_txnotify
 *
 * Description:
 *   Notify the appropriate device driver that we have data ready to
 *   be sent (TCP)
 *
 * Input Parameters:
 *   psock - Socket state structure
 *   conn  - The TCP connection structure
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

static inline void tcp_close_txnotify(FAR struct socket *psock,
                                      FAR struct tcp_conn_s *conn)
{
#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
  /* If both IPv4 and IPv6 support are enabled, then we will need to select
   * the device driver using the appropriate IP domain.
   */

  if (psock->s_domain == PF_INET)
#endif
    {
      /* Notify the device driver that send data is available */

      netdev_ipv4_txnotify(conn->u.ipv4.laddr, conn->u.ipv4.raddr);
    }
#endif /* CONFIG_NET_IPv4 */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
  else /* if (psock->s_domain == PF_INET6) */
#endif /* CONFIG_NET_IPv4 */
    {
      /* Notify the device driver that send data is available */

      DEBUGASSERT(psock->s_domain == PF_INET6);
      netdev_ipv6_txnotify(conn->u.ipv6.laddr, conn->u.ipv6.raddr);
    }
#endif /* CONFIG_NET_IPv6 */
}

/****************************************************************************
 * Name: tcp_close_disconnect
 *
 * Description:
 *   Break any current TCP connection
 *
 * Input Parameters:
 *   psock - An instance of the internal socket structure whose TCP
 *           connection is to be closed
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value is returned on failure
 *   (for example, if tcp_txdrain() fails while lingering).
 *
 * Assumptions:
 *   Called from normal user-level logic
 *
 ****************************************************************************/

static inline int tcp_close_disconnect(FAR struct socket *psock)
{
  FAR struct tcp_conn_s *conn;
  int ret = OK;

  /* The network is locked here to avoid race conditions */

  net_lock();

  conn = (FAR struct tcp_conn_s *)psock->s_conn;
  DEBUGASSERT(conn != NULL);

#ifdef CONFIG_NET_SOLINGER
  /* SO_LINGER
   *   Lingers on a close() if data is present. This option controls the
   *   action taken when unsent messages queue on a socket and close() is
   *   performed. If SO_LINGER is set, the system shall block the calling
   *   thread during close() until it can transmit the data or until the
   *   time expires. If SO_LINGER is not specified, and close() is issued,
   *   the system handles the call in a way that allows the calling thread
   *   to continue as quickly as possible. This option takes a linger
   *   structure, as defined in the <sys/socket.h> header, to specify the
   *   state of the option and linger interval.
   */

  if (_SO_GETOPT(conn->sconn.s_options, SO_LINGER))
    {
      /* Wait for the buffered TX data to be sent. */

      ret = tcp_txdrain(psock, _SO_TIMEOUT(conn->sconn.s_linger));
      if (ret < 0)
        {
          /* tcp_txdrain may fail, but that won't stop us from closing
           * the socket.
           */

          nerr("ERROR: tcp_txdrain() failed: %d\n", ret);
        }
    }
#endif

  /* Discard our reference to the connection */

  conn->crefs = 0;

  /* TCP_ESTABLISHED
   *   We need to initiate an active close and wait for its completion.
   *
   * TCP_LAST_ACK
   *   We still need to wait for the ACK for our FIN, possibly
   *   retransmitting the FIN, before disposing the connection.
   */

  if ((conn->tcpstateflags == TCP_ESTABLISHED ||
       conn->tcpstateflags == TCP_LAST_ACK) &&
      (conn->clscb = tcp_callback_alloc(conn)) != NULL)
    {
      /* Set up to receive TCP data event callbacks */

      conn->clscb->flags = TCP_NEWDATA | TCP_POLL | TCP_DISCONN_EVENTS;
      conn->clscb->event = tcp_close_eventhandler;
      conn->clscb->priv  = conn; /* reference for event handler to free cb */

      /* Notify the device driver of the availability of TX data */

      tcp_close_txnotify(psock, conn);
    }
  else
    {
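      /* The connection is not in the TCP_ESTABLISHED or TCP_LAST_ACK
       * state, or no close callback could be allocated: tear the
       * connection down immediately.
       */
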
      /* Stop the network monitor for all sockets */

      tcp_stop_monitor(conn, TCP_CLOSE);

      /* Free network resources */

      tcp_free(conn);
    }
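
  /* The socket no longer retains a reference to the connection; the
   * connection is either freed above or will be freed later by
   * tcp_close_work() when the close sequence completes.
   */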

  psock->s_conn = NULL;

  net_unlock();
  return ret;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: tcp_close
 *
 * Description:
 *   Break any current TCP connection
 *
 * Input Parameters:
 *   psock - An instance of the internal socket structure.
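 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value is returned on failure.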
 *
 * Assumptions:
 *   Called from normal user-level logic
 *
 ****************************************************************************/

int tcp_close(FAR struct socket *psock)
{
  FAR struct tcp_conn_s *conn = psock->s_conn;

  /* Perform the disconnection now */

  tcp_unlisten(conn); /* No longer accepting connections */

  /* Break any current connections and close the socket */

  return tcp_close_disconnect(psock);
}

#endif /* CONFIG_NET_TCP */