2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
2014-06-24 16:03:44 +02:00
|
|
|
* net/tcp/tcp_send_buffered.c
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-01-22 18:11:23 +01:00
|
|
|
* Copyright (C) 2007-2014, 2016-2018 Gregory Nutt. All rights reserved.
|
2014-01-14 00:11:01 +01:00
|
|
|
* Author: Gregory Nutt <gnutt@nuttx.org>
|
|
|
|
* Jason Jiang <jasonj@live.cn>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* 3. Neither the name NuttX nor the names of its contributors may be
|
|
|
|
* used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
|
|
|
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
|
|
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET) && defined(CONFIG_NET_TCP) && \
|
|
|
|
defined(CONFIG_NET_TCP_WRITE_BUFFERS)
|
|
|
|
|
2016-06-11 22:14:08 +02:00
|
|
|
#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_NET_TCP_WRBUFFER_DEBUG)
|
2014-06-18 19:45:55 +02:00
|
|
|
/* Force debug output (from this file only) */
|
|
|
|
|
|
|
|
# undef CONFIG_DEBUG_NET
|
|
|
|
# define CONFIG_DEBUG_NET 1
|
|
|
|
#endif
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <debug.h>
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#include <arch/irq.h>
|
|
|
|
#include <nuttx/clock.h>
|
2014-07-05 00:38:51 +02:00
|
|
|
#include <nuttx/net/net.h>
|
2017-05-09 15:34:59 +02:00
|
|
|
#include <nuttx/mm/iob.h>
|
2014-06-24 17:28:44 +02:00
|
|
|
#include <nuttx/net/netdev.h>
|
2014-07-06 19:05:28 +02:00
|
|
|
#include <nuttx/net/arp.h>
|
2014-06-26 17:32:39 +02:00
|
|
|
#include <nuttx/net/tcp.h>
|
2019-05-29 15:26:26 +02:00
|
|
|
#include <nuttx/net/net.h>
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-27 17:56:45 +02:00
|
|
|
#include "netdev/netdev.h"
|
2018-02-22 23:30:46 +01:00
|
|
|
#include "devif/devif.h"
|
2017-08-06 22:48:19 +02:00
|
|
|
#include "socket/socket.h"
|
|
|
|
#include "inet/inet.h"
|
2014-08-19 00:22:14 +02:00
|
|
|
#include "arp/arp.h"
|
2015-02-02 20:44:31 +01:00
|
|
|
#include "icmpv6/icmpv6.h"
|
2015-01-22 17:09:10 +01:00
|
|
|
#include "neighbor/neighbor.h"
|
2018-02-22 23:30:46 +01:00
|
|
|
#include "route/route.h"
|
2019-06-03 00:16:44 +02:00
|
|
|
#include "utils/utils.h"
|
2014-06-21 23:23:39 +02:00
|
|
|
#include "tcp/tcp.h"
|
2014-01-14 00:11:01 +01:00
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Pre-processor Definitions
|
|
|
|
****************************************************************************/
|
2015-01-17 14:42:09 +01:00
|
|
|
/* If both IPv4 and IPv6 support are both enabled, then we will need to build
|
|
|
|
* in some additional domain selection support.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_IPv4) && defined(CONFIG_NET_IPv6)
|
|
|
|
# define NEED_IPDOMAIN_SUPPORT 1
|
|
|
|
#endif
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-01-18 15:56:05 +01:00
|
|
|
#define TCPIPv4BUF ((struct tcp_hdr_s *)&dev->d_buf[NET_LL_HDRLEN(dev) + IPv4_HDRLEN])
|
|
|
|
#define TCPIPv6BUF ((struct tcp_hdr_s *)&dev->d_buf[NET_LL_HDRLEN(dev) + IPv6_HDRLEN])
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-23 02:53:18 +02:00
|
|
|
/* Debug */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_WRBUFFER_DUMP
|
|
|
|
# define BUF_DUMP(msg,buf,len) lib_dumpbuffer(msg,buf,len)
|
|
|
|
#else
|
2014-06-24 01:31:30 +02:00
|
|
|
# define BUF_DUMP(msg,buf,len)
|
2018-01-23 02:33:14 +01:00
|
|
|
# undef TCP_WBDUMP
|
|
|
|
# define TCP_WBDUMP(msg,wrb,len,offset)
|
2014-06-23 02:53:18 +02:00
|
|
|
#endif
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_insert_segment
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2014-01-14 16:43:59 +01:00
|
|
|
* Insert a new segment in a write buffer queue, keep the segment queue in
|
|
|
|
* ascending order of sequence number.
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2014-06-22 19:27:57 +02:00
|
|
|
* wrb The segment to be inserted
|
|
|
|
* q The write buffer queue in which to insert the segment
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Returned Value:
|
2014-01-14 16:43:59 +01:00
|
|
|
* None
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Assumptions:
|
2017-08-29 23:08:38 +02:00
|
|
|
* The network is locked
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
static void psock_insert_segment(FAR struct tcp_wrbuffer_s *wrb,
|
|
|
|
FAR sq_queue_t *q)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2015-10-08 23:10:04 +02:00
|
|
|
FAR sq_entry_t *entry = (FAR sq_entry_t *)wrb;
|
|
|
|
FAR sq_entry_t *insert = NULL;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-10-08 23:10:04 +02:00
|
|
|
FAR sq_entry_t *itr;
|
2014-01-14 00:11:01 +01:00
|
|
|
for (itr = sq_peek(q); itr; itr = sq_next(itr))
|
|
|
|
{
|
2015-10-08 23:10:04 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb0 = (FAR struct tcp_wrbuffer_s *)itr;
|
2018-01-23 02:33:14 +01:00
|
|
|
if (TCP_WBSEQNO(wrb0) < TCP_WBSEQNO(wrb))
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
|
|
|
insert = itr;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (insert)
|
|
|
|
{
|
|
|
|
sq_addafter(insert, entry, q);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
sq_addfirst(entry, q);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-01 20:25:32 +02:00
|
|
|
/****************************************************************************
 * Name: psock_writebuffer_notify
 *
 * Description:
 *   Check whether every write buffer has been both sent and ACKed and, if
 *   so, signal any threads waiting for the write buffers to drain.
 *
 * Input Parameters:
 *   conn  The connection structure associated with the socket
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#ifdef CONFIG_TCP_NOTIFIER
static void psock_writebuffer_notify(FAR struct tcp_conn_s *conn)
{
  /* Anything still waiting to be sent? Then the buffers are not drained. */

  if (!sq_empty(&conn->write_q))
    {
      return;
    }

  /* Anything sent but not yet ACKed? Then the buffers are not drained. */

  if (!sq_empty(&conn->unacked_q))
    {
      return;
    }

  /* Both queues are empty:  wake up any waiters. */

  tcp_writebuffer_signal(conn);
}
#else
#  define psock_writebuffer_notify(conn)
#endif
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_lost_connection
|
2014-06-22 19:27:57 +02:00
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* The TCP connection has been lost. Free all write buffers.
|
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2014-06-22 19:27:57 +02:00
|
|
|
* psock The socket structure
|
|
|
|
* conn The connection structure associated with the socket
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
static inline void psock_lost_connection(FAR struct socket *psock,
|
|
|
|
FAR struct tcp_conn_s *conn)
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
|
|
|
FAR sq_entry_t *entry;
|
|
|
|
FAR sq_entry_t *next;
|
|
|
|
|
|
|
|
/* Do not allow any further callbacks */
|
|
|
|
|
2016-01-22 22:54:45 +01:00
|
|
|
if (psock->s_sndcb != NULL)
|
|
|
|
{
|
|
|
|
psock->s_sndcb->flags = 0;
|
|
|
|
psock->s_sndcb->event = NULL;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-02-22 13:29:18 +01:00
|
|
|
if (conn != NULL)
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
2018-02-22 13:29:18 +01:00
|
|
|
/* Free all queued write buffers */
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-02-22 13:29:18 +01:00
|
|
|
for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
|
|
|
|
{
|
|
|
|
next = sq_next(entry);
|
|
|
|
tcp_wrbuffer_release((FAR struct tcp_wrbuffer_s *)entry);
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-02-22 13:29:18 +01:00
|
|
|
for (entry = sq_peek(&conn->write_q); entry; entry = next)
|
|
|
|
{
|
|
|
|
next = sq_next(entry);
|
|
|
|
tcp_wrbuffer_release((FAR struct tcp_wrbuffer_s *)entry);
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-02-22 13:29:18 +01:00
|
|
|
/* Reset write buffering variables */
|
|
|
|
|
|
|
|
sq_init(&conn->unacked_q);
|
|
|
|
sq_init(&conn->write_q);
|
2019-07-01 20:25:32 +02:00
|
|
|
|
|
|
|
/* Notify any waiters if the write buffers have been drained. */
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
|
|
|
|
2018-02-22 13:29:18 +01:00
|
|
|
conn->sent = 0;
|
|
|
|
conn->sndseq_max = 0;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
|
2015-01-17 14:42:09 +01:00
|
|
|
/****************************************************************************
 * Name: send_ipselect
 *
 * Description:
 *   If both IPv4 and IPv6 support are enabled, then we will need to select
 *   which one to use when generating the outgoing packet.  If only one
 *   domain is selected, then the setup is already in place and we need do
 *   nothing.
 *
 * Input Parameters:
 *   dev   - The structure of the network driver that caused the event
 *   conn  - The TCP connection structure
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *   The network is locked
 *
 ****************************************************************************/

#ifdef NEED_IPDOMAIN_SUPPORT
static inline void send_ipselect(FAR struct net_driver_s *dev,
                                 FAR struct tcp_conn_s *conn)
{
  /* Configure the device for whichever address domain this socket uses */

  if (conn->domain != PF_INET)
    {
      /* Not IPv4, so it must be the IPv6 domain */

      DEBUGASSERT(conn->domain == PF_INET6);
      tcp_ipv6_select(dev);
    }
  else
    {
      /* Select the IPv4 domain */

      tcp_ipv4_select(dev);
    }
}
#endif
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
2017-08-29 22:08:04 +02:00
|
|
|
* Name: psock_send_eventhandler
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2017-09-02 18:27:03 +02:00
|
|
|
* This function is called to perform the actual send operation when
|
|
|
|
* polled by the lower, device interfacing layer.
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2017-09-02 18:27:03 +02:00
|
|
|
* dev The structure of the network driver that caused the event
|
2014-01-14 00:11:01 +01:00
|
|
|
* conn The connection structure associated with the socket
|
|
|
|
* flags Set of events describing why the callback was invoked
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
* Assumptions:
|
2017-08-29 23:08:38 +02:00
|
|
|
* The network is locked
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2017-08-29 22:08:04 +02:00
|
|
|
static uint16_t psock_send_eventhandler(FAR struct net_driver_s *dev,
|
|
|
|
FAR void *pvconn, FAR void *pvpriv,
|
|
|
|
uint16_t flags)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-25 02:12:49 +02:00
|
|
|
FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)pvconn;
|
2014-01-14 16:43:59 +01:00
|
|
|
FAR struct socket *psock = (FAR struct socket *)pvpriv;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-08-27 17:06:46 +02:00
|
|
|
/* The TCP socket is connected and, hence, should be bound to a device.
|
2016-03-20 15:19:00 +01:00
|
|
|
* Make sure that the polling device is the one that we are bound to.
|
2015-08-27 17:06:46 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(conn->dev != NULL);
|
|
|
|
if (dev != conn->dev)
|
|
|
|
{
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("flags: %04x\n", flags);
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-08-21 18:33:16 +02:00
|
|
|
/* If this packet contains an acknowledgment, then update the count of
|
2014-01-14 00:11:01 +01:00
|
|
|
* acknowledged bytes.
|
|
|
|
*/
|
|
|
|
|
2014-07-07 01:22:02 +02:00
|
|
|
if ((flags & TCP_ACKDATA) != 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
2015-01-18 15:56:05 +01:00
|
|
|
FAR struct tcp_hdr_s *tcp;
|
2014-06-12 21:32:07 +02:00
|
|
|
FAR sq_entry_t *entry;
|
|
|
|
FAR sq_entry_t *next;
|
2014-01-14 00:11:01 +01:00
|
|
|
uint32_t ackno;
|
|
|
|
|
2015-01-18 15:56:05 +01:00
|
|
|
/* Get the offset address of the TCP header */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
2015-01-18 16:23:22 +01:00
|
|
|
if (conn->domain == PF_INET)
|
2015-01-18 15:56:05 +01:00
|
|
|
#endif
|
|
|
|
{
|
|
|
|
DEBUGASSERT(IFF_IS_IPv4(dev->d_flags));
|
|
|
|
tcp = TCPIPv4BUF;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv4 */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
DEBUGASSERT(IFF_IS_IPv6(dev->d_flags));
|
|
|
|
tcp = TCPIPv6BUF;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv6 */
|
|
|
|
|
|
|
|
/* Get the ACK number from the TCP header */
|
|
|
|
|
|
|
|
ackno = tcp_getsequence(tcp->ackno);
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: ackno=%u flags=%04x\n", ackno, flags);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2014-06-23 00:47:32 +02:00
|
|
|
/* Look at every write buffer in the unacked_q. The unacked_q
|
2014-06-22 19:27:57 +02:00
|
|
|
* holds write buffers that have been entirely sent, but which
|
2014-06-23 00:47:32 +02:00
|
|
|
* have not yet been ACKed.
|
2014-06-22 19:27:57 +02:00
|
|
|
*/
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
|
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
uint32_t lastseq;
|
|
|
|
|
|
|
|
/* Check of some or all of this write buffer has been ACKed. */
|
|
|
|
|
|
|
|
next = sq_next(entry);
|
2015-10-08 23:10:04 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)entry;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* If the ACKed sequence number is greater than the start
|
|
|
|
* sequence number of the write buffer, then some or all of
|
|
|
|
* the write buffer has been ACKed.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (ackno > TCP_WBSEQNO(wrb))
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Get the sequence number at the end of the data */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
lastseq = TCP_WBSEQNO(wrb) + TCP_WBPKTLEN(wrb);
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p seqno=%u lastseq=%u pktlen=%u ackno=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), lastseq, TCP_WBPKTLEN(wrb),
|
2018-01-22 18:11:23 +01:00
|
|
|
ackno);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Has the entire buffer been ACKed? */
|
|
|
|
|
|
|
|
if (ackno >= lastseq)
|
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p Freeing write buffer\n", wrb);
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Yes... Remove the write buffer from ACK waiting queue */
|
|
|
|
|
|
|
|
sq_rem(entry, &conn->unacked_q);
|
|
|
|
|
|
|
|
/* And return the write buffer to the pool of free buffers */
|
|
|
|
|
|
|
|
tcp_wrbuffer_release(wrb);
|
2019-07-01 20:25:32 +02:00
|
|
|
|
|
|
|
/* Notify any waiters if the write buffers have been
|
|
|
|
* drained.
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-06-22 23:27:01 +02:00
|
|
|
unsigned int trimlen;
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* No, then just trim the ACKed bytes from the beginning
|
|
|
|
* of the write buffer. This will free up some I/O buffers
|
|
|
|
* that can be reused while are still sending the last
|
|
|
|
* buffers in the chain.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
trimlen = ackno - TCP_WBSEQNO(wrb);
|
|
|
|
if (trimlen > TCP_WBSENT(wrb))
|
2014-06-23 17:40:17 +02:00
|
|
|
{
|
|
|
|
/* More data has been ACKed then we have sent? */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
trimlen = TCP_WBSENT(wrb);
|
2014-06-23 17:40:17 +02:00
|
|
|
}
|
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p trim %u bytes\n", wrb, trimlen);
|
2014-06-23 17:40:17 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBTRIM(wrb, trimlen);
|
|
|
|
TCP_WBSEQNO(wrb) = ackno;
|
|
|
|
TCP_WBSENT(wrb) -= trimlen;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
/* Set the new sequence number for what remains */
|
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p seqno=%u pktlen=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb));
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* A special case is the head of the write_q which may be partially
|
|
|
|
* sent and so can still have un-ACKed bytes that could get ACKed
|
|
|
|
* before the entire write buffer has even been sent.
|
|
|
|
*/
|
|
|
|
|
2015-10-08 23:10:04 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
2018-01-23 02:33:14 +01:00
|
|
|
if (wrb && TCP_WBSENT(wrb) > 0 && ackno > TCP_WBSEQNO(wrb))
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
|
|
|
uint32_t nacked;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-23 02:53:18 +02:00
|
|
|
/* Number of bytes that were ACKed */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
nacked = ackno - TCP_WBSEQNO(wrb);
|
|
|
|
if (nacked > TCP_WBSENT(wrb))
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
2014-06-23 17:40:17 +02:00
|
|
|
/* More data has been ACKed then we have sent? ASSERT? */
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
nacked = TCP_WBSENT(wrb);
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p seqno=%u nacked=%u sent=%u ackno=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), nacked, TCP_WBSENT(wrb), ackno);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Trim the ACKed bytes from the beginning of the write buffer. */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBTRIM(wrb, nacked);
|
|
|
|
TCP_WBSEQNO(wrb) = ackno;
|
|
|
|
TCP_WBSENT(wrb) -= nacked;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p seqno=%u pktlen=%u sent=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb), TCP_WBSENT(wrb));
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for a loss of connection */
|
|
|
|
|
2015-05-30 17:12:27 +02:00
|
|
|
else if ((flags & TCP_DISCONN_EVENTS) != 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("Lost connection: %04x\n", flags);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2017-10-19 19:55:51 +02:00
|
|
|
/* We could get here recursively through the callback actions of
|
|
|
|
* tcp_lost_connection(). So don't repeat that action if we have
|
|
|
|
* already been disconnected.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (psock->s_conn != NULL && _SS_ISCONNECTED(psock->s_flags))
|
2016-01-22 23:22:09 +01:00
|
|
|
{
|
|
|
|
/* Report not connected */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2017-08-29 20:27:58 +02:00
|
|
|
tcp_lost_connection(psock, psock->s_sndcb, flags);
|
2016-01-22 23:22:09 +01:00
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Free write buffers and terminate polling */
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
psock_lost_connection(psock, conn);
|
2014-06-22 19:27:57 +02:00
|
|
|
return flags;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-07-03 02:02:23 +02:00
|
|
|
/* Check if we are being asked to retransmit data */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-07-03 02:02:23 +02:00
|
|
|
else if ((flags & TCP_REXMIT) != 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
FAR sq_entry_t *entry;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("REXMIT: %04x\n", flags);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* If there is a partially sent write buffer at the head of the
|
2014-06-22 23:27:01 +02:00
|
|
|
* write_q? Has anything been sent from that write buffer?
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
2018-01-23 02:33:14 +01:00
|
|
|
ninfo("REXMIT: wrb=%p sent=%u\n", wrb, wrb ? TCP_WBSENT(wrb) : 0);
|
2014-06-23 00:47:32 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (wrb != NULL && TCP_WBSENT(wrb) > 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 23:27:01 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *tmp;
|
2014-06-23 15:31:55 +02:00
|
|
|
uint16_t sent;
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* Yes.. Reset the number of bytes sent sent from the write buffer */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
sent = TCP_WBSENT(wrb);
|
2014-06-23 15:31:55 +02:00
|
|
|
if (conn->unacked > sent)
|
|
|
|
{
|
|
|
|
conn->unacked -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->unacked = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->sent > sent)
|
|
|
|
{
|
|
|
|
conn->sent -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->sent = 0;
|
|
|
|
}
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBSENT(wrb) = 0;
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSENT(wrb), conn->unacked, conn->sent);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* Increment the retransmit count on this write buffer. */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (++TCP_WBNRTX(wrb) >= TCP_MAXRTX)
|
2014-06-22 23:27:01 +02:00
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
nwarn("WARNING: Expiring wrb=%p nrtx=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBNRTX(wrb));
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* The maximum retry count as been exhausted. Remove the write
|
|
|
|
* buffer at the head of the queue.
|
|
|
|
*/
|
|
|
|
|
|
|
|
tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
|
|
|
|
DEBUGASSERT(tmp == wrb);
|
2014-06-24 23:38:00 +02:00
|
|
|
UNUSED(tmp);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* And return the write buffer to the free list */
|
|
|
|
|
|
|
|
tcp_wrbuffer_release(wrb);
|
|
|
|
|
2019-07-01 20:25:32 +02:00
|
|
|
/* Notify any waiters if the write buffers have been
|
|
|
|
* drained.
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
/* NOTE expired is different from un-ACKed, it is designed to
|
|
|
|
* represent the number of segments that have been sent,
|
|
|
|
* retransmitted, and un-ACKed, if expired is not zero, the
|
|
|
|
* connection will be closed.
|
|
|
|
*
|
2014-07-07 00:10:26 +02:00
|
|
|
* field expired can only be updated at TCP_ESTABLISHED state
|
2014-06-22 23:27:01 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
conn->expired++;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
2015-10-04 23:04:00 +02:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Move all segments that have been sent but not ACKed to the write
|
|
|
|
* queue again note, the un-ACKed segments are put at the head of the
|
|
|
|
* write_q so they can be resent as soon as possible.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
while ((entry = sq_remlast(&conn->unacked_q)) != NULL)
|
|
|
|
{
|
2015-10-08 23:10:04 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)entry;
|
2014-06-23 15:31:55 +02:00
|
|
|
uint16_t sent;
|
|
|
|
|
|
|
|
/* Reset the number of bytes sent sent from the write buffer */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
sent = TCP_WBSENT(wrb);
|
2014-06-23 15:31:55 +02:00
|
|
|
if (conn->unacked > sent)
|
|
|
|
{
|
|
|
|
conn->unacked -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->unacked = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->sent > sent)
|
|
|
|
{
|
|
|
|
conn->sent -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->sent = 0;
|
|
|
|
}
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBSENT(wrb) = 0;
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSENT(wrb), conn->unacked, conn->sent);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Free any write buffers that have exceed the retry count */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (++TCP_WBNRTX(wrb) >= TCP_MAXRTX)
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
nwarn("WARNING: Expiring wrb=%p nrtx=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBNRTX(wrb));
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Return the write buffer to the free list */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
tcp_wrbuffer_release(wrb);
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-07-01 20:25:32 +02:00
|
|
|
/* Notify any waiters if the write buffers have been
|
|
|
|
* drained.
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/* NOTE expired is different from un-ACKed, it is designed to
|
|
|
|
* represent the number of segments that have been sent,
|
|
|
|
* retransmitted, and un-ACKed, if expired is not zero, the
|
|
|
|
* connection will be closed.
|
|
|
|
*
|
2014-07-07 00:10:26 +02:00
|
|
|
* field expired can only be updated at TCP_ESTABLISHED state
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
2014-01-14 22:17:53 +01:00
|
|
|
conn->expired++;
|
2014-01-14 00:11:01 +01:00
|
|
|
continue;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Insert the write buffer into the write_q (in sequence
|
|
|
|
* number order). The retransmission will occur below
|
While working with version 7.10 I discovered a problem in TCP stack that could be observed on high network load. Generally speaking, the problem is that RST flag is set in unnecessary case, in which between loss of some TCP packet and its proper retransmission, another packets had been successfully sent. The scenario is as follows: NuttX did not receive ACK for some sent packet, so it has been probably lost somewhere. But before its retransmission starts, NuttX is correctly issuing next TCP packets, with sequence numbers increasing properly. When the retransmission of previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which acknowledges also the packets sent in the meantime (i.e. between unsuccessful sending of lost packet and its proper retransmission). However, variable unackseq is still set to conn->isn + conn->sent, which is truth only if no further packets transmission occurred in the meantime. Because of incorrect (in such specific case) unackseq value, few lines further condition if (ackseq <= unackseq)is not met, and, as a result, we are going to reset label.
2016-06-20 14:55:29 +02:00
|
|
|
* when the write buffer with the lowest sequence number
|
2014-06-22 19:27:57 +02:00
|
|
|
* is pulled from the write_q again.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
ninfo("REXMIT: Moving wrb=%p nrtx=%u\n", wrb, TCP_WBNRTX(wrb));
|
2014-06-23 00:47:32 +02:00
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
psock_insert_segment(wrb, &conn->write_q);
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if the outgoing packet is available (it may have been claimed
|
2017-09-02 18:27:03 +02:00
|
|
|
* by a sendto event serving a different thread).
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (dev->d_sndlen > 0)
|
|
|
|
{
|
|
|
|
/* Another thread has beat us sending data, wait for the next poll */
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We get here if (1) not all of the data has been ACKed, (2) we have been
|
|
|
|
* asked to retransmit data, (3) the connection is still healthy, and (4)
|
|
|
|
* the outgoing packet is available for our use. In this case, we are
|
|
|
|
* now free to send more data to receiver -- UNLESS the buffer contains
|
2019-02-14 15:39:16 +01:00
|
|
|
* unprocessed incoming data or window size is zero. In that event, we
|
|
|
|
* will have to wait for the next polling cycle.
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
2014-07-07 00:10:26 +02:00
|
|
|
if ((conn->tcpstateflags & TCP_ESTABLISHED) &&
|
2014-07-07 01:22:02 +02:00
|
|
|
(flags & (TCP_POLL | TCP_REXMIT)) &&
|
2019-02-14 15:39:16 +01:00
|
|
|
!(sq_empty(&conn->write_q)) &&
|
|
|
|
conn->winsize > 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2019-09-18 20:33:41 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
uint32_t predicted_seqno;
|
|
|
|
size_t sndlen;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Peek at the head of the write queue (but don't remove anything
|
|
|
|
* from the write queue yet). We know from the above test that
|
|
|
|
* the write_q is not empty.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
|
|
|
DEBUGASSERT(wrb);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Get the amount of data that we can send in the next packet.
|
|
|
|
* We will send either the remaining data in the buffer I/O
|
|
|
|
* buffer chain, or as much as will fit given the MSS and current
|
|
|
|
* window size.
|
|
|
|
*/
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
sndlen = TCP_WBPKTLEN(wrb) - TCP_WBSENT(wrb);
|
|
|
|
if (sndlen > conn->mss)
|
|
|
|
{
|
|
|
|
sndlen = conn->mss;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
if (sndlen > conn->winsize)
|
|
|
|
{
|
|
|
|
sndlen = conn->winsize;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
ninfo("SEND: wrb=%p pktlen=%u sent=%u sndlen=%u mss=%u "
|
|
|
|
"winsize=%u\n",
|
|
|
|
wrb, TCP_WBPKTLEN(wrb), TCP_WBSENT(wrb), sndlen, conn->mss,
|
|
|
|
conn->winsize);
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Set the sequence number for this segment. If we are
|
|
|
|
* retransmitting, then the sequence number will already
|
|
|
|
* be set for this write buffer.
|
|
|
|
*/
|
2015-10-04 23:04:00 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
if (TCP_WBSEQNO(wrb) == (unsigned)-1)
|
|
|
|
{
|
|
|
|
TCP_WBSEQNO(wrb) = conn->isn + conn->sent;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* The TCP stack updates sndseq on receipt of ACK *before*
|
|
|
|
* this function is called. In that case sndseq will point
|
|
|
|
* to the next unacknowledged byte (which might have already
|
|
|
|
* been sent). We will overwrite the value of sndseq here
|
|
|
|
* before the packet is sent.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
tcp_setsequence(conn->sndseq, TCP_WBSEQNO(wrb) + TCP_WBSENT(wrb));
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-01-17 14:42:09 +01:00
|
|
|
#ifdef NEED_IPDOMAIN_SUPPORT
|
2019-09-18 20:33:41 +02:00
|
|
|
/* If both IPv4 and IPv6 support are enabled, then we will need to
|
|
|
|
* select which one to use when generating the outgoing packet.
|
|
|
|
* If only one domain is selected, then the setup is already in
|
|
|
|
* place and we need do nothing.
|
|
|
|
*/
|
2015-01-17 14:42:09 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
send_ipselect(dev, conn);
|
2015-01-17 14:42:09 +01:00
|
|
|
#endif
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Then set-up to send that amount of data with the offset
|
|
|
|
* corresponding to the amount of data already sent. (this
|
|
|
|
* won't actually happen until the polling cycle completes).
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
devif_iob_send(dev, TCP_WBIOB(wrb), sndlen, TCP_WBSENT(wrb));
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Remember how much data we send out now so that we know
|
|
|
|
* when everything has been acknowledged. Just increment
|
|
|
|
* the amount of data sent. This will be needed in sequence
|
|
|
|
* number calculations.
|
|
|
|
*/
|
While working with version 7.10 I discovered a problem in the TCP stack that could be observed under high network load. Generally speaking, the problem is that the RST flag is set in an unnecessary case, in which between the loss of some TCP packet and its proper retransmission, other packets had been successfully sent. The scenario is as follows: NuttX did not receive an ACK for some sent packet, so it has probably been lost somewhere. But before its retransmission starts, NuttX correctly issues subsequent TCP packets, with sequence numbers increasing properly. When the retransmission of the previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which also acknowledges the packets sent in the meantime (i.e. between the unsuccessful sending of the lost packet and its proper retransmission). However, the variable unackseq is still set to conn->isn + conn->sent, which is true only if no further packet transmission occurred in the meantime. Because of the incorrect (in such a specific case) unackseq value, a few lines further the condition if (ackseq <= unackseq) is not met, and, as a result, we jump to the reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
conn->unacked += sndlen;
|
|
|
|
conn->sent += sndlen;
|
While working with version 7.10 I discovered a problem in the TCP stack that could be observed under high network load. Generally speaking, the problem is that the RST flag is set in an unnecessary case, in which between the loss of some TCP packet and its proper retransmission, other packets had been successfully sent. The scenario is as follows: NuttX did not receive an ACK for some sent packet, so it has probably been lost somewhere. But before its retransmission starts, NuttX correctly issues subsequent TCP packets, with sequence numbers increasing properly. When the retransmission of the previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which also acknowledges the packets sent in the meantime (i.e. between the unsuccessful sending of the lost packet and its proper retransmission). However, the variable unackseq is still set to conn->isn + conn->sent, which is true only if no further packet transmission occurred in the meantime. Because of the incorrect (in such a specific case) unackseq value, a few lines further the condition if (ackseq <= unackseq) is not met, and, as a result, we jump to the reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Below prediction will become true, unless retransmission occurrence */
|
While working with version 7.10 I discovered a problem in the TCP stack that could be observed under high network load. Generally speaking, the problem is that the RST flag is set in an unnecessary case, in which between the loss of some TCP packet and its proper retransmission, other packets had been successfully sent. The scenario is as follows: NuttX did not receive an ACK for some sent packet, so it has probably been lost somewhere. But before its retransmission starts, NuttX correctly issues subsequent TCP packets, with sequence numbers increasing properly. When the retransmission of the previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which also acknowledges the packets sent in the meantime (i.e. between the unsuccessful sending of the lost packet and its proper retransmission). However, the variable unackseq is still set to conn->isn + conn->sent, which is true only if no further packet transmission occurred in the meantime. Because of the incorrect (in such a specific case) unackseq value, a few lines further the condition if (ackseq <= unackseq) is not met, and, as a result, we jump to the reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
predicted_seqno = tcp_getsequence(conn->sndseq) + sndlen;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
if ((predicted_seqno > conn->sndseq_max) ||
|
|
|
|
(tcp_getsequence(conn->sndseq) > predicted_seqno)) /* overflow */
|
|
|
|
{
|
|
|
|
conn->sndseq_max = predicted_seqno;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
ninfo("SEND: wrb=%p nrtx=%u unacked=%u sent=%u\n",
|
|
|
|
wrb, TCP_WBNRTX(wrb), conn->unacked, conn->sent);
|
2014-06-23 00:25:26 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Increment the count of bytes sent from this write buffer */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
TCP_WBSENT(wrb) += sndlen;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
ninfo("SEND: wrb=%p sent=%u pktlen=%u\n",
|
|
|
|
wrb, TCP_WBSENT(wrb), TCP_WBPKTLEN(wrb));
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Remove the write buffer from the write queue if the
|
|
|
|
* last of the data has been sent from the buffer.
|
|
|
|
*/
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
DEBUGASSERT(TCP_WBSENT(wrb) <= TCP_WBPKTLEN(wrb));
|
|
|
|
if (TCP_WBSENT(wrb) >= TCP_WBPKTLEN(wrb))
|
|
|
|
{
|
|
|
|
FAR struct tcp_wrbuffer_s *tmp;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
ninfo("SEND: wrb=%p Move to unacked_q\n", wrb);
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
|
|
|
|
DEBUGASSERT(tmp == wrb);
|
|
|
|
UNUSED(tmp);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Put the I/O buffer chain in the un-acked queue; the
|
|
|
|
* segment is waiting for ACK again
|
2014-06-22 23:27:01 +02:00
|
|
|
*/
|
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
psock_insert_segment(wrb, &conn->unacked_q);
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
2019-09-18 20:33:41 +02:00
|
|
|
|
|
|
|
/* Only one data can be sent by low level driver at once,
|
|
|
|
* tell the caller stop polling the other connection.
|
|
|
|
*/
|
|
|
|
|
|
|
|
flags &= ~TCP_POLL;
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Continue waiting */
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2015-01-17 15:33:14 +01:00
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: send_txnotify
|
2015-01-17 15:33:14 +01:00
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Notify the appropriate device driver that we are have data ready to
|
|
|
|
* be send (TCP)
|
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2015-01-17 15:33:14 +01:00
|
|
|
* psock - Socket state structure
|
|
|
|
* conn - The TCP connection structure
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
static inline void send_txnotify(FAR struct socket *psock,
|
|
|
|
FAR struct tcp_conn_s *conn)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
|
|
|
/* If both IPv4 and IPv6 support are enabled, then we will need to select
|
|
|
|
* the device driver using the appropriate IP domain.
|
|
|
|
*/
|
|
|
|
|
2015-01-17 16:27:05 +01:00
|
|
|
if (psock->s_domain == PF_INET)
|
2015-01-17 15:33:14 +01:00
|
|
|
#endif
|
|
|
|
{
|
|
|
|
/* Notify the device driver that send data is available */
|
|
|
|
|
|
|
|
netdev_ipv4_txnotify(conn->u.ipv4.laddr, conn->u.ipv4.raddr);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv4 */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
2016-02-26 13:45:37 +01:00
|
|
|
else /* if (psock->s_domain == PF_INET6) */
|
2015-01-17 15:33:14 +01:00
|
|
|
#endif /* CONFIG_NET_IPv4 */
|
|
|
|
{
|
|
|
|
/* Notify the device driver that send data is available */
|
|
|
|
|
2016-02-26 13:45:37 +01:00
|
|
|
DEBUGASSERT(psock->s_domain == PF_INET6);
|
2015-01-17 15:33:14 +01:00
|
|
|
netdev_ipv6_txnotify(conn->u.ipv6.laddr, conn->u.ipv6.raddr);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv6 */
|
|
|
|
}
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Public Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
 * Name: psock_tcp_send
 *
 * Description:
 *   psock_tcp_send() call may be used only when the TCP socket is in a
 *   connected state (so that the intended recipient is known).
 *
 * Input Parameters:
 *   psock    An instance of the internal socket structure.
 *   buf      Data to send
 *   len      Length of data to send
 *
 * Returned Value:
 *   On success, returns the number of characters sent.  On error, a
 *   negated errno value is returned (the caller is responsible for
 *   setting errno appropriately):
 *
 *   EAGAIN or EWOULDBLOCK
 *     The socket is marked non-blocking and the requested operation
 *     would block.
 *   EBADF
 *     An invalid descriptor was specified.
 *   ECONNRESET
 *     Connection reset by peer.
 *   EDESTADDRREQ
 *     The socket is not connection-mode, and no peer address is set.
 *   EFAULT
 *     An invalid user space address was specified for a parameter.
 *   EINTR
 *     A signal occurred before any data was transmitted.
 *   EINVAL
 *     Invalid argument passed.
 *   EISCONN
 *     The connection-mode socket was connected already but a recipient
 *     was specified. (Now either this error is returned, or the recipient
 *     specification is ignored.)
 *   EMSGSIZE
 *     The socket type requires that message be sent atomically, and the
 *     size of the message to be sent made this impossible.
 *   ENOBUFS
 *     The output queue for a network interface was full. This generally
 *     indicates that the interface has stopped sending, but may be
 *     caused by transient congestion.
 *   ENOMEM
 *     No memory available.
 *   ENOTCONN
 *     The socket is not connected, and no target has been given.
 *   ENOTSOCK
 *     The argument s is not a socket.
 *   EPIPE
 *     The local end has been shut down on a connection oriented socket.
 *     In this case the process will also receive a SIGPIPE unless
 *     MSG_NOSIGNAL is set.
 *
 ****************************************************************************/

ssize_t psock_tcp_send(FAR struct socket *psock, FAR const void *buf,
                       size_t len)
{
  FAR struct tcp_conn_s *conn;
  FAR struct tcp_wrbuffer_s *wrb;
  ssize_t result = 0;
  int ret = OK;

  /* Sanity check: the socket must exist and still be referenced */

  if (psock == NULL || psock->s_crefs <= 0)
    {
      nerr("ERROR: Invalid socket\n");
      ret = -EBADF;
      goto errout;
    }

  /* Only a connected TCP (stream) socket may be written to */

  if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
    {
      nerr("ERROR: Not connected\n");
      ret = -ENOTCONN;
      goto errout;
    }

  /* Make sure that we have the IP address mapping */

  conn = (FAR struct tcp_conn_s *)psock->s_conn;
  DEBUGASSERT(conn);

#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
#ifdef CONFIG_NET_ARP_SEND
#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
  if (psock->s_domain == PF_INET)
#endif
    {
      /* Make sure that the IP address mapping is in the ARP table */

      ret = arp_send(conn->u.ipv4.raddr);
    }
#endif /* CONFIG_NET_ARP_SEND */

#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
#ifdef CONFIG_NET_ARP_SEND
  else
#endif
    {
      /* Make sure that the IP address mapping is in the Neighbor Table */

      ret = icmpv6_neighbor(conn->u.ipv6.raddr);
    }
#endif /* CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Did we successfully get the address mapping? */

  if (ret < 0)
    {
      nerr("ERROR: Not reachable\n");
      ret = -ENETUNREACH;
      goto errout;
    }
#endif /* CONFIG_NET_ARP_SEND || CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Dump the incoming buffer */

  BUF_DUMP("psock_tcp_send", buf, len);

  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  if (len > 0)
    {
      /* Allocate a write buffer.  Careful, the network will be momentarily
       * unlocked here.
       */

      net_lock();

      /* A non-blocking socket must not wait for a free write buffer */

      if (_SS_ISNONBLOCK(psock->s_flags))
        {
          wrb = tcp_wrbuffer_tryalloc();
        }
      else
        {
          wrb = tcp_wrbuffer_alloc();
        }

      if (wrb == NULL)
        {
          /* A buffer allocation error occurred */

          nerr("ERROR: Failed to allocate write buffer\n");
          ret = _SS_ISNONBLOCK(psock->s_flags) ? -EAGAIN : -ENOMEM;
          goto errout_with_lock;
        }

      /* Allocate resources to receive a callback.  The send callback is
       * allocated lazily on the first send and reused afterward.
       */

      if (psock->s_sndcb == NULL)
        {
          psock->s_sndcb = tcp_callback_alloc(conn);
        }

      /* Test if the callback has been allocated */

      if (psock->s_sndcb == NULL)
        {
          /* A buffer allocation error occurred */

          nerr("ERROR: Failed to allocate callback\n");
          ret = _SS_ISNONBLOCK(psock->s_flags) ? -EAGAIN : -ENOMEM;
          goto errout_with_wrb;
        }

      /* Set up the callback in the connection:  react to ACKs,
       * retransmission requests, polls, and disconnect events.
       */

      psock->s_sndcb->flags = (TCP_ACKDATA | TCP_REXMIT | TCP_POLL |
                               TCP_DISCONN_EVENTS);
      psock->s_sndcb->priv = (FAR void *)psock;
      psock->s_sndcb->event = psock_send_eventhandler;

      /* Initialize the write buffer.  (unsigned)-1 marks the sequence
       * number as "not yet assigned"; the event handler fills it in.
       */

      TCP_WBSEQNO(wrb) = (unsigned)-1;
      TCP_WBNRTX(wrb) = 0;

      /* Copy the user data into the write buffer.  We cannot wait for
       * buffer space if the socket was opened non-blocking.
       */

      if (_SS_ISNONBLOCK(psock->s_flags))
        {
          /* The return value from TCP_WBTRYCOPYIN is either OK or
           * -ENOMEM if less than the entire data chunk could be allocated.
           * If -ENOMEM is returned, check if at least a part of the data
           * chunk was allocated. If more than zero bytes were sent
           * we return that number and let the caller deal with sending the
           * remaining data.
           */

          result = TCP_WBTRYCOPYIN(wrb, (FAR uint8_t *)buf, len);
          if (result == -ENOMEM)
            {
              if (TCP_WBPKTLEN(wrb) > 0)
                {
                  /* Partial copy-in succeeded: report the short count */

                  ninfo("INFO: Allocated part of the requested data\n");
                  result = TCP_WBPKTLEN(wrb);
                }
              else
                {
                  nerr("ERROR: Failed to add data to the I/O buffer chain\n");
                  ret = -EWOULDBLOCK;
                  goto errout_with_wrb;
                }
            }
          else
            {
              result = len;
            }
        }
      else
        {
          unsigned int count;
          int blresult;

          /* iob_copyin might wait for buffers to be freed, but if network is
           * locked this might never happen, since network driver is also locked,
           * therefore we need to break the lock
           */

          blresult = net_breaklock(&count);
          result = TCP_WBCOPYIN(wrb, (FAR uint8_t *)buf, len);

          /* Restore the lock only if we actually broke it */

          if (blresult >= 0)
            {
              net_restorelock(count);
            }
        }

      /* Dump I/O buffer chain */

      TCP_WBDUMP("I/O buffer chain", wrb, TCP_WBPKTLEN(wrb), 0);

      /* psock_send_eventhandler() will send data in FIFO order from the
       * conn->write_q
       */

      sq_addlast(&wrb->wb_node, &conn->write_q);
      ninfo("Queued WRB=%p pktlen=%u write_q(%p,%p)\n",
            wrb, TCP_WBPKTLEN(wrb),
            conn->write_q.head, conn->write_q.tail);

      /* Notify the device driver of the availability of TX data */

      send_txnotify(psock, conn);
      net_unlock();
    }

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors.  Errors are signaled by negative errno values
   * for the send length
   */

  if (result < 0)
    {
      ret = result;
      goto errout;
    }

  if (ret < 0)
    {
      goto errout;
    }

  /* Return the number of bytes actually sent */

  return result;

  /* Error exits: release resources in reverse order of acquisition */

errout_with_wrb:
  tcp_wrbuffer_release(wrb);

errout_with_lock:
  net_unlock();

errout:
  return ret;
}
|
|
|
|
|
2016-01-22 22:52:14 +01:00
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_tcp_cansend
|
2016-01-22 22:52:14 +01:00
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* psock_tcp_cansend() returns a value indicating if a write to the socket
|
|
|
|
* would block. No space in the buffer is actually reserved, so it is
|
|
|
|
* possible that the write may still block if the buffer is filled by
|
|
|
|
* another means.
|
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2016-01-22 22:52:14 +01:00
|
|
|
* psock An instance of the internal socket structure.
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* OK
|
2018-09-11 20:00:42 +02:00
|
|
|
* At least one byte of data could be successfully written.
|
2016-01-22 22:52:14 +01:00
|
|
|
* -EWOULDBLOCK
|
|
|
|
* There is no room in the output buffer.
|
|
|
|
* -EBADF
|
|
|
|
* An invalid descriptor was specified.
|
|
|
|
* -ENOTCONN
|
|
|
|
* The socket is not connected.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
int psock_tcp_cansend(FAR struct socket *psock)
|
|
|
|
{
|
2018-09-12 16:57:06 +02:00
|
|
|
/* Verify that we received a valid socket */
|
|
|
|
|
2016-01-22 22:52:14 +01:00
|
|
|
if (!psock || psock->s_crefs <= 0)
|
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Invalid socket\n");
|
2016-01-22 22:52:14 +01:00
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2018-09-12 16:57:06 +02:00
|
|
|
/* Verify that this is connected TCP socket */
|
|
|
|
|
2016-01-22 22:52:14 +01:00
|
|
|
if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
|
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Not connected\n");
|
2016-01-22 22:52:14 +01:00
|
|
|
return -ENOTCONN;
|
|
|
|
}
|
|
|
|
|
2018-09-12 16:57:06 +02:00
|
|
|
/* In order to setup the send, we need to have at least one free write
|
2019-03-11 19:48:17 +01:00
|
|
|
* buffer head and at least one free IOB to initialize the write buffer
|
|
|
|
* head.
|
2018-09-12 16:57:06 +02:00
|
|
|
*
|
|
|
|
* REVISIT: The send will still block if we are unable to buffer the entire
|
|
|
|
* user-provided buffer which may be quite large. We will almost certainly
|
|
|
|
* need to have more than one free IOB, but we don't know how many more.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (tcp_wrbuffer_test() < 0 || iob_navail(false) <= 0)
|
2016-01-22 22:52:14 +01:00
|
|
|
{
|
|
|
|
return -EWOULDBLOCK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
#endif /* CONFIG_NET && CONFIG_NET_TCP && CONFIG_NET_TCP_WRITE_BUFFERS */
|