2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
2014-06-24 16:03:44 +02:00
|
|
|
* net/tcp/tcp_send_buffered.c
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2024-09-11 14:39:39 +02:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*
|
2021-05-26 09:54:32 +02:00
|
|
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
* contributor license agreements. See the NOTICE file distributed with
|
|
|
|
* this work for additional information regarding copyright ownership. The
|
|
|
|
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
|
|
|
* "License"); you may not use this file except in compliance with the
|
|
|
|
* License. You may obtain a copy of the License at
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2021-05-26 09:54:32 +02:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2021-05-26 09:54:32 +02:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET) && defined(CONFIG_NET_TCP) && \
|
|
|
|
defined(CONFIG_NET_TCP_WRITE_BUFFERS)
|
|
|
|
|
2016-06-11 22:14:08 +02:00
|
|
|
#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_NET_TCP_WRBUFFER_DEBUG)
|
2014-06-18 19:45:55 +02:00
|
|
|
/* Force debug output (from this file only) */
|
|
|
|
|
|
|
|
# undef CONFIG_DEBUG_NET
|
|
|
|
# define CONFIG_DEBUG_NET 1
|
|
|
|
#endif
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
2020-11-25 11:41:42 +01:00
|
|
|
#include <inttypes.h>
|
2014-01-14 00:11:01 +01:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
2021-06-08 20:00:55 +02:00
|
|
|
#include <assert.h>
|
2014-01-14 00:11:01 +01:00
|
|
|
#include <errno.h>
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#include <arch/irq.h>
|
2014-07-05 00:38:51 +02:00
|
|
|
#include <nuttx/net/net.h>
|
2017-05-09 15:34:59 +02:00
|
|
|
#include <nuttx/mm/iob.h>
|
2014-06-24 17:28:44 +02:00
|
|
|
#include <nuttx/net/netdev.h>
|
2014-06-26 17:32:39 +02:00
|
|
|
#include <nuttx/net/tcp.h>
|
2019-05-29 15:26:26 +02:00
|
|
|
#include <nuttx/net/net.h>
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-27 17:56:45 +02:00
|
|
|
#include "netdev/netdev.h"
|
2018-02-22 23:30:46 +01:00
|
|
|
#include "devif/devif.h"
|
2017-08-06 22:48:19 +02:00
|
|
|
#include "socket/socket.h"
|
|
|
|
#include "inet/inet.h"
|
2014-08-19 00:22:14 +02:00
|
|
|
#include "arp/arp.h"
|
2015-02-02 20:44:31 +01:00
|
|
|
#include "icmpv6/icmpv6.h"
|
2015-01-22 17:09:10 +01:00
|
|
|
#include "neighbor/neighbor.h"
|
2018-02-22 23:30:46 +01:00
|
|
|
#include "route/route.h"
|
2019-06-03 00:16:44 +02:00
|
|
|
#include "utils/utils.h"
|
2014-06-21 23:23:39 +02:00
|
|
|
#include "tcp/tcp.h"
|
2014-01-14 00:11:01 +01:00
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Pre-processor Definitions
|
|
|
|
****************************************************************************/
|
2019-12-31 16:26:14 +01:00
|
|
|
|
2015-01-17 14:42:09 +01:00
|
|
|
/* If both IPv4 and IPv6 support are both enabled, then we will need to build
|
|
|
|
* in some additional domain selection support.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_IPv4) && defined(CONFIG_NET_IPv6)
|
|
|
|
# define NEED_IPDOMAIN_SUPPORT 1
|
|
|
|
#endif
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-23 02:53:18 +02:00
|
|
|
/* Debug */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_WRBUFFER_DUMP
|
|
|
|
# define BUF_DUMP(msg,buf,len) lib_dumpbuffer(msg,buf,len)
|
|
|
|
#else
|
2014-06-24 01:31:30 +02:00
|
|
|
# define BUF_DUMP(msg,buf,len)
|
2018-01-23 02:33:14 +01:00
|
|
|
# undef TCP_WBDUMP
|
|
|
|
# define TCP_WBDUMP(msg,wrb,len,offset)
|
2014-06-23 02:53:18 +02:00
|
|
|
#endif
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_insert_segment
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2014-01-14 16:43:59 +01:00
|
|
|
* Insert a new segment in a write buffer queue, keep the segment queue in
|
|
|
|
* ascending order of sequence number.
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2014-06-22 19:27:57 +02:00
|
|
|
* wrb The segment to be inserted
|
|
|
|
* q The write buffer queue in which to insert the segment
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Returned Value:
|
2014-01-14 16:43:59 +01:00
|
|
|
* None
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Assumptions:
|
2017-08-29 23:08:38 +02:00
|
|
|
* The network is locked
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
static void psock_insert_segment(FAR struct tcp_wrbuffer_s *wrb,
                                 FAR sq_queue_t *q)
{
  FAR sq_entry_t *node = (FAR sq_entry_t *)wrb;
  FAR sq_entry_t *prev = NULL;
  FAR sq_entry_t *curr;

  /* Walk the queue and remember the last entry whose sequence number is
   * below the new segment's; the new segment is inserted right after it.
   *
   * NOTE(review): the comparison is a raw '<' on the 32-bit sequence
   * numbers, so it does not account for sequence-number wraparound —
   * presumably acceptable for the window sizes in use; confirm against
   * the TCP_SEQ_* helpers used elsewhere in this file.
   */

  for (curr = sq_peek(q);
       curr != NULL &&
       TCP_WBSEQNO((FAR struct tcp_wrbuffer_s *)curr) < TCP_WBSEQNO(wrb);
       curr = sq_next(curr))
    {
      prev = curr;
    }

  if (prev != NULL)
    {
      /* Insert after the last smaller-sequenced segment */

      sq_addafter(prev, node, q);
    }
  else
    {
      /* Nothing precedes the new segment; it becomes the queue head */

      sq_addfirst(node, q);
    }
}
|
|
|
|
|
2019-07-01 20:25:32 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Name: psock_writebuffer_notify
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* The TCP connection has been lost. Free all write buffers.
|
|
|
|
*
|
|
|
|
* Input Parameters:
|
|
|
|
* psock The socket structure
|
|
|
|
* conn The connection structure associated with the socket
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2019-12-31 16:26:14 +01:00
|
|
|
#ifdef CONFIG_NET_TCP_NOTIFIER
static void psock_writebuffer_notify(FAR struct tcp_conn_s *conn)
{
  /* Only signal once BOTH queues are empty: every write buffer has been
   * sent (write_q drained) and acknowledged (unacked_q drained).
   */

  if (!sq_empty(&conn->write_q) || !sq_empty(&conn->unacked_q))
    {
      return;
    }

  /* Wake up anyone waiting for the write buffers to drain */

  tcp_writebuffer_signal(conn);
}
#else
#  define psock_writebuffer_notify(conn)
#endif
|
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
/****************************************************************************
 * Name: retransmit_segment
 *
 * Description:
 *   Handle one write buffer that needs retransmission: either expire it
 *   (too many retries) or rewind the connection's sent/unacked accounting
 *   and queue it for resend.
 *
 * Input Parameters:
 *   conn - The TCP connection of interest
 *   wrb  - The write buffer to be retransmitted
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *   The network is locked.
 *
 ****************************************************************************/

static void retransmit_segment(FAR struct tcp_conn_s *conn,
                               FAR struct tcp_wrbuffer_s *wrb)
{
  /* Free any write buffers that have exceeded the retry count */

  if (++TCP_WBNRTX(wrb) >= TCP_MAXRTX)
    {
      nwarn("WARNING: Expiring wrb=%p nrtx=%u\n",
            wrb, TCP_WBNRTX(wrb));

      /* Return the write buffer to the free list */

      tcp_wrbuffer_release(wrb);

      /* Notify any waiters if the write buffers have been
       * drained.
       */

      psock_writebuffer_notify(conn);

      /* NOTE expired is different from un-ACKed, it is designed
       * to represent the number of segments that have been sent,
       * retransmitted, and un-ACKed, if expired is not zero, the
       * connection will be closed.
       *
       * field expired can only be updated at TCP_ESTABLISHED
       * state
       */

      conn->expired++;
    }
  else
    {
      uint16_t sent;

      /* Snapshot how many bytes of this buffer were already sent; that is
       * the amount we must back out of the connection-level counters.
       */

      sent = TCP_WBSENT(wrb);

      ninfo("REXMIT: wrb=%p sent=%u, "
            "conn tx_unacked=%" PRId32 " sent=%" PRId32 "\n",
            wrb, TCP_WBSENT(wrb), conn->tx_unacked, conn->sent);

      /* Reset the number of bytes sent from the write buffer.
       * Clamp at zero rather than underflowing the counters.
       */

      if (conn->tx_unacked > sent)
        {
          conn->tx_unacked -= sent;
        }
      else
        {
          conn->tx_unacked = 0;
        }

      if (conn->sent > sent)
        {
          conn->sent -= sent;
        }
      else
        {
          conn->sent = 0;
        }

      /* The whole buffer is now considered unsent again */

      TCP_WBSENT(wrb) = 0;

      /* Insert the write buffer into the write_q (in sequence
       * number order).  The retransmission will occur below
       * when the write buffer with the lowest sequence number
       * is pulled from the write_q again.
       */

      ninfo("REXMIT: Moving wrb=%p nrtx=%u\n",
            wrb, TCP_WBNRTX(wrb));

      psock_insert_segment(wrb, &conn->write_q);
    }
}
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_lost_connection
|
2014-06-22 19:27:57 +02:00
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* The TCP connection has been lost. Free all write buffers.
|
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2014-06-22 19:27:57 +02:00
|
|
|
* psock The socket structure
|
|
|
|
* conn The connection structure associated with the socket
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2022-02-08 17:08:15 +01:00
|
|
|
static inline void psock_lost_connection(FAR struct tcp_conn_s *conn,
                                         bool abort)
{
  FAR sq_entry_t *entry;
  FAR sq_entry_t *next;

  /* Do not allow any further callbacks.  This must happen before the
   * buffers are released so no event handler runs against freed memory.
   */

  if (conn->sndcb != NULL)
    {
      conn->sndcb->flags = 0;
      conn->sndcb->event = NULL;
    }

  /* Free all queued write buffers.  'next' is captured before release
   * because releasing the buffer invalidates its queue linkage.
   */

  for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
    {
      next = sq_next(entry);
      tcp_wrbuffer_release((FAR struct tcp_wrbuffer_s *)entry);
    }

  for (entry = sq_peek(&conn->write_q); entry; entry = next)
    {
      next = sq_next(entry);
      tcp_wrbuffer_release((FAR struct tcp_wrbuffer_s *)entry);
    }

#if CONFIG_NET_SEND_BUFSIZE > 0
  /* Notify the send buffer available */

  tcp_sendbuffer_notify(conn);
#endif /* CONFIG_NET_SEND_BUFSIZE */

  /* Reset write buffering variables (the entries were all freed above,
   * so the queue heads can simply be re-initialized).
   */

  sq_init(&conn->unacked_q);
  sq_init(&conn->write_q);

  /* Notify any waiters if the write buffers have been drained. */

  psock_writebuffer_notify(conn);

  conn->sent = 0;
  conn->sndseq_max = 0;

  /* Force abort the connection: drop unacked accounting and mark the
   * connection closed.  NOTE: the parameter name 'abort' shadows the
   * C library abort() — harmless here, but worth knowing.
   */

  if (abort)
    {
      conn->tx_unacked = 0;
      conn->tcpstateflags = TCP_CLOSED;
    }
}
|
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Name: parse_sack
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Parse sack from incoming TCP options
|
|
|
|
*
|
|
|
|
* Input Parameters:
|
|
|
|
* conn - The TCP connection of interest
|
|
|
|
* tcp - Header of tcp structure
|
|
|
|
* segs - Segments edge of sacks
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* Number of sacks
|
|
|
|
*
|
|
|
|
* Assumptions:
|
|
|
|
* The network is locked.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_SELECTIVE_ACK
|
|
|
|
static int parse_sack(FAR struct tcp_conn_s *conn, FAR struct tcp_hdr_s *tcp,
|
|
|
|
FAR struct tcp_ofoseg_s *segs)
|
|
|
|
{
|
|
|
|
FAR struct tcp_sack_s *sacks;
|
|
|
|
int nsack = 0;
|
|
|
|
uint8_t opt;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Get the size of the link layer header,
|
|
|
|
* the IP and TCP header
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (i = 0; i < ((tcp->tcpoffset >> 4) - 5) << 2 ; )
|
|
|
|
{
|
|
|
|
opt = *(tcp->optdata + i);
|
|
|
|
if (opt == TCP_OPT_END)
|
|
|
|
{
|
|
|
|
/* End of options. */
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else if (opt == TCP_OPT_NOOP)
|
|
|
|
{
|
|
|
|
/* NOP option. */
|
|
|
|
|
|
|
|
++i;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
else if (opt == TCP_OPT_SACK)
|
|
|
|
{
|
|
|
|
nsack = (*(tcp->optdata + 1 + i) -
|
|
|
|
TCP_OPT_SACK_PERM_LEN) /
|
|
|
|
(sizeof(uint32_t) * 2);
|
|
|
|
sacks = (FAR struct tcp_sack_s *)
|
|
|
|
(tcp->optdata + i +
|
|
|
|
TCP_OPT_SACK_PERM_LEN);
|
|
|
|
|
|
|
|
for (i = 0; i < nsack; i++)
|
|
|
|
{
|
2023-08-11 14:20:31 +02:00
|
|
|
/* Use the pointer to avoid the error of 4 byte alignment. */
|
|
|
|
|
|
|
|
segs[i].left = tcp_getsequence((uint8_t *)&sacks[i]);
|
|
|
|
segs[i].right = tcp_getsequence((uint8_t *)&sacks[i] + 4);
|
2023-01-10 06:41:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
tcp_reorder_ofosegs(nsack, segs);
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* All other options have a length field,
|
|
|
|
* so that we easily can skip past them.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (*(tcp->optdata + 1 + i) == 0)
|
|
|
|
{
|
|
|
|
/* If the length field is zero,
|
|
|
|
* the options are malformed and
|
|
|
|
* we don't process them further.
|
|
|
|
*/
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
i += *(tcp->optdata + 1 + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
return nsack;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_TCP_SELECTIVE_ACK */
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
2017-08-29 22:08:04 +02:00
|
|
|
* Name: psock_send_eventhandler
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2017-09-02 18:27:03 +02:00
|
|
|
* This function is called to perform the actual send operation when
|
|
|
|
* polled by the lower, device interfacing layer.
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2017-09-02 18:27:03 +02:00
|
|
|
* dev The structure of the network driver that caused the event
|
2022-08-25 15:17:57 +02:00
|
|
|
* pvpriv An instance of struct tcp_conn_s cast to void*
|
2014-01-14 00:11:01 +01:00
|
|
|
* flags Set of events describing why the callback was invoked
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
* Assumptions:
|
2017-08-29 23:08:38 +02:00
|
|
|
* The network is locked
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2017-08-29 22:08:04 +02:00
|
|
|
static uint16_t psock_send_eventhandler(FAR struct net_driver_s *dev,
|
2022-08-25 15:17:57 +02:00
|
|
|
FAR void *pvpriv, uint16_t flags)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2022-02-08 17:08:15 +01:00
|
|
|
FAR struct tcp_conn_s *conn = pvpriv;
|
2023-01-10 06:41:02 +01:00
|
|
|
#ifdef CONFIG_NET_TCP_SELECTIVE_ACK
|
|
|
|
struct tcp_ofoseg_s ofosegs[TCP_SACK_RANGES_MAX];
|
|
|
|
uint8_t nsacks = 0;
|
|
|
|
#endif
|
2022-06-16 08:55:36 +02:00
|
|
|
#ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
|
|
|
|
uint32_t rexmitno = 0;
|
|
|
|
#endif
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2022-01-17 22:08:54 +01:00
|
|
|
/* Get the TCP connection pointer reliably from
|
|
|
|
* the corresponding TCP socket.
|
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(conn != NULL);
|
|
|
|
|
|
|
|
/* The TCP socket is connected and, hence, should be bound to a device.
|
|
|
|
* Make sure that the polling device is the one that we are bound to.
|
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(conn->dev != NULL);
|
|
|
|
if (dev != conn->dev)
|
|
|
|
{
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
ninfo("flags: %04x\n", flags);
|
|
|
|
|
2022-01-18 20:51:03 +01:00
|
|
|
/* The TCP_ACKDATA, TCP_REXMIT and TCP_DISCONN_EVENTS flags are expected to
|
2022-02-11 15:59:26 +01:00
|
|
|
* appear here strictly one at a time, except for the FIN + ACK case.
|
2022-01-18 20:51:03 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT((flags & TCP_ACKDATA) == 0 ||
|
|
|
|
(flags & TCP_REXMIT) == 0);
|
|
|
|
DEBUGASSERT((flags & TCP_DISCONN_EVENTS) == 0 ||
|
|
|
|
(flags & TCP_REXMIT) == 0);
|
|
|
|
|
2018-08-21 18:33:16 +02:00
|
|
|
/* If this packet contains an acknowledgment, then update the count of
|
2014-01-14 00:11:01 +01:00
|
|
|
* acknowledged bytes.
|
|
|
|
*/
|
|
|
|
|
2022-02-11 15:59:26 +01:00
|
|
|
if ((flags & TCP_ACKDATA) != 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
2015-01-18 15:56:05 +01:00
|
|
|
FAR struct tcp_hdr_s *tcp;
|
2014-06-12 21:32:07 +02:00
|
|
|
FAR sq_entry_t *entry;
|
|
|
|
FAR sq_entry_t *next;
|
2014-01-14 00:11:01 +01:00
|
|
|
uint32_t ackno;
|
|
|
|
|
2015-01-18 15:56:05 +01:00
|
|
|
/* Get the offset address of the TCP header */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
2015-01-18 16:23:22 +01:00
|
|
|
if (conn->domain == PF_INET)
|
2015-01-18 15:56:05 +01:00
|
|
|
#endif
|
|
|
|
{
|
|
|
|
tcp = TCPIPv4BUF;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv4 */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_IPv6
|
|
|
|
#ifdef CONFIG_NET_IPv4
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
tcp = TCPIPv6BUF;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_IPv6 */
|
|
|
|
|
|
|
|
/* Get the ACK number from the TCP header */
|
|
|
|
|
|
|
|
ackno = tcp_getsequence(tcp->ackno);
|
2020-11-25 11:41:42 +01:00
|
|
|
ninfo("ACK: ackno=%" PRIu32 " flags=%04x\n", ackno, flags);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2014-06-23 00:47:32 +02:00
|
|
|
/* Look at every write buffer in the unacked_q. The unacked_q
|
2014-06-22 19:27:57 +02:00
|
|
|
* holds write buffers that have been entirely sent, but which
|
2014-06-23 00:47:32 +02:00
|
|
|
* have not yet been ACKed.
|
2014-06-22 19:27:57 +02:00
|
|
|
*/
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
|
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
uint32_t lastseq;
|
|
|
|
|
|
|
|
/* Check of some or all of this write buffer has been ACKed. */
|
|
|
|
|
|
|
|
next = sq_next(entry);
|
2015-10-08 23:10:04 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)entry;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* If the ACKed sequence number is greater than the start
|
|
|
|
* sequence number of the write buffer, then some or all of
|
|
|
|
* the write buffer has been ACKed.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-06-03 09:42:02 +02:00
|
|
|
if (TCP_SEQ_GT(ackno, TCP_WBSEQNO(wrb)))
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Get the sequence number at the end of the data */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
lastseq = TCP_WBSEQNO(wrb) + TCP_WBPKTLEN(wrb);
|
2020-11-25 11:41:42 +01:00
|
|
|
ninfo("ACK: wrb=%p seqno=%" PRIu32
|
|
|
|
" lastseq=%" PRIu32 " pktlen=%u ackno=%" PRIu32 "\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), lastseq, TCP_WBPKTLEN(wrb),
|
2018-01-22 18:11:23 +01:00
|
|
|
ackno);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Has the entire buffer been ACKed? */
|
|
|
|
|
2021-06-03 09:42:02 +02:00
|
|
|
if (TCP_SEQ_GTE(ackno, lastseq))
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p Freeing write buffer\n", wrb);
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Yes... Remove the write buffer from ACK waiting queue */
|
|
|
|
|
|
|
|
sq_rem(entry, &conn->unacked_q);
|
|
|
|
|
2020-08-14 07:44:16 +02:00
|
|
|
/* And return the write buffer to the pool of free
|
|
|
|
* buffers
|
|
|
|
*/
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
tcp_wrbuffer_release(wrb);
|
2019-07-01 20:25:32 +02:00
|
|
|
|
|
|
|
/* Notify any waiters if the write buffers have been
|
|
|
|
* drained.
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-06-22 23:27:01 +02:00
|
|
|
unsigned int trimlen;
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* No, then just trim the ACKed bytes from the beginning
|
|
|
|
* of the write buffer. This will free up some I/O buffers
|
|
|
|
* that can be reused while are still sending the last
|
|
|
|
* buffers in the chain.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-06-03 09:42:02 +02:00
|
|
|
trimlen = TCP_SEQ_SUB(ackno, TCP_WBSEQNO(wrb));
|
2018-01-23 02:33:14 +01:00
|
|
|
if (trimlen > TCP_WBSENT(wrb))
|
2014-06-23 17:40:17 +02:00
|
|
|
{
|
|
|
|
/* More data has been ACKed then we have sent? */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
trimlen = TCP_WBSENT(wrb);
|
2014-06-23 17:40:17 +02:00
|
|
|
}
|
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("ACK: wrb=%p trim %u bytes\n", wrb, trimlen);
|
2014-06-23 17:40:17 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBTRIM(wrb, trimlen);
|
2021-07-15 08:56:06 +02:00
|
|
|
TCP_WBSEQNO(wrb) += trimlen;
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBSENT(wrb) -= trimlen;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
/* Set the new sequence number for what remains */
|
|
|
|
|
2020-11-25 11:41:42 +01:00
|
|
|
ninfo("ACK: wrb=%p seqno=%" PRIu32 " pktlen=%u\n",
|
|
|
|
wrb, TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb));
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
}
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
else if (ackno == TCP_WBSEQNO(wrb))
|
|
|
|
{
|
2023-04-20 05:08:02 +02:00
|
|
|
#ifdef CONFIG_NET_TCP_CC_NEWRENO
|
|
|
|
if (conn->dupacks >= TCP_FAST_RETRANSMISSION_THRESH)
|
|
|
|
#else
|
2021-05-18 14:30:11 +02:00
|
|
|
/* Reset the duplicate ack counter */
|
|
|
|
|
|
|
|
if ((flags & TCP_NEWDATA) != 0)
|
|
|
|
{
|
|
|
|
TCP_WBNACK(wrb) = 0;
|
|
|
|
}
|
|
|
|
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
/* Duplicate ACK? Retransmit data if need */
|
|
|
|
|
2022-01-25 18:49:04 +01:00
|
|
|
if (++TCP_WBNACK(wrb) == TCP_FAST_RETRANSMISSION_THRESH)
|
2023-04-20 05:08:02 +02:00
|
|
|
#endif
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
{
|
2023-01-10 06:41:02 +01:00
|
|
|
#ifdef CONFIG_NET_TCP_SELECTIVE_ACK
|
|
|
|
if ((conn->flags & TCP_SACK) &&
|
|
|
|
(tcp->tcpoffset & 0xf0) > 0x50)
|
|
|
|
{
|
|
|
|
/* Parse s-ack from tcp options */
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
nsacks = parse_sack(conn, tcp, ofosegs);
|
2022-06-16 08:55:36 +02:00
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
flags |= TCP_REXMIT;
|
|
|
|
}
|
|
|
|
#ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
|
|
|
|
/* Do fast retransmit */
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
rexmitno = ackno;
|
2023-04-20 05:08:02 +02:00
|
|
|
#ifndef CONFIG_NET_TCP_CC_NEWRENO
|
2023-01-10 06:41:02 +01:00
|
|
|
/* Reset counter */
|
|
|
|
|
|
|
|
TCP_WBNACK(wrb) = 0;
|
2023-04-20 05:08:02 +02:00
|
|
|
#endif
|
|
|
|
#endif
|
2023-01-10 06:41:02 +01:00
|
|
|
}
|
2024-03-16 07:13:38 +01:00
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_CC_NEWRENO
|
|
|
|
conn->dupacks = 0;
|
|
|
|
#endif
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
}
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* A special case is the head of the write_q which may be partially
|
|
|
|
* sent and so can still have un-ACKed bytes that could get ACKed
|
|
|
|
* before the entire write buffer has even been sent.
|
|
|
|
*/
|
|
|
|
|
2015-10-08 23:10:04 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
2021-06-03 09:42:02 +02:00
|
|
|
if (wrb && TCP_WBSENT(wrb) > 0 && TCP_SEQ_GT(ackno, TCP_WBSEQNO(wrb)))
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
|
|
|
uint32_t nacked;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-23 02:53:18 +02:00
|
|
|
/* Number of bytes that were ACKed */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-06-03 09:42:02 +02:00
|
|
|
nacked = TCP_SEQ_SUB(ackno, TCP_WBSEQNO(wrb));
|
2018-01-23 02:33:14 +01:00
|
|
|
if (nacked > TCP_WBSENT(wrb))
|
2014-06-22 19:27:57 +02:00
|
|
|
{
|
2014-06-23 17:40:17 +02:00
|
|
|
/* More data has been ACKed then we have sent? ASSERT? */
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
nacked = TCP_WBSENT(wrb);
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2020-11-25 11:41:42 +01:00
|
|
|
ninfo("ACK: wrb=%p seqno=%" PRIu32
|
|
|
|
" nacked=%" PRIu32 " sent=%u ackno=%" PRIu32 "\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), nacked, TCP_WBSENT(wrb), ackno);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* Trim the ACKed bytes from the beginning of the write buffer. */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBTRIM(wrb, nacked);
|
2021-07-15 08:56:06 +02:00
|
|
|
TCP_WBSEQNO(wrb) += nacked;
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBSENT(wrb) -= nacked;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2020-11-25 11:41:42 +01:00
|
|
|
ninfo("ACK: wrb=%p seqno=%" PRIu32 " pktlen=%u sent=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb), TCP_WBSENT(wrb));
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-11 15:59:26 +01:00
|
|
|
/* Check for a loss of connection */
|
|
|
|
|
|
|
|
if ((flags & TCP_DISCONN_EVENTS) != 0)
|
|
|
|
{
|
|
|
|
ninfo("Lost connection: %04x\n", flags);
|
|
|
|
|
|
|
|
/* We could get here recursively through the callback actions of
|
|
|
|
* tcp_lost_connection(). So don't repeat that action if we have
|
|
|
|
* already been disconnected.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (_SS_ISCONNECTED(conn->sconn.s_flags))
|
|
|
|
{
|
|
|
|
/* Report not connected */
|
|
|
|
|
|
|
|
tcp_lost_connection(conn, conn->sndcb, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Free write buffers and terminate polling */
|
|
|
|
|
|
|
|
psock_lost_connection(conn, !!(flags & NETDEV_DOWN));
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2022-06-16 08:55:36 +02:00
|
|
|
#ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
|
|
|
|
if (rexmitno != 0)
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
{
|
2022-06-16 08:55:36 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
FAR sq_entry_t *entry;
|
|
|
|
FAR sq_entry_t *next;
|
|
|
|
size_t sndlen;
|
2023-05-29 23:26:19 +02:00
|
|
|
int ret;
|
2022-06-16 08:55:36 +02:00
|
|
|
|
|
|
|
/* According to RFC 6298 (5.4), retransmit the earliest segment
|
|
|
|
* that has not been acknowledged by the TCP receiver.
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
|
|
|
|
{
|
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)entry;
|
|
|
|
next = sq_next(entry);
|
|
|
|
|
|
|
|
if (rexmitno != TCP_WBSEQNO(wrb))
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Reconstruct the length of the earliest segment to be
|
|
|
|
* retransmitted.
|
|
|
|
*/
|
|
|
|
|
|
|
|
sndlen = TCP_WBPKTLEN(wrb);
|
|
|
|
|
|
|
|
if (sndlen > conn->mss)
|
|
|
|
{
|
|
|
|
sndlen = conn->mss;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* As we are retransmitting, the sequence number is expected
|
|
|
|
* already set for this write buffer.
|
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(TCP_WBSEQNO(wrb) != (unsigned)-1);
|
|
|
|
|
|
|
|
#ifdef NEED_IPDOMAIN_SUPPORT
|
|
|
|
/* If both IPv4 and IPv6 support are enabled, then we will need to
|
|
|
|
* select which one to use when generating the outgoing packet.
|
|
|
|
* If only one domain is selected, then the setup is already in
|
|
|
|
* place and we need do nothing.
|
|
|
|
*/
|
|
|
|
|
2023-01-29 06:11:57 +01:00
|
|
|
tcp_ip_select(conn);
|
2022-06-16 08:55:36 +02:00
|
|
|
#endif
|
|
|
|
/* Then set-up to send that amount of data. (this won't actually
|
|
|
|
* happen until the polling cycle completes).
|
|
|
|
*/
|
|
|
|
|
2022-08-30 16:08:05 +02:00
|
|
|
tcp_setsequence(conn->sndseq, TCP_WBSEQNO(wrb));
|
|
|
|
|
2024-05-09 09:55:24 +02:00
|
|
|
#ifdef CONFIG_NET_JUMBO_FRAME
|
|
|
|
if (sndlen <= conn->mss)
|
|
|
|
{
|
|
|
|
/* alloc iob */
|
|
|
|
|
|
|
|
netdev_iob_prepare_dynamic(dev, sndlen + tcpip_hdrsize(conn));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2023-05-29 23:26:19 +02:00
|
|
|
ret = devif_iob_send(dev, TCP_WBIOB(wrb), sndlen,
|
|
|
|
0, tcpip_hdrsize(conn));
|
|
|
|
if (ret <= 0)
|
2022-12-07 10:44:09 +01:00
|
|
|
{
|
|
|
|
return flags;
|
|
|
|
}
|
2022-06-16 08:55:36 +02:00
|
|
|
|
2023-04-20 05:08:02 +02:00
|
|
|
#ifdef CONFIG_NET_TCP_CC_NEWRENO
|
|
|
|
/* After Fast retransmitted, set ssthresh to the maximum of
|
|
|
|
* the unacked and the 2*SMSS, and enter to Fast Recovery.
|
|
|
|
* ssthresh = max (FlightSize / 2, 2*SMSS) referring to rfc5681
|
|
|
|
* cwnd=ssthresh + 3*SMSS referring to rfc5681
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (conn->flags & TCP_INFT)
|
|
|
|
{
|
|
|
|
tcp_cc_update(conn, NULL);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2022-06-16 08:55:36 +02:00
|
|
|
/* Reset the retransmission timer. */
|
|
|
|
|
|
|
|
tcp_update_retrantimer(conn, conn->rto);
|
|
|
|
|
|
|
|
/* Continue waiting */
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
}
|
2022-06-16 08:55:36 +02:00
|
|
|
#endif
|
|
|
|
|
2023-01-10 06:41:02 +01:00
|
|
|
#ifdef CONFIG_NET_TCP_SELECTIVE_ACK
|
|
|
|
|
|
|
|
/* Check if we are being asked to retransmit s-ack data */
|
|
|
|
|
|
|
|
if (nsacks > 0)
|
|
|
|
{
|
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
FAR sq_entry_t *entry;
|
|
|
|
FAR sq_entry_t *next;
|
|
|
|
uint32_t right;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Dump s-ack edge */
|
|
|
|
|
|
|
|
for (i = 0, right = 0; i < nsacks; i++)
|
|
|
|
{
|
|
|
|
ninfo("TCP SACK [%d]"
|
|
|
|
"[%" PRIu32 " : %" PRIu32 " : %" PRIu32 "]\n",
|
|
|
|
i, ofosegs[i].left, ofosegs[i].right,
|
|
|
|
TCP_SEQ_SUB(ofosegs[i].right, ofosegs[i].left));
|
|
|
|
}
|
|
|
|
|
|
|
|
for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
|
|
|
|
{
|
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)entry;
|
|
|
|
next = sq_next(entry);
|
|
|
|
|
|
|
|
for (i = 0, right = 0; i < nsacks; i++)
|
|
|
|
{
|
|
|
|
/* Wrb seqno out of s-ack edge ? do retransmit ! */
|
|
|
|
|
|
|
|
if (TCP_SEQ_LT(TCP_WBSEQNO(wrb), ofosegs[i].left) &&
|
|
|
|
TCP_SEQ_GTE(TCP_WBSEQNO(wrb), right))
|
|
|
|
{
|
|
|
|
ninfo("TCP REXMIT "
|
|
|
|
"[%" PRIu32 " : %" PRIu32 " : %d]\n",
|
|
|
|
TCP_WBSEQNO(wrb),
|
|
|
|
TCP_SEQ_ADD(TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb)),
|
|
|
|
TCP_WBPKTLEN(wrb));
|
|
|
|
sq_rem(entry, &conn->unacked_q);
|
|
|
|
retransmit_segment(conn, (FAR void *)entry);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
right = ofosegs[i].right;
|
|
|
|
}
|
|
|
|
}
|
2023-04-20 05:08:02 +02:00
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_CC_NEWRENO
|
|
|
|
/* After Fast retransmitted, set ssthresh to the maximum of
|
|
|
|
* the unacked and the 2*SMSS, and enter to Fast Recovery.
|
|
|
|
* ssthresh = max (FlightSize / 2, 2*SMSS) referring to rfc5681
|
|
|
|
* cwnd=ssthresh + 3*SMSS referring to rfc5681
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (conn->flags & TCP_INFT)
|
|
|
|
{
|
|
|
|
tcp_cc_update(conn, NULL);
|
|
|
|
}
|
|
|
|
#endif
|
2023-01-10 06:41:02 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
|
2022-06-16 08:55:36 +02:00
|
|
|
/* Check if we are being asked to retransmit data */
|
net/tcp: implement the fast retransmit
RFC2001: TCP Slow Start, Congestion Avoidance, Fast Retransmit,
and Fast Recovery Algorithms
...
3. Fast Retransmit
Modifications to the congestion avoidance algorithm were proposed in
1990 [3]. Before describing the change, realize that TCP may
generate an immediate acknowledgment (a duplicate ACK) when an out-
of-order segment is received (Section 4.2.2.21 of [1], with a note
that one reason for doing so was for the experimental fast-
retransmit algorithm). This duplicate ACK should not be delayed.
The purpose of this duplicate ACK is to let the other end know that a
segment was received out of order, and to tell it what sequence
number is expected.
Since TCP does not know whether a duplicate ACK is caused by a lost
segment or just a reordering of segments, it waits for a small number
of duplicate ACKs to be received. It is assumed that if there is
just a reordering of the segments, there will be only one or two
duplicate ACKs before the reordered segment is processed, which will
then generate a new ACK. If three or more duplicate ACKs are
received in a row, it is a strong indication that a segment has been
lost. TCP then performs a retransmission of what appears to be the
missing segment, without waiting for a retransmission timer to
expire.
Change-Id: Ie2cbcecab507c3d831f74390a6a85e0c5c8e0652
Signed-off-by: chao.an <anchao@xiaomi.com>
2020-10-22 09:02:52 +02:00
|
|
|
|
2022-06-16 08:55:36 +02:00
|
|
|
if ((flags & TCP_REXMIT) != 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 19:27:57 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
FAR sq_entry_t *entry;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("REXMIT: %04x\n", flags);
|
2014-06-22 19:27:57 +02:00
|
|
|
|
|
|
|
/* If there is a partially sent write buffer at the head of the
|
2014-06-22 23:27:01 +02:00
|
|
|
* write_q? Has anything been sent from that write buffer?
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
2018-01-23 02:33:14 +01:00
|
|
|
ninfo("REXMIT: wrb=%p sent=%u\n", wrb, wrb ? TCP_WBSENT(wrb) : 0);
|
2014-06-23 00:47:32 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (wrb != NULL && TCP_WBSENT(wrb) > 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-06-22 23:27:01 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *tmp;
|
|
|
|
|
|
|
|
/* Increment the retransmit count on this write buffer. */
|
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
if (++TCP_WBNRTX(wrb) >= TCP_MAXRTX)
|
2014-06-22 23:27:01 +02:00
|
|
|
{
|
2016-06-20 17:37:08 +02:00
|
|
|
nwarn("WARNING: Expiring wrb=%p nrtx=%u\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBNRTX(wrb));
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* The maximum retry count as been exhausted. Remove the write
|
|
|
|
* buffer at the head of the queue.
|
|
|
|
*/
|
|
|
|
|
|
|
|
tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
|
|
|
|
DEBUGASSERT(tmp == wrb);
|
2014-06-24 23:38:00 +02:00
|
|
|
UNUSED(tmp);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
|
|
|
/* And return the write buffer to the free list */
|
|
|
|
|
|
|
|
tcp_wrbuffer_release(wrb);
|
|
|
|
|
2019-07-01 20:25:32 +02:00
|
|
|
/* Notify any waiters if the write buffers have been
|
|
|
|
* drained.
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_writebuffer_notify(conn);
|
|
|
|
|
2014-06-22 23:27:01 +02:00
|
|
|
/* NOTE expired is different from un-ACKed, it is designed to
|
|
|
|
* represent the number of segments that have been sent,
|
|
|
|
* retransmitted, and un-ACKed, if expired is not zero, the
|
|
|
|
* connection will be closed.
|
|
|
|
*
|
2014-07-07 00:10:26 +02:00
|
|
|
* field expired can only be updated at TCP_ESTABLISHED state
|
2014-06-22 23:27:01 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
conn->expired++;
|
|
|
|
}
|
2024-05-21 04:51:06 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
uint16_t sent;
|
|
|
|
|
|
|
|
sent = TCP_WBSENT(wrb);
|
|
|
|
ninfo("REXMIT: wrb=%p sent=%u, "
|
|
|
|
"conn tx_unacked=%" PRId32 " sent=%" PRId32 "\n",
|
|
|
|
wrb, TCP_WBSENT(wrb), conn->tx_unacked, conn->sent);
|
|
|
|
|
|
|
|
/* Yes.. Reset the number of bytes sent sent from
|
|
|
|
* the write buffer
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (conn->tx_unacked > sent)
|
|
|
|
{
|
|
|
|
conn->tx_unacked -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->tx_unacked = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->sent > sent)
|
|
|
|
{
|
|
|
|
conn->sent -= sent;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->sent = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
TCP_WBSENT(wrb) = 0;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
}
|
2015-10-04 23:04:00 +02:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
/* Move all segments that have been sent but not ACKed to the write
|
|
|
|
* queue again note, the un-ACKed segments are put at the head of the
|
|
|
|
* write_q so they can be resent as soon as possible.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
while ((entry = sq_remlast(&conn->unacked_q)) != NULL)
|
|
|
|
{
|
2023-01-10 06:41:02 +01:00
|
|
|
retransmit_segment(conn, (FAR void *)entry);
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-19 15:45:46 +02:00
|
|
|
#if CONFIG_NET_SEND_BUFSIZE > 0
|
|
|
|
/* Notify the send buffer available if wrbbuffer drained */
|
|
|
|
|
|
|
|
tcp_sendbuffer_notify(conn);
|
|
|
|
#endif /* CONFIG_NET_SEND_BUFSIZE */
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/* Check if the outgoing packet is available (it may have been claimed
|
2017-09-02 18:27:03 +02:00
|
|
|
* by a sendto event serving a different thread).
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (dev->d_sndlen > 0)
|
|
|
|
{
|
|
|
|
/* Another thread has beat us sending data, wait for the next poll */
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We get here if (1) not all of the data has been ACKed, (2) we have been
|
|
|
|
* asked to retransmit data, (3) the connection is still healthy, and (4)
|
|
|
|
* the outgoing packet is available for our use. In this case, we are
|
|
|
|
* now free to send more data to receiver -- UNLESS the buffer contains
|
2019-02-14 15:39:16 +01:00
|
|
|
* unprocessed incoming data or window size is zero. In that event, we
|
|
|
|
* will have to wait for the next polling cycle.
|
2014-01-14 00:11:01 +01:00
|
|
|
*/
|
|
|
|
|
2014-07-07 00:10:26 +02:00
|
|
|
if ((conn->tcpstateflags & TCP_ESTABLISHED) &&
|
2023-01-03 06:43:00 +01:00
|
|
|
((flags & TCP_NEWDATA) == 0) &&
|
|
|
|
(flags & (TCP_POLL | TCP_REXMIT | TCP_ACKDATA)) &&
|
2019-02-14 15:39:16 +01:00
|
|
|
!(sq_empty(&conn->write_q)) &&
|
2020-12-07 06:51:08 +01:00
|
|
|
conn->snd_wnd > 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2019-09-18 20:33:41 +02:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
|
|
|
uint32_t predicted_seqno;
|
2021-07-19 04:24:06 +02:00
|
|
|
uint32_t seq;
|
|
|
|
uint32_t snd_wnd_edge;
|
2019-09-18 20:33:41 +02:00
|
|
|
size_t sndlen;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Peek at the head of the write queue (but don't remove anything
|
|
|
|
* from the write queue yet). We know from the above test that
|
|
|
|
* the write_q is not empty.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
|
|
|
|
DEBUGASSERT(wrb);
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
/* Set the sequence number for this segment. If we are
|
|
|
|
* retransmitting, then the sequence number will already
|
|
|
|
* be set for this write buffer.
|
|
|
|
*/
|
2015-10-04 23:04:00 +02:00
|
|
|
|
2019-09-18 20:33:41 +02:00
|
|
|
if (TCP_WBSEQNO(wrb) == (unsigned)-1)
|
|
|
|
{
|
|
|
|
TCP_WBSEQNO(wrb) = conn->isn + conn->sent;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Get the amount of data that we can send in the next packet.
|
|
|
|
* We will send either the remaining data in the buffer I/O
|
|
|
|
* buffer chain, or as much as will fit given the MSS and current
|
|
|
|
* window size.
|
2019-09-18 20:33:41 +02:00
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
seq = TCP_WBSEQNO(wrb) + TCP_WBSENT(wrb);
|
2023-04-20 05:08:02 +02:00
|
|
|
|
|
|
|
#ifdef CONFIG_NET_TCP_CC_NEWRENO
|
|
|
|
snd_wnd_edge = conn->snd_wl2 + MIN(conn->snd_wnd, conn->cwnd);
|
|
|
|
#else
|
2021-07-19 04:24:06 +02:00
|
|
|
snd_wnd_edge = conn->snd_wl2 + conn->snd_wnd;
|
2023-04-20 05:08:02 +02:00
|
|
|
#endif
|
2021-07-19 04:24:06 +02:00
|
|
|
if (TCP_SEQ_LT(seq, snd_wnd_edge))
|
|
|
|
{
|
|
|
|
uint32_t remaining_snd_wnd;
|
2023-05-29 23:26:19 +02:00
|
|
|
int ret;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
sndlen = TCP_WBPKTLEN(wrb) - TCP_WBSENT(wrb);
|
|
|
|
if (sndlen > conn->mss)
|
|
|
|
{
|
|
|
|
sndlen = conn->mss;
|
|
|
|
}
|
2015-01-17 14:42:09 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
remaining_snd_wnd = TCP_SEQ_SUB(snd_wnd_edge, seq);
|
|
|
|
if (sndlen > remaining_snd_wnd)
|
|
|
|
{
|
|
|
|
sndlen = remaining_snd_wnd;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2023-12-15 19:24:23 +01:00
|
|
|
/* Normally CONFIG_IOB_THROTTLE should ensure that we have enough
|
|
|
|
* iob space available for copying the data to a packet buffer.
|
|
|
|
* If it doesn't, a deadlock could happen where the iobs are used
|
|
|
|
* by queued TX data and cannot be released because a full-sized
|
|
|
|
* packet gets refused by devif_iob_send(). Detect this situation
|
|
|
|
* and send tiny TCP packets until we manage to free up some space.
|
|
|
|
* We do not want to exhaust all of the remaining iobs by sending
|
|
|
|
* the maximum size packet that would fit.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (sndlen > iob_navail(false) * CONFIG_IOB_BUFSIZE)
|
|
|
|
{
|
|
|
|
nwarn("Running low on iobs, limiting packet size\n");
|
|
|
|
sndlen = CONFIG_IOB_BUFSIZE;
|
|
|
|
}
|
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
ninfo("SEND: wrb=%p seq=%" PRIu32 " pktlen=%u sent=%u sndlen=%zu "
|
2023-12-19 09:55:22 +01:00
|
|
|
"mss=%u snd_wnd=%" PRIu32 " seq=%" PRIu32
|
2021-07-19 04:24:06 +02:00
|
|
|
" remaining_snd_wnd=%" PRIu32 "\n",
|
|
|
|
wrb, TCP_WBSEQNO(wrb), TCP_WBPKTLEN(wrb), TCP_WBSENT(wrb),
|
|
|
|
sndlen, conn->mss,
|
2023-12-19 09:55:22 +01:00
|
|
|
(uint32_t)conn->snd_wnd, seq, remaining_snd_wnd);
|
2021-07-19 04:24:06 +02:00
|
|
|
|
|
|
|
/* The TCP stack updates sndseq on receipt of ACK *before*
|
|
|
|
* this function is called. In that case sndseq will point
|
|
|
|
* to the next unacknowledged byte (which might have already
|
|
|
|
* been sent). We will overwrite the value of sndseq here
|
|
|
|
* before the packet is sent.
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
tcp_setsequence(conn->sndseq, TCP_WBSEQNO(wrb) + TCP_WBSENT(wrb));
|
While working with version 7.10 I discovered a problem in TCP stack that could be observed on high network load. Generally speaking, the problem is that RST flag is set in unnecessary case, in which between loss of some TCP packet and its proper retransmission, another packets had been successfully sent. The scenario is as follows: NuttX did not receive ACK for some sent packet, so it has been probably lost somewhere. But before its retransmission starts, NuttX is correctly issuing next TCP packets, with sequence numbers increasing properly. When the retransmission of previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which acknowledges also the packets sent in the meantime (i.e. between unsuccessful sending of lost packet and its proper retransmission). However, variable unackseq is still set to conn->isn + conn->sent, which is truth only if no further packets transmission occurred in the meantime. Because of incorrect (in such specific case) unackseq value, few lines further condition if (ackseq <= unackseq)is not met, and, as a result, we are going to reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2023-01-29 06:11:57 +01:00
|
|
|
#ifdef NEED_IPDOMAIN_SUPPORT
|
2021-07-19 04:24:06 +02:00
|
|
|
/* If both IPv4 and IPv6 support are enabled, then we will need to
|
|
|
|
* select which one to use when generating the outgoing packet.
|
|
|
|
* If only one domain is selected, then the setup is already in
|
|
|
|
* place and we need do nothing.
|
|
|
|
*/
|
While working with version 7.10 I discovered a problem in TCP stack that could be observed on high network load. Generally speaking, the problem is that RST flag is set in unnecessary case, in which between loss of some TCP packet and its proper retransmission, another packets had been successfully sent. The scenario is as follows: NuttX did not receive ACK for some sent packet, so it has been probably lost somewhere. But before its retransmission starts, NuttX is correctly issuing next TCP packets, with sequence numbers increasing properly. When the retransmission of previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which acknowledges also the packets sent in the meantime (i.e. between unsuccessful sending of lost packet and its proper retransmission). However, variable unackseq is still set to conn->isn + conn->sent, which is truth only if no further packets transmission occurred in the meantime. Because of incorrect (in such specific case) unackseq value, few lines further condition if (ackseq <= unackseq)is not met, and, as a result, we are going to reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2023-01-29 06:11:57 +01:00
|
|
|
tcp_ip_select(conn);
|
|
|
|
#endif
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Then set-up to send that amount of data with the offset
|
|
|
|
* corresponding to the amount of data already sent. (this
|
|
|
|
* won't actually happen until the polling cycle completes).
|
|
|
|
*/
|
While working with version 7.10 I discovered a problem in TCP stack that could be observed on high network load. Generally speaking, the problem is that RST flag is set in unnecessary case, in which between loss of some TCP packet and its proper retransmission, another packets had been successfully sent. The scenario is as follows: NuttX did not receive ACK for some sent packet, so it has been probably lost somewhere. But before its retransmission starts, NuttX is correctly issuing next TCP packets, with sequence numbers increasing properly. When the retransmission of previously lost packet finally succeeds, tcp_input receives the accumulated ACK value, which acknowledges also the packets sent in the meantime (i.e. between unsuccessful sending of lost packet and its proper retransmission). However, variable unackseq is still set to conn->isn + conn->sent, which is truth only if no further packets transmission occurred in the meantime. Because of incorrect (in such specific case) unackseq value, few lines further condition if (ackseq <= unackseq)is not met, and, as a result, we are going to reset label.
2016-06-20 14:55:29 +02:00
|
|
|
|
2024-05-09 09:55:24 +02:00
|
|
|
#ifdef CONFIG_NET_JUMBO_FRAME
|
|
|
|
if (sndlen <= conn->mss)
|
|
|
|
{
|
|
|
|
/* alloc iob */
|
|
|
|
|
|
|
|
netdev_iob_prepare_dynamic(dev, sndlen + tcpip_hdrsize(conn));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2023-05-29 23:26:19 +02:00
|
|
|
ret = devif_iob_send(dev, TCP_WBIOB(wrb), sndlen,
|
|
|
|
TCP_WBSENT(wrb), tcpip_hdrsize(conn));
|
|
|
|
if (ret <= 0)
|
2022-12-07 10:44:09 +01:00
|
|
|
{
|
|
|
|
return flags;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Remember how much data we send out now so that we know
|
|
|
|
* when everything has been acknowledged. Just increment
|
|
|
|
* the amount of data sent. This will be needed in sequence
|
|
|
|
* number calculations.
|
|
|
|
*/
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
conn->tx_unacked += sndlen;
|
|
|
|
conn->sent += sndlen;
|
2014-06-23 00:25:26 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Below prediction will become true,
|
|
|
|
* unless retransmission occurrence
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
predicted_seqno = tcp_getsequence(conn->sndseq) + sndlen;
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
if (TCP_SEQ_GT(predicted_seqno, conn->sndseq_max))
|
|
|
|
{
|
|
|
|
conn->sndseq_max = predicted_seqno;
|
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
ninfo("SEND: wrb=%p nrtx=%u tx_unacked=%" PRIu32
|
|
|
|
" sent=%" PRIu32 "\n",
|
|
|
|
wrb, TCP_WBNRTX(wrb), conn->tx_unacked, conn->sent);
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Increment the count of bytes sent from this write buffer */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
TCP_WBSENT(wrb) += sndlen;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
ninfo("SEND: wrb=%p sent=%u pktlen=%u\n",
|
|
|
|
wrb, TCP_WBSENT(wrb), TCP_WBPKTLEN(wrb));
|
2014-06-22 23:27:01 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
/* Remove the write buffer from the write queue if the
|
|
|
|
* last of the data has been sent from the buffer.
|
2014-06-22 23:27:01 +02:00
|
|
|
*/
|
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
DEBUGASSERT(TCP_WBSENT(wrb) <= TCP_WBPKTLEN(wrb));
|
|
|
|
if (TCP_WBSENT(wrb) >= TCP_WBPKTLEN(wrb))
|
|
|
|
{
|
|
|
|
FAR struct tcp_wrbuffer_s *tmp;
|
2019-09-18 20:33:41 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
ninfo("SEND: wrb=%p Move to unacked_q\n", wrb);
|
2019-09-18 20:33:41 +02:00
|
|
|
|
2021-07-19 04:24:06 +02:00
|
|
|
tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
|
|
|
|
DEBUGASSERT(tmp == wrb);
|
|
|
|
UNUSED(tmp);
|
|
|
|
|
|
|
|
/* Put the I/O buffer chain in the un-acked queue; the
|
|
|
|
* segment is waiting for ACK again
|
|
|
|
*/
|
|
|
|
|
|
|
|
psock_insert_segment(wrb, &conn->unacked_q);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only one data can be sent by low level driver at once,
|
|
|
|
* tell the caller stop polling the other connection.
|
|
|
|
*/
|
|
|
|
|
|
|
|
flags &= ~TCP_POLL;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
2023-06-20 13:43:37 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
tcp_set_zero_probe(conn, flags);
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
|
|
|
/* Continue waiting */
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Name: tcp_max_wrb_size
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Calculate the desired amount of data for a single
|
|
|
|
* struct tcp_wrbuffer_s.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
static uint32_t tcp_max_wrb_size(FAR struct tcp_conn_s *conn)
|
|
|
|
{
|
|
|
|
const uint32_t mss = conn->mss;
|
|
|
|
uint32_t size;
|
|
|
|
|
|
|
|
/* a few segments should be fine */
|
|
|
|
|
|
|
|
size = 4 * mss;
|
|
|
|
|
|
|
|
/* but it should not hog too many IOB buffers */
|
|
|
|
|
|
|
|
if (size > CONFIG_IOB_NBUFFERS * CONFIG_IOB_BUFSIZE / 2)
|
|
|
|
{
|
|
|
|
size = CONFIG_IOB_NBUFFERS * CONFIG_IOB_BUFSIZE / 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* also, we prefer a multiple of mss */
|
|
|
|
|
|
|
|
if (size > mss)
|
|
|
|
{
|
|
|
|
const uint32_t odd = size % mss;
|
|
|
|
size -= odd;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUGASSERT(size > 0);
|
|
|
|
ninfo("tcp_max_wrb_size = %" PRIu32 " for conn %p\n", size, conn);
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2022-02-17 07:10:03 +01:00
|
|
|
/****************************************************************************
 * Name: tcp_send_gettimeout
 *
 * Description:
 *   Calculate the send timeout
 *
 ****************************************************************************/

static unsigned int tcp_send_gettimeout(clock_t start, unsigned int timeout)
{
  unsigned int elapsed;

  /* UINT_MAX means "wait forever"; pass it through unchanged. */

  if (timeout == UINT_MAX)
    {
      return timeout;
    }

  /* Otherwise return the time still remaining, clamped at zero once
   * the deadline has passed.
   */

  elapsed = TICK2MSEC(clock_systime_ticks() - start);
  if (elapsed >= timeout)
    {
      return 0;
    }

  return timeout - elapsed;
}
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Public Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_tcp_send
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2014-06-25 18:34:52 +02:00
|
|
|
* psock_tcp_send() call may be used only when the TCP socket is in a
|
2014-06-24 16:03:44 +02:00
|
|
|
* connected state (so that the intended recipient is known).
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2014-01-14 00:11:01 +01:00
|
|
|
* psock An instance of the internal socket structure.
|
|
|
|
* buf Data to send
|
|
|
|
* len Length of data to send
|
2020-02-19 19:21:28 +01:00
|
|
|
* flags Send flags
|
2014-01-14 00:11:01 +01:00
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* On success, returns the number of characters sent. On error,
|
|
|
|
* -1 is returned, and errno is set appropriately:
|
|
|
|
*
|
|
|
|
* EAGAIN or EWOULDBLOCK
|
|
|
|
* The socket is marked non-blocking and the requested operation
|
|
|
|
* would block.
|
|
|
|
* EBADF
|
|
|
|
* An invalid descriptor was specified.
|
|
|
|
* ECONNRESET
|
|
|
|
* Connection reset by peer.
|
|
|
|
* EDESTADDRREQ
|
|
|
|
* The socket is not connection-mode, and no peer address is set.
|
|
|
|
* EFAULT
|
|
|
|
* An invalid user space address was specified for a parameter.
|
|
|
|
* EINTR
|
|
|
|
* A signal occurred before any data was transmitted.
|
|
|
|
* EINVAL
|
|
|
|
* Invalid argument passed.
|
|
|
|
* EISCONN
|
|
|
|
* The connection-mode socket was connected already but a recipient
|
|
|
|
* was specified. (Now either this error is returned, or the recipient
|
|
|
|
* specification is ignored.)
|
|
|
|
* EMSGSIZE
|
|
|
|
* The socket type requires that message be sent atomically, and the
|
|
|
|
* size of the message to be sent made this impossible.
|
|
|
|
* ENOBUFS
|
|
|
|
* The output queue for a network interface was full. This generally
|
|
|
|
* indicates that the interface has stopped sending, but may be
|
|
|
|
* caused by transient congestion.
|
|
|
|
* ENOMEM
|
|
|
|
* No memory available.
|
|
|
|
* ENOTCONN
|
|
|
|
* The socket is not connected, and no target has been given.
|
|
|
|
* ENOTSOCK
|
|
|
|
* The argument s is not a socket.
|
|
|
|
* EPIPE
|
|
|
|
* The local end has been shut down on a connection oriented socket.
|
|
|
|
* In this case the process will also receive a SIGPIPE unless
|
|
|
|
* MSG_NOSIGNAL is set.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
ssize_t psock_tcp_send(FAR struct socket *psock, FAR const void *buf,
|
2020-02-19 19:21:28 +01:00
|
|
|
size_t len, int flags)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2014-08-19 00:22:14 +02:00
|
|
|
FAR struct tcp_conn_s *conn;
|
2015-01-28 18:56:11 +01:00
|
|
|
FAR struct tcp_wrbuffer_s *wrb;
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
FAR const uint8_t *cp;
|
2022-02-17 07:10:03 +01:00
|
|
|
unsigned int timeout;
|
2014-06-22 19:27:57 +02:00
|
|
|
ssize_t result = 0;
|
2020-02-19 19:21:28 +01:00
|
|
|
bool nonblock;
|
2014-01-14 00:11:01 +01:00
|
|
|
int ret = OK;
|
2022-02-17 07:10:03 +01:00
|
|
|
clock_t start;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-12-15 14:26:18 +01:00
|
|
|
if (psock == NULL || psock->s_type != SOCK_STREAM ||
|
|
|
|
psock->s_conn == NULL)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Invalid socket\n");
|
2018-02-13 15:02:42 +01:00
|
|
|
ret = -EBADF;
|
2014-01-14 00:11:01 +01:00
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2023-05-08 16:11:04 +02:00
|
|
|
conn = psock->s_conn;
|
2022-02-08 06:18:01 +01:00
|
|
|
|
|
|
|
if (!_SS_ISCONNECTED(conn->sconn.s_flags))
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Not connected\n");
|
2018-02-13 15:02:42 +01:00
|
|
|
ret = -ENOTCONN;
|
2014-01-14 00:11:01 +01:00
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2015-02-02 20:44:31 +01:00
|
|
|
/* Make sure that we have the IP address mapping */
|
2014-08-19 00:22:14 +02:00
|
|
|
|
2015-02-02 20:44:31 +01:00
|
|
|
#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
|
2014-08-19 00:22:14 +02:00
|
|
|
#ifdef CONFIG_NET_ARP_SEND
|
2015-02-02 20:44:31 +01:00
|
|
|
if (psock->s_domain == PF_INET)
|
|
|
|
{
|
|
|
|
/* Make sure that the IP address mapping is in the ARP table */
|
|
|
|
|
|
|
|
ret = arp_send(conn->u.ipv4.raddr);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_ARP_SEND */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
|
2024-01-05 08:35:06 +01:00
|
|
|
if (psock->s_domain == PF_INET6)
|
2015-02-02 20:44:31 +01:00
|
|
|
{
|
|
|
|
/* Make sure that the IP address mapping is in the Neighbor Table */
|
|
|
|
|
2023-08-10 05:43:27 +02:00
|
|
|
ret = icmpv6_neighbor(NULL, conn->u.ipv6.raddr);
|
2015-02-02 20:44:31 +01:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_ICMPv6_NEIGHBOR */
|
|
|
|
|
|
|
|
/* Did we successfully get the address mapping? */
|
|
|
|
|
2014-08-19 00:22:14 +02:00
|
|
|
if (ret < 0)
|
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Not reachable\n");
|
2018-02-13 15:02:42 +01:00
|
|
|
ret = -ENETUNREACH;
|
2014-08-19 00:22:14 +02:00
|
|
|
goto errout;
|
|
|
|
}
|
2015-02-02 20:44:31 +01:00
|
|
|
#endif /* CONFIG_NET_ARP_SEND || CONFIG_NET_ICMPv6_NEIGHBOR */
|
2014-08-19 00:22:14 +02:00
|
|
|
|
2022-02-08 06:18:01 +01:00
|
|
|
nonblock = _SS_ISNONBLOCK(conn->sconn.s_flags) ||
|
|
|
|
(flags & MSG_DONTWAIT) != 0;
|
2022-02-17 07:10:03 +01:00
|
|
|
start = clock_systime_ticks();
|
|
|
|
timeout = _SO_TIMEOUT(conn->sconn.s_sndtimeo);
|
2020-02-19 19:21:28 +01:00
|
|
|
|
2014-06-23 02:53:18 +02:00
|
|
|
/* Dump the incoming buffer */
|
|
|
|
|
2014-06-25 02:12:49 +02:00
|
|
|
BUF_DUMP("psock_tcp_send", buf, len);
|
2014-06-23 02:53:18 +02:00
|
|
|
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
cp = buf;
|
|
|
|
while (len > 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
uint32_t max_wrb_size;
|
|
|
|
unsigned int off;
|
|
|
|
size_t chunk_len = len;
|
|
|
|
ssize_t chunk_result;
|
2015-01-28 18:56:11 +01:00
|
|
|
|
2016-12-03 23:28:19 +01:00
|
|
|
net_lock();
|
2015-01-28 18:56:11 +01:00
|
|
|
|
2021-12-15 14:26:18 +01:00
|
|
|
/* Now that we have the network locked, we need to check the connection
|
|
|
|
* state again to ensure the connection is still valid.
|
|
|
|
*/
|
|
|
|
|
2022-02-08 06:18:01 +01:00
|
|
|
if (!_SS_ISCONNECTED(conn->sconn.s_flags))
|
2021-12-15 14:26:18 +01:00
|
|
|
{
|
|
|
|
nerr("ERROR: No longer connected\n");
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto errout_with_lock;
|
|
|
|
}
|
|
|
|
|
2014-06-19 16:31:50 +02:00
|
|
|
/* Allocate resources to receive a callback */
|
|
|
|
|
2022-02-07 04:03:03 +01:00
|
|
|
if (conn->sndcb == NULL)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2022-02-07 04:03:03 +01:00
|
|
|
conn->sndcb = tcp_callback_alloc(conn);
|
2022-01-29 19:13:22 +01:00
|
|
|
|
2022-10-18 16:52:58 +02:00
|
|
|
/* Test if the callback has been allocated */
|
2022-01-29 19:13:22 +01:00
|
|
|
|
2022-10-18 16:52:58 +02:00
|
|
|
if (conn->sndcb == NULL)
|
|
|
|
{
|
|
|
|
/* A buffer allocation error occurred */
|
2022-01-29 19:13:22 +01:00
|
|
|
|
2022-10-18 16:52:58 +02:00
|
|
|
nerr("ERROR: Failed to allocate callback\n");
|
|
|
|
ret = nonblock ? -EAGAIN : -ENOMEM;
|
|
|
|
goto errout_with_lock;
|
2022-01-29 19:13:22 +01:00
|
|
|
}
|
2014-06-19 16:31:50 +02:00
|
|
|
}
|
2014-06-22 19:27:57 +02:00
|
|
|
|
2015-01-28 18:56:11 +01:00
|
|
|
/* Set up the callback in the connection */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2022-02-07 04:03:03 +01:00
|
|
|
conn->sndcb->flags = (TCP_ACKDATA | TCP_REXMIT | TCP_POLL |
|
2015-05-30 17:12:27 +02:00
|
|
|
TCP_DISCONN_EVENTS);
|
2022-02-08 17:08:15 +01:00
|
|
|
conn->sndcb->priv = (FAR void *)conn;
|
2022-02-07 04:03:03 +01:00
|
|
|
conn->sndcb->event = psock_send_eventhandler;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-19 15:45:46 +02:00
|
|
|
#if CONFIG_NET_SEND_BUFSIZE > 0
|
|
|
|
/* If the send buffer size exceeds the send limit,
|
|
|
|
* wait for the write buffer to be released
|
|
|
|
*/
|
|
|
|
|
2022-03-31 05:40:08 +02:00
|
|
|
while (tcp_wrbuffer_inqueue_size(conn) >= conn->snd_bufs)
|
2021-07-19 15:45:46 +02:00
|
|
|
{
|
|
|
|
if (nonblock)
|
|
|
|
{
|
|
|
|
ret = -EAGAIN;
|
|
|
|
goto errout_with_lock;
|
|
|
|
}
|
|
|
|
|
2023-01-13 07:51:38 +01:00
|
|
|
ret = net_sem_timedwait_uninterruptible(&conn->snd_sem,
|
2022-02-17 07:10:03 +01:00
|
|
|
tcp_send_gettimeout(start, timeout));
|
|
|
|
if (ret < 0)
|
|
|
|
{
|
2022-06-27 07:15:13 +02:00
|
|
|
if (ret == -ETIMEDOUT)
|
|
|
|
{
|
|
|
|
ret = -EAGAIN;
|
|
|
|
}
|
|
|
|
|
2022-02-17 07:10:03 +01:00
|
|
|
goto errout_with_lock;
|
|
|
|
}
|
2021-07-19 15:45:46 +02:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_SEND_BUFSIZE */
|
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
while (true)
|
|
|
|
{
|
|
|
|
struct iob_s *iob;
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
/* Allocate a write buffer. Careful, the network will be
|
|
|
|
* momentarily unlocked here.
|
|
|
|
*/
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
/* Try to coalesce into the last wrb.
|
|
|
|
*
|
|
|
|
* But only when it might yield larger segments.
|
|
|
|
* (REVISIT: It might make sense to lift this condition.
|
|
|
|
* IOB boundaries and segment boundaries usually do not match.
|
|
|
|
* It makes sense to save the number of IOBs.)
|
|
|
|
*
|
|
|
|
* Also, for simplicity, do it only when we haven't sent anything
|
|
|
|
           * from the wrb yet.
|
|
|
|
*/
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
max_wrb_size = tcp_max_wrb_size(conn);
|
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_tail(&conn->write_q);
|
|
|
|
if (wrb != NULL && TCP_WBSENT(wrb) == 0 && TCP_WBNRTX(wrb) == 0 &&
|
|
|
|
TCP_WBPKTLEN(wrb) < max_wrb_size &&
|
|
|
|
(TCP_WBPKTLEN(wrb) % conn->mss) != 0)
|
|
|
|
{
|
|
|
|
wrb = (FAR struct tcp_wrbuffer_s *)sq_remlast(&conn->write_q);
|
|
|
|
ninfo("coalesce %zu bytes to wrb %p (%" PRIu16 ")\n", len, wrb,
|
|
|
|
TCP_WBPKTLEN(wrb));
|
|
|
|
DEBUGASSERT(TCP_WBPKTLEN(wrb) > 0);
|
|
|
|
}
|
|
|
|
else if (nonblock)
|
|
|
|
{
|
|
|
|
wrb = tcp_wrbuffer_tryalloc();
|
|
|
|
ninfo("new wrb %p (non blocking)\n", wrb);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-02-17 07:10:03 +01:00
|
|
|
wrb = tcp_wrbuffer_timedalloc(tcp_send_gettimeout(start,
|
|
|
|
timeout));
|
2021-07-20 02:10:43 +02:00
|
|
|
ninfo("new wrb %p\n", wrb);
|
|
|
|
}
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
if (wrb == NULL)
|
|
|
|
{
|
|
|
|
/* A buffer allocation error occurred */
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
nerr("ERROR: Failed to allocate write buffer\n");
|
2022-02-17 07:10:03 +01:00
|
|
|
|
2022-06-27 07:15:13 +02:00
|
|
|
if (nonblock || timeout != UINT_MAX)
|
2022-02-17 07:10:03 +01:00
|
|
|
{
|
|
|
|
ret = -EAGAIN;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ret = -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
goto errout_with_lock;
|
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
/* Initialize the write buffer */
|
2018-01-22 18:11:23 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
TCP_WBSEQNO(wrb) = (unsigned)-1;
|
|
|
|
TCP_WBNRTX(wrb) = 0;
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
off = TCP_WBPKTLEN(wrb);
|
|
|
|
if (off + chunk_len > max_wrb_size)
|
|
|
|
{
|
|
|
|
chunk_len = max_wrb_size - off;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy the user data into the write buffer. We cannot wait for
|
|
|
|
* buffer space.
|
|
|
|
*/
|
2018-01-22 18:11:23 +01:00
|
|
|
|
2018-04-20 15:37:51 +02:00
|
|
|
/* The return value from TCP_WBTRYCOPYIN is either OK or
|
|
|
|
* -ENOMEM if less than the entire data chunk could be allocated.
|
|
|
|
* If -ENOMEM is returned, check if at least a part of the data
|
|
|
|
* chunk was allocated. If more than zero bytes were sent
|
|
|
|
* we return that number and let the caller deal with sending the
|
|
|
|
* remaining data.
|
|
|
|
*/
|
|
|
|
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
chunk_result = TCP_WBTRYCOPYIN(wrb, cp, chunk_len, off);
|
|
|
|
if (chunk_result == -ENOMEM)
|
2018-04-20 15:37:51 +02:00
|
|
|
{
|
|
|
|
if (TCP_WBPKTLEN(wrb) > 0)
|
|
|
|
{
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
DEBUGASSERT(TCP_WBPKTLEN(wrb) >= off);
|
|
|
|
chunk_result = TCP_WBPKTLEN(wrb) - off;
|
2021-03-29 09:28:41 +02:00
|
|
|
ninfo("INFO: Allocated part of the requested data "
|
|
|
|
"%zd/%zu\n",
|
|
|
|
chunk_result, chunk_len);
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
|
|
|
/* Note: chunk_result here can be 0 if we are trying
|
|
|
|
* to coalesce into the existing buffer and we failed
|
|
|
|
* to add anything.
|
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(chunk_result >= 0);
|
2018-04-20 15:37:51 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2021-07-20 02:10:43 +02:00
|
|
|
chunk_result = 0;
|
2018-04-20 15:37:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
DEBUGASSERT(chunk_result == chunk_len);
|
2018-04-20 15:37:51 +02:00
|
|
|
}
|
2019-05-29 15:26:26 +02:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
if (chunk_result > 0)
|
|
|
|
{
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* release wrb */
|
2019-05-29 15:26:26 +02:00
|
|
|
|
2021-07-20 02:10:43 +02:00
|
|
|
if (TCP_WBPKTLEN(wrb) > 0)
|
2019-05-29 15:26:26 +02:00
|
|
|
{
|
2021-07-20 02:10:43 +02:00
|
|
|
DEBUGASSERT(TCP_WBSENT(wrb) == 0);
|
|
|
|
DEBUGASSERT(TCP_WBPKTLEN(wrb) > 0);
|
|
|
|
sq_addlast(&wrb->wb_node, &conn->write_q);
|
2019-05-29 15:26:26 +02:00
|
|
|
}
|
2021-07-20 02:10:43 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
tcp_wrbuffer_release(wrb);
|
|
|
|
}
|
|
|
|
|
2023-01-16 05:37:44 +01:00
|
|
|
if (nonblock || (timeout != UINT_MAX &&
|
|
|
|
tcp_send_gettimeout(start, timeout) == 0))
|
2021-07-20 02:10:43 +02:00
|
|
|
{
|
|
|
|
nerr("ERROR: Failed to add data to the I/O chain\n");
|
|
|
|
ret = -EAGAIN;
|
|
|
|
goto errout_with_lock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait for at least one IOB getting available.
|
|
|
|
*
|
|
|
|
* Note: net_ioballoc releases the network lock when blocking.
|
|
|
|
* It allows our write_q being drained in the meantime. Otherwise,
|
|
|
|
* we risk a deadlock with other threads competing on IOBs.
|
|
|
|
*/
|
|
|
|
|
2022-08-08 04:21:03 +02:00
|
|
|
iob = net_iobtimedalloc(true, tcp_send_gettimeout(start, timeout));
|
2022-02-17 07:10:03 +01:00
|
|
|
if (iob != NULL)
|
|
|
|
{
|
2022-08-08 04:21:03 +02:00
|
|
|
iob_free_chain(iob);
|
2022-02-17 07:10:03 +01:00
|
|
|
}
|
2018-01-22 18:11:23 +01:00
|
|
|
}
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-01-28 18:56:11 +01:00
|
|
|
/* Dump I/O buffer chain */
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2018-01-23 02:33:14 +01:00
|
|
|
TCP_WBDUMP("I/O buffer chain", wrb, TCP_WBPKTLEN(wrb), 0);
|
2014-06-23 02:53:18 +02:00
|
|
|
|
2017-08-29 22:08:04 +02:00
|
|
|
/* psock_send_eventhandler() will send data in FIFO order from the
|
2015-01-28 18:56:11 +01:00
|
|
|
* conn->write_q
|
|
|
|
*/
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-01-28 18:56:11 +01:00
|
|
|
sq_addlast(&wrb->wb_node, &conn->write_q);
|
2016-06-20 17:37:08 +02:00
|
|
|
ninfo("Queued WRB=%p pktlen=%u write_q(%p,%p)\n",
|
2018-01-23 02:33:14 +01:00
|
|
|
wrb, TCP_WBPKTLEN(wrb),
|
2016-06-20 17:37:08 +02:00
|
|
|
conn->write_q.head, conn->write_q.tail);
|
2014-06-19 16:31:50 +02:00
|
|
|
|
2015-01-28 18:56:11 +01:00
|
|
|
/* Notify the device driver of the availability of TX data */
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2021-06-01 10:03:57 +02:00
|
|
|
tcp_send_txnotify(psock, conn);
|
2016-12-03 23:28:19 +01:00
|
|
|
net_unlock();
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
|
|
|
|
if (chunk_result == 0)
|
|
|
|
{
|
|
|
|
DEBUGASSERT(nonblock);
|
2021-03-29 10:30:14 +02:00
|
|
|
if (result == 0)
|
|
|
|
{
|
|
|
|
result = -EAGAIN;
|
|
|
|
}
|
|
|
|
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (chunk_result < 0)
|
|
|
|
{
|
|
|
|
if (result == 0)
|
|
|
|
{
|
|
|
|
result = chunk_result;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUGASSERT(chunk_result <= len);
|
2021-03-29 09:28:00 +02:00
|
|
|
DEBUGASSERT(chunk_result <= chunk_len);
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
DEBUGASSERT(result >= 0);
|
|
|
|
cp += chunk_result;
|
|
|
|
len -= chunk_result;
|
|
|
|
result += chunk_result;
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
|
2018-02-13 15:02:42 +01:00
|
|
|
/* Check for errors. Errors are signaled by negative errno values
|
2014-01-14 00:11:01 +01:00
|
|
|
* for the send length
|
|
|
|
*/
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
if (result < 0)
|
2014-01-14 00:11:01 +01:00
|
|
|
{
|
2018-02-13 15:02:42 +01:00
|
|
|
ret = result;
|
2014-01-14 00:11:01 +01:00
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the number of bytes actually sent */
|
|
|
|
|
2014-06-22 19:27:57 +02:00
|
|
|
return result;
|
2014-01-14 00:11:01 +01:00
|
|
|
|
2015-01-28 18:56:11 +01:00
|
|
|
errout_with_lock:
|
2016-12-03 23:28:19 +01:00
|
|
|
net_unlock();
|
2015-01-28 18:56:11 +01:00
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
errout:
|
tcp_send_buffered.c: improve tcp write buffering
* Send data chunk-by-chunk
Note: A stream socket doesn't have atomicity requirement.
* Increase the chance to use full-sized segments
Benchmark numbers in my environment:
* Over ESP32 wifi
* The peer is NetBSD, which has traditional delayed ack TCP
* iperf uses 16384 bytes buffer
---
without this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
does not work.
see https://github.com/apache/incubator-nuttx/pull/2772#discussion_r592820639
---
without this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 4.11 Mbits/sec
3- 6 sec, 4.63 Mbits/sec
6- 9 sec, 4.89 Mbits/sec
9- 12 sec, 4.63 Mbits/sec
12- 15 sec, 4.85 Mbits/sec
15- 18 sec, 4.85 Mbits/sec
18- 21 sec, 5.02 Mbits/sec
21- 24 sec, 3.67 Mbits/sec
24- 27 sec, 4.94 Mbits/sec
27- 30 sec, 4.81 Mbits/sec
0- 30 sec, 4.64 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=36
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.33 Mbits/sec
3- 6 sec, 5.59 Mbits/sec
6- 9 sec, 5.55 Mbits/sec
9- 12 sec, 5.59 Mbits/sec
12- 15 sec, 5.59 Mbits/sec
15- 18 sec, 5.72 Mbits/sec
18- 21 sec, 5.68 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 4.67 Mbits/sec
27- 30 sec, 4.50 Mbits/sec
0- 30 sec, 5.35 Mbits/sec
nsh>
```
---
with this patch,
CONFIG_IOB_NBUFFERS=128
CONFIG_IOB_BUFSIZE=196
```
nsh> iperf -c 192.168.8.1
IP: 192.168.8.103
mode=tcp-client sip=192.168.8.103:5001,dip=192.168.8.1:5001, interval=3, time=30
Interval Bandwidth
0- 3 sec, 5.51 Mbits/sec
3- 6 sec, 4.67 Mbits/sec
6- 9 sec, 4.54 Mbits/sec
9- 12 sec, 5.42 Mbits/sec
12- 15 sec, 5.37 Mbits/sec
15- 18 sec, 5.11 Mbits/sec
18- 21 sec, 5.07 Mbits/sec
21- 24 sec, 5.29 Mbits/sec
24- 27 sec, 5.77 Mbits/sec
27- 30 sec, 4.63 Mbits/sec
0- 30 sec, 5.14 Mbits/sec
nsh>
```
2021-03-15 08:19:42 +01:00
|
|
|
if (result > 0)
|
|
|
|
{
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2018-02-13 15:02:42 +01:00
|
|
|
return ret;
|
2014-01-14 00:11:01 +01:00
|
|
|
}
|
|
|
|
|
2016-01-22 22:52:14 +01:00
|
|
|
/****************************************************************************
|
2017-04-22 00:33:14 +02:00
|
|
|
* Name: psock_tcp_cansend
|
2016-01-22 22:52:14 +01:00
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* psock_tcp_cansend() returns a value indicating if a write to the socket
|
|
|
|
* would block. No space in the buffer is actually reserved, so it is
|
|
|
|
* possible that the write may still block if the buffer is filled by
|
|
|
|
* another means.
|
|
|
|
*
|
2018-03-13 16:52:27 +01:00
|
|
|
* Input Parameters:
|
2022-02-08 17:08:15 +01:00
|
|
|
* conn The TCP connection of interest
|
2016-01-22 22:52:14 +01:00
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* OK
|
2018-09-11 20:00:42 +02:00
|
|
|
* At least one byte of data could be successfully written.
|
2016-01-22 22:52:14 +01:00
|
|
|
* -EWOULDBLOCK
|
|
|
|
* There is no room in the output buffer.
|
|
|
|
* -EBADF
|
|
|
|
* An invalid descriptor was specified.
|
|
|
|
* -ENOTCONN
|
|
|
|
* The socket is not connected.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2022-02-08 17:08:15 +01:00
|
|
|
int psock_tcp_cansend(FAR struct tcp_conn_s *conn)
|
2016-01-22 22:52:14 +01:00
|
|
|
{
|
2018-09-12 16:57:06 +02:00
|
|
|
/* Verify that we received a valid socket */
|
|
|
|
|
2022-02-08 17:08:15 +01:00
|
|
|
if (!conn)
|
2016-01-22 22:52:14 +01:00
|
|
|
{
|
2016-06-11 23:50:49 +02:00
|
|
|
nerr("ERROR: Invalid socket\n");
|
2016-01-22 22:52:14 +01:00
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2018-09-12 16:57:06 +02:00
|
|
|
/* Verify that this is connected TCP socket */
|
|
|
|
|
2022-02-08 17:08:15 +01:00
|
|
|
if (!_SS_ISCONNECTED(conn->sconn.s_flags))
|
2016-01-22 22:52:14 +01:00
|
|
|
{
|
2023-08-24 15:10:34 +02:00
|
|
|
nwarn("WARNING: Not connected\n");
|
2016-01-22 22:52:14 +01:00
|
|
|
return -ENOTCONN;
|
|
|
|
}
|
|
|
|
|
2018-09-12 16:57:06 +02:00
|
|
|
/* In order to setup the send, we need to have at least one free write
|
2019-03-11 19:48:17 +01:00
|
|
|
* buffer head and at least one free IOB to initialize the write buffer
|
|
|
|
* head.
|
2018-09-12 16:57:06 +02:00
|
|
|
*
|
2020-08-14 07:44:16 +02:00
|
|
|
* REVISIT: The send will still block if we are unable to buffer
|
|
|
|
* the entire user-provided buffer which may be quite large.
|
|
|
|
* We will almost certainly need to have more than one free IOB,
|
|
|
|
* but we don't know how many more.
|
2018-09-12 16:57:06 +02:00
|
|
|
*/
|
|
|
|
|
2023-09-12 16:17:44 +02:00
|
|
|
if (tcp_wrbuffer_test() < 0 || iob_navail(true) <= 0
|
|
|
|
#if CONFIG_NET_SEND_BUFSIZE > 0
|
|
|
|
|| tcp_wrbuffer_inqueue_size(conn) >= conn->snd_bufs
|
|
|
|
#endif
|
|
|
|
)
|
2016-01-22 22:52:14 +01:00
|
|
|
{
|
|
|
|
return -EWOULDBLOCK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2021-07-19 15:45:46 +02:00
|
|
|
/****************************************************************************
 * Name: tcp_sendbuffer_notify
 *
 * Description:
 *   Notify the send buffer semaphore
 *
 * Input Parameters:
 *   conn - The TCP connection of interest
 *
 * Assumptions:
 *   Called from user logic with the network locked.
 *
 ****************************************************************************/

#if CONFIG_NET_SEND_BUFSIZE > 0
void tcp_sendbuffer_notify(FAR struct tcp_conn_s *conn)
{
  int val = 0;

  /* Wake up a sender blocked on the send-buffer semaphore, if any.
   * A negative semaphore count indicates at least one waiter.
   */

  nxsem_get_value(&conn->snd_sem, &val);
  if (val < 0)
    {
      nxsem_post(&conn->snd_sem);
    }
}
#endif /* CONFIG_NET_SEND_BUFSIZE */
|
|
|
|
|
2014-01-14 00:11:01 +01:00
|
|
|
#endif /* CONFIG_NET && CONFIG_NET_TCP && CONFIG_NET_TCP_WRITE_BUFFERS */
|