#
# For a description of the syntax of this configuration file,
# see the file kconfig-language.txt in the NuttX tools repository.
#

menu "TCP/IP Networking"

config NET_TCP
	bool "TCP/IP Networking"
	default n
	select NET_READAHEAD if !NET_TCP_NO_STACK
	---help---
		Enable or disable TCP networking support.
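
# The block below is an illustrative comment only (not part of the configuration
# logic): with NET_TCP enabled and the stack built in, applications use the
# standard BSD stream-socket API. A minimal sketch in C, assuming an IPv4
# configuration and a hypothetical peer at 192.168.1.2 port 80:
#
#   #include <sys/socket.h>
#   #include <netinet/in.h>
#   #include <arpa/inet.h>
#   #include <string.h>
#   #include <unistd.h>
#
#   int tcp_hello(void)
#   {
#     struct sockaddr_in addr;
#     int fd = socket(AF_INET, SOCK_STREAM, 0);     /* TCP stream socket */
#
#     memset(&addr, 0, sizeof(addr));
#     addr.sin_family = AF_INET;
#     addr.sin_port   = htons(80);
#     inet_pton(AF_INET, "192.168.1.2", &addr.sin_addr);
#
#     if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0)
#       {
#         send(fd, "hello", 5, 0);                  /* Streamed TCP output */
#       }
#
#     close(fd);
#     return 0;
#   }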

config NET_TCP_NO_STACK
	bool "Disable TCP/IP Stack"
	default n
	select NET_TCP
	---help---
		Build without the TCP/IP stack even if TCP networking support is enabled.

if NET_TCP && !NET_TCP_NO_STACK

config NET_TCP_DELAYED_ACK
	bool "TCP/IP Delayed ACK"
	default n
	---help---
		RFC 1122: A host that is receiving a stream of TCP data segments
		can increase efficiency in both the Internet and the hosts
		by sending fewer than one ACK (acknowledgment) segment per data
		segment received; this is known as a "delayed ACK".

		TCP should implement a delayed ACK, but an ACK should not be
		excessively delayed; in particular, the delay MUST be less than
		0.5 seconds, and in a stream of full-sized segments there should
		be an ACK for at least every second segment.

config NET_TCP_KEEPALIVE
	bool "TCP/IP Keep-alive support"
	default n
	select NET_TCPPROTO_OPTIONS
	---help---
		Enable support for the SO_KEEPALIVE socket option.
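
# Illustrative comment only: once NET_TCP_KEEPALIVE is enabled, an application
# can turn keep-alive probing on per socket with the standard socket options.
# A sketch in C; the probe-tuning options are assumed to be provided through
# NET_TCPPROTO_OPTIONS, and the timing values are arbitrary examples:
#
#   #include <sys/socket.h>
#   #include <netinet/in.h>
#   #include <netinet/tcp.h>
#
#   static void enable_keepalive(int sockfd)
#   {
#     int on    = 1;
#     int idle  = 30;   /* Seconds of idle time before the first probe */
#     int intvl = 10;   /* Seconds between probes */
#     int cnt   = 3;    /* Unanswered probes before the connection is dropped */
#
#     setsockopt(sockfd, SOL_SOCKET,  SO_KEEPALIVE,  &on,    sizeof(on));
#     setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPIDLE,  &idle,  sizeof(idle));
#     setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
#     setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPCNT,   &cnt,   sizeof(cnt));
#   }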

config NET_TCPURGDATA
	bool "Urgent data"
	default n
	---help---
		Determines if support for TCP urgent data notification should be
		compiled in. Urgent data (out-of-band data) is a rarely used TCP
		feature that would very seldom be required.

config NET_TCP_PREALLOC_CONNS
	int "Preallocated TCP/IP connections"
	default 8
	---help---
		Number of TCP/IP connections (all tasks).

		This number of connections will be pre-allocated during system boot.
		If dynamic connection allocation is enabled, more connections may
		be allocated at a later time, as the system needs them. Otherwise
		this will be the maximum number of connections available to the
		system at all times.

		Set to 0 to disable (and rely only on dynamic allocations).

config NET_TCP_ALLOC_CONNS
	int "Dynamic TCP/IP connections allocation"
	default 0
	---help---
		Dynamic memory allocation of TCP/IP connections.

		When set to 0 all dynamic allocations are disabled.

		When set to 1 a new connection will be allocated every time,
		and it will be freed when no longer needed.

		Setting this to 2 or more will allocate the connections in
		batches (with batch size equal to this config). When a
		connection is no longer needed, it will be returned to the
		free connections pool, and it will never be deallocated!

config NET_TCP_MAX_CONNS
	int "Maximum number of TCP/IP connections"
	default 0
	depends on NET_TCP_ALLOC_CONNS > 0
	---help---
		If dynamic connection allocation is enabled (NET_TCP_ALLOC_CONNS > 0),
		this limits the number of connections that can be allocated.

		This is useful in case the system is under very heavy load (or
		under attack), ensuring that the heap will not be exhausted.

config NET_TCP_NPOLLWAITERS
	int "Number of TCP poll waiters"
	default 1

config NET_TCP_RTO
	int "RTO of TCP/IP connections"
	default 3
	---help---
		Retransmission timeout (RTO) of TCP/IP connections (all tasks).

config NET_TCP_MAXRTX
	int "Maximum retransmissions of a TCP/IP data packet"
	default 8

config NET_TCP_MAXSYNRTX
	int "Maximum retransmissions of a TCP/IP SYN packet"
	default 5

config NET_TCP_WAIT_TIMEOUT
	int "TIME_WAIT Length of TCP/IP connections"
	default 120
	---help---
		TIME_WAIT length of TCP/IP connections (all tasks), in units
		of seconds.

config NET_MAX_LISTENPORTS
	int "Number of listening ports"
	default 20
	---help---
		Maximum number of listening TCP/IP ports (all tasks). Default: 20

config NET_TCP_FAST_RETRANSMIT
	bool "Enable the Fast Retransmit algorithm"
	default y
	---help---
		RFC 2001:
		3. Fast Retransmit
		Modifications to the congestion avoidance algorithm were proposed in
		1990 [3]. Before describing the change, realize that TCP may
		generate an immediate acknowledgment (a duplicate ACK) when an out-
		of-order segment is received (Section 4.2.2.21 of [1], with a note
		that one reason for doing so was for the experimental fast-
		retransmit algorithm). This duplicate ACK should not be delayed.
		The purpose of this duplicate ACK is to let the other end know that a
		segment was received out of order, and to tell it what sequence
		number is expected.

		Since TCP does not know whether a duplicate ACK is caused by a lost
		segment or just a reordering of segments, it waits for a small number
		of duplicate ACKs to be received. It is assumed that if there is
		just a reordering of the segments, there will be only one or two
		duplicate ACKs before the reordered segment is processed, which will
		then generate a new ACK. If three or more duplicate ACKs are
		received in a row, it is a strong indication that a segment has been
		lost. TCP then performs a retransmission of what appears to be the
		missing segment, without waiting for a retransmission timer to
		expire.

config NET_TCP_CC_NEWRENO
	bool "Enable the NewReno Congestion Control algorithm"
	default n
	select NET_TCP_FAST_RETRANSMIT
	---help---
		RFC 5681:
		TCP Congestion Control defines four congestion control algorithms:
		slow start, congestion avoidance, fast retransmit, and fast recovery.

config NET_TCP_WINDOW_SCALE
	bool "Enable TCP/IP Window Scale Option"
	default n
	---help---
		RFC 1323:
		2. TCP WINDOW SCALE OPTION
		The window scale extension expands the definition of the TCP
		window to 32 bits and then uses a scale factor to carry this 32-
		bit value in the 16-bit Window field of the TCP header (SEG.WND in
		RFC-793).

if NET_TCP_WINDOW_SCALE

config NET_TCP_WINDOW_SCALE_FACTOR
	int "TCP/IP Window Scale Factor"
	default 0
	---help---
		This is the default value for the window scale factor.

endif # NET_TCP_WINDOW_SCALE
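
# Illustrative comment only: the scale factor is the left shift applied to the
# 16-bit window field, so a factor of 4 lets a field value of 16384 represent a
# 262144-byte (256 KiB) window. A short sketch in C of that arithmetic:
#
#   #include <stdint.h>
#
#   static uint32_t effective_window(uint16_t hdr_window, uint8_t scale_factor)
#   {
#     /* RFC 1323: real window = SEG.WND << Snd.Wind.Scale */
#
#     return (uint32_t)hdr_window << scale_factor;
#   }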

config NET_TCP_OUT_OF_ORDER
	bool "Enable TCP/IP Out Of Order segments"
	default n
	---help---
		TCP will queue segments that arrive out of order.

if NET_TCP_OUT_OF_ORDER

config NET_TCP_OUT_OF_ORDER_BUFSIZE
	int "TCP/IP Out Of Order buffer size"
	default 16384
	---help---
		This is the default value for the out-of-order buffer size.

endif # NET_TCP_OUT_OF_ORDER

config NET_TCP_SELECTIVE_ACK
	bool "Enable TCP/IP Selective Acknowledgment Options"
	default n
	select NET_TCP_OUT_OF_ORDER
	---help---
		Enable RFC 2018 (TCP Selective Acknowledgment Options):
		With cumulative acknowledgments alone, a TCP sender can only learn
		about a single lost packet per round trip time. Selective
		Acknowledgment (SACK) is a strategy which corrects this behavior in
		the face of multiple dropped segments. With selective
		acknowledgments, the data receiver can inform the sender about all
		segments that have arrived successfully, so the sender need
		retransmit only the segments that have actually been lost.

config NET_TCP_NOTIFIER
	bool "Support TCP notifications"
	default n
	depends on SCHED_WORKQUEUE
	select WQUEUE_NOTIFIER
	---help---
		Enable building of TCP notifier logic that will execute a worker
		function on the low priority work queue when read-ahead data
		is available or when a TCP connection is lost. This is a general
		purpose notifier, but was developed specifically to support poll()
		logic where the poll must wait for these events.

config NET_TCP_WRITE_BUFFERS
	bool "Enable TCP/IP write buffering"
	default n
	select NET_WRITE_BUFFERS
	---help---
		Write buffering allows buffering of outgoing TCP/IP packets,
		providing for higher performance, streamed output.

		You might want to disable TCP/IP write buffering on a highly
		memory constrained system where there are no performance issues.

if NET_TCP_WRITE_BUFFERS

config NET_TCP_NWRBCHAINS
	int "Number of pre-allocated I/O buffer chain heads"
	default 8
	---help---
		These tiny nodes are used as "containers" to support queuing of
		TCP write buffers. This setting will limit the number of TCP write
		operations that can be "in-flight" at any given time. So a good
		choice for this value would be the same as the maximum number of
		TCP connections.

config NET_TCP_WRBUFFER_DEBUG
	bool "Force write buffer debug"
	default n
	depends on DEBUG_FEATURES
	select IOB_DEBUG
	---help---
		This option will force debug output from TCP write buffer logic,
		even without network debug output. This is not normally something
		that you would want to do but is convenient if you are debugging
		the write buffer logic and do not want to get overloaded with other
		network-related debug output.

config NET_TCP_WRBUFFER_DUMP
	bool "Force write buffer dump"
	default n
	depends on DEBUG_NET || NET_TCP_WRBUFFER_DEBUG
	select IOB_DEBUG
	---help---
		Dump the contents of the write buffers. You do not want to do this
		unless you really want to analyze the write buffer transfers in
		detail.

endif # NET_TCP_WRITE_BUFFERS

config NET_TCPBACKLOG
	bool "TCP/IP backlog support"
	default n
	---help---
		Incoming connections pend in a backlog until accept() is called.
		The size of the backlog is selected when listen() is called.
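
# Illustrative comment only: the backlog length is the second argument to
# listen(). A server-side sketch in C; the port number and backlog of 4 are
# arbitrary examples:
#
#   #include <sys/socket.h>
#   #include <netinet/in.h>
#   #include <arpa/inet.h>
#   #include <string.h>
#
#   static int start_server(void)
#   {
#     struct sockaddr_in addr;
#     int listenfd = socket(AF_INET, SOCK_STREAM, 0);
#
#     memset(&addr, 0, sizeof(addr));
#     addr.sin_family      = AF_INET;
#     addr.sin_addr.s_addr = htonl(INADDR_ANY);
#     addr.sin_port        = htons(8080);
#
#     bind(listenfd, (struct sockaddr *)&addr, sizeof(addr));
#     listen(listenfd, 4);            /* Up to 4 connections may pend in the backlog */
#
#     return accept(listenfd, NULL, NULL);  /* Removes one connection from the backlog */
#   }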

if NET_TCPBACKLOG

config NET_TCPBACKLOG_CONNS
	int "TCP backlog conns threshold"
	default 8
	---help---
		Maximum number of TCP backlog connections (all tasks).

endif # NET_TCPBACKLOG

config NET_SENDFILE
	bool "Optimized network sendfile()"
	default n
	---help---
		Support larger, higher performance sendfile() for transferring
		files out over a TCP connection.
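
# Illustrative comment only: the optimized path is exercised through the
# standard sendfile() interface when the output descriptor is a TCP socket.
# A sketch in C assuming the usual <sys/sendfile.h> prototype; the file path
# and transfer size are hypothetical:
#
#   #include <sys/sendfile.h>
#   #include <fcntl.h>
#   #include <unistd.h>
#
#   static int send_file_over_tcp(int sockfd)
#   {
#     off_t   offset = 0;
#     int     filefd = open("/data/file.bin", O_RDONLY);
#     ssize_t nsent;
#
#     /* Copy up to 4096 bytes from the file directly out the TCP socket */
#
#     nsent = sendfile(sockfd, filefd, &offset, 4096);
#
#     close(filefd);
#     return (nsent < 0) ? -1 : 0;
#   }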

endif # NET_TCP && !NET_TCP_NO_STACK

if NET_STATISTICS

config NET_TCP_DEBUG_DROP_RECV
	bool "TCP/IP debug feature to drop receive packet"
	default n
	---help---
		This is a debug feature to drop received TCP/IP packets.

if NET_TCP_DEBUG_DROP_RECV

config NET_TCP_DEBUG_DROP_RECV_PROBABILITY
	int "TCP/IP drop probability of received packet"
	range 50 10000
	default 50
	---help---
		This is the drop probability of received packets; a value of N
		drops about one packet in N. Default: 1/50

endif # NET_TCP_DEBUG_DROP_RECV

config NET_TCP_DEBUG_DROP_SEND
	bool "TCP/IP debug feature to drop send packet"
	default n
	---help---
		This is a debug feature to drop TCP/IP packets being sent.

if NET_TCP_DEBUG_DROP_SEND

config NET_TCP_DEBUG_DROP_SEND_PROBABILITY
	int "TCP/IP drop probability of send packet"
	range 50 10000
	default 50
	---help---
		This is the drop probability of sent packets; a value of N
		drops about one packet in N. Default: 1/50

endif # NET_TCP_DEBUG_DROP_SEND

endif # NET_STATISTICS

endmenu # TCP/IP Networking