arch/sim: change g_avail_work and g_recv_work to arrays in netdriver

Sharing one worker between multiple simulated network devices may work most of the time, but it sometimes breaks the tx pipeline when packets are sent on more than one interface at the same time: some packets are left unprocessed in the network stack and are delayed until the next transmit on that interface (see the timeline and the sketch below). The rx path can be delayed in the same way, so keep the number of g_avail_work and g_recv_work entries equal to the number of interfaces.

dev0 tx1 avail              tx1 done
        v                      v
work dev0 tx1 -> dev0 tx1 -> empty -> dev1 tx2 -> dev1 tx3 -> empty
                    ^                    ^           ^          ^
dev1     tx2 avail (failed to queue)  tx3 avail   tx2 done   tx3 done
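The drop happens because the old code only calls work_queue() when work_available() reports the single shared work_s free, so while dev0's poll is still pending, dev1's request is skipped. Below is a minimal sketch of that shared-work pattern for illustration only; it relies just on the work_available()/work_queue() API from nuttx/wqueue.h, and request_poll/poll_device are made-up names:

static struct work_s g_shared_work;   /* One work item shared by all devices */

static void poll_device(void *arg)
{
  /* Poll only the device passed in arg */
}

static void request_poll(struct net_driver_s *dev)
{
  if (work_available(&g_shared_work)) /* False while another device's poll is queued */
    {
      work_queue(LPWORK, &g_shared_work, poll_device, dev, 0);
    }

  /* Otherwise this device's request is lost until its next transmit */
}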

Signed-off-by: Zhe Weng <wengzhe@xiaomi.com>

@@ -78,8 +78,8 @@
 
 /* Net driver worker */
 
-static struct work_s g_avail_work;
-static struct work_s g_recv_work;
+static struct work_s g_avail_work[CONFIG_SIM_NETDEV_NUMBER];
+static struct work_s g_recv_work[CONFIG_SIM_NETDEV_NUMBER];
 
 /* Ethernet peripheral state */
 
@@ -334,9 +334,11 @@ static void netdriver_txavail_work(void *arg)
 
 static int netdriver_txavail(struct net_driver_s *dev)
 {
-  if (work_available(&g_avail_work))
+  int devidx = (intptr_t)dev->d_private;
+  if (work_available(&g_avail_work[devidx]))
     {
-      work_queue(LPWORK, &g_avail_work, netdriver_txavail_work, dev, 0);
+      work_queue(LPWORK, &g_avail_work[devidx], netdriver_txavail_work,
+                 dev, 0);
     }
 
   return OK;
@@ -344,19 +346,22 @@ static int netdriver_txavail(struct net_driver_s *dev)
 
 static void netdriver_txdone_interrupt(void *priv)
 {
-  if (work_available(&g_avail_work))
+  struct net_driver_s *dev = (struct net_driver_s *)priv;
+  int devidx = (intptr_t)dev->d_private;
+  if (work_available(&g_avail_work[devidx]))
     {
-      struct net_driver_s *dev = (struct net_driver_s *)priv;
-      work_queue(LPWORK, &g_avail_work, netdriver_txavail_work, dev, 0);
+      work_queue(LPWORK, &g_avail_work[devidx], netdriver_txavail_work,
+                 dev, 0);
     }
 }
 
 static void netdriver_rxready_interrupt(void *priv)
 {
-  if (work_available(&g_recv_work))
+  struct net_driver_s *dev = (struct net_driver_s *)priv;
+  int devidx = (intptr_t)dev->d_private;
+  if (work_available(&g_recv_work[devidx]))
     {
-      struct net_driver_s *dev = (struct net_driver_s *)priv;
-      work_queue(LPWORK, &g_recv_work, netdriver_recv_work, dev, 0);
+      work_queue(LPWORK, &g_recv_work[devidx], netdriver_recv_work, dev, 0);
     }
 }
 
@@ -427,9 +432,9 @@ void netdriver_loop(void)
   int devidx;
   for (devidx = 0; devidx < CONFIG_SIM_NETDEV_NUMBER; devidx++)
     {
-      if (work_available(&g_recv_work) && netdev_avail(devidx))
+      if (work_available(&g_recv_work[devidx]) && netdev_avail(devidx))
         {
-          work_queue(LPWORK, &g_recv_work, netdriver_recv_work,
+          work_queue(LPWORK, &g_recv_work[devidx], netdriver_recv_work,
                      &g_sim_dev[devidx], 0);
        }
     }
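
Note: the (intptr_t)dev->d_private cast in the hunks above assumes that the init path stores each interface's index in d_private when g_sim_dev[] is set up; that assignment is outside this diff. A hedged sketch of what it could look like (netdriver_init_sketch is a hypothetical name, not a function shown here):

static void netdriver_init_sketch(void)
{
  int devidx;

  for (devidx = 0; devidx < CONFIG_SIM_NETDEV_NUMBER; devidx++)
    {
      /* Hypothetical: record the index so the interrupt paths can look up
       * their per-device entries in g_avail_work[] and g_recv_work[].
       */

      g_sim_dev[devidx].d_private = (void *)(intptr_t)devidx;
    }
}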