net/mld: There is an issue with the general query timer design. There should be a single instance of the timer, not a per-group instance (likewise for the MLDv1 compatibility timer). Fall back for now and use the per-group MAS query.
parent f8d39e2c84
commit 8cfb376f81
@@ -19,7 +19,7 @@ nuttx/:
  (9)  Kernel/Protected Build
  (3)  C++ Support
  (5)  Binary loaders (binfmt/)
- (18) Network (net/, drivers/net)
+ (19) Network (net/, drivers/net)
  (4)  USB (drivers/usbdev, drivers/usbhost)
  (2)  Other drivers (drivers/)
  (11) Libraries (libs/libc/, libs/libm/)
@@ -1497,6 +1497,9 @@ o Network (net/, drivers/net)
    Instead, the design currently uses a Multicast Address Specific
    Query with one timer per group and ignores groups that we are
    not members of.
+
+   Similarly, the MLDv1 compatibility timer should be a single,
+   separate timer, not a per-group timer.
 
   Status: Open
   Priority: Low. There are no customers of MLD as far as I know.
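The entry above argues for a single General Query timer per querier (per interface) rather than one timer per group. As a rough illustration only, a single-instance timer could be built on the NuttX low-priority work queue as sketched below; g_genquery_work, mld_genquery_work(), mld_start_genquery_timer() and mld_send_genquery() are hypothetical names, and the 125 second Query Interval is just the RFC 3810 default. None of this exists in the current code.

#include <nuttx/config.h>
#include <nuttx/clock.h>
#include <nuttx/wqueue.h>
#include <nuttx/net/netdev.h>

/* Hypothetical stand-in for the real query transmit path. */

void mld_send_genquery(FAR struct net_driver_s *dev);

/* A single work item for the whole querier, not one per group. */

static struct work_s g_genquery_work;

static void mld_genquery_work(FAR void *arg)
{
  FAR struct net_driver_s *dev = (FAR struct net_driver_s *)arg;

  /* Send one General Query to the link-scope all-nodes address for the
   * whole interface, instead of one MAS query per group.
   */

  mld_send_genquery(dev);

  /* Re-arm the single timer for the next Query Interval (125 seconds
   * is the RFC 3810 default).
   */

  work_queue(LPWORK, &g_genquery_work, mld_genquery_work, dev,
             SEC2TICK(125));
}

/* Called once when this node becomes the Querier on 'dev'. */

void mld_start_genquery_timer(FAR struct net_driver_s *dev)
{
  work_queue(LPWORK, &g_genquery_work, mld_genquery_work, dev,
             SEC2TICK(125));
}

The point of this shape is that the timer is keyed to the interface: becoming or ceasing to be the Querier arms or cancels exactly one timer, no matter how many groups have been joined.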
@@ -189,7 +189,7 @@ enum mld_msgtype_e
 {
   MLD_SEND_NONE = 0,    /* Nothing to send */
   MLD_SEND_GENQUERY,    /* Send General Query */
-  MLD_SEND_MASQUERY,    /* Send General Query */
+  MLD_SEND_MASQUERY,    /* Send Multicast Address Specific (MAS) Query */
   MLD_SEND_V1REPORT,    /* Send MLDv1 Report message */
   MLD_SEND_V2REPORT,    /* Send MLDv2 Report message */
   MLD_SEND_DONE         /* Send Done message */
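For context, the two query values in this enumeration map onto the same ICMPv6 message type on the wire and differ only in their Multicast Address field, while the report and done values map onto distinct ICMPv6 types (RFC 2710 and RFC 3810). A small illustrative helper, assuming the mld_msgtype_e enumeration above; the function itself is made up and is not how the stack dispatches these messages.

#include <stdint.h>

/* Illustrative only: the ICMPv6 type that corresponds to each
 * mld_msgtype_e value.  The numeric values come from RFC 2710 and
 * RFC 3810; the helper itself is hypothetical.
 */

static uint8_t mld_icmpv6_type(enum mld_msgtype_e msgtype)
{
  switch (msgtype)
    {
      case MLD_SEND_GENQUERY:   /* Multicast Address field is zero */
      case MLD_SEND_MASQUERY:   /* Multicast Address field is the group */
        return 130;             /* Multicast Listener Query */

      case MLD_SEND_V1REPORT:
        return 131;             /* MLDv1 Multicast Listener Report */

      case MLD_SEND_DONE:
        return 132;             /* MLDv1 Multicast Listener Done */

      case MLD_SEND_V2REPORT:
        return 143;             /* MLDv2 Multicast Listener Report */

      default:
        return 0;               /* MLD_SEND_NONE: nothing to send */
    }
}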
@@ -188,10 +188,22 @@ static void mld_polldog_work(FAR void *arg)
 
   if (IS_MLD_QUERIER(group->flags))
     {
-      /* Schedule (and forget) the general query. */
+      /* Schedule (and forget) the general query or MAS query. */
 
       MLD_STATINCR(g_netstats.mld.query_sched);
 
+#ifdef CONFIG_NET_MLD_ROUTER
+      /* REVISIT: In order to support the RFC correctly, we would need a
+       * separate, single general query timer that is not part of the
+       * group structure.  The Querier should query across all groups
+       * with a single query, not via multiple MAS queries as is done
+       * here.
+       */
+
       ret = mld_schedmsg(group, MLD_SEND_GENQUERY);
+#else
+      ret = mld_schedmsg(group, MLD_SEND_MASQUERY);
+#endif
       if (ret < 0)
         {
           mlderr("ERROR: Failed to schedule message: %d\n", ret);
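The fallback above substitutes a per-group MAS Query where a single General Query is really wanted. The two queries differ only in addressing: per RFC 3810, a General Query carries an unspecified (zero) Multicast Address and is sent to the link-scope all-nodes address ff02::1, while a Multicast Address Specific Query carries the group's address and is sent to that group. A sketch of that distinction with a made-up mld_query_addrs() helper; it is not the stack's actual send path.

#include <nuttx/config.h>
#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

/* Hypothetical helper: choose the IPv6 destination address and the
 * Multicast Address field of a query.  'grpaddr' is the group being
 * queried and is ignored for a General Query.
 */

static void mld_query_addrs(bool general,
                            FAR const struct in6_addr *grpaddr,
                            FAR struct in6_addr *destipaddr,
                            FAR struct in6_addr *mcastaddr)
{
  if (general)
    {
      /* General Query: the Multicast Address field is zero and the
       * query goes to the link-scope all-nodes address ff02::1.
       */

      memset(mcastaddr, 0, sizeof(struct in6_addr));
      memset(destipaddr, 0, sizeof(struct in6_addr));
      destipaddr->s6_addr[0]  = 0xff;
      destipaddr->s6_addr[1]  = 0x02;
      destipaddr->s6_addr[15] = 0x01;
    }
  else
    {
      /* MAS Query: the Multicast Address field holds the group's
       * address and the query goes to that same group address.
       */

      memcpy(mcastaddr, grpaddr, sizeof(struct in6_addr));
      memcpy(destipaddr, grpaddr, sizeof(struct in6_addr));
    }
}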
@@ -363,6 +375,8 @@ void mld_start_polltimer(FAR struct mld_group_s *group, clock_t ticks)
  * Description:
  *   Start the MLDv1 compatibility timer.
  *
+ * REVISIT:  This should be a single global timer, not a per-group timer.
+ *
  ****************************************************************************/
 
 void mld_start_v1timer(FAR struct mld_group_s *group, clock_t ticks)
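The REVISIT added here makes the same point for the compatibility timer: RFC 3810 keeps one Older Version Querier Present timer per interface, restarted whenever an MLDv1 query is heard, and MLDv2 reporting resumes only when it expires. A hedged sketch of that single-instance shape; g_v1compat_work, g_v1compat, mld_restart_v1timer_single() and the 260 second timeout (Robustness Variable x Query Interval + Query Response Interval with default values) are illustrative, not the existing per-group logic.

#include <nuttx/config.h>
#include <stdbool.h>
#include <nuttx/clock.h>
#include <nuttx/wqueue.h>

/* One compatibility timer and one mode flag for the interface, rather
 * than one timer per group (illustrative sketch only).
 */

static struct work_s g_v1compat_work;
static bool g_v1compat;           /* True: send MLDv1 reports */

static void mld_v1compat_timeout(FAR void *arg)
{
  /* No MLDv1 query has been heard for the whole timeout period, so
   * revert to MLDv2 behavior.  A report path would test g_v1compat to
   * choose between MLD_SEND_V1REPORT and MLD_SEND_V2REPORT.
   */

  g_v1compat = false;
}

/* Call whenever an MLDv1 query is received on the interface. */

void mld_restart_v1timer_single(void)
{
  g_v1compat = true;

  /* (Re)start the single timer: 2 * 125s + 10s = 260 seconds with the
   * RFC 3810 default values.
   */

  work_cancel(LPWORK, &g_v1compat_work);
  work_queue(LPWORK, &g_v1compat_work, mld_v1compat_timeout, NULL,
             SEC2TICK(260));
}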