Ethernet skeleton: Add some more example logic

This commit is contained in:
Gregory Nutt 2014-12-31 13:45:19 -06:00
parent 4782acb012
commit 51b220c6d5
2 changed files with 14 additions and 16 deletions

View File

@ -2097,9 +2097,7 @@ static void tiva_txtimeout_expiry(int argc, uint32_t arg, ...)
work_cancel(HPWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread.
* TODO: Ensure that there is no pending interrupt or poll work.
*/
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(HPWORK, &priv->work, tiva_txtimeout_work, priv, 0);
@ -2236,9 +2234,7 @@ static void tiva_poll_expiry(int argc, uint32_t arg, ...)
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread.
* TODO: Make sure that there can be no pending interrupt work.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(HPWORK, &priv->work, tiva_poll_work, priv, 0);
}

View File

@ -416,7 +416,9 @@ static void skel_interrupt_work(FAR void *arg)
skel_interrupt_process(skel);
/* TODO: Re-enable Ethernet interrupts */
/* Re-enable Ethernet interrupts */
up_enable_irq(CONFIG_skeleton_IRQ);
}
#endif
@ -442,11 +444,13 @@ static int skel_interrupt(int irq, FAR void *context)
FAR struct skel_driver_s *skel = &g_skel[0];
#ifdef CONFIG_NET_NOINTS
/* TODO: Disable further Ethernet interrupts. Because Ethernet interrupts
* are also disabled if the TX timeout event occurs, there can be no race
/* Disable further Ethernet interrupts. Because Ethernet interrupts are
* also disabled if the TX timeout event occurs, there can be no race
* condition here.
*/
up_disable_irq(CONFIG_skeleton_IRQ);
/* TODO: Determine if a TX transfer just completed */
{
@ -558,20 +562,20 @@ static void skel_txtimeout_expiry(int argc, uint32_t arg, ...)
FAR struct skel_driver_s *skel = (FAR struct skel_driver_s *)arg;
#ifdef CONFIG_NET_NOINTS
/* TODO: Disable further Ethernet interrupts. This will prevent some race
/* Disable further Ethernet interrupts. This will prevent some race
* conditions with interrupt work. There is still a potential race
* condition with interrupt work that is already queued and in progress.
*/
up_disable_irq(CONFIG_skeleton_IRQ);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(HPWORK, &skel->sk_work);
/* Schedule to perform the TX timeout processing on the worker thread.
* TODO: Ensure that there is no pending interrupt or poll work.
*/
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(HPWORK, &skel->sk_work, skel_txtimeout_work, skel, 0);
#else
@ -673,9 +677,7 @@ static void skel_poll_expiry(int argc, uint32_t arg, ...)
if (work_available(&skel->sk_work))
{
/* Schedule to perform the interrupt processing on the worker thread.
* TODO: Make sure that there can be no pending interrupt work.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(HPWORK, &skel->sk_work, skel_poll_work, skel, 0);
}