sched/mqueue: remove sched_lock to improve performance

Remove the sched_lock()/sched_unlock() pairs and rely on critical sections for protection instead; this improves the mq_send()/mq_receive() path by roughly 18% (cycle counts below).
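
Every touched function follows the same pattern. Below is a minimal sketch of the locking discipline after the change (hypothetical helper name, standard NuttX critical-section API assumed), not the literal file contents:

#include <nuttx/irq.h>
#include <nuttx/mqueue.h>

/* Illustrative only: the critical section, which is required anyway
 * because messages can be sent from interrupt handlers, is now the only
 * protection; the sched_lock()/sched_unlock() pair that used to wrap it
 * is gone.
 */

static int example_mq_operation(FAR struct mqueue_inode_s *msgq)
{
  irqstate_t flags;

  flags = enter_critical_section();

  /* ... add or remove a message on msgq and wake any waiting task ... */

  leave_critical_section(flags);
  return OK;
}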

mq_send Flow                        Cycle Count
                                Origin  Optimized
mq_send
|
 ->nxmq_send                       24         24
   |
    ->file_mq_send                209        209
      |
      |->sched_lock               243        N/A  <-
      |->nxmq_do_send             391        348
      |  |
      |  |->sched_lock            434        N/A  <-
      |  |->up_unblock_task       545        459
      |   ->sched_unlock          675        N/A  <-
      |
       ->sched_unlock             684        N/A  <-
         |
          ->up_release_pending    701        N/A
            |
             ->arm_switchcontext  856        610

mq_receive
|
 ->arm_fullcontextrestore        1375       1133
   |
    ->up_block_task              1375       1133
      |
       ->nxmq_wait_receive       1530       1288
         |
          ->file_mq_receive      1606       1310
            |
             ->nxmq_receive      1616       1320
               |
                ->mq_receive     1628       1332  - 18%
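
For context, cycle counts like these are usually read from a free-running hardware cycle counter. The sketch below assumes an ARM Cortex-M class target with the standard DWT block; it is purely illustrative of the method, not the instrumentation used to produce the table above.

#include <stdint.h>

/* DWT cycle counter (Cortex-M3/M4/M7); the addresses are the
 * architecturally defined DEMCR/DWT registers.
 */

#define DEMCR      (*(volatile uint32_t *)0xe000edfc)
#define DWT_CTRL   (*(volatile uint32_t *)0xe0001000)
#define DWT_CYCCNT (*(volatile uint32_t *)0xe0001004)

static void cyccnt_init(void)
{
  DEMCR     |= (1u << 24);   /* TRCENA: enable the DWT/ITM blocks */
  DWT_CYCCNT = 0;
  DWT_CTRL  |= 1u;           /* CYCCNTENA: start the free-running counter */
}

static inline uint32_t cyccnt_now(void)
{
  return DWT_CYCCNT;
}

/* Usage: snapshot cyccnt_now() at the points of interest, e.g. on entry
 * to file_mq_send() and just before arm_switchcontext(), and log the
 * deltas to build a table like the one above.
 */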

Signed-off-by: chao.an <anchao@xiaomi.com>
Author:    chao.an  2022-06-08 17:55:39 +08:00
Committer: Masayuki Ishikawa
parent     644283c8ff
commit     f5d4571abc
6 changed files with 11 additions and 48 deletions


@@ -29,6 +29,7 @@
#include <string.h>
#include <errno.h>
#include <nuttx/irq.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
@@ -97,11 +98,13 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
FAR struct inode *inode;
FAR struct file *filep;
FAR struct tcb_s *rtcb;
irqstate_t flags;
int errval;
errval = fs_getfilep(mqdes, &filep);
if (errval < 0)
{
errval = -errval;
goto errout_without_lock;
}
@@ -119,7 +122,7 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
/* Get a pointer to the message queue */
sched_lock();
flags = enter_critical_section();
/* Get the current process ID */
@@ -177,11 +180,11 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
nxsig_cancel_notification(&msgq->ntwork);
}
sched_unlock();
leave_critical_section(flags);
return OK;
errout:
sched_unlock();
leave_critical_section(flags);
errout_without_lock:
set_errno(errval);
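
In mq_notify() both the success and the error exits swap the preemption lock for the critical section. A condensed sketch of the resulting shape (hypothetical function name, lookup and notification bodies elided):

static int example_mq_notify(mqd_t mqdes,
                             FAR const struct sigevent *notification)
{
  irqstate_t flags;

  /* ... resolve mqdes to its message queue and validate the arguments;
   * failures before this point still exit without any lock held ...
   */

  flags = enter_critical_section();    /* was: sched_lock() */

  if (notification != NULL)
    {
      /* ... register the notification for this queue; on failure,
       * leave_critical_section(flags) (was: sched_unlock()), set errno
       * and return ERROR ...
       */
    }
  else
    {
      /* ... cancel a previously registered notification ... */
    }

  leave_critical_section(flags);       /* was: sched_unlock() */
  return OK;
}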


@@ -99,15 +99,6 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
return ret;
}
/* Get the next message from the message queue. We will disable
* pre-emption until we have completed the message received. This
* is not too bad because if the receipt takes a long time, it will
* be because we are blocked waiting for a message and pre-emption
* will be re-enabled while we are blocked
*/
sched_lock();
/* Furthermore, nxmq_wait_receive() expects to have interrupts disabled
* because messages can be sent from interrupt level.
*/
@@ -132,7 +123,6 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
}
sched_unlock();
return ret;
}
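
The receive path now takes only the interrupt-level protection it already needed. A condensed sketch of its shape after the change (hypothetical wrapper name, the blocking wait elided; prototypes for the nxmq_* helpers and struct mqueue_msg_s come from the scheduler's internal mqueue header):

static ssize_t example_mq_receive(FAR struct mqueue_inode_s *msgq,
                                  FAR char *msg, FAR unsigned int *prio)
{
  FAR struct mqueue_msg_s *mqmsg = NULL;
  irqstate_t flags;
  ssize_t ret = OK;

  /* Interrupts are masked because messages can be sent from interrupt
   * level; pre-emption stays enabled.
   */

  flags = enter_critical_section();

  /* ... block here (nxmq_wait_receive) until a message is queued,
   * leaving mqmsg pointing at it on success ...
   */

  leave_critical_section(flags);

  if (mqmsg != NULL)
    {
      ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
    }

  /* Previously followed by sched_unlock(); now the result is returned
   * directly.
   */

  return ret;
}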


@@ -94,10 +94,6 @@ int file_mq_send(FAR struct file *mq, FAR const char *msg, size_t msglen,
return ret;
}
/* Get a pointer to the message queue */
sched_lock();
/* Allocate a message structure:
* - Immediately if we are called from an interrupt handler.
* - Immediately if the message queue is not full, or
@@ -156,7 +152,6 @@ int file_mq_send(FAR struct file *mq, FAR const char *msg, size_t msglen,
ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
}
sched_unlock();
return ret;
}


@@ -327,10 +327,6 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
FAR struct mqueue_msg_s *prev;
irqstate_t flags;
/* Get a pointer to the message queue */
sched_lock();
/* Construct the message header info */
mqmsg->priority = prio;
@@ -405,8 +401,9 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
{
/* Find the highest priority task that is waiting for
* this queue to be non-empty in g_waitingformqnotempty
* list. sched_lock() should give us sufficient protection since
* interrupts should never cause a change in this list
* list. leave_critical_section() should give us sufficient
* protection since interrupts should never cause a change
* in this list
*/
for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
@@ -425,6 +422,5 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
}
leave_critical_section(flags);
sched_unlock();
return OK;
}
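
Waking the highest-priority waiter now happens entirely inside the critical section. A condensed fragment of that part of nxmq_do_send() (list name as in the diff; the msgwaitq TCB member and the loop condition follow the pre-existing sources and are shown here only for illustration), making explicit why interrupt masking alone is sufficient:

FAR struct tcb_s *btcb;
irqstate_t flags;

flags = enter_critical_section();

/* ... enqueue the message on msgq in priority order ... */

/* Scan g_waitingformqnotempty for the highest-priority task waiting on
 * this queue; the scan is safe because interrupts stay masked for the
 * whole critical section, so nothing can modify the list underneath us.
 */

for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
     btcb != NULL && btcb->msgwaitq != msgq;
     btcb = btcb->flink)
  {
  }

if (btcb != NULL)
  {
    /* ... clear its wait state and make it ready to run (this is where
     * up_unblock_task() shows up in the cycle trace above) ...
     */
  }

leave_critical_section(flags);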


@@ -170,15 +170,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
return -EINVAL;
}
/* Get the next message from the message queue. We will disable
* pre-emption until we have completed the message received. This
* is not too bad because if the receipt takes a long time, it will
* be because we are blocked waiting for a message and pre-emption
* will be re-enabled while we are blocked
*/
sched_lock();
/* Furthermore, nxmq_wait_receive() expects to have interrupts disabled
* because messages can be sent from interrupt level.
*/
@@ -213,7 +204,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
if (result != OK)
{
leave_critical_section(flags);
sched_unlock();
return -result;
}
@@ -250,7 +240,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
}
sched_unlock();
return ret;
}
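
The timed receive gains the same simplification on its failure path. A condensed fragment of the error exit after the change (watchdog setup and the wait itself elided):

flags = enter_critical_section();

/* ... start the timeout watchdog and wait for a message; on failure
 * 'result' holds a positive errno value (ETIMEDOUT, EINTR, ...) ...
 */

if (result != OK)
  {
    /* Only the critical section has to be released; there is no
     * preemption lock left to undo.
     */

    leave_critical_section(flags);
    return -result;
  }

/* ... otherwise receive and copy out the message as in file_mq_receive() ... */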


@@ -185,10 +185,6 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
return -ENOMEM;
}
/* Get a pointer to the message queue */
sched_lock();
/* OpenGroup.org: "Under no circumstance shall the operation fail with a
* timeout if there is sufficient room in the queue to add the message
* immediately. The validity of the abstime parameter need not be checked
@@ -209,9 +205,7 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
* Currently nxmq_do_send() always returns OK.
*/
ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
sched_unlock();
return ret;
return nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
}
/* The message queue is full... We are going to wait. Now we must have a
@@ -285,10 +279,7 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
* Currently nxmq_do_send() always returns OK.
*/
ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
sched_unlock();
return ret;
return nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
/* Exit here with (1) the scheduler locked, (2) a message allocated, (3) a
* wdog allocated, and (4) interrupts disabled.
@@ -303,7 +294,6 @@ errout_in_critical_section:
errout_with_mqmsg:
nxmq_free_msg(mqmsg);
sched_unlock();
return ret;
}