SAMA5 NAND: Do not perform DMA on small transfers
This commit is contained in: parent 0a456987c6, commit 9632d2a7f6
@@ -3348,8 +3348,26 @@ config SAMA5_NAND_DMA
     default y
     depends on SAMA5_DMAC0
     ---help---
-        Use DMA to perform NAND data transfers. NOTE that DMAC0 must be
-        selected (DMAC1 cannot access NFC SRAM). (highly recommended)
+        Use memory-to-memory DMA to perform NAND data transfers. NOTE that
+        DMAC0 must be selected (DMAC1 cannot access NFC SRAM).
+
+config SAMA5_NAND_DMA_THRESHOLD
+    int "DMA threshold"
+    default 784
+    depends on SAMA5_NAND_DMA
+    ---help---
+        Defines a threshold value for performing memory-to-memory DMA.
+
+        If memory-to-memory DMAs are used, then two context switches will
+        occur: (1) when the NAND logic waits for the DMA to complete, and
+        (2) again when the DMA completes and the NAND logic is re-awakened.
+        Each context switch will require saving and restoring a set of
+        registers defining the task state.  Those registers include the
+        PSR, 16 general purpose registers, and 32 floating point registers,
+        or about 196 bytes per task state.  That is 2*196 = 392 bytes per
+        context switch and 784 bytes for both, plus processing overhead.
+        So there is certainly no reason to use a memory-to-memory DMA
+        transfer for much smaller blocks of data.
 
 config SAMA5_NAND_READYBUSY
     bool "NAND Ready/Busy"
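For reference, the 784-byte default of SAMA5_NAND_DMA_THRESHOLD follows directly from the estimate in the help text above. Below is a minimal, standalone sketch of that arithmetic; it is not part of the commit, and 4-byte (32-bit) registers are an assumption taken from the register counts in the help text.

#include <stdio.h>

int main(void)
{
  /* Registers saved/restored per task state, per the help text above:
   * the PSR, 16 general purpose registers, and 32 floating point
   * registers, assumed to be 4 bytes each.
   */

  unsigned int regs           = 1 + 16 + 32;        /* 49 registers */
  unsigned int task_state     = regs * 4;           /* 196 bytes */
  unsigned int context_switch = 2 * task_state;     /* save + restore = 392 */
  unsigned int two_switches   = 2 * context_switch; /* 784 bytes */

  printf("per task state:        %u bytes\n", task_state);
  printf("per context switch:    %u bytes\n", context_switch);
  printf("both context switches: %u bytes -> default threshold\n",
         two_switches);
  return 0;
}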
@@ -1599,11 +1599,13 @@ static int nand_read(struct sam_nandcs_s *priv, bool nfcsram,
     }
 
 #ifdef CONFIG_SAMA5_NAND_DMA
-  /* Then perform the transfer via DMA or not, depending on if we have
-   * a DMA channel assigned.
+  /* Then perform the transfer via memory-to-memory DMA or not, depending
+   * on whether we have a DMA channel assigned and whether the transfer is
+   * sufficiently large.  Small DMAs (e.g., for spare data) are not performed
+   * because the DMA context switches can take more time than the DMA itself.
    */
 
-  if (priv->dma)
+  if (priv->dma && buflen > CONFIG_SAMA5_NAND_DMA_THRESHOLD)
     {
       /* Transfer using DMA */
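The change to nand_read() only affects which copy path is chosen. A hedged sketch of that decision is shown below; the function name nand_transfer_sketch(), its parameters, and the memcpy() fallback are illustrative assumptions, not the driver's actual code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#ifndef CONFIG_SAMA5_NAND_DMA_THRESHOLD
#  define CONFIG_SAMA5_NAND_DMA_THRESHOLD 784
#endif

/* Hypothetical, simplified view of the transfer decision.  'dma' stands in
 * for priv->dma (non-NULL when a DMA channel was allocated).
 */

int nand_transfer_sketch(void *dma, uint8_t *dest, const uint8_t *src,
                         size_t buflen)
{
  if (dma != NULL && buflen > CONFIG_SAMA5_NAND_DMA_THRESHOLD)
    {
      /* Large transfer with a DMA channel available: a memory-to-memory
       * DMA is worth the two context switches it costs.  (The real driver
       * would start the DMA here and wait for its completion.)
       */

      return 1; /* would use DMA */
    }
  else
    {
      /* Small transfer (e.g., spare data) or no DMA channel: a programmed
       * copy avoids the context-switch overhead entirely.
       */

      memcpy(dest, src, buflen);
      return 0; /* used CPU copy */
    }
}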
@@ -1909,12 +1911,14 @@ static int nand_write(struct sam_nandcs_s *priv, bool nfcsram,
 
   dest += offset;
 
-  /* Then perform the transfer via DMA or not, depending on if we have
-   * a DMA channel assigned.
+#ifdef CONFIG_SAMA5_NAND_DMA
+  /* Then perform the transfer via memory-to-memory DMA or not, depending
+   * on whether we have a DMA channel assigned and whether the transfer is
+   * sufficiently large.  Small DMAs (e.g., for spare data) are not performed
+   * because the DMA context switches can take more time than the DMA itself.
    */
 
-#ifdef CONFIG_SAMA5_NAND_DMA
-  if (priv->dma)
+  if (priv->dma && buflen > CONFIG_SAMA5_NAND_DMA_THRESHOLD)
     {
       /* Transfer using DMA */
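nand_write() receives the same guard. To see why the default threshold separates the two cases mentioned in the comment, compare typical transfer sizes against it; the 2048-byte page and 64-byte spare figures below are illustrative assumptions, not values taken from the commit.

#include <stddef.h>
#include <stdio.h>

#ifndef CONFIG_SAMA5_NAND_DMA_THRESHOLD
#  define CONFIG_SAMA5_NAND_DMA_THRESHOLD 784
#endif

int main(void)
{
  /* Illustrative sizes only: many large-page NAND parts pair a 2048-byte
   * data area with a 64-byte spare area per page.
   */

  size_t pagesize  = 2048;
  size_t sparesize = 64;

  printf("page data:  %zu bytes -> %s\n", pagesize,
         pagesize > CONFIG_SAMA5_NAND_DMA_THRESHOLD ? "DMA" : "CPU copy");
  printf("spare data: %zu bytes -> %s\n", sparesize,
         sparesize > CONFIG_SAMA5_NAND_DMA_THRESHOLD ? "DMA" : "CPU copy");
  return 0;
}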
@@ -74,6 +74,24 @@
 #  endif
 #endif
 
+/* If memory-to-memory DMAs are used, then two context switches will occur:
+ * (1) when the NAND logic waits for the DMA to complete, and (2) again when
+ * the DMA completes and the NAND logic is re-awakened.  Each context switch
+ * will require saving and restoring a set of registers defining the task
+ * state.  Those registers include the PSR, 16 general purpose registers,
+ * and 32 floating point registers, or about 196 bytes per task state.  That
+ * is 2*196 = 392 bytes per context switch and 784 bytes for both, plus
+ * processing overhead.  So there is certainly no reason to use a
+ * memory-to-memory DMA transfer for much smaller blocks of data.
+ */
+
+#ifdef CONFIG_SAMA5_NAND_DMA
+#  ifndef CONFIG_SAMA5_NAND_DMA_THRESHOLD
+#    define CONFIG_SAMA5_NAND_DMA_THRESHOLD 784
+#  endif
+#endif
+
 /* Hardware ECC types. These are extensions to the NANDECC_HWECC value
  * defined in include/nuttx/mtd/nand_raw.h.
  *
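Finally, note that the #ifndef guard in the header means the 784-byte fallback applies only when the configuration system did not supply a value. Below is a hypothetical compile-time sanity check that could be layered on the same pattern; it is not present in the commit.

/* Hypothetical sanity check on the configured threshold.  It only fires
 * when the DMA path is compiled in, since the macro is otherwise left
 * undefined along with the code that uses it.
 */

#ifdef CONFIG_SAMA5_NAND_DMA
#  ifndef CONFIG_SAMA5_NAND_DMA_THRESHOLD
#    define CONFIG_SAMA5_NAND_DMA_THRESHOLD 784
#  endif
#  if CONFIG_SAMA5_NAND_DMA_THRESHOLD < 0
#    error "CONFIG_SAMA5_NAND_DMA_THRESHOLD must be a non-negative byte count"
#  endif
#endif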