arch/arm/src/stm32h7/stm32_dma.c: Optimization for stm32_sdma_capable

It should not be an error to clean the cache beyond the DMA source buffer
boundaries. Doing so merely pushes some unrelated data from the cache to
memory prematurely.

The only case where it could corrupt memory is when a DMA destination
buffer shares a cache line with the source buffer. But this can't happen,
because a destination buffer must always be cache-line aligned when using
write-back cache.

This patch enables DMA TX-only transfers from an unaligned source buffer
when using write-back cache.
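To make the cache-line arithmetic concrete, here is a minimal standalone
sketch (not part of the patch; the 32-byte line size and the addresses are
assumptions for the example). Cleaning the source buffer operates on whole
cache lines, so the cleaned region may start slightly before maddr and end
slightly after mend, which is harmless for a TX-only transfer:

/* Illustration only: how far a dcache clean can extend around an
 * unaligned TX source buffer.  The 32-byte line size (standing in for
 * ARMV7M_DCACHE_LINESIZE) and the addresses below are assumptions.
 */

#include <stdint.h>
#include <stdio.h>

#define DCACHE_LINESIZE 32u

int main(void)
{
  uintptr_t maddr = 0x20001005u;         /* unaligned source buffer start */
  uintptr_t mend  = maddr + 100u - 1u;   /* last byte of the buffer */

  /* Cache maintenance works on whole lines, so a clean covers this
   * aligned region, possibly including unrelated neighbouring data.
   */

  uintptr_t clean_start = maddr & ~(uintptr_t)(DCACHE_LINESIZE - 1u);
  uintptr_t clean_end   = mend | (uintptr_t)(DCACHE_LINESIZE - 1u);

  printf("source buffer: 0x%08lx .. 0x%08lx\n",
         (unsigned long)maddr, (unsigned long)mend);
  printf("lines cleaned: 0x%08lx .. 0x%08lx\n",
         (unsigned long)clean_start, (unsigned long)clean_end);
  return 0;
}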

Signed-off-by: Jukka Laitinen <jukka.laitinen@intel.com>
Jukka Laitinen, 2019-10-01 15:05:41 +03:00 (committed by patacongo)
parent c7acbb80d8
commit a532b0b53a


@@ -1523,19 +1523,21 @@ static bool stm32_sdma_capable(FAR stm32_dmacfg_t *cfg)
 #  if defined(CONFIG_ARMV7M_DCACHE) && \
      !defined(CONFIG_ARMV7M_DCACHE_WRITETHROUGH)
-  /* buffer alignment is required for DMA transfers with dcache in buffered
-   * mode (not write-through) because a) arch_invalidate_dcache could lose
-   * buffered writes and b) arch_flush_dcache could corrupt adjacent memory if
-   * the maddr and the mend+1, the next next address are not on
-   * ARMV7M_DCACHE_LINESIZE boundaries.
+  /* buffer alignment is required for RX DMA transfers with dcache in
+   * buffered mode (not write-through) because arch_invalidate_dcache could
+   * lose buffered writes
    */
 
-  if ((cfg->maddr & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
-      ((mend + 1) & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
+  if ((ccr & DMA_SCR_DIR_MASK) == DMA_SCR_DIR_P2M ||
+      (ccr & DMA_SCR_DIR_MASK) == DMA_SCR_DIR_M2M)
     {
-      dmainfo("stm32_dmacapable: dcache unaligned maddr:0x%08x mend:0x%08x\n",
-              cfg->maddr, mend);
-      return false;
+      if ((cfg->maddr & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
+          ((mend + 1) & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
+        {
+          dmainfo("stm32_dmacapable: dcache unaligned "
+                  "maddr:0x%08x mend:0x%08x\n", cfg->maddr, mend);
+          return false;
+        }
     }
 #  endif
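For reference, the policy after this change can be distilled into the
following hedged sketch (illustrative only, not the driver code: the enum,
the helper name and the 32-byte line size are assumptions). Peripheral-to-
memory (RX) and memory-to-memory transfers still require cache-line-aligned
memory buffers, because completion invalidates the dcache; memory-to-
peripheral (TX-only) transfers can now use an unaligned source buffer,
because cleaning past its boundaries is harmless:

/* Sketch of the direction-dependent alignment rule; names and the
 * 32-byte line size are illustrative assumptions, not NuttX APIs.
 */

#include <stdbool.h>
#include <stdint.h>

#define DCACHE_LINESIZE 32u

enum dma_dir
{
  DIR_M2P,   /* memory to peripheral (TX) */
  DIR_P2M,   /* peripheral to memory (RX) */
  DIR_M2M    /* memory to memory */
};

static bool dma_buffer_ok(enum dma_dir dir, uintptr_t maddr, uintptr_t mend)
{
  if (dir == DIR_P2M || dir == DIR_M2M)
    {
      /* The memory buffer is a DMA destination: it will be invalidated
       * from the dcache, so both ends must be cache-line aligned.
       */

      return ((maddr & (DCACHE_LINESIZE - 1u)) == 0u) &&
             (((mend + 1u) & (DCACHE_LINESIZE - 1u)) == 0u);
    }

  /* TX-only: cleaning the source buffer is always safe, even unaligned */

  return true;
}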