From a1ebd499eac29db5d74ee7a2e5c4b6588b87522b Mon Sep 17 00:00:00 2001
From: David Sidrane
Date: Fri, 23 Sep 2022 00:26:40 -0700
Subject: [PATCH] stm32h7:SDMMC fix unaligned access for buffers not on 32 bit boundaries

The IDMA needs to have 32 bit word alignment; in fact, it will AND
off the lower 2 bits of the value stored in IDMABASE0R. This bug was
masked by CONFIG_ARMV7M_DCACHE forcing proper word alignment, and
also by FAT_DMAMEMORY being aligned.

This commit extends the unaligned logic (used for dcache) to take
into account the need for a buffer copy when the buffer is not on a
32 bit word boundary. It leverages the fact that when
CONFIG_ARMV7M_DCACHE is not defined, the up_xxxxx_dcache functions
are no-ops.
---
 arch/arm/src/stm32h7/stm32_sdmmc.c | 55 +++++++++++++++---------------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/arch/arm/src/stm32h7/stm32_sdmmc.c b/arch/arm/src/stm32h7/stm32_sdmmc.c
index 1a934f3e47..fe6b635b61 100644
--- a/arch/arm/src/stm32h7/stm32_sdmmc.c
+++ b/arch/arm/src/stm32h7/stm32_sdmmc.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -381,10 +382,14 @@ struct stm32_dev_s
 #if !defined(CONFIG_STM32H7_SDMMC_IDMA)
   struct work_s      cbfifo;       /* Monitor for Lame FIFO */
 #endif
-  uint8_t            rxfifo[FIFO_SIZE_IN_BYTES] /* To offload with IDMA */
+  uint8_t            rxfifo[FIFO_SIZE_IN_BYTES] /* To offload with IDMA and support un-aligned buffers */
                      aligned_data(ARMV7M_DCACHE_LINESIZE);
-#if defined(CONFIG_ARMV7M_DCACHE) && defined(CONFIG_STM32H7_SDMMC_IDMA)
-  bool               unaligned_rx; /* read buffer is not cache-line aligned */
+  bool               unaligned_rx; /* read buffer is not cache-line or 32 bit aligned */
+
+  /* Input dma buffer for unaligned transfers */
+#if defined(CONFIG_STM32H7_SDMMC_IDMA)
+  uint8_t            sdmmc_rxbuffer[SDMMC_MAX_BLOCK_SIZE]
+                     aligned_data(ARMV7M_DCACHE_LINESIZE);
 #endif
 };
 
@@ -455,7 +460,7 @@ static void stm32_datadisable(struct stm32_dev_s *priv);
 #ifndef CONFIG_STM32H7_SDMMC_IDMA
 static void stm32_sendfifo(struct stm32_dev_s *priv);
 static void stm32_recvfifo(struct stm32_dev_s *priv);
-#elif defined(CONFIG_ARMV7M_DCACHE)
+#else
 static void stm32_recvdma(struct stm32_dev_s *priv);
 #endif
 static void stm32_eventtimeout(wdparm_t arg);
@@ -656,12 +661,6 @@ struct stm32_dev_s g_sdmmcdev2 =
 static struct stm32_sampleregs_s g_sampleregs[DEBUG_NSAMPLES];
 #endif
 
-/* Input dma buffer for unaligned transfers */
-#if defined(CONFIG_ARMV7M_DCACHE) && defined(CONFIG_STM32H7_SDMMC_IDMA)
-static uint8_t sdmmc_rxbuffer[SDMMC_MAX_BLOCK_SIZE]
-aligned_data(ARMV7M_DCACHE_LINESIZE);
-#endif
-
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
@@ -1147,14 +1146,14 @@ static void stm32_dataconfig(struct stm32_dev_s *priv, uint32_t timeout,
 {
   DEBUGASSERT((dlen % priv->blocksize) == 0);
 
-#if defined(CONFIG_STM32H7_SDMMC_IDMA) && defined(CONFIG_ARMV7M_DCACHE)
-  /* If cache is enabled, and this is an unaligned receive,
-   * receive one block at a time to the internal buffer
+#if defined(CONFIG_STM32H7_SDMMC_IDMA)
+  /* If this is an unaligned receive, then receive one block at a
+   * time to the internal buffer
    */
 
   if (priv->unaligned_rx)
     {
-      DEBUGASSERT(priv->blocksize <= sizeof(sdmmc_rxbuffer));
+      DEBUGASSERT(priv->blocksize <= sizeof(priv->sdmmc_rxbuffer));
       dlen = priv->blocksize;
     }
 #endif
@@ -1371,7 +1370,7 @@ static void stm32_recvfifo(struct stm32_dev_s *priv)
  *
  ****************************************************************************/
 
-#if defined(CONFIG_STM32H7_SDMMC_IDMA) && defined(CONFIG_ARMV7M_DCACHE)
+#if defined(CONFIG_STM32H7_SDMMC_IDMA)
 static void stm32_recvdma(struct stm32_dev_s *priv)
 {
   uint32_t dctrl;
@@ -1384,12 +1383,13 @@ static void stm32_recvdma(struct stm32_dev_s *priv)
 
   /* Copy the received data to client buffer */
 
-  memcpy(priv->buffer, sdmmc_rxbuffer, priv->blocksize);
+  memcpy(priv->buffer, priv->sdmmc_rxbuffer, priv->blocksize);
 
   /* Invalidate the cache before receiving next block */
 
-  up_invalidate_dcache((uintptr_t)sdmmc_rxbuffer,
-                       (uintptr_t)sdmmc_rxbuffer + priv->blocksize);
+  up_invalidate_dcache((uintptr_t)priv->sdmmc_rxbuffer,
+                       (uintptr_t)priv->sdmmc_rxbuffer +
+                       priv->blocksize);
 
   /* Update how much there is left to receive */
 
@@ -1743,7 +1743,6 @@ static int stm32_sdmmc_interrupt(int irq, void *context, void *arg)
               memcpy(priv->buffer, priv->rxfifo, priv->remaining);
             }
 #else
-#  if defined(CONFIG_ARMV7M_DCACHE)
           if (priv->receivecnt)
             {
               /* Invalidate dcache, and copy the received data into
@@ -1753,7 +1752,6 @@ static int stm32_sdmmc_interrupt(int irq, void *context, void *arg)
               stm32_recvdma(priv);
             }
           else
-#  endif
 #endif
             {
               /* Then terminate the transfer.
@@ -3192,8 +3190,9 @@ static int stm32_dmarecvsetup(struct sdio_dev_s *dev,
        * buffer instead.
        */
 
-      up_invalidate_dcache((uintptr_t)sdmmc_rxbuffer,
-                           (uintptr_t)sdmmc_rxbuffer + priv->blocksize);
+      up_invalidate_dcache((uintptr_t)priv->sdmmc_rxbuffer,
+                           (uintptr_t)priv->sdmmc_rxbuffer +
+                           priv->blocksize);
 
       priv->unaligned_rx = true;
     }
@@ -3204,6 +3203,12 @@ static int stm32_dmarecvsetup(struct sdio_dev_s *dev,
       priv->unaligned_rx = false;
     }
 
+#else
+
+  /* IDMA access must be 32 bit aligned */
+
+  priv->unaligned_rx = ((uintptr_t)buffer & 0x3) != 0;
+
 #endif
 
   /* Reset the DPSM configuration */
@@ -3229,14 +3234,12 @@ static int stm32_dmarecvsetup(struct sdio_dev_s *dev,
 
   /* Configure the RX DMA */
 
-#if defined(CONFIG_ARMV7M_DCACHE)
   if (priv->unaligned_rx)
     {
-      sdmmc_putreg32(priv, (uintptr_t)sdmmc_rxbuffer,
+      sdmmc_putreg32(priv, (uintptr_t)priv->sdmmc_rxbuffer,
                      STM32_SDMMC_IDMABASE0R_OFFSET);
     }
   else
-#endif
     {
       sdmmc_putreg32(priv, (uintptr_t)priv->buffer,
                      STM32_SDMMC_IDMABASE0R_OFFSET);
@@ -3284,9 +3287,7 @@ static int stm32_dmasendsetup(struct sdio_dev_s *dev,
   DEBUGASSERT(stm32_dmapreflight(dev, buffer, buflen) == 0);
 #endif
 
-#if defined(CONFIG_ARMV7M_DCACHE)
   priv->unaligned_rx = false;
-#endif
 
   /* Reset the DPSM configuration */
 
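
Note for reviewers: the essence of the fix is the new 32 bit alignment
test added in stm32_dmarecvsetup() plus the per-block copy in
stm32_recvdma(). The sketch below is a minimal standalone illustration
of that bounce-buffer strategy; it is not code from this patch.
receive_block(), idma_rx(), bounce_buf, and BLOCKSIZE are hypothetical
stand-ins for the driver's stm32_recvdma(), the IDMA engine programmed
via IDMABASE0R, priv->sdmmc_rxbuffer, and priv->blocksize. Only the
0x3 alignment test and the one-block-at-a-time copy are taken from the
patch itself.

/* Illustrative sketch only -- not part of the patch.  It mimics the
 * IDMA behavior described above: the hardware ANDs off the low 2 bits
 * of the destination address, so an unaligned client buffer must be
 * serviced through an aligned internal "bounce" buffer, one block at
 * a time.  All names here are hypothetical.
 */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 512u    /* stand-in for priv->blocksize */

/* Word-aligned internal buffer, analogous to priv->sdmmc_rxbuffer */

static uint8_t bounce_buf[BLOCKSIZE] __attribute__((aligned(4)));

/* Stand-in for the IDMA engine: like IDMABASE0R, it silently masks
 * off the low 2 bits of the destination address.
 */

static void idma_rx(const uint8_t *card_data, uint8_t *dest, size_t len)
{
  uint8_t *masked = (uint8_t *)((uintptr_t)dest & ~(uintptr_t)0x3);

  memcpy(masked, card_data, len);  /* data lands at the MASKED address */
}

static void receive_block(const uint8_t *card_data, uint8_t *client_buf)
{
  if (((uintptr_t)client_buf & 0x3) == 0)
    {
      /* Aligned: the IDMA may target the client buffer directly */

      idma_rx(card_data, client_buf, BLOCKSIZE);
    }
  else
    {
      /* Unaligned: receive into the aligned bounce buffer, then copy.
       * In the real driver up_invalidate_dcache() is also called here;
       * without CONFIG_ARMV7M_DCACHE that call is a no-op.
       */

      idma_rx(card_data, bounce_buf, BLOCKSIZE);
      memcpy(client_buf, bounce_buf, BLOCKSIZE);
    }
}

int main(void)
{
  static uint8_t storage[BLOCKSIZE + 4] __attribute__((aligned(4)));
  uint8_t card[BLOCKSIZE];
  uint8_t *unaligned = &storage[1];  /* deliberately mis-aligned */

  memset(card, 0xab, sizeof(card));
  receive_block(card, unaligned);

  /* Without the bounce buffer the data would have landed at
   * &storage[0]; with it, the client sees the right bytes.
   */

  printf("first byte: 0x%02x\n", unaligned[0]);
  return 0;
}

Because the same unaligned_rx flag now covers both cases, no extra
conditional compilation is needed around the copy path: when
CONFIG_ARMV7M_DCACHE is not defined, the up_xxxxx_dcache calls simply
compile to no-ops, which is exactly what the commit message leverages.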