Check the return of nxsem_wait_uninterruptible().
This commit is for the DMA files under arch/ that were missing from an earlier PR.
parent 7dbcc71e0d
commit 952e7f6e17
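The change applies a single pattern throughout the diff below: every call to nxsem_wait_uninterruptible() now captures the return value and, if it is a negated errno (for example when the waiting thread is canceled), the function backs out, releases any semaphore counts it already holds, and reports the failure to its caller. The following is a minimal sketch of that pattern in isolation; the names my_dmac_s, g_dmac and my_dmachannel are placeholders invented for this example, not identifiers from the drivers touched by this commit.

#include <nuttx/semaphore.h>
#include <stddef.h>

struct my_dmac_s            /* Placeholder controller state */
{
  sem_t chansem;            /* Counts the free DMA channels */
  sem_t exclsem;            /* Serializes access to the channel list */
};

static struct my_dmac_s g_dmac;

/* Placeholder allocator showing the error-checked wait pattern */

static void *my_dmachannel(void)
{
  int ret;

  /* Take a count from the channel semaphore; bail out if the wait fails */

  ret = nxsem_wait_uninterruptible(&g_dmac.chansem);
  if (ret < 0)
    {
      return NULL;
    }

  /* Get exclusive access to the channel list; on failure, return the
   * channel count taken above so the semaphore stays balanced.
   */

  ret = nxsem_wait_uninterruptible(&g_dmac.exclsem);
  if (ret < 0)
    {
      nxsem_post(&g_dmac.chansem);
      return NULL;
    }

  /* ... select and mark a channel here ... */

  nxsem_post(&g_dmac.exclsem);
  return &g_dmac;           /* A real driver would return the channel */
}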
@@ -172,7 +172,8 @@ static inline struct dma_descriptor_s *cxd56_get_descriptor(
 {
   uintptr_t base;

-  base = alt ? getreg32(CXD56_DMA_ALTCTRLBASE) : getreg32(CXD56_DMA_CTRLBASE);
+  base = alt ?
+         getreg32(CXD56_DMA_ALTCTRLBASE) : getreg32(CXD56_DMA_CTRLBASE);
   return ((struct dma_descriptor_s *)base) + dmach->chan;
 }

@@ -203,6 +204,7 @@ static int cxd56_dmac_interrupt(int irq, void *context, FAR void *arg)
           putreg32(mask, CXD56_DMA_ERR);
+          result = EIO;
         }

       if (done & mask)
         {
           /* Clear DMA done status */
@@ -304,6 +306,7 @@ DMA_HANDLE cxd56_udmachannel(void)
   struct dma_channel_s *dmach;
   unsigned int ch;
   uint32_t bit;
+  int ret;

   /* Take a count from the channel counting semaphore. We may block
    * if there are no free channels. When we get the count, then we can
@@ -311,11 +314,20 @@ DMA_HANDLE cxd56_udmachannel(void)
    * reserved for us.
    */

-  nxsem_wait_uninterruptible(&g_dmac.chansem);
+  ret = nxsem_wait_uninterruptible(&g_dmac.chansem);
+  if (ret < 0)
+    {
+      return NULL;
+    }

   /* Get exclusive access to the DMA channel list */

-  nxsem_wait_uninterruptible(&g_dmac.exclsem);
+  ret = nxsem_wait_uninterruptible(&g_dmac.exclsem);
+  if (ret < 0)
+    {
+      nxsem_post(&g_dmac.chansem);
+      return NULL;
+    }

   /* Search for an available DMA channel */

@@ -359,11 +371,11 @@ DMA_HANDLE cxd56_udmachannel(void)
  * Name: cxd56_udmafree
  *
  * Description:
- *   Release a DMA channel. If another thread is waiting for this DMA channel
- *   in a call to cxd56_udmachannel, then this function will re-assign the
- *   DMA channel to that thread and wake it up. NOTE: The 'handle' used
- *   in this argument must NEVER be used again until cxd56_udmachannel() is
- *   called again to re-gain access to the channel.
+ *   Release a DMA channel. If another thread is waiting for this DMA
+ *   channel in a call to cxd56_udmachannel, then this function will
+ *   re-assign the DMA channel to that thread and wake it up. NOTE: The
+ *   'handle' used in this argument must NEVER be used again until
+ *   cxd56_udmachannel() is called again to re-gain access to the channel.
  *
  * Returned Value:
  *   None
@@ -385,8 +397,8 @@ void cxd56_udmafree(DMA_HANDLE handle)

   putreg32(1 << dmach->chan, CXD56_DMA_CHENC);

-  /* Mark the channel no longer in use. Clearing the in-use flag is an atomic
-   * operation and so should be safe.
+  /* Mark the channel no longer in use. Clearing the in-use flag is an
+   * atomic operation and so should be safe.
    */

   dmach->inuse = false;
@@ -431,8 +443,8 @@ void cxd56_rxudmasetup(DMA_HANDLE handle, uintptr_t paddr, uintptr_t maddr,
   shift = cxd56_align_shift(config);
   mask = ALIGN_MASK(shift);

-  /* Make sure that the number of bytes we are asked to transfer is a multiple
-   * of the transfer size.
+  /* Make sure that the number of bytes we are asked to transfer is a
+   * multiple of the transfer size.
    */

   xfersize = (1 << shift);
@@ -528,8 +540,8 @@ void cxd56_txudmasetup(DMA_HANDLE handle, uintptr_t paddr, uintptr_t maddr,
   shift = cxd56_align_shift(config);
   mask = ALIGN_MASK(shift);

-  /* Make sure that the number of bytes we are asked to transfer is a multiple
-   * of the transfer size.
+  /* Make sure that the number of bytes we are asked to transfer is a
+   * multiple of the transfer size.
    */

   xfersize = (1 << shift);
@@ -4,8 +4,9 @@
  * Copyright (C) 2019 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
- * This file was leveraged from the NuttX i.MXRT port. Portions of that eDMA logic
- * derived from NXP sample code which has a compatible BSD 3-clause license:
+ * This file was leveraged from the NuttX i.MXRT port. Portions of that eDMA
+ * logic derived from NXP sample code which has a compatible BSD 3-clause
+ * license:
  *
  *   Copyright (c) 2015, Freescale Semiconductor, Inc.
  *   Copyright 2016-2017 NXP
@@ -125,8 +126,8 @@ struct s32k1xx_dmach_s

 #if CONFIG_S32K1XX_EDMA_NTCD > 0
   /* That TCD list is linked through the DLAST SGA field. The first transfer
-   * to be performed is at the head of the list. Subsequent TCDs are added at
-   * the tail of the list.
+   * to be performed is at the head of the list. Subsequent TCDs are added
+   * at the tail of the list.
    */

   struct s32k1xx_edmatcd_s *head;   /* First TCD in the list */
@@ -182,9 +183,9 @@ static struct s32k1xx_edmatcd_s g_tcd_pool[CONFIG_S32K1XX_EDMA_NTCD]
  *
  ****************************************************************************/

-static void s32k1xx_takechsem(void)
+static int s32k1xx_takechsem(void)
 {
-  nxsem_wait_uninterruptible(&g_edma.chsem);
+  return nxsem_wait_uninterruptible(&g_edma.chsem);
 }

 static inline void s32k1xx_givechsem(void)
@@ -301,13 +302,14 @@ static inline void s32k1xx_tcd_initialize(void)
 }
 #endif

-/************************************************************************************
+/****************************************************************************
  * Name: s32k1xx_tcd_chanlink
  *
  * Description:
- *   This function configures either a minor link or a major link. The minor link
- *   means the channel link is triggered every time CITER decreases by 1. The major
- *   link means that the channel link is triggered when the CITER is exhausted.
+ *   This function configures either a minor link or a major link. The minor
+ *   link means the channel link is triggered every time CITER decreases by 1
+ *   The major link means that the channel link is triggered when the CITER
+ *   is exhausted.
  *
  * NOTE: Users should ensure that DONE flag is cleared before calling this
  *   interface, or the configuration is invalid.
@@ -320,10 +322,11 @@ static inline void s32k1xx_tcd_initialize(void)
  * Returned Value:
  *   None
  *
-************************************************************************************/
+****************************************************************************/

 #ifdef CONFIG_S32K1XX_EDMA_ELINK
-static inline void s32k1xx_tcd_chanlink(uint8_t flags, struct s32k1xx_dmach_s *linkch,
+static inline void s32k1xx_tcd_chanlink(uint8_t flags,
+                                        struct s32k1xx_dmach_s *linkch,
                                         struct s32k1xx_edmatcd_s *tcd)
 {
   uint16_t regval16;
@@ -392,7 +395,7 @@ static inline void s32k1xx_tcd_chanlink(uint8_t flags, struct s32k1xx_dmach_s *l
  ****************************************************************************/

 static inline void s32k1xx_tcd_configure(struct s32k1xx_edmatcd_s *tcd,
-                        const struct s32k1xx_edma_xfrconfig_s *config)
+                                const struct s32k1xx_edma_xfrconfig_s *config)
 {
   tcd->saddr = config->saddr;
   tcd->soff = config->soff;
@@ -412,8 +415,9 @@ static inline void s32k1xx_tcd_configure(struct s32k1xx_edmatcd_s *tcd,
 #ifdef CONFIG_S32K1XX_EDMA_ELINK
   /* Configure major/minor link mapping */

-  s32k1xx_tcd_chanlink(config->flags, (struct s32k1xx_dmach_s *)config->linkch,
-                       tcd);
+  s32k1xx_tcd_chanlink(config->flags,
+                       (struct s32k1xx_dmach_s *)config->linkch,
+                       tcd);
 #endif
 }

@@ -826,7 +830,8 @@ void weak_function up_dma_initialize(void)
  *
  * Input Parameters:
  *   dmamux - DMAMUX configuration see DMAMUX channel configuration register
- *            bit-field definitions in hardware/s32k1xx_dmamux.h. Settings include:
+ *            bit-field definitions in hardware/s32k1xx_dmamux.h.
+ *            Settings include:
  *
  *            DMAMUX_CHCFG_SOURCE  Chip-specific DMA source (required)
  *            DMAMUX_CHCFG_AON     DMA Channel Always Enable (optional)
@@ -854,11 +859,17 @@ DMACH_HANDLE s32k1xx_dmach_alloc(uint32_t dmamux, uint8_t dchpri)
 {
   struct s32k1xx_dmach_s *dmach;
   unsigned int chndx;
+  int ret;

   /* Search for an available DMA channel */

   dmach = NULL;
-  s32k1xx_takechsem();
+  ret = s32k1xx_takechsem();
+  if (ret < 0)
+    {
+      return NULL;
+    }
+
   for (chndx = 0; chndx < S32K1XX_EDMA_NCHANNELS; chndx++)
     {
       struct s32k1xx_dmach_s *candidate = &g_edma.dmach[chndx];
@@ -911,8 +922,8 @@ DMACH_HANDLE s32k1xx_dmach_alloc(uint32_t dmamux, uint8_t dchpri)
  *
  * Description:
  *   Release a DMA channel. NOTE: The 'handle' used in this argument must
- *   NEVER be used again until s32k1xx_dmach_alloc() is called again to re-gain
- *   a valid handle.
+ *   NEVER be used again until s32k1xx_dmach_alloc() is called again to
+ *   re-gain a valid handle.
  *
  * Returned Value:
  *   None
@@ -926,7 +937,8 @@ void s32k1xx_dmach_free(DMACH_HANDLE handle)
   uint8_t regval8;

   dmainfo("dmach: %p\n", dmach);
-  DEBUGASSERT(dmach != NULL && dmach->inuse && dmach->state != S32K1XX_DMA_ACTIVE);
+  DEBUGASSERT(dmach != NULL && dmach->inuse &&
+              dmach->state != S32K1XX_DMA_ACTIVE);

   /* Mark the channel no longer in use. Clearing the inuse flag is an atomic
    * operation and so should be safe.
@@ -934,7 +946,7 @@ void s32k1xx_dmach_free(DMACH_HANDLE handle)

   dmach->flags = 0;
   dmach->inuse = false;                   /* No longer in use */
-  dmach->state = S32K1XX_DMA_IDLE;         /* Better not be active! */
+  dmach->state = S32K1XX_DMA_IDLE;        /* Better not be active! */

   /* Make sure that the channel is disabled. */

@@ -953,10 +965,11 @@ void s32k1xx_dmach_free(DMACH_HANDLE handle)
  * Description:
  *   This function adds the eDMA transfer to the DMA sequence. The request
  *   is setup according to the content of the transfer configuration
- *   structure. For "normal" DMA, s32k1xx_dmach_xfrsetup is called only once.
+ *   structure. For "normal" DMA, s32k1xx_dmach_xfrsetup is called only once.
  *   Scatter/gather DMA is accomplished by calling this function repeatedly,
  *   once for each transfer in the sequence. Scatter/gather DMA processing
- *   is enabled automatically when the second transfer configuration is received.
+ *   is enabled automatically when the second transfer configuration is
+ *   received.
  *
  *   This function may be called multiple times to handle multiple,
  *   discontinuous transfers (scatter-gather)
@@ -1090,8 +1103,8 @@ int s32k1xx_dmach_xfrsetup(DMACH_HANDLE *handle,

   /* Configure channel TCD registers to the values specified in config. */

-  s32k1xx_tcd_configure((struct s32k1xx_edmatcd_s *)S32K1XX_EDMA_TCD_BASE(dmach->chan),
-                        config);
+  s32k1xx_tcd_configure((struct s32k1xx_edmatcd_s *)
+                        S32K1XX_EDMA_TCD_BASE(dmach->chan), config);

   /* Enable the DONE interrupt when the major iteration count completes. */

@@ -1132,38 +1145,41 @@ int s32k1xx_dmach_xfrsetup(DMACH_HANDLE *handle,
   return OK;
 }

-/************************************************************************************
+/****************************************************************************
  * Name: s32k1xx_dmach_start
  *
  * Description:
- *   Start the DMA transfer. This function should be called after the final call
- *   to s32k1xx_dmach_xfrsetup() in order to avoid race conditions.
+ *   Start the DMA transfer. This function should be called after the final
+ *   call to s32k1xx_dmach_xfrsetup() in order to avoid race conditions.
  *
- *   At the conclusion of each major DMA loop, a callback to the user-provided
- *   function is made: |For "normal" DMAs, this will correspond to the DMA DONE
- *   interrupt; for scatter gather DMAs, multiple interrupts will be generated
- *   with the final being the DONE interrupt.
+ *   At the conclusion of each major DMA loop, a callback to the user
+ *   provided function is made: |For "normal" DMAs, this will correspond to
+ *   the DMA DONE interrupt; for scatter gather DMAs, multiple interrupts
+ *   will be generated with the final being the DONE interrupt.
  *
- *   At the conclusion of the DMA, the DMA channel is reset, all TCDs are freed, and
- *   the callback function is called with the the success/fail result of the DMA.
+ *   At the conclusion of the DMA, the DMA channel is reset, all TCDs are
+ *   freed, and the callback function is called with the the success/fail
+ *   result of the DMA.
  *
- *   NOTE: On Rx DMAs (peripheral-to-memory or memory-to-memory), it is necessary
- *   to invalidate the destination memory. That is not done automatically by the
- *   DMA module. Invalidation of the destination memory regions is the
- *   responsibility of the caller.
+ *   NOTE: On Rx DMAs (peripheral-to-memory or memory-to-memory), it is
+ *   necessary to invalidate the destination memory. That is not done
+ *   automatically by the DMA module. Invalidation of the destination memory
+ *   regions is the responsibility of the caller.
  *
  * Input Parameters:
  *   handle   - DMA channel handle created by s32k1xx_dmach_alloc()
- *   callback - The callback to be invoked when the DMA is completes or is aborted.
+ *   callback - The callback to be invoked when the DMA is completes or is
+ *              aborted.
  *   arg      - An argument that accompanies the callback
  *
  * Returned Value:
  *   Zero (OK) is returned on success; a negated errno value is returned on
  *   any failure.
  *
-************************************************************************************/
+****************************************************************************/

-int s32k1xx_dmach_start(DMACH_HANDLE handle, edma_callback_t callback, void *arg)
+int s32k1xx_dmach_start(DMACH_HANDLE handle, edma_callback_t callback,
+                        void *arg)
 {
   struct s32k1xx_dmach_s *dmach = (struct s32k1xx_dmach_s *)handle;
   irqstate_t flags;
@@ -1182,8 +1198,8 @@ int s32k1xx_dmach_start(DMACH_HANDLE handle, edma_callback_t callback, void *arg
   dmach->state = S32K1XX_DMA_ACTIVE;

 #if CONFIG_S32K1XX_EDMA_NTCD > 0
-  /* Although it is not recommended, it might be possible to call this function
-   * multiple times while adding TCDs on the fly.
+  /* Although it is not recommended, it might be possible to call this
+   * function multiple times while adding TCDs on the fly.
    */

   if (dmach->state != S32K1XX_DMA_ACTIVE)
@@ -1204,13 +1220,13 @@ int s32k1xx_dmach_start(DMACH_HANDLE handle, edma_callback_t callback, void *arg
   return OK;
 }

-/************************************************************************************
+/****************************************************************************
  * Name: s32k1xx_dmach_stop
  *
  * Description:
- *   Cancel the DMA. After s32k1xx_dmach_stop() is called, the DMA channel is reset,
- *   all TCDs are freed, and s32k1xx_dmarx/txsetup() must be called before
- *   s32k1xx_dmach_start() can be called again
+ *   Cancel the DMA. After s32k1xx_dmach_stop() is called, the DMA channel
+ *   is reset, all TCDs are freed, and s32k1xx_dmarx/txsetup() must be called
+ *   before s32k1xx_dmach_start() can be called again.
  *
  * Input Parameters:
  *   handle - DMA channel handle created by s32k1xx_dmach_alloc()
@@ -1218,7 +1234,7 @@ int s32k1xx_dmach_start(DMACH_HANDLE handle, edma_callback_t callback, void *arg
  * Returned Value:
  *   None.
  *
-************************************************************************************/
+****************************************************************************/

 void s32k1xx_dmach_stop(DMACH_HANDLE handle)
 {
@@ -1256,7 +1272,8 @@ void s32k1xx_dmach_stop(DMACH_HANDLE handle)
  *   initial value of NBYTES (for example copied before enabling the channel)
  *   is needed. The formula to calculate it is shown below:
  *
- *     RemainingBytes = RemainingMajorLoopCount * NBYTES(initially configured)
+ *     RemainingBytes = RemainingMajorLoopCount *
+ *                      NBYTES(initially configured)
  *
  * Input Parameters:
  *   handle - DMA channel handle created by s32k1xx_dmach_alloc()
@@ -105,6 +105,7 @@
 /****************************************************************************
  * Private Types
  ****************************************************************************/

 /* This structure maps a peripheral ID to an DMA channel */

 struct sam_pidmap_s
@@ -251,7 +252,6 @@ static const struct sam_pidmap_s g_xdmac1_rxchan[] =
   { SAM_PID_DBGU, XDMAC1_CH_DBGU_RX }, /* DBGU Receive */
   { SAM_PID_ADC,  XDMAC1_CH_ADC_RX },  /* ADC Receive */
   { SAM_PID_SMD,  XDMAC1_CH_SMD_RX },  /* SMD Receive */
-
 };
 #define NXDMAC1_RXCHANNELS (sizeof(g_xdmac1_rxchan) / sizeof(struct sam_pidmap_s))

@@ -642,9 +642,9 @@ static struct sam_xdmac_s g_xdmac1 =
  *
  ****************************************************************************/

-static void sam_takechsem(struct sam_xdmac_s *xdmac)
+static int sam_takechsem(struct sam_xdmac_s *xdmac)
 {
-  nxsem_wait_uninterruptible(&xdmac->chsem);
+  return nxsem_wait_uninterruptible(&xdmac->chsem);
 }

 static inline void sam_givechsem(struct sam_xdmac_s *xdmac)
@@ -660,9 +660,9 @@ static inline void sam_givechsem(struct sam_xdmac_s *xdmac)
  *
  ****************************************************************************/

-static void sam_takedsem(struct sam_xdmac_s *xdmac)
+static int sam_takedsem(struct sam_xdmac_s *xdmac)
 {
-  nxsem_wait_uninterruptible(&xdmac->dsem);
+  return nxsem_wait_uninterruptible(&xdmac->dsem);
 }

 static inline void sam_givedsem(struct sam_xdmac_s *xdmac)
@@ -1082,7 +1082,8 @@ static inline uint32_t sam_txcc(struct sam_xdmach_s *xdmach)

       /* TX -> Destination is peripheral */

-      if ((xdmach->flags & DMACH_FLAG_PERIPHAHB_MASK) == DMACH_FLAG_PERIPHAHB_AHB_IF1)
+      if ((xdmach->flags & DMACH_FLAG_PERIPHAHB_MASK) ==
+          DMACH_FLAG_PERIPHAHB_AHB_IF1)
         {
           regval |= XDMACH_CC_DIF;
         }
@@ -1233,7 +1234,8 @@ static inline uint32_t sam_rxcc(struct sam_xdmach_s *xdmach)
        * RX -> Source is peripheral
        */

-      if ((xdmach->flags & DMACH_FLAG_PERIPHAHB_MASK) == DMACH_FLAG_PERIPHAHB_AHB_IF1)
+      if ((xdmach->flags & DMACH_FLAG_PERIPHAHB_MASK) ==
+          DMACH_FLAG_PERIPHAHB_AHB_IF1)
         {
           regval |= XDMACH_CC_SIF;
         }
@@ -1296,6 +1298,7 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,
   struct sam_xdmac_s *xdmac = sam_controller(xdmach);
   struct chnext_view1_s *descr = NULL;
   int i;
+  int ret;

   /* Sanity check -- csa == 0 is the indication that the link is unused.
    * Obviously setting it to zero would break that usage.
@@ -1305,11 +1308,15 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,
   if (csa != 0)
 #endif
     {
-      /* Table a descriptor table semaphore count. When we get one, then there
-       * is at least one free descriptor in the table and it is ours.
+      /* Table a descriptor table semaphore count. When we get one, then
+       * there is at least one free descriptor in the table and it is ours.
        */

-      sam_takedsem(xdmac);
+      ret = sam_takedsem(xdmac);
+      if (ret < 0)
+        {
+          return NULL;
+        }

       /* Examine each link list entry to find an available one -- i.e., one
        * with csa == 0. That csa field is set to zero by the DMA transfer
@@ -1317,7 +1324,13 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,
        * that is an atomic operation.
        */

-      sam_takechsem(xdmac);
+      ret = sam_takechsem(xdmac);
+      if (ret < 0)
+        {
+          sam_givedsem(xdmac);
+          return NULL;
+        }

       for (i = 0; i < CONFIG_SAMA5_NLLDESC; i++)
         {
           if (xdmac->descr[i].csa == 0)
@@ -1338,12 +1351,14 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,
                * the list
                */

-              DEBUGASSERT(xdmach->llhead == NULL && xdmach->lltail == NULL);
+              DEBUGASSERT(xdmach->llhead == NULL &&
+                          xdmach->lltail == NULL);
               xdmach->llhead = descr;
             }
           else
             {
-              DEBUGASSERT(xdmach->llhead != NULL && xdmach->lltail == prev);
+              DEBUGASSERT(xdmach->llhead != NULL &&
+                          xdmach->lltail == prev);

               /* When the second link is added to the list, that is the
                * cue that we are going to do the link list transfer.
@@ -1355,8 +1370,8 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,
               prev->cubc |= CHNEXT_UBC_NDE;

               /* Link the previous tail to the new tail.
-               * REVISIT: This assumes that the next description is fetched
-               * via AHB IF0.
+               * REVISIT: This assumes that the next description is
+               * fetched via AHB IF0.
                */

               prev->cnda = (uint32_t)sam_physramaddr((uintptr_t)descr);
@@ -1366,12 +1381,13 @@ sam_allocdesc(struct sam_xdmach_s *xdmach, struct chnext_view1_s *prev,

           xdmach->lltail = descr;

-          /* Assume that we will be doing multiple buffer transfers and that
-           * that hardware will be accessing the descriptor via DMA.
+          /* Assume that we will be doing multiple buffer transfers and
+           * that that hardware will be accessing the descriptor via DMA.
            */

           up_clean_dcache((uintptr_t)descr,
-                          (uintptr_t)descr + sizeof(struct chnext_view1_s));
+                          (uintptr_t)descr +
+                          sizeof(struct chnext_view1_s));
           break;
         }
     }
@@ -1971,6 +1987,7 @@ DMA_HANDLE sam_dmachannel(uint8_t dmacno, uint32_t chflags)
   struct sam_xdmac_s *xdmac;
   struct sam_xdmach_s *xdmach;
   unsigned int chndx;
+  int ret;

   /* Pick the DMA controller */

@@ -2001,7 +2018,12 @@ DMA_HANDLE sam_dmachannel(uint8_t dmacno, uint32_t chflags)
    */

   xdmach = NULL;
-  sam_takechsem(xdmac);
+  ret = sam_takechsem(xdmac);
+  if (ret < 0)
+    {
+      return NULL;
+    }

   for (chndx = 0; chndx < SAM_NDMACHAN; chndx++)
     {
       struct sam_xdmach_s *candidate = &xdmac->xdmach[chndx];
@@ -2046,13 +2068,14 @@ DMA_HANDLE sam_dmachannel(uint8_t dmacno, uint32_t chflags)
   return (DMA_HANDLE)xdmach;
 }

-/************************************************************************************
+/****************************************************************************
  * Name: sam_dmaconfig
  *
  * Description:
- *   There are two channel usage models: (1) The channel is allocated and configured
- *   in one step. This is the typical case where a DMA channel performs a constant
- *   role. The alternative is (2) where the DMA channel is reconfigured on the fly.
+ *   There are two channel usage models: (1) The channel is allocated and
+ *   configured in one step. This is the typical case where a DMA channel
+ *   performs a constant role. The alternative is (2) where the DMA channel
+ *   is reconfigured on the fly.
  *   In this case, the chflags provided to sam_dmachannel are not used and
  *   sam_dmaconfig() is called before each DMA to configure the DMA channel
  *   appropriately.
@@ -2060,7 +2083,7 @@ DMA_HANDLE sam_dmachannel(uint8_t dmacno, uint32_t chflags)
  * Returned Value:
  *   None
  *
-************************************************************************************/
+****************************************************************************/

 void sam_dmaconfig(DMA_HANDLE handle, uint32_t chflags)
 {
@@ -2163,8 +2186,8 @@ int sam_dmatxsetup(DMA_HANDLE handle, uint32_t paddr, uint32_t maddr,

           remaining -= maxtransfer;

-          /* Increment the memory & peripheral address (if it is appropriate to
-           * do so).
+          /* Increment the memory & peripheral address (if it is appropriate
+           * to do so).
            */

           if ((xdmach->flags & DMACH_FLAG_PERIPHINCREMENT) != 0)
@@ -2242,8 +2265,8 @@ int sam_dmarxsetup(DMA_HANDLE handle, uint32_t paddr, uint32_t maddr,

           remaining -= maxtransfer;

-          /* Increment the memory & peripheral address (if it is appropriate to
-           * do so).
+          /* Increment the memory & peripheral address (if it is appropriate
+           * to do so).
            */

           if ((xdmach->flags & DMACH_FLAG_PERIPHINCREMENT) != 0)
@@ -2271,7 +2294,8 @@ int sam_dmarxsetup(DMA_HANDLE handle, uint32_t paddr, uint32_t maddr,

   xdmach->rx     = true;
   xdmach->rxaddr = maddr;
-  xdmach->rxsize = (xdmach->flags & DMACH_FLAG_MEMINCREMENT) != 0 ? nbytes : sizeof(uint32_t);
+  xdmach->rxsize = (xdmach->flags & DMACH_FLAG_MEMINCREMENT) != 0 ?
+                   nbytes : sizeof(uint32_t);

   /* Clean caches associated with the DMA memory */

@@ -2326,8 +2350,8 @@ int sam_dmastart(DMA_HANDLE handle, dma_callback_t callback, void *arg)
  *
  * Description:
  *   Cancel the DMA. After sam_dmastop() is called, the DMA channel is
- *   reset and sam_dmarx/txsetup() must be called before sam_dmastart() can be
- *   called again
+ *   reset and sam_dmarx/txsetup() must be called before sam_dmastart() can
+ *   be called again
  *
  ****************************************************************************/

@@ -2419,28 +2443,49 @@ void sam_dmadump(DMA_HANDLE handle, const struct sam_dmaregs_s *regs,

   dmainfo("%s\n", msg);
   dmainfo(" DMA Global Registers:\n");
-  dmainfo(" GTYPE[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GTYPE_OFFSET, regs->gtype);
-  dmainfo(" GCFG[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GCFG_OFFSET, regs->gcfg);
-  dmainfo(" GWAC[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GWAC_OFFSET, regs->gwac);
-  dmainfo(" GIM[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GIM_OFFSET, regs->gim);
-  dmainfo(" GIS[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GIS_OFFSET, regs->gis);
-  dmainfo(" GS[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GS_OFFSET, regs->gs);
-  dmainfo(" GRS[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GRS_OFFSET, regs->grs);
-  dmainfo(" GWS[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GWS_OFFSET, regs->gws);
-  dmainfo(" GSWS[%08x]: %08x\n", xdmac->base + SAM_XDMAC_GSWS_OFFSET, regs->gsws);
+  dmainfo(" GTYPE[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GTYPE_OFFSET, regs->gtype);
+  dmainfo(" GCFG[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GCFG_OFFSET, regs->gcfg);
+  dmainfo(" GWAC[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GWAC_OFFSET, regs->gwac);
+  dmainfo(" GIM[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GIM_OFFSET, regs->gim);
+  dmainfo(" GIS[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GIS_OFFSET, regs->gis);
+  dmainfo(" GS[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GS_OFFSET, regs->gs);
+  dmainfo(" GRS[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GRS_OFFSET, regs->grs);
+  dmainfo(" GWS[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GWS_OFFSET, regs->gws);
+  dmainfo(" GSWS[%08x]: %08x\n",
+          xdmac->base + SAM_XDMAC_GSWS_OFFSET, regs->gsws);
   dmainfo(" DMA Channel Registers:\n");
-  dmainfo(" CIM[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CIM_OFFSET, regs->cim);
-  dmainfo(" CIS[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CIS_OFFSET, regs->cis);
-  dmainfo(" CSA[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CSA_OFFSET, regs->csa);
-  dmainfo(" CDA[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CDA_OFFSET, regs->cda);
-  dmainfo(" CNDA[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CNDA_OFFSET, regs->cnda);
-  dmainfo(" CNDC[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CNDC_OFFSET, regs->cndc);
-  dmainfo(" CUBC[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CUBC_OFFSET, regs->cubc);
-  dmainfo(" CBC[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CBC_OFFSET, regs->cbc);
-  dmainfo(" CC[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CC_OFFSET, regs->cc);
-  dmainfo(" CDSMSP[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CDSMSP_OFFSET, regs->cdsmsp);
-  dmainfo(" CSUS[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CSUS_OFFSET, regs->csus);
-  dmainfo(" CDUS[%08x]: %08x\n", xdmach->base + SAM_XDMACH_CDUS_OFFSET, regs->cdus);
+  dmainfo(" CIM[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CIM_OFFSET, regs->cim);
+  dmainfo(" CIS[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CIS_OFFSET, regs->cis);
+  dmainfo(" CSA[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CSA_OFFSET, regs->csa);
+  dmainfo(" CDA[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CDA_OFFSET, regs->cda);
+  dmainfo(" CNDA[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CNDA_OFFSET, regs->cnda);
+  dmainfo(" CNDC[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CNDC_OFFSET, regs->cndc);
+  dmainfo(" CUBC[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CUBC_OFFSET, regs->cubc);
+  dmainfo(" CBC[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CBC_OFFSET, regs->cbc);
+  dmainfo(" CC[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CC_OFFSET, regs->cc);
+  dmainfo(" CDSMSP[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CDSMSP_OFFSET, regs->cdsmsp);
+  dmainfo(" CSUS[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CSUS_OFFSET, regs->csus);
+  dmainfo(" CDUS[%08x]: %08x\n",
+          xdmach->base + SAM_XDMACH_CDUS_OFFSET, regs->cdus);
 }
 #endif /* CONFIG_DEBUG_DMA_INFO */
 #endif /* CONFIG_SAMA5_XDMAC0 || CONFIG_SAMA5_XDMAC1 */
@@ -113,7 +113,7 @@ struct sam_dmach_s
  * Private Function Prototypes
  ****************************************************************************/

-static void sam_takechsem(void);
+static int sam_takechsem(void);
 static inline void sam_givechsem(void);
 #if CONFIG_SAMD2L2_DMAC_NDESC > 0
 static void sam_takedsem(void);
@@ -183,43 +183,47 @@ static struct stm32l4_dma_s g_dma[DMA_NCHANNELS] =

 /* Get non-channel register from DMA1 or DMA2 */

-static inline uint32_t dmabase_getreg(struct stm32l4_dma_s *dmach, uint32_t offset)
+static inline uint32_t dmabase_getreg(struct stm32l4_dma_s *dmach,
+                                      uint32_t offset)
 {
   return getreg32(DMA_BASE(dmach->base) + offset);
 }

 /* Write to non-channel register in DMA1 or DMA2 */

-static inline void dmabase_putreg(struct stm32l4_dma_s *dmach, uint32_t offset, uint32_t value)
+static inline void dmabase_putreg(struct stm32l4_dma_s *dmach,
+                                  uint32_t offset, uint32_t value)
 {
   putreg32(value, DMA_BASE(dmach->base) + offset);
 }

 /* Get channel register from DMA1 or DMA2 */

-static inline uint32_t dmachan_getreg(struct stm32l4_dma_s *dmach, uint32_t offset)
+static inline uint32_t dmachan_getreg(struct stm32l4_dma_s *dmach,
+                                      uint32_t offset)
 {
   return getreg32(dmach->base + offset);
 }

 /* Write to channel register in DMA1 or DMA2 */

-static inline void dmachan_putreg(struct stm32l4_dma_s *dmach, uint32_t offset, uint32_t value)
+static inline void dmachan_putreg(struct stm32l4_dma_s *dmach,
+                                  uint32_t offset, uint32_t value)
 {
   putreg32(value, dmach->base + offset);
 }

-/************************************************************************************
+/****************************************************************************
  * Name: stm32l4_dmatake() and stm32l4_dmagive()
  *
  * Description:
  *   Used to get exclusive access to a DMA channel.
  *
-************************************************************************************/
+****************************************************************************/

-static void stm32l4_dmatake(FAR struct stm32l4_dma_s *dmach)
+static int stm32l4_dmatake(FAR struct stm32l4_dma_s *dmach)
 {
-  nxsem_wait_uninterruptible(&dmach->sem);
+  return nxsem_wait_uninterruptible(&dmach->sem);
 }

 static inline void stm32l4_dmagive(FAR struct stm32l4_dma_s *dmach)
@@ -227,13 +231,13 @@ static inline void stm32l4_dmagive(FAR struct stm32l4_dma_s *dmach)
   nxsem_post(&dmach->sem);
 }

-/************************************************************************************
+/****************************************************************************
  * Name: stm32l4_dmachandisable
  *
  * Description:
  *   Disable the DMA channel
  *
-************************************************************************************/
+****************************************************************************/

 static void stm32l4_dmachandisable(struct stm32l4_dma_s *dmach)
 {
@@ -251,16 +255,17 @@ static void stm32l4_dmachandisable(struct stm32l4_dma_s *dmach)

   /* Clear pending channel interrupts */

-  dmabase_putreg(dmach, STM32L4_DMA_IFCR_OFFSET, DMA_ISR_CHAN_MASK(dmach->chan));
+  dmabase_putreg(dmach, STM32L4_DMA_IFCR_OFFSET,
+                 DMA_ISR_CHAN_MASK(dmach->chan));
 }

-/************************************************************************************
+/****************************************************************************
  * Name: stm32l4_dmainterrupt
  *
  * Description:
  *   DMA interrupt handler
  *
-************************************************************************************/
+****************************************************************************/

 static int stm32l4_dmainterrupt(int irq, void *context, FAR void *arg)
 {
@@ -289,17 +294,20 @@ static int stm32l4_dmainterrupt(int irq, void *context, FAR void *arg)
     {
       DEBUGPANIC();
     }

   dmach = &g_dma[chndx];

   /* Get the interrupt status (for this channel only) */

-  isr = dmabase_getreg(dmach, STM32L4_DMA_ISR_OFFSET) & DMA_ISR_CHAN_MASK(dmach->chan);
+  isr = dmabase_getreg(dmach, STM32L4_DMA_ISR_OFFSET) &
+        DMA_ISR_CHAN_MASK(dmach->chan);

   /* Invoke the callback */

   if (dmach->callback)
     {
-      dmach->callback(dmach, isr >> DMA_ISR_CHAN_SHIFT(dmach->chan), dmach->arg);
+      dmach->callback(dmach, isr >> DMA_ISR_CHAN_SHIFT(dmach->chan),
+                      dmach->arg);
     }

   /* Clear the interrupts we are handling */
@@ -389,8 +397,9 @@ void weak_function up_dma_initialize(void)

 DMA_HANDLE stm32l4_dmachannel(unsigned int chndef)
 {
-  int chndx = (chndef & DMACHAN_SETTING_CHANNEL_MASK) >> DMACHAN_SETTING_CHANNEL_SHIFT;
-
+  int ret;
+  int chndx = (chndef & DMACHAN_SETTING_CHANNEL_MASK) >>
+              DMACHAN_SETTING_CHANNEL_SHIFT;
   struct stm32l4_dma_s *dmach = &g_dma[chndx];

   DEBUGASSERT(chndx < DMA_NCHANNELS);
@@ -399,14 +408,20 @@ DMA_HANDLE stm32l4_dmachannel(unsigned int chndef)
    * is available if it is currently being used by another driver
    */

-  stm32l4_dmatake(dmach);
+  ret = stm32l4_dmatake(dmach);
+  if (ret < 0)
+    {
+      return NULL;
+    }

   /* The caller now has exclusive use of the DMA channel */

   /* Define the peripheral that will use the channel. This is stored until
    * dmasetup is called.
    */
-  dmach->function = (chndef & DMACHAN_SETTING_FUNCTION_MASK) >> DMACHAN_SETTING_FUNCTION_SHIFT;
+
+  dmach->function = (chndef & DMACHAN_SETTING_FUNCTION_MASK) >>
+                    DMACHAN_SETTING_FUNCTION_SHIFT;

   return (DMA_HANDLE)dmach;
 }
@@ -415,7 +430,7 @@ DMA_HANDLE stm32l4_dmachannel(unsigned int chndef)
  * Name: stm32l4_dmafree
  *
  * Description:
- *   Release a DMA channel. If another thread is waiting for this DMA channel
+ *   Release a DMA channel. If another thread is waiting for this DMA channel
  *   in a call to stm32l4_dmachannel, then this function will re-assign the
  *   DMA channel to that thread and wake it up. NOTE: The 'handle' used
  *   in this argument must NEVER be used again until stm32l4_dmachannel() is
@@ -456,7 +471,7 @@ void stm32l4_dmasetup(DMA_HANDLE handle, uint32_t paddr, uint32_t maddr,
   uint32_t regval;

   DEBUGASSERT(handle != NULL);
-  DEBUGASSERT(ntransfers<65536);
+  DEBUGASSERT(ntransfers < 65536);

   /* Then DMA_CNDTRx register can only be modified if the DMA channel is
    * disabled.
@@ -486,18 +501,18 @@ void stm32l4_dmasetup(DMA_HANDLE handle, uint32_t paddr, uint32_t maddr,
   dmachan_putreg(dmach, STM32L4_DMACHAN_CNDTR_OFFSET, ntransfers);

   /* Configure the channel priority using the PL[1:0] bits in the DMA_CCRx
-   * register. Configure data transfer direction, circular mode, peripheral & memory
-   * incremented mode, peripheral & memory data size, and interrupt after
-   * half and/or full transfer in the DMA_CCRx register.
+   * register. Configure data transfer direction, circular mode, peripheral
+   * and memory incremented mode, peripheral & memory data size, and
+   * interrupt after half and/or full transfer in the DMA_CCRx register.
    */

   regval = dmachan_getreg(dmach, STM32L4_DMACHAN_CCR_OFFSET);
   regval &= ~(DMA_CCR_MEM2MEM | DMA_CCR_PL_MASK | DMA_CCR_MSIZE_MASK |
-              DMA_CCR_PSIZE_MASK | DMA_CCR_MINC | DMA_CCR_PINC | DMA_CCR_CIRC |
-              DMA_CCR_DIR);
+              DMA_CCR_PSIZE_MASK | DMA_CCR_MINC | DMA_CCR_PINC |
+              DMA_CCR_CIRC | DMA_CCR_DIR);
   ccr &= (DMA_CCR_MEM2MEM | DMA_CCR_PL_MASK | DMA_CCR_MSIZE_MASK |
-              DMA_CCR_PSIZE_MASK | DMA_CCR_MINC | DMA_CCR_PINC | DMA_CCR_CIRC |
-              DMA_CCR_DIR);
+              DMA_CCR_PSIZE_MASK | DMA_CCR_MINC | DMA_CCR_PINC |
+              DMA_CCR_CIRC | DMA_CCR_DIR);
   regval |= ccr;
   dmachan_putreg(dmach, STM32L4_DMACHAN_CCR_OFFSET, regval);

@@ -542,28 +557,31 @@ void stm32l4_dmastart(DMA_HANDLE handle, dma_callback_t callback,
   ccr  = dmachan_getreg(dmach, STM32L4_DMACHAN_CCR_OFFSET);
   ccr |= DMA_CCR_EN;

-  /* In normal mode, interrupt at either half or full completion. In circular mode,
-   * always interrupt on buffer wrap, and optionally interrupt at the halfway point.
+  /* In normal mode, interrupt at either half or full completion. In circular
+   * mode, always interrupt on buffer wrap, and optionally interrupt at the
+   * halfway point.
    */

   if ((ccr & DMA_CCR_CIRC) == 0)
     {
-      /* Once half of the bytes are transferred, the half-transfer flag (HTIF) is
-       * set and an interrupt is generated if the Half-Transfer Interrupt Enable
-       * bit (HTIE) is set. At the end of the transfer, the Transfer Complete Flag
-       * (TCIF) is set and an interrupt is generated if the Transfer Complete
-       * Interrupt Enable bit (TCIE) is set.
+      /* Once half of the bytes are transferred, the half-transfer flag
+       * (HTIF) is set and an interrupt is generated if the Half-Transfer
+       * Interrupt Enable bit (HTIE) is set. At the end of the transfer, the
+       * Transfer Complete Flag (TCIF) is set and an interrupt is generated
+       * if the Transfer Complete Interrupt Enable bit (TCIE) is set.
        */

-      ccr |= (half ? (DMA_CCR_HTIE | DMA_CCR_TEIE) : (DMA_CCR_TCIE | DMA_CCR_TEIE));
+      ccr |= (half ?
+              (DMA_CCR_HTIE | DMA_CCR_TEIE) : (DMA_CCR_TCIE | DMA_CCR_TEIE));
     }
   else
     {
       /* In nonstop mode, when the transfer completes it immediately resets
        * and starts again. The transfer-complete interrupt is thus always
        * enabled, and the half-complete interrupt can be used in circular
-       * mode to determine when the buffer is half-full, or in double-buffered
-       * mode to determine when one of the two buffers is full.
+       * mode to determine when the buffer is half-full, or in
+       * double-buffered mode to determine when one of the two buffers is
+       * full.
        */

       ccr |= (half ? DMA_CCR_HTIE : 0) | DMA_CCR_TCIE | DMA_CCR_TEIE;
@@ -577,8 +595,8 @@ void stm32l4_dmastart(DMA_HANDLE handle, dma_callback_t callback,
  *
  * Description:
  *   Cancel the DMA. After stm32l4_dmastop() is called, the DMA channel is
- *   reset and stm32l4_dmasetup() must be called before stm32l4_dmastart() can be
- *   called again
+ *   reset and stm32l4_dmasetup() must be called before stm32l4_dmastart()
+ *   can be called again
  *
  * Assumptions:
  *   - DMA handle allocated by stm32l4_dmachannel()
@@ -685,11 +703,13 @@ bool stm32l4_dmacapable(uint32_t maddr, uint32_t count, uint32_t ccr)
       case STM32L4_SRAM_BASE:
       case STM32L4_SRAM2_BASE:
       case STM32L4_CODE_BASE:
+
         /* All RAM and flash is supported */

         return true;

       default:
+
         /* Everything else is unsupported by DMA */

         return false;
@@ -744,11 +764,17 @@ void stm32l4_dmadump(DMA_HANDLE handle, const struct stm32l4_dmaregs_s *regs,
   uint32_t dmabase = DMA_BASE(dmach->base);

   dmainfo("DMA Registers: %s\n", msg);
-  dmainfo(" ISR[%08x]: %08x\n", dmabase + STM32L4_DMA_ISR_OFFSET, regs->isr);
-  dmainfo(" CSELR[%08x]: %08x\n", dmabase + STM32L4_DMA_CSELR_OFFSET, regs->cselr);
-  dmainfo(" CCR[%08x]: %08x\n", dmach->base + STM32L4_DMACHAN_CCR_OFFSET, regs->ccr);
-  dmainfo(" CNDTR[%08x]: %08x\n", dmach->base + STM32L4_DMACHAN_CNDTR_OFFSET, regs->cndtr);
-  dmainfo(" CPAR[%08x]: %08x\n", dmach->base + STM32L4_DMACHAN_CPAR_OFFSET, regs->cpar);
-  dmainfo(" CMAR[%08x]: %08x\n", dmach->base + STM32L4_DMACHAN_CMAR_OFFSET, regs->cmar);
+  dmainfo(" ISR[%08x]: %08x\n",
+          dmabase + STM32L4_DMA_ISR_OFFSET, regs->isr);
+  dmainfo(" CSELR[%08x]: %08x\n",
+          dmabase + STM32L4_DMA_CSELR_OFFSET, regs->cselr);
+  dmainfo(" CCR[%08x]: %08x\n",
+          dmach->base + STM32L4_DMACHAN_CCR_OFFSET, regs->ccr);
+  dmainfo(" CNDTR[%08x]: %08x\n",
+          dmach->base + STM32L4_DMACHAN_CNDTR_OFFSET, regs->cndtr);
+  dmainfo(" CPAR[%08x]: %08x\n",
+          dmach->base + STM32L4_DMACHAN_CPAR_OFFSET, regs->cpar);
+  dmainfo(" CMAR[%08x]: %08x\n",
+          dmach->base + STM32L4_DMACHAN_CMAR_OFFSET, regs->cmar);
 }
 #endif