mm: nxstyle fixes
Nxstyle fixes to pass CI

Signed-off-by: Alin Jerpelea <alin.jerpelea@sony.com>
commit acb8ad4c7b (parent bcee9c391c)
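Every hunk below applies one of two mechanical fixes so the files pass the nxstyle checker: a block comment that overran the coding-standard column limit (roughly 78 characters) is split into a multi-line comment, or a long declaration or call is broken after an argument. A minimal standalone sketch of both patterns, using a hypothetical emit() helper rather than any of the NuttX functions touched here:

    #include <stdio.h>

    static int emit(const char *tag, unsigned int value, unsigned int limit,
                    int verbose)
    {
      /* A comment that would overrun the column limit is split into a
       * multi-line block comment instead of being left on one long line.
       */

      if (verbose)
        {
          printf("%s: %u/%u\n", tag, value, limit);
        }

      return value <= limit ? 0 : -1;
    }

    int main(void)
    {
      /* A long call is broken after an argument so every line stays short */

      return emit("granules",
                  16,
                  32,
                  1);
    }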
@@ -183,7 +183,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled,
  * Name: iob_alloc
  *
  * Description:
- *   Allocate an I/O buffer by taking the buffer at the head of the free list.
+ *   Allocate an I/O buffer by taking the buffer at the head of the free list.
  *
  ****************************************************************************/
@@ -65,7 +65,9 @@ static int iob_copyin_internal(FAR struct iob_s *iob, FAR const uint8_t *src,
   iobinfo("iob=%p len=%u offset=%u\n", iob, len, offset);
   DEBUGASSERT(iob && src);

-  /* The offset must applied to data that is already in the I/O buffer chain */
+  /* The offset must applied to data that is already in the I/O buffer
+   * chain
+   */

   if (offset > iob->io_pktlen)
     {
@@ -218,7 +220,8 @@ int iob_copyin(FAR struct iob_s *iob, FAR const uint8_t *src,
                 unsigned int len, unsigned int offset, bool throttled,
                 enum iob_user_e consumerid)
 {
-  return iob_copyin_internal(iob, src, len, offset, throttled, true, consumerid);
+  return iob_copyin_internal(iob, src, len, offset,
+                             throttled, true, consumerid);
 }

 /****************************************************************************
@@ -120,7 +120,9 @@ void iob_dump(FAR const char *msg, FAR struct iob_s *iob, unsigned int len,

   for (lndx = 0; lndx < len; lndx += IOB_NITEMS, offset += IOB_NITEMS)
     {
-      /* Copy IOB_NITEMS-bytes into our local buffer from the current offset */
+      /* Copy IOB_NITEMS-bytes into our local buffer from the current
+       * offset
+       */

       nbytes = iob_copyout(data, head, IOB_NITEMS, offset);
@@ -118,7 +118,9 @@ void iob_initialize(void)

   nxsem_init(&g_iob_sem, 0, CONFIG_IOB_NBUFFERS);
 #if CONFIG_IOB_THROTTLE > 0
-  nxsem_init(&g_throttle_sem, 0, CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE);
+  nxsem_init(&g_throttle_sem,
+             0,
+             CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE);
 #endif

 #if CONFIG_IOB_NCHAINS > 0
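Note, not part of the diff: the wrapped call above initializes g_throttle_sem with CONFIG_IOB_THROTTLE fewer counts than g_iob_sem, so throttled allocations exhaust their semaphore first and CONFIG_IOB_THROTTLE buffers stay in reserve for unthrottled callers. A simplified host-side sketch of that counting idea, using POSIX semaphores in place of nxsem_init()/nxsem_trywait() and ignoring the fact that the real allocator manages both counts for every buffer:

    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NBUFFERS 8   /* Stand-in for CONFIG_IOB_NBUFFERS */
    #define THROTTLE 2   /* Stand-in for CONFIG_IOB_THROTTLE */

    static sem_t g_free_sem;      /* One count per free buffer */
    static sem_t g_throttle_sem;  /* Smaller count seen by throttled callers */

    static bool try_reserve(bool throttled)
    {
      /* Throttled callers draw from the smaller count and so fail earlier */

      return sem_trywait(throttled ? &g_throttle_sem : &g_free_sem) == 0;
    }

    int main(void)
    {
      int granted = 0;
      int i;

      sem_init(&g_free_sem, 0, NBUFFERS);
      sem_init(&g_throttle_sem, 0, NBUFFERS - THROTTLE);

      for (i = 0; i < NBUFFERS; i++)
        {
          if (try_reserve(true))
            {
              granted++;
            }
        }

      /* Prints 6 of 8: two buffers remain reserved for unthrottled callers */

      printf("throttled grants: %d of %d buffers\n", granted, NBUFFERS);
      return 0;
    }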
@@ -128,7 +130,9 @@ void iob_initialize(void)
     {
       FAR struct iob_qentry_s *iobq = &g_iob_qpool[i];

-      /* Add the pre-allocate buffer container to the head of the free list */
+      /* Add the pre-allocate buffer container to the head of the free
+       * list
+       */

       iobq->qe_flink = g_iob_freeqlist;
       g_iob_freeqlist = iobq;
@@ -99,7 +99,9 @@ FAR struct iob_s *iob_pack(FAR struct iob_s *iob,

       if (ncopy > 0)
         {
-          /* Copy the data from the next into the current I/O buffer iob */
+          /* Copy the data from the next into
+           * the current I/O buffer iob
+           */

           memcpy(&iob->io_data[iob->io_len],
                  &next->io_data[next->io_offset],
@@ -113,7 +113,8 @@ void iob_stats_onfree(enum iob_user_e producerid)
  * Name: iob_getuserstats
  *
  * Description:
- *   Return a reference to the IOB usage statistics for the IOB consumer/producer
+ *   Return a reference to the IOB usage statistics for the IOB
+ *   consumer/producer
  *
  * Input Parameters:
  *   userid - id representing the IOB producer/consumer
@@ -93,7 +93,9 @@ FAR void *gran_alloc(GRAN_HANDLE handle, size_t size)
   DEBUGASSERT(ngranules <= 32);
   mask = 0xffffffff >> (32 - ngranules);

-  /* Now search the granule allocation table for that number of contiguous */
+  /* Now search the granule allocation table for that number
+   * of contiguous
+   */

   for (granidx = 0; granidx < priv->ngranules; granidx += 32)
     {
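Aside, not part of the diff: the mask built just above the wrapped comment has one bit set per requested granule; the search loop then slides that pattern across each 32-bit word of the granule allocation table. A tiny worked example of the same expression, assuming a four-granule request:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      unsigned int ngranules = 4;                      /* Example request size */
      uint32_t mask = 0xffffffff >> (32 - ngranules);  /* Same expression as gran_alloc() */

      /* Prints mask = 0x0000000f */

      printf("mask = 0x%08" PRIx32 "\n", mask);
      return 0;
    }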
@@ -614,7 +614,9 @@ void gran_info(GRAN_HANDLE handle, FAR struct graninfo_s *info)
         }
     }

-  /* Check if the last, unterminated sequence of free granules was the longest */
+  /* Check if the last, unterminated sequence of free granules was
+   * the longest
+   */

   if (mxfree > info->mxfree)
     {
@@ -55,7 +55,7 @@
  * attribute to position a DMA heap in memory (logic in the linker script
  * would assign the section .dmaheap to the DMA memory.
  *
- *   FAR uint32_t g_dmaheap[DMAHEAP_SIZE] __attribute__((section(.dmaheap)));
+ *   FAR uint32_t g_dmaheap[DMAHEAP_SIZE] __attribute__((section(.dmaheap)));
  *
  * The heap is created by calling gran_initialize().  Here the granule size
  * is set to 64 bytes (2**6) and the alignment to 16 bytes (2**4):
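Tying the comment above together, a hedged sketch of the usage it describes, written against the granule-allocator API declared in include/nuttx/mm/gran.h; g_dmaheap and DMAHEAP_SIZE are the hypothetical names from the comment, and the two wrapper functions are illustrative only:

    #include <stddef.h>
    #include <stdint.h>

    #include <nuttx/mm/gran.h>

    #define DMAHEAP_SIZE 1024   /* Assumed size, in 32-bit words, of the DMA region */

    /* Placed in DMA-capable memory by the linker script, as the comment describes */

    FAR uint32_t g_dmaheap[DMAHEAP_SIZE] __attribute__((section(".dmaheap")));

    static GRAN_HANDLE g_dmahandle;

    void dma_heap_setup(void)
    {
      /* 64-byte granules (2**6) aligned to 16-byte boundaries (2**4) */

      g_dmahandle = gran_initialize(g_dmaheap, sizeof(g_dmaheap), 6, 4);
    }

    FAR void *dma_buffer_alloc(size_t nbytes)
    {
      /* Requests are rounded up to a whole number of granules */

      return gran_alloc(g_dmahandle, nbytes);
    }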
@@ -41,7 +41,8 @@
  *
  ****************************************************************************/

-void mm_addfreechunk(FAR struct mm_heap_s *heap, FAR struct mm_freenode_s *node)
+void mm_addfreechunk(FAR struct mm_heap_s *heap,
+                     FAR struct mm_freenode_s *node)
 {
   FAR struct mm_freenode_s *next;
   FAR struct mm_freenode_s *prev;
@@ -59,7 +59,8 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,

   DEBUGASSERT(heap && mem);
 #if CONFIG_MM_REGIONS > 1
-  DEBUGASSERT(size >= MIN_EXTEND && (size_t)region < (size_t)heap->mm_nregions);
+  DEBUGASSERT(size >= MIN_EXTEND &&
+              (size_t)region < (size_t)heap->mm_nregions);
 #else
   DEBUGASSERT(size >= MIN_EXTEND && region == 0);
 #endif
@@ -54,8 +54,8 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
   size_t mask = (size_t)(alignment - 1);
   size_t allocsize;

-  /* If this requested alinement's less than or equal to the natural alignment
-   * of malloc, then just let malloc do the work.
+  /* If this requested alinement's less than or equal to the natural
+   * alignment of malloc, then just let malloc do the work.
    */

   if (alignment <= MM_MIN_CHUNK)
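Aside, not part of the diff: the mask computed at the top of this hunk relies on alignment being a power of two, so alignment - 1 has only the low bits set and (addr + mask) & ~mask rounds an address up to the next aligned boundary; that is roughly the arithmetic the rest of mm_memalign() builds on after over-allocating. A small standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uintptr_t addr      = 0x20001234;   /* Example unaligned address */
      uintptr_t alignment = 64;           /* Must be a power of two */
      uintptr_t mask      = alignment - 1;
      uintptr_t aligned   = (addr + mask) & ~mask;

      /* Prints 0x20001234 -> 0x20001240 */

      printf("0x%08lx -> 0x%08lx\n",
             (unsigned long)addr, (unsigned long)aligned);
      return 0;
    }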
@@ -114,8 +114,9 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,

       next = (FAR struct mm_allocnode_s *)((FAR char *)node + node->size);

-      /* Make sure that there is space to convert the preceding mm_allocnode_s
-       * into an mm_freenode_s.  I think that this should always be true
+      /* Make sure that there is space to convert the preceding
+       * mm_allocnode_s into an mm_freenode_s.
+       * I think that this should always be true
        */

       DEBUGASSERT(alignedchunk >= rawchunk + 8);
@@ -67,7 +67,8 @@ void mm_shrinkchunk(FAR struct mm_heap_s *heap,

       /* Get the chunk next the next node (which could be the tail chunk) */

-      andbeyond = (FAR struct mm_allocnode_s *)((FAR char *)next + next->size);
+      andbeyond = (FAR struct mm_allocnode_s *)
+                  ((FAR char *)next + next->size);

       /* Remove the next node.  There must be a predecessor, but there may
        * not be a successor node.
@@ -39,6 +39,7 @@
 /****************************************************************************
  * Pre-processor Definitions
  ****************************************************************************/
+
 /* Bit definitions for the struct shm_region_s sr_flags field */

 #define SRFLAG_AVAILABLE 0 /* Available if no flag bits set */