/****************************************************************************
 * drivers/misc/rwbuffer.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>

#include <nuttx/kmalloc.h>
#include <nuttx/semaphore.h>
#include <nuttx/wqueue.h>
#include <nuttx/drivers/rwbuffer.h>

#if defined(CONFIG_DRVR_WRITEBUFFER) || defined(CONFIG_DRVR_READAHEAD)

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Configuration ************************************************************/

#ifndef CONFIG_DRVR_WRDELAY
#  define CONFIG_DRVR_WRDELAY 350
#endif

#if !defined(CONFIG_SCHED_WORKQUEUE) && CONFIG_DRVR_WRDELAY != 0
#  error "Worker thread support is required (CONFIG_SCHED_WORKQUEUE)"
#endif

/****************************************************************************
 * Private Function Prototypes
 ****************************************************************************/

static ssize_t rwb_read_(FAR struct rwbuffer_s *rwb, off_t startblock,
                         size_t nblocks, FAR uint8_t *rdbuffer);

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: rwb_semtake
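 *
 * Description:
 *   Take the buffer access semaphore, waiting uninterruptibly until it
 *   becomes available.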
 ****************************************************************************/

#if defined(CONFIG_DRVR_WRITEBUFFER) && CONFIG_DRVR_WRDELAY != 0
static int rwb_semtake(FAR sem_t *sem)
{
  return nxsem_wait_uninterruptible(sem);
}
#endif

/****************************************************************************
 * Name: rwb_forcetake
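 *
 * Description:
 *   Take the buffer access semaphore, retrying if the wait is interrupted
 *   by thread cancellation, and remember the first error encountered.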
 ****************************************************************************/

#if defined(CONFIG_DRVR_WRITEBUFFER) && CONFIG_DRVR_WRDELAY != 0
static int rwb_forcetake(FAR sem_t *sem)
{
  int result;
  int ret = OK;

  do
    {
      result = rwb_semtake(sem);

      /* The only expected failure is if the thread is canceled */

      DEBUGASSERT(result == OK || result == -ECANCELED);
      if (ret == OK && result < 0)
        {
          /* Remember the first error */

          ret = result;
        }
    }
  while (result < 0);

  return ret;
}
#endif

/****************************************************************************
 * Name: rwb_semgive
 ****************************************************************************/

#define rwb_semgive(s) nxsem_post(s)

/****************************************************************************
 * Name: rwb_overlap
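 *
 * Description:
 *   Return true if the block range [blockstart1, blockstart1 + nblocks1)
 *   overlaps the block range [blockstart2, blockstart2 + nblocks2).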
 ****************************************************************************/

static inline bool rwb_overlap(off_t blockstart1, size_t nblocks1,
                               off_t blockstart2, size_t nblocks2)
{
  off_t blockend1 = blockstart1 + nblocks1 - 1;
  off_t blockend2 = blockstart2 + nblocks2 - 1;

  /* If buffer 1 lies wholly outside of buffer 2, return false */

  if ((blockend1 < blockstart2) || /* Wholly "below" */
      (blockstart1 > blockend2))   /* Wholly "above" */
    {
      return false;
    }
  else
    {
      return true;
    }
}

/****************************************************************************
 * Name: rwb_resetwrbuffer
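 *
 * Description:
 *   Mark the write buffer as empty.  The buffer memory itself is retained.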
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
static inline void rwb_resetwrbuffer(FAR struct rwbuffer_s *rwb)
{
  /* We assume that the caller holds the wrsem */

  rwb->wrnblocks    = 0;
  rwb->wrblockstart = -1;
}
#endif

/****************************************************************************
 * Name: rwb_wrflush
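 *
 * Description:
 *   Write any buffered data to the underlying media using the wrflush
 *   callback.  If the number of buffered blocks is not a multiple of
 *   wralignblocks, the tail is first padded by reading the following
 *   blocks back from the media.  The write buffer is then marked empty.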
 *
 * Assumptions:
 *   The caller holds the wrsem semaphore.
 *
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
static void rwb_wrflush(FAR struct rwbuffer_s *rwb)
{
  int ret;

  if (rwb->wrnblocks > 0)
    {
      size_t padblocks;

      finfo("Flushing: blockstart=0x%08lx nblocks=%d from buffer=%p\n",
            (long)rwb->wrblockstart, rwb->wrnblocks, rwb->wrbuffer);

      padblocks = rwb->wrnblocks % rwb->wralignblocks;
      if (padblocks)
        {
          padblocks = rwb->wralignblocks - padblocks;
          rwb_read_(rwb, rwb->wrblockstart + rwb->wrnblocks, padblocks,
                    &rwb->wrbuffer[rwb->wrnblocks * rwb->blocksize]);
          rwb->wrnblocks += padblocks;
        }

      /* Flush cache.  On success, the flush method will return the number
       * of blocks written.  Anything other than the number requested is
       * an error.
       */

      ret = rwb->wrflush(rwb->dev, rwb->wrbuffer, rwb->wrblockstart,
                         rwb->wrnblocks);
      if (ret != rwb->wrnblocks)
        {
          ferr("ERROR: Error flushing write buffer: %d\n", ret);
        }

      rwb_resetwrbuffer(rwb);
    }
}
#endif

/****************************************************************************
 * Name: rwb_wrtimeout
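 *
 * Description:
 *   Work queue callback that runs when the write delay expires.  It takes
 *   the write buffer semaphore, flushes any buffered data, and releases
 *   the semaphore.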
 ****************************************************************************/

#if defined(CONFIG_DRVR_WRITEBUFFER) && CONFIG_DRVR_WRDELAY != 0
static void rwb_wrtimeout(FAR void *arg)
{
  /* Recover the rwbuffer state structure from the work queue argument */

  FAR struct rwbuffer_s *rwb = (FAR struct rwbuffer_s *)arg;
  DEBUGASSERT(rwb != NULL);

  finfo("Timeout!\n");

  /* If the write delay elapses while data remains in the write buffer,
   * this handler is invoked on the thread of execution of the worker
   * thread to flush that data.
   */

  rwb_forcetake(&rwb->wrsem);
  rwb_wrflush(rwb);
  rwb_semgive(&rwb->wrsem);
}
#endif

/****************************************************************************
 * Name: rwb_wrstarttimeout
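 *
 * Description:
 *   Schedule rwb_wrtimeout() to run on the low-priority work queue after
 *   the CONFIG_DRVR_WRDELAY delay.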
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
static void rwb_wrstarttimeout(FAR struct rwbuffer_s *rwb)
{
#if CONFIG_DRVR_WRDELAY != 0
  /* CONFIG_DRVR_WRDELAY provides the delay period in milliseconds.  CLK_TCK
   * provides the clock tick of the system (frequency in Hz).
   */

  int ticks = MSEC2TICK(CONFIG_DRVR_WRDELAY);
  work_queue(LPWORK, &rwb->work, rwb_wrtimeout, (FAR void *)rwb, ticks);
#endif
}
#endif

/****************************************************************************
 * Name: rwb_wrcanceltimeout
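 *
 * Description:
 *   Cancel any pending delayed flush scheduled by rwb_wrstarttimeout().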
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
static inline void rwb_wrcanceltimeout(FAR struct rwbuffer_s *rwb)
{
#if CONFIG_DRVR_WRDELAY != 0
  work_cancel(LPWORK, &rwb->work);
#endif
}
#endif

/****************************************************************************
 * Name: rwb_writebuffer
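 *
 * Description:
 *   Add new data to the write buffer, merging it with any data already
 *   buffered.  Data that does not fit in the write buffer is flushed to
 *   the media directly.  Returns the number of blocks accepted.
 *
 * Assumptions:
 *   The caller holds the wrsem semaphore.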
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
static ssize_t rwb_writebuffer(FAR struct rwbuffer_s *rwb,
                               off_t startblock, uint32_t nblocks,
                               FAR const uint8_t *wrbuffer)
{
  uint32_t nwritten = nblocks;

  /* Write buffer logic */

  rwb_wrcanceltimeout(rwb);

  /* Is data saved in the write buffer? */

  if (rwb->wrnblocks > 0)
    {
      off_t wrbend;
      off_t newend;

      /* Now there are five cases:
       *
       * 1. The new data does not overlap the write buffer at all
       */

      wrbend = rwb->wrblockstart + rwb->wrnblocks;
      newend = startblock + nblocks;

      if (wrbend < startblock || rwb->wrblockstart > newend)
        {
          /* Nothing to do */;
        }

      /* 2. The new data covers the entire write buffer. */

      else if (rwb->wrblockstart > startblock && wrbend < newend)
        {
          rwb->wrnblocks = 0;
        }

      /* We are going to update a subset of the write buffer.  Three
       * more cases to consider:
       *
       * 3. We update a portion in the middle of the write buffer
       */

      else if (rwb->wrblockstart <= startblock && wrbend >= newend)
        {
          FAR uint8_t *dest;
          size_t offset;

          /* Copy the data to the middle of write buffer */

          offset = startblock - rwb->wrblockstart;
          dest   = rwb->wrbuffer + offset * rwb->blocksize;
          memcpy(dest, wrbuffer, nblocks * rwb->blocksize);

          nblocks = 0;
        }

      /* 4. We update a portion at the end of the write buffer */

      else if (wrbend >= startblock && wrbend <= newend)
        {
          FAR uint8_t *dest;
          size_t offset;
          size_t ncopy;

          /* Copy the data from the updating region to the end
           * of the write buffer.
           */

          offset = rwb->wrnblocks - (wrbend - startblock);
          ncopy  = rwb->wrmaxblocks - offset;
          if (ncopy > nblocks)
            {
              ncopy = nblocks;
            }

          dest = rwb->wrbuffer + offset * rwb->blocksize;
          memcpy(dest, wrbuffer, ncopy * rwb->blocksize);

          rwb->wrnblocks = offset + ncopy;
          wrbuffer      += ncopy * rwb->blocksize;
          startblock    += ncopy;
          nblocks       -= ncopy;
        }

      /* 5. We update a portion at the beginning of the write buffer */

      else /* if (rwb->wrblockstart >= startblock && wrbend >= newend) */
        {
          FAR uint8_t *dest;
          FAR const uint8_t *src;
          size_t ncopy;

          DEBUGASSERT(rwb->wrblockstart >= startblock && wrbend >= newend);

          /* Move the cached data to the end of the write buffer */

          ncopy = rwb->wrblockstart - startblock;
          if (ncopy > rwb->wrmaxblocks - rwb->wrnblocks)
            {
              ncopy = rwb->wrmaxblocks - rwb->wrnblocks;
            }

          dest = rwb->wrbuffer + ncopy * rwb->blocksize;
          memmove(dest, rwb->wrbuffer, ncopy * rwb->blocksize);

          rwb->wrblockstart -= ncopy;
          rwb->wrnblocks    += ncopy;

          /* Copy the data from the updating region to the beginning
           * of the write buffer.
           */

          ncopy = newend - rwb->wrblockstart;
          src   = wrbuffer + (nblocks - ncopy) * rwb->blocksize;
          memcpy(rwb->wrbuffer, src, ncopy * rwb->blocksize);

          nblocks -= ncopy;
        }
    }

  /* Use the write buffer unless the remaining transfer is bigger than the
   * write buffer.
   */

  if (nblocks > rwb->wrmaxblocks)
    {
      ssize_t ret = rwb->wrflush(rwb->dev, wrbuffer, startblock, nblocks);
      if (ret < 0)
        {
          return ret;
        }
    }
  else if (nblocks)
    {
      /* Flush the write buffer */

      rwb_wrflush(rwb);

      /* Buffer the data in the write buffer */

      memcpy(rwb->wrbuffer, wrbuffer, nblocks * rwb->blocksize);
      rwb->wrblockstart = startblock;
      rwb->wrnblocks    = nblocks;
    }

  if (rwb->wrnblocks > 0)
    {
      rwb_wrstarttimeout(rwb);
    }

  return nwritten;
}
#endif

/****************************************************************************
 * Name: rwb_resetrhbuffer
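 *
 * Description:
 *   Mark the read-ahead buffer as empty.  The buffer memory is retained.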
 ****************************************************************************/

#ifdef CONFIG_DRVR_READAHEAD
static inline void rwb_resetrhbuffer(FAR struct rwbuffer_s *rwb)
{
  /* We assume that the caller holds the rhsem semaphore */

  rwb->rhnblocks    = 0;
  rwb->rhblockstart = -1;
}
#endif

/****************************************************************************
 * Name: rwb_bufferread
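 *
 * Description:
 *   Copy nblocks of data for startblock out of the read-ahead buffer into
 *   the caller's buffer and advance the caller's buffer pointer.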
 ****************************************************************************/

#ifdef CONFIG_DRVR_READAHEAD
static inline void
rwb_bufferread(FAR struct rwbuffer_s *rwb, off_t startblock,
               size_t nblocks, FAR uint8_t **rdbuffer)
{
  FAR uint8_t *rhbuffer;

  /* We assume that:
   * (1) the caller holds the rhsem semaphore, and
   * (2) the caller already knows that all of the blocks are in the
   *     read-ahead buffer.
   */

  /* Convert the units from blocks to bytes */

  off_t blockoffset = startblock - rwb->rhblockstart;
  off_t byteoffset  = rwb->blocksize * blockoffset;
  size_t nbytes     = rwb->blocksize * nblocks;

  /* Get the byte address in the read-ahead buffer */

  rhbuffer = rwb->rhbuffer + byteoffset;

  /* Copy the data from the read-ahead buffer into the IO buffer */

  memcpy(*rdbuffer, rhbuffer, nbytes);

  /* Update the caller's copy for the next address */

  *rdbuffer += nbytes;
}
#endif

/****************************************************************************
 * Name: rwb_rhreload
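 *
 * Description:
 *   Refill the read-ahead buffer starting at startblock, reading as many
 *   blocks as fit in the buffer without passing the end of the media.
 *   Returns the number of blocks loaded on success or a negated errno.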
 ****************************************************************************/

#ifdef CONFIG_DRVR_READAHEAD
static int rwb_rhreload(FAR struct rwbuffer_s *rwb, off_t startblock)
{
  off_t endblock;
  size_t nblocks;
  int ret;

  /* Check for attempts to read beyond the end of the media */

  if (startblock >= rwb->nblocks)
    {
      return -ESPIPE;
    }

  /* Get the block number +1 of the last block that will fit in the
   * read-ahead buffer
   */

  endblock = startblock + rwb->rhmaxblocks;

  /* Make sure that we don't read past the end of the device */

  if (endblock > rwb->nblocks)
    {
      endblock = rwb->nblocks;
    }

  nblocks = endblock - startblock;

  /* Reset the read buffer */

  rwb_resetrhbuffer(rwb);

  /* Now perform the read */

  ret = rwb->rhreload(rwb->dev, rwb->rhbuffer, startblock, nblocks);
  if (ret == nblocks)
    {
      /* Update information about what is in the read-ahead buffer */

      rwb->rhnblocks    = nblocks;
      rwb->rhblockstart = startblock;

      /* Return the number of blocks loaded into the read-ahead buffer.
       * Note that this is not necessarily the number of blocks that the
       * original caller requested.
       */

      return nblocks;
    }

  return -EIO;
}
#endif

/****************************************************************************
 * Name: rwb_invalidate_writebuffer
 *
 * Description:
 *   Invalidate a region of the write buffer
 *
 ****************************************************************************/

#if defined(CONFIG_DRVR_WRITEBUFFER) && defined(CONFIG_DRVR_INVALIDATE)
int rwb_invalidate_writebuffer(FAR struct rwbuffer_s *rwb,
                               off_t startblock, size_t blockcount)
{
  int ret = OK;

  /* Is there a write buffer?  Is data saved in the write buffer? */

  if (rwb->wrmaxblocks > 0 && rwb->wrnblocks > 0)
    {
      off_t wrbend;
      off_t invend;

      finfo("startblock=%" PRIdOFF " blockcount=%zu\n",
            startblock, blockcount);

      ret = rwb_semtake(&rwb->wrsem);
      if (ret < 0)
        {
          return ret;
        }

      /* Now there are five cases:
       *
       * 1. We invalidate nothing
       */

      wrbend = rwb->wrblockstart + rwb->wrnblocks;
      invend = startblock + blockcount;

      if (wrbend <= startblock || rwb->wrblockstart >= invend)
        {
          ret = OK;
        }

      /* 2. We invalidate the entire write buffer. */

      else if (rwb->wrblockstart >= startblock && wrbend <= invend)
        {
          rwb->wrnblocks = 0;
          ret = OK;
        }

      /* We are going to invalidate a subset of the write buffer.  Three
       * more cases to consider:
       *
       * 3. We invalidate a portion in the middle of the write buffer
       */

      else if (rwb->wrblockstart < startblock && wrbend > invend)
        {
          FAR uint8_t *src;
          off_t block;
          off_t offset;
          size_t nblocks;

          /* Write the blocks at the end of the media to hardware */

          nblocks = wrbend - invend;
          block   = invend;
          offset  = block - rwb->wrblockstart;
          src     = rwb->wrbuffer + offset * rwb->blocksize;

          ret = rwb->wrflush(rwb->dev, src, block, nblocks);
          if (ret < 0)
            {
              ferr("ERROR: wrflush failed: %d\n", ret);
            }

          /* Keep the blocks at the beginning of the buffer up to the
           * start of the invalidated region.
           */

          else
            {
              rwb->wrnblocks = startblock - rwb->wrblockstart;
              ret = OK;
            }
        }

      /* 4. We invalidate a portion at the end of the write buffer */

      else if (wrbend > startblock && wrbend <= invend)
        {
          rwb->wrnblocks -= wrbend - startblock;
          ret = OK;
        }

      /* 5. We invalidate a portion at the beginning of the write buffer */

      else /* if (rwb->wrblockstart >= startblock && wrbend > invend) */
        {
          FAR uint8_t *src;
          size_t ninval;
          size_t nkeep;

          DEBUGASSERT(rwb->wrblockstart >= startblock && wrbend > invend);

          /* Copy the data from the uninvalidated region to the beginning
           * of the write buffer.
           *
           * First calculate the source and destination of the transfer.
           */

          ninval = invend - rwb->wrblockstart;
          src    = rwb->wrbuffer + ninval * rwb->blocksize;

          /* Calculate the number of blocks we are keeping.  We keep
           * the ones that we don't invalidate.
           */

          nkeep = rwb->wrnblocks - ninval;

          /* Then move the data that we are keeping to the beginning of
           * the write buffer.
           */

          memcpy(rwb->wrbuffer, src, nkeep * rwb->blocksize);

          /* Update the block info.  The first block is now the one just
           * after the invalidation region and the number of buffered
           * blocks is the number that we kept.
           */

          rwb->wrblockstart = invend;
          rwb->wrnblocks    = nkeep;
          ret = OK;
        }

      rwb_semgive(&rwb->wrsem);
    }

  return ret;
}
#endif

/****************************************************************************
 * Name: rwb_invalidate_readahead
 *
 * Description:
 *   Invalidate a region of the read-ahead buffer
 *
 ****************************************************************************/

#if defined(CONFIG_DRVR_READAHEAD) && defined(CONFIG_DRVR_INVALIDATE)
int rwb_invalidate_readahead(FAR struct rwbuffer_s *rwb,
                             off_t startblock, size_t blockcount)
{
  int ret = OK;

  if (rwb->rhmaxblocks > 0 && rwb->rhnblocks > 0)
    {
      off_t rhbend;
      off_t invend;

      finfo("startblock=%" PRIdOFF " blockcount=%zu\n",
            startblock, blockcount);

      ret = rwb_semtake(&rwb->rhsem);
      if (ret < 0)
        {
          return ret;
        }

      /* Now there are five cases:
       *
       * 1. We invalidate nothing
       */

      rhbend = rwb->rhblockstart + rwb->rhnblocks;
      invend = startblock + blockcount;

      if (rhbend <= startblock || rwb->rhblockstart >= invend)
        {
          ret = OK;
        }

      /* 2. We invalidate the entire read-ahead buffer. */

      else if (rwb->rhblockstart >= startblock && rhbend <= invend)
        {
          rwb->rhnblocks = 0;
          ret = OK;
        }

      /* We are going to invalidate a subset of the read-ahead buffer.
       * Three more cases to consider:
       *
       * 3. We invalidate a portion in the middle of the read-ahead buffer
       */

      else if (rwb->rhblockstart < startblock && rhbend > invend)
        {
          /* Keep the blocks at the beginning of the buffer up to the
           * start of the invalidated region.
           */

          rwb->rhnblocks = startblock - rwb->rhblockstart;
          ret = OK;
        }

      /* 4. We invalidate a portion at the end of the read-ahead buffer */

      else if (rhbend > startblock && rhbend <= invend)
        {
          rwb->rhnblocks -= rhbend - startblock;
          ret = OK;
        }

      /* 5. We invalidate a portion at the beginning of the read-ahead
       *    buffer
       */

      else /* if (rwb->rhblockstart >= startblock && rhbend > invend) */
        {
          FAR uint8_t *src;
          size_t ninval;
          size_t nkeep;

          DEBUGASSERT(rwb->rhblockstart >= startblock && rhbend > invend);

          /* Copy the data from the uninvalidated region to the beginning
           * of the read buffer.
           *
           * First calculate the source and destination of the transfer.
           */

          ninval = invend - rwb->rhblockstart;
          src    = rwb->rhbuffer + ninval * rwb->blocksize;

          /* Calculate the number of blocks we are keeping.  We keep
           * the ones that we don't invalidate.
           */

          nkeep = rwb->rhnblocks - ninval;

          /* Then move the data that we are keeping to the beginning of
           * the read buffer.
           */

          memmove(rwb->rhbuffer, src, nkeep * rwb->blocksize);

          /* Update the block info.  The first block is now the one just
           * after the invalidation region and the number of buffered
           * blocks is the number that we kept.
           */

          rwb->rhblockstart = invend;
          rwb->rhnblocks    = nkeep;
        }

      rwb_semgive(&rwb->rhsem);
    }

  return ret;
}
#endif

/****************************************************************************
 * Public Functions
 ****************************************************************************/
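
/* Typical usage (an illustrative sketch only -- the authoritative
 * interface, including the full definition of struct rwbuffer_s, is in
 * include/nuttx/drivers/rwbuffer.h):  a block driver embeds a
 * struct rwbuffer_s, fills in the geometry and callback fields referenced
 * by this file, and then routes its block transfers through rwb_read()
 * and rwb_write().  All mydrvr_* names below are hypothetical
 * placeholders:
 *
 *   priv->rwb.blocksize   = 512;            (device block size in bytes)
 *   priv->rwb.nblocks     = ngeo_blocks;    (total blocks on the media)
 *   priv->rwb.dev         = priv;           (passed back to the callbacks)
 *   priv->rwb.wrmaxblocks = 16;             (enables the write buffer)
 *   priv->rwb.rhmaxblocks = 16;             (enables read-ahead)
 *   priv->rwb.wrflush     = mydrvr_flush;   (writes blocks to hardware)
 *   priv->rwb.rhreload    = mydrvr_reload;  (reads blocks from hardware)
 *
 *   ret      = rwb_initialize(&priv->rwb);
 *   nread    = rwb_read(&priv->rwb, startblock, nblocks, rdbuffer);
 *   nwritten = rwb_write(&priv->rwb, startblock, nblocks, wrbuffer);
 *   rwb_flush(&priv->rwb);
 *   rwb_uninitialize(&priv->rwb);
 */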

/****************************************************************************
 * Name: rwb_initialize
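 *
 * Description:
 *   Allocate the write buffer and/or read-ahead buffer according to the
 *   caller-provided configuration in the rwbuffer_s structure and
 *   initialize the access semaphores.  Returns OK on success or a negated
 *   errno value on failure.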
 ****************************************************************************/

int rwb_initialize(FAR struct rwbuffer_s *rwb)
{
  uint32_t allocsize;

  /* Sanity checking */

  DEBUGASSERT(rwb != NULL);
  DEBUGASSERT(rwb->blocksize > 0);
  DEBUGASSERT(rwb->nblocks > 0);
  DEBUGASSERT(rwb->dev != NULL);

  /* Setup so that rwb_uninitialize can handle a failure */

#ifdef CONFIG_DRVR_WRITEBUFFER
  DEBUGASSERT(rwb->wrflush != NULL);
  rwb->wrbuffer = NULL;
#endif
#ifdef CONFIG_DRVR_READAHEAD
  DEBUGASSERT(rwb->rhreload != NULL);
  rwb->rhbuffer = NULL;
#endif

#ifdef CONFIG_DRVR_WRITEBUFFER
  if (rwb->wrmaxblocks > 0)
    {
      finfo("Initialize the write buffer\n");

      if (rwb->wralignblocks == 0)
        {
          rwb->wralignblocks = 1;
        }

      DEBUGASSERT(rwb->wralignblocks <= rwb->wrmaxblocks &&
                  rwb->wrmaxblocks % rwb->wralignblocks == 0);

      /* Initialize the write buffer access semaphore */

      nxsem_init(&rwb->wrsem, 0, 1);

      /* Initialize write buffer parameters */

      rwb_resetwrbuffer(rwb);

      /* Allocate the write buffer */

      allocsize     = rwb->wrmaxblocks * rwb->blocksize;
      rwb->wrbuffer = kmm_malloc(allocsize);
      if (!rwb->wrbuffer)
        {
          ferr("Write buffer kmm_malloc(%" PRIu32 ") failed\n", allocsize);
          return -ENOMEM;
        }

      finfo("Write buffer size: %" PRIu32 " bytes\n", allocsize);
    }
#endif /* CONFIG_DRVR_WRITEBUFFER */

#ifdef CONFIG_DRVR_READAHEAD
  if (rwb->rhmaxblocks > 0)
    {
      finfo("Initialize the read-ahead buffer\n");

      /* Initialize the read-ahead buffer access semaphore */

      nxsem_init(&rwb->rhsem, 0, 1);

      /* Initialize read-ahead buffer parameters */

      rwb_resetrhbuffer(rwb);

      /* Allocate the read-ahead buffer */

      allocsize     = rwb->rhmaxblocks * rwb->blocksize;
      rwb->rhbuffer = kmm_malloc(allocsize);
      if (!rwb->rhbuffer)
        {
          ferr("Read-ahead buffer kmm_malloc(%" PRIu32 ") failed\n",
               allocsize);
          return -ENOMEM;
        }

      finfo("Read-ahead buffer size: %" PRIu32 " bytes\n", allocsize);
    }
#endif /* CONFIG_DRVR_READAHEAD */

  return OK;
}

/****************************************************************************
 * Name: rwb_uninitialize
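 *
 * Description:
 *   Flush any buffered write data, destroy the access semaphores, and free
 *   the buffers allocated by rwb_initialize().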
 ****************************************************************************/

void rwb_uninitialize(FAR struct rwbuffer_s *rwb)
{
#ifdef CONFIG_DRVR_WRITEBUFFER
  if (rwb->wrmaxblocks > 0)
    {
      rwb_wrcanceltimeout(rwb);
      rwb_wrflush(rwb);
      nxsem_destroy(&rwb->wrsem);
      if (rwb->wrbuffer)
        {
          kmm_free(rwb->wrbuffer);
        }
    }
#endif

#ifdef CONFIG_DRVR_READAHEAD
  if (rwb->rhmaxblocks > 0)
    {
      nxsem_destroy(&rwb->rhsem);
      if (rwb->rhbuffer)
        {
          kmm_free(rwb->rhbuffer);
        }
    }
#endif
}

/****************************************************************************
 * Name: rwb_read_
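 *
 * Description:
 *   Read nblocks of data beginning at startblock, satisfying as much of
 *   the request as possible from the read-ahead buffer and reloading the
 *   buffer from the media as needed.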
 ****************************************************************************/

static ssize_t rwb_read_(FAR struct rwbuffer_s *rwb, off_t startblock,
                         size_t nblocks, FAR uint8_t *rdbuffer)
{
  int ret = OK;

#ifdef CONFIG_DRVR_READAHEAD
  if (rwb->rhmaxblocks > 0)
    {
      size_t remaining;

      ret = nxsem_wait(&rwb->rhsem);
      if (ret < 0)
        {
          return ret;
        }

      /* Loop until we have read all of the requested blocks */

      for (remaining = nblocks; remaining > 0; )
        {
          /* Is there anything in the read-ahead buffer? */

          if (rwb->rhnblocks > 0)
            {
              off_t bufferend;

              /* How many blocks are available in this buffer? */

              bufferend = rwb->rhblockstart + rwb->rhnblocks;
              if (startblock >= rwb->rhblockstart && startblock < bufferend)
                {
                  size_t rdblocks = bufferend - startblock;
                  if (rdblocks > remaining)
                    {
                      rdblocks = remaining;
                    }

                  /* Then read the data from the read-ahead buffer */

                  rwb_bufferread(rwb, startblock, rdblocks, &rdbuffer);
                  startblock += rdblocks;
                  remaining  -= rdblocks;
                }
            }

          /* If we did not get all of the data from the buffer, then we have
           * to refill the buffer and try again.
           */

          if (remaining > 0)
            {
              ret = rwb_rhreload(rwb, startblock);
              if (ret < 0)
                {
                  ferr("ERROR: Failed to fill the read-ahead buffer: %d\n",
                       ret);

                  rwb_semgive(&rwb->rhsem);
                  return ret;
                }
            }
        }

      /* On success, return the number of blocks that we were requested to
       * read.  This is for compatibility with the normal return of a block
       * driver read method
       */

      rwb_semgive(&rwb->rhsem);
      ret = nblocks;
    }
  else
#endif
    {
      /* No read-ahead buffering, (re)load the data directly into
       * the user buffer.
       */

      ret = rwb->rhreload(rwb->dev, rdbuffer, startblock, nblocks);
    }

  return ret;
}

/****************************************************************************
 * Name: rwb_read
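 *
 * Description:
 *   Read nblocks of data beginning at startblock into rdbuffer.  Data that
 *   is currently held in the write buffer is copied directly from there;
 *   the remainder is read through the read-ahead logic.  Returns the
 *   number of blocks read on success or a negated errno value.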
 ****************************************************************************/

ssize_t rwb_read(FAR struct rwbuffer_s *rwb, off_t startblock,
                 size_t nblocks, FAR uint8_t *rdbuffer)
{
  int ret = OK;
  size_t readblocks = 0;

  finfo("startblock=%ld nblocks=%ld rdbuffer=%p\n",
        (long)startblock, (long)nblocks, rdbuffer);

#ifdef CONFIG_DRVR_WRITEBUFFER
  /* If the new read data overlaps any part of the write buffer, copy it
   * directly from the write buffer to the read buffer.  This boosts
   * performance.
   */

  if (rwb->wrmaxblocks > 0)
    {
      ret = nxsem_wait(&rwb->wrsem);
      if (ret < 0)
        {
          return ret;
        }

      /* If the write buffer overlaps the block(s) requested */

      if (rwb_overlap(rwb->wrblockstart, rwb->wrnblocks, startblock,
                      nblocks))
        {
          size_t rdblocks = 0;
          size_t wrnpass = 0;

          if (rwb->wrblockstart > startblock)
            {
              rdblocks = rwb->wrblockstart - startblock;
              ret = rwb_read_(rwb, startblock, rdblocks, rdbuffer);
              if (ret < 0)
                {
                  rwb_semgive(&rwb->wrsem);
                  return ret;
                }

              startblock += ret;
              nblocks    -= ret;
              rdbuffer   += ret * rwb->blocksize;
              readblocks += ret;
            }

          if (rwb->wrblockstart < startblock)
            {
              wrnpass = startblock - rwb->wrblockstart;
            }

          rdblocks = nblocks > (rwb->wrnblocks - wrnpass) ?
                     (rwb->wrnblocks - wrnpass) : nblocks;
          memcpy(rdbuffer, &rwb->wrbuffer[wrnpass * rwb->blocksize],
                 rdblocks * rwb->blocksize);

          startblock += rdblocks;
          nblocks    -= rdblocks;
          rdbuffer   += rdblocks * rwb->blocksize;
          readblocks += rdblocks;
        }

      rwb_semgive(&rwb->wrsem);
    }
#endif

  ret = rwb_read_(rwb, startblock, nblocks, rdbuffer);
  if (ret < 0)
    {
      return ret;
    }

  return readblocks + ret;
}

/****************************************************************************
 * Name: rwb_write
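 *
 * Description:
 *   Write nblocks of data beginning at startblock.  Any overlapping
 *   read-ahead data is invalidated first; the data is then merged into the
 *   write buffer (or passed straight to the wrflush callback if write
 *   buffering is disabled).  Returns the number of blocks written on
 *   success or a negated errno value.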
 ****************************************************************************/

ssize_t rwb_write(FAR struct rwbuffer_s *rwb, off_t startblock,
                  size_t nblocks, FAR const uint8_t *wrbuffer)
{
  int ret = OK;

#ifdef CONFIG_DRVR_READAHEAD
  if (rwb->rhmaxblocks > 0)
    {
      /* If the new write data overlaps any part of the read buffer, then
       * flush the data from the read buffer.  We could attempt some more
       * exotic handling -- but this simple logic is well-suited for simple
       * streaming applications.
       */

      ret = nxsem_wait(&rwb->rhsem);
      if (ret < 0)
        {
          return ret;
        }

      if (rwb_overlap(rwb->rhblockstart, rwb->rhnblocks, startblock,
                      nblocks))
        {
#ifdef CONFIG_DRVR_INVALIDATE
          /* Just invalidate the read buffer startblock + nblocks data */

          ret = rwb_invalidate_readahead(rwb, startblock, nblocks);
          if (ret < 0)
            {
              ferr("ERROR: rwb_invalidate_readahead failed: %d\n", ret);
              rwb_semgive(&rwb->rhsem);
              return ret;
            }
#else
          rwb_resetrhbuffer(rwb);
#endif
        }

      rwb_semgive(&rwb->rhsem);
    }
#endif

#ifdef CONFIG_DRVR_WRITEBUFFER
  if (rwb->wrmaxblocks > 0)
    {
      finfo("startblock=%" PRIdOFF " wrbuffer=%p\n", startblock, wrbuffer);

      ret = nxsem_wait(&rwb->wrsem);
      if (ret < 0)
        {
          return ret;
        }

      ret = rwb_writebuffer(rwb, startblock, nblocks, wrbuffer);
      rwb_semgive(&rwb->wrsem);

      /* On success, return the number of blocks that we were requested to
       * write.  This is for compatibility with the normal return of a block
       * driver write method
       */
    }
  else
#endif /* CONFIG_DRVR_WRITEBUFFER */
    {
      /* No write buffer.. just pass the write operation through via the
       * flush callback.
       */

      ret = rwb->wrflush(rwb->dev, wrbuffer, startblock, nblocks);
    }

  return ret;
}

/****************************************************************************
 * Name: rwb_readbytes
 *
 * Description:
 *   Character-oriented read
 *
 ****************************************************************************/

#ifdef CONFIG_DRVR_READBYTES
ssize_t rwb_readbytes(FAR struct rwbuffer_s *dev, off_t offset,
                      size_t nbytes, FAR uint8_t *buffer)
{
  /* Loop while there are bytes still to be read */

  /* Make sure that the sector containing the next bytes to transfer is in
   * memory.
   */

  /* How many bytes can be transferred from the in-memory data? */

  /* Transfer the bytes */

  /* Adjust counts and offsets for the next time through the loop */

#warning Not Implemented
  return -ENOSYS;
}
#endif

/****************************************************************************
 * Name: rwb_mediaremoved
 *
 * Description:
 *   The following function is called when media is removed
 *
 ****************************************************************************/

#ifdef CONFIG_DRVR_REMOVABLE
int rwb_mediaremoved(FAR struct rwbuffer_s *rwb)
{
  int ret;

#ifdef CONFIG_DRVR_WRITEBUFFER
  if (rwb->wrmaxblocks > 0)
    {
      ret = rwb_semtake(&rwb->wrsem);
      if (ret < 0)
        {
          return ret;
        }

      rwb_resetwrbuffer(rwb);
      rwb_semgive(&rwb->wrsem);
    }
#endif

#ifdef CONFIG_DRVR_READAHEAD
  if (rwb->rhmaxblocks > 0)
    {
      ret = rwb_semtake(&rwb->rhsem);
      if (ret < 0)
        {
          return ret;
        }

      rwb_resetrhbuffer(rwb);
      rwb_semgive(&rwb->rhsem);
    }
#endif

  return OK;
}
#endif

/****************************************************************************
 * Name: rwb_invalidate
 *
 * Description:
 *   Invalidate a region of the caches
 *
 ****************************************************************************/

#ifdef CONFIG_DRVR_INVALIDATE
int rwb_invalidate(FAR struct rwbuffer_s *rwb,
                   off_t startblock, size_t blockcount)
{
  int ret;

#ifdef CONFIG_DRVR_WRITEBUFFER
  ret = rwb_invalidate_writebuffer(rwb, startblock, blockcount);
  if (ret < 0)
    {
      ferr("ERROR: rwb_invalidate_writebuffer failed: %d\n", ret);
      return ret;
    }
#endif

#ifdef CONFIG_DRVR_READAHEAD
  ret = rwb_invalidate_readahead(rwb, startblock, blockcount);
  if (ret < 0)
    {
      ferr("ERROR: rwb_invalidate_readahead failed: %d\n", ret);
      return ret;
    }
#endif

  return OK;
}
#endif

/****************************************************************************
 * Name: rwb_flush
 *
 * Description:
 *   Flush the write buffer
 *
 ****************************************************************************/

#ifdef CONFIG_DRVR_WRITEBUFFER
int rwb_flush(FAR struct rwbuffer_s *rwb)
{
  int ret;

  ret = rwb_forcetake(&rwb->wrsem);
  rwb_wrcanceltimeout(rwb);
  rwb_wrflush(rwb);
  rwb_semgive(&rwb->wrsem);

  return ret;
}
#endif

#endif /* CONFIG_DRVR_WRITEBUFFER || CONFIG_DRVR_READAHEAD */