Feature k210 smp (#71)

* arch: risc-v: Remove unused typedef for irqstate_t

    NOTE: irqstate_t is defined in arch/risc-v/include/types.h

  * arch: risc-v: Add typedef irqstate_t for __LP64__
  * arch: risc-v: Add SMP support to K210 (RV64GC) processor
  * boards: maix-bit: Update READMEs and add smp/defconfig
Masayuki Ishikawa 2020-01-10 08:04:41 -06:00 committed by Gregory Nutt
parent 237c0cca09
commit 29d3ed2ec1
36 changed files with 1728 additions and 113 deletions

View File

@ -19,6 +19,9 @@ config ARCH_CHIP_FE310
config ARCH_CHIP_K210
bool "Kendryte K210"
select ARCH_RV64GC
select ARCH_HAVE_TESTSET
select ARCH_HAVE_MULTICPU
select ARCH_GLOBAL_IRQDISABLE
---help---
Kendryte K210 processor (RISC-V 64bit core with GC extensions)

View File

@ -57,18 +57,4 @@
# include <arch/rv64gc/irq.h>
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
typedef uint32_t irqstate_t;
/****************************************************************************
* Public Types
****************************************************************************/
/****************************************************************************
* Public Variables
****************************************************************************/
#endif /* __ARCH_RISCV_INCLUDE_IRQ_H */

View File

@ -51,6 +51,7 @@
/* In mie (machine interrupt enable) register */
#define MIE_MSIE (0x1 << 3) /* Machine Software Interrupt Enable */
#define MIE_MTIE (0x1 << 7) /* Machine Timer Interrupt Enable */
#define MIE_MEIE (0x1 << 11) /* Machine External Interrupt Enable */
@ -81,7 +82,11 @@
/* Machine Global External Interrupt */
#ifdef CONFIG_K210_WITH_QEMU
#define K210_IRQ_UART0 (K210_IRQ_MEXT + 4)
#else
#define K210_IRQ_UART0 (K210_IRQ_MEXT + 33)
#endif
/* Total number of IRQs */

View File

@ -0,0 +1,131 @@
/****************************************************************************
* arch/risc-v/include/spinlock.h
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Based on arch/arm/include/armv7-m/spinlock.h
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#ifndef __ARCH_RISCV_INCLUDE_SPINLOCK_H
#define __ARCH_RISCV_INCLUDE_SPINLOCK_H
/****************************************************************************
* Included Files
****************************************************************************/
#ifndef __ASSEMBLY__
# include <stdint.h>
#endif /* __ASSEMBLY__ */
/* Include RISC-V architecture-specific IRQ definitions (including register
* save structure and up_irq_save()/up_irq_restore() functions)
*/
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Spinlock states */
#define SP_UNLOCKED 0 /* The Un-locked state */
#define SP_LOCKED 1 /* The Locked state */
/* Memory barriers for use with NuttX spinlock logic
*
* Data Memory Barrier (DMB) acts as a memory barrier. It ensures that all
* explicit memory accesses that appear in program order before the DMB
* instruction are observed before any explicit memory accesses that appear
* in program order after the DMB instruction. It does not affect the
* ordering of any other instructions executing on the processor
*
* Data Synchronization Barrier (DSB) acts as a special kind of memory
* barrier. No instruction in program order after this instruction executes
* until this instruction completes. This instruction completes when: (1) All
* explicit memory accesses before this instruction complete, and (2) all
* Cache, Branch predictor and TLB maintenance operations before this
* instruction complete.
*
*/
#define SP_DSB(n) __asm__ __volatile__ ("fence")
#define SP_DMB(n) __asm__ __volatile__ ("fence")
/****************************************************************************
* Public Types
****************************************************************************/
#ifndef __ASSEMBLY__
/* The type of a spinlock.
*
* The RISC-V architecture introduced the concept of exclusive accesses to
* memory locations in the form of the Load-Reserved (LR) and
* Store-Conditional (SC) instructions. RV64 supports doubleword-aligned
* data; other variants support word-aligned data.
*
* The RISC-V architecture also provides the fence instruction to ensure
* memory ordering.
*/
#ifdef __LP64__
typedef uint64_t spinlock_t;
#else
typedef uint32_t spinlock_t;
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_testset
*
* Description:
* Perform an atomic test and set operation on the provided spinlock.
*
* This function must be provided via the architecture-specific logic.
*
* Input Parameters:
* lock - The address of spinlock object.
*
* Returned Value:
* The spinlock is always locked upon return. The previous value of the
* spinlock variable is returned: either SP_LOCKED if the spinlock was
* previously locked (meaning that the test-and-set operation failed to
* obtain the lock) or SP_UNLOCKED if the spinlock was previously unlocked
* (meaning that we successfully obtained the lock).
*
****************************************************************************/
/* See prototype in nuttx/include/nuttx/spinlock.h */
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_RISCV_INCLUDE_SPINLOCK_H */
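The assembly implementation of up_testset() itself (up_testset.S, added to CMN_ASRCS in the Make.defs change below) is not shown in this diff. Purely as an illustrative sketch of the contract documented above, the same semantics could be expressed in C with a RISC-V atomic swap; the helper name is hypothetical and the code assumes the "A" extension, which RV64GC includes:

/* Illustrative sketch only -- not the up_testset.S added by this commit.
 * amoswap.d atomically stores SP_LOCKED and returns the previous value,
 * which is exactly the behavior documented above.
 */

static inline spinlock_t up_testset_sketch(volatile spinlock_t *lock)
{
  spinlock_t prev;

  __asm__ __volatile__
  (
    "amoswap.d.aq %0, %2, (%1)"    /* prev = *lock; *lock = SP_LOCKED */
    : "=r" (prev)
    : "r" (lock), "r" ((spinlock_t)SP_LOCKED)
    : "memory"
  );

  return prev;
}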

View File

@ -81,16 +81,20 @@ typedef unsigned long long _uint64_t;
typedef signed long _intptr_t;
typedef unsigned long _uintptr_t;
/* This is the size of the interrupt state save returned by irqsave(). */
typedef unsigned long long irqstate_t;
#else
/* A pointer is 4 bytes */
typedef signed int _intptr_t;
typedef unsigned int _uintptr_t;
#endif
/* This is the size of the interrupt state save returned by irqsave(). */
typedef unsigned int irqstate_t;
#endif
#endif /* __ASSEMBLY__ */

View File

@ -70,11 +70,12 @@
*/
#ifdef CONFIG_ARCH_RV64GC
#define up_savestate(regs) up_copystate(regs, (uint64_t*)g_current_regs)
#define up_savestate(regs) up_copystate(regs, (uint64_t*)CURRENT_REGS)
#define up_restorestate(regs) (CURRENT_REGS = regs)
#else
#define up_savestate(regs) up_copystate(regs, (uint32_t*)g_current_regs)
#endif
#define up_restorestate(regs) (g_current_regs = regs)
#endif
/* Determine which (if any) console driver to use. If a console is enabled
* and no other console device is specified, then a serial console is
@ -118,7 +119,13 @@ extern "C"
#endif
#ifdef CONFIG_ARCH_RV64GC
EXTERN volatile uint64_t *g_current_regs;
#ifdef CONFIG_SMP
EXTERN volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
# define CURRENT_REGS (g_current_regs[up_cpu_index()])
#else
EXTERN volatile uint64_t *g_current_regs[1];
# define CURRENT_REGS (g_current_regs[0])
#endif
EXTERN uintptr_t g_idle_topstack;
#else
EXTERN volatile uint32_t *g_current_regs;

View File

@ -66,5 +66,9 @@
bool up_interrupt_context(void)
{
return g_current_regs != NULL;
#ifdef CONFIG_ARCH_RV64GC
return CURRENT_REGS != NULL;
#else
return g_current_regs != NULL;
#endif
}

View File

@ -26,3 +26,11 @@ config K210_UART0
select K210_UART
endmenu
menu "K210 Others"
config K210_WITH_QEMU
bool "qemu support"
default n
endmenu

View File

@ -37,9 +37,11 @@ HEAD_ASRC = k210_vectors.S
# Specify our general Assembly files
CHIP_ASRCS = k210_head.S up_syscall.S
CMN_ASRCS += up_testset.S
# Specify C code within the common directory to be included
CMN_CSRCS += up_initialize.c up_swint.c
CMN_CSRCS += up_allocateheap.c up_createstack.c up_exit.c
CMN_CSRCS += up_allocateheap.c up_createstack.c up_exit.c up_fault.c
CMN_CSRCS += up_assert.c up_blocktask.c up_copystate.c up_initialstate.c
CMN_CSRCS += up_interruptcontext.c up_modifyreg32.c up_puts.c
CMN_CSRCS += up_releasepending.c up_reprioritizertr.c
@ -60,3 +62,7 @@ CHIP_CSRCS += k210_idle.c k210_irq.c k210_irq_dispatch.c
CHIP_CSRCS += k210_lowputc.c k210_serial.c
CHIP_CSRCS += k210_start.c k210_timerisr.c
ifeq ($(CONFIG_SMP), y)
CHIP_CSRCS += k210_cpuidlestack.c k210_cpuindex.c
CHIP_CSRCS += k210_cpupause.c k210_cpustart.c
endif

View File

@ -37,6 +37,7 @@
* Pre-processor Definitions
****************************************************************************/
#define K210_CLINT_MSIP (K210_CLINT_BASE + 0x0000)
#define K210_CLINT_MTIMECMP (K210_CLINT_BASE + 0x4000)
#define K210_CLINT_MTIME (K210_CLINT_BASE + 0xbff8)
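The MSIP words are what the SMP code added below uses for inter-processor interrupts: the CLINT provides one 32-bit MSIP register per hart at K210_CLINT_MSIP + 4 * hart; writing 1 pends a machine software interrupt on that hart and writing 0 clears it. A hedged sketch of that convention (the helper names are hypothetical; the commit itself calls putreg32() directly in k210_cpupause.c and k210_cpustart.c):

/* Hypothetical helpers illustrating the per-hart MSIP convention */

static inline void k210_ipi_send(int cpu)
{
  putreg32(1, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
}

static inline void k210_ipi_clear(int cpu)
{
  putreg32(0, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
}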

View File

@ -42,7 +42,11 @@
#define K210_CLINT_BASE 0x02000000
#define K210_PLIC_BASE 0x0c000000
#ifdef CONFIG_K210_WITH_QEMU
#define K210_UART0_BASE 0x10010000
#else
#define K210_UART0_BASE 0x38000000
#endif
#define K210_GPIO_BASE 0x38001000
#endif /* __ARCH_RISCV_SRC_K210_HARDWARE_K210_MEMORYMAP_H */

View File

@ -0,0 +1,106 @@
/****************************************************************************
* arch/risc-v/src/k210/k210_cpuidlestack.c
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "up_internal.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_idlestack
*
* Description:
* Allocate a stack for the CPU[n] IDLE task (n > 0) if appropriate and
* set up stack-related information in the IDLE task's TCB. This
* function is always called before up_cpu_start(). This function is
* only called for the CPU's initial IDLE task; up_create_task is used for
* all normal tasks, pthreads, and kernel threads for all CPUs.
*
* The initial IDLE task is a special case because the CPUs can be started
* in different ways in different environments:
*
* 1. The CPU may already have been started and waiting in a low power
* state for up_cpu_start(). In this case, the IDLE thread's stack
* has already been allocated and is already in use. Here
* up_cpu_idlestack() only has to provide information about the
* already allocated stack.
*
* 2. The CPU may be disabled but started when up_cpu_start() is called.
* In this case, a new stack will need to be created for the IDLE
* thread and this function is then equivalent to:
*
* return up_create_stack(tcb, stack_size, TCB_FLAG_TTYPE_KERNEL);
*
* The following TCB fields must be initialized by this function:
*
* - adj_stack_size: Stack size after adjustment for hardware, processor,
* etc. This value is retained only for debug purposes.
* - stack_alloc_ptr: Pointer to allocated stack
* - adj_stack_ptr: Adjusted stack_alloc_ptr for HW. The initial value of
* the stack pointer.
*
* Input Parameters:
* - cpu: CPU index that indicates which CPU the IDLE task is
* being created for.
* - tcb: The TCB of new CPU IDLE task
* - stack_size: The requested stack size for the IDLE task. At least
* this much must be allocated. This should be
* CONFIG_SMP_STACK_SIZE.
*
****************************************************************************/
int up_cpu_idlestack(int cpu, FAR struct tcb_s *tcb, size_t stack_size)
{
#if CONFIG_SMP_NCPUS > 1
(void)up_create_stack(tcb, stack_size, TCB_FLAG_TTYPE_KERNEL);
#endif
return OK;
}
#endif /* CONFIG_SMP */

View File

@ -0,0 +1,76 @@
/****************************************************************************
* arch/risc-v/src/k210/k210_cpuindex.c
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <nuttx/arch.h>
#include "up_arch.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_index
*
* Description:
* Return an index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
* Input Parameters:
* None
*
* Returned Value:
* An integer index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
****************************************************************************/
int up_cpu_index(void)
{
int mhartid;
asm volatile ("csrr %0, mhartid": "=r" (mhartid));
return mhartid;
}
#endif /* CONFIG_SMP */

View File

@ -0,0 +1,332 @@
/****************************************************************************
* arch/risc-v/src/k210/k210_cpupause.c
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <string.h>
#include <stdio.h>
#include <nuttx/arch.h>
#include <nuttx/spinlock.h>
#include <nuttx/sched_note.h>
#include "up_arch.h"
#include "sched/sched.h"
#include "up_internal.h"
#include "chip.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#if 0
#define DPRINTF(fmt, args...) llinfo(fmt, ##args)
#else
#define DPRINTF(fmt, args...) do {} while (0)
#endif
/****************************************************************************
* Public Data
****************************************************************************/
/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m]
* and (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/
volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_pausereq
*
* Description:
* Return true if a pause request is pending for this CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be queried
*
* Returned Value:
* true = a pause request is pending.
* false = no pause request is pending.
*
****************************************************************************/
bool up_cpu_pausereq(int cpu)
{
return spin_islocked(&g_cpu_paused[cpu]);
}
/****************************************************************************
* Name: up_cpu_paused
*
* Description:
* Handle a pause request from another CPU. Normally, this logic is
* executed from interrupt handling logic within the architecture-specific
* implementation. However, it is sometimes necessary to perform the
* pending pause operation in other contexts where the interrupt cannot be
* taken in order to avoid deadlocks.
*
* This function performs the following operations:
*
* 1. It saves the current task state at the head of the current assigned
* task list.
* 2. It waits on a spinlock, then
* 3. Returns from interrupt, restoring the state of the new task at the
* head of the ready to run list.
*
* Input Parameters:
* cpu - The index of the CPU to be paused
*
* Returned Value:
* On success, OK is returned. Otherwise, a negated errno value indicating
* the nature of the failure is returned.
*
****************************************************************************/
int up_cpu_paused(int cpu)
{
FAR struct tcb_s *tcb = this_task();
/* Update scheduler parameters */
sched_suspend_scheduler(tcb);
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we are paused */
sched_note_cpu_paused(tcb);
#endif
/* Save the current context at CURRENT_REGS into the TCB at the head
* of the assigned task list for this CPU.
*/
up_savestate(tcb->xcp.regs);
/* Wait for the spinlock to be released */
spin_unlock(&g_cpu_paused[cpu]);
spin_lock(&g_cpu_wait[cpu]);
/* Restore the exception context of the tcb at the (new) head of the
* assigned task list.
*/
tcb = this_task();
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we have resumed */
sched_note_cpu_resumed(tcb);
#endif
/* Reset scheduler parameters */
sched_resume_scheduler(tcb);
/* Then switch contexts. Any necessary address environment changes
* will be made when the interrupt returns.
*/
up_restorestate(tcb->xcp.regs);
spin_unlock(&g_cpu_wait[cpu]);
return OK;
}
/****************************************************************************
* Name: riscv_pause_handler
*
* Description:
* Inter-CPU interrupt handler
*
* Input Parameters:
* Standard interrupt handler inputs
*
* Returned Value:
* Should always return OK
*
****************************************************************************/
int riscv_pause_handler(int irq, void *c, FAR void *arg)
{
int cpu = up_cpu_index();
/* Clear machine software interrupt */
putreg32(0, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
/* Check for false alarms. Such a false alarm could occur as a consequence
* of some deadlock breaking logic that might have already serviced the pause
* interrupt by calling up_cpu_paused.
*/
if (spin_islocked(&g_cpu_paused[cpu]))
{
return up_cpu_paused(cpu);
}
return OK;
}
/****************************************************************************
* Name: up_cpu_pause
*
* Description:
* Save the state of the current task at the head of the
* g_assignedtasks[cpu] task list and then pause task execution on the
* CPU.
*
* This function is called by the OS when the logic executing on one CPU
* needs to modify the state of the g_assignedtasks[cpu] list for another
* CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be stopped/
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_pause(int cpu)
{
DPRINTF("cpu=%d\n", cpu);
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the pause event */
sched_note_cpu_pause(this_task(), cpu);
#endif
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the pause
* interrupt handler from returning until up_cpu_resume() is called;
* g_cpu_paused is a handshake that will prevent this function from
* returning until the CPU is actually paused.
*/
DEBUGASSERT(!spin_islocked(&g_cpu_wait[cpu]) &&
!spin_islocked(&g_cpu_paused[cpu]));
spin_lock(&g_cpu_wait[cpu]);
spin_lock(&g_cpu_paused[cpu]);
/* Execute Pause IRQ to CPU(cpu) */
putreg32(1, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
/* Wait for the other CPU to unlock g_cpu_paused meaning that
* it is fully paused and ready for up_cpu_resume();
*/
spin_lock(&g_cpu_paused[cpu]);
spin_unlock(&g_cpu_paused[cpu]);
/* On successful return, g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called. g_cpu_paused will be unlocked in any case.
*/
return 0;
}
/****************************************************************************
* Name: up_cpu_resume
*
* Description:
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
* state of the task at the head of the g_assignedtasks[cpu] list, and
* resume normal tasking.
*
* This function is called after up_cpu_pause() in order to resume operation of
* the CPU after modifying its g_assignedtasks[cpu] list.
*
* Input Parameters:
* cpu - The index of the CPU being re-started.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_resume(int cpu)
{
DPRINTF("cpu=%d\n", cpu);
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the resume event */
sched_note_cpu_resume(this_task(), cpu);
#endif
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
/* Release the spinlock. Releasing the spinlock will cause the pause
* handler on 'cpu' to continue and return from interrupt to the newly
* established thread.
*/
DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
!spin_islocked(&g_cpu_paused[cpu]));
spin_unlock(&g_cpu_wait[cpu]);
return 0;
}
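Taken together, up_cpu_pause() and up_cpu_resume() are meant to bracket any modification of state owned by another CPU. A hypothetical caller sketch (the SMP up_schedule_sigaction() added later in this commit follows the same pattern):

/* Hypothetical caller sketch, not part of this commit */

void example_modify_other_cpu(int cpu)
{
  up_cpu_pause(cpu);     /* Send the IPI and spin until the target CPU has
                          * saved its context and parked in
                          * riscv_pause_handler() */

  /* ... safely modify the paused CPU's g_assignedtasks[cpu] list ... */

  up_cpu_resume(cpu);    /* Unlock g_cpu_wait[cpu] so the target CPU returns
                          * from the interrupt into the task now at the head
                          * of its list */
}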

View File

@ -0,0 +1,193 @@
/****************************************************************************
* arch/risc-v/src/k210/k210_cpustart.c
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <nuttx/arch.h>
#include <nuttx/spinlock.h>
#include <nuttx/sched_note.h>
#include "up_arch.h"
#include "sched/sched.h"
#include "init/init.h"
#include "up_internal.h"
#include "chip.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#if 0
# define DPRINTF(fmt, args...) _err(fmt, ##args)
#else
# define DPRINTF(fmt, args...) do {} while (0)
#endif
#ifdef CONFIG_DEBUG_FEATURES
# define showprogress(c) up_lowputc(c)
#else
# define showprogress(c)
#endif
/****************************************************************************
* Public Data
****************************************************************************/
extern volatile bool g_serial_ok;
extern int riscv_pause_handler(int irq, void *c, FAR void *arg);
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: k210_cpu_boot
*
* Description:
* Boot handler for cpu1
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
void k210_cpu_boot(int cpu)
{
if (1 < cpu)
{
return;
}
/* Wait for g_serial_ok set by cpu0 when booting */
while (!g_serial_ok)
{
}
/* Clear machine software interrupt for CPU(cpu) */
putreg32(0, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
/* Enable machine software interrupt for IPI to boot */
up_enable_irq(K210_IRQ_MSOFT);
/* Wait for an interrupt (the IPI sent by up_cpu_start()) */
asm("WFI");
showprogress('b');
DPRINTF("CPU%d Started\n", this_cpu());
/* TODO: Setup FPU */
/* Clear machine software interrupt for CPU(cpu) */
putreg32(0, (uintptr_t)K210_CLINT_MSIP + (4 * cpu));
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that this CPU has started */
sched_note_cpu_started(this_task());
#endif
(void)up_irq_enable();
/* Then transfer control to the IDLE task */
(void)nx_idle_task(0, NULL);
}
/****************************************************************************
* Name: up_cpu_start
*
* Description:
* In an SMP configuration, only one CPU is initially active (CPU 0). System
* initialization occurs on that single thread. At the completion of the
* initialization of the OS, just before beginning normal multitasking,
* the additional CPUs would be started by calling this function.
*
* Each CPU is provided the entry point to its IDLE task when started. A
* TCB for each CPU's IDLE task has been initialized and placed in the
* CPU's g_assignedtasks[cpu] list. No stack has been allocated or
* initialized.
*
* The OS initialization logic calls this function repeatedly until each
* CPU has been started, 1 through (CONFIG_SMP_NCPUS-1).
*
* Input Parameters:
* cpu - The index of the CPU being started. This will be a numeric
* value in the range of from one to (CONFIG_SMP_NCPUS-1). (CPU
* 0 is already active)
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_start(int cpu)
{
DPRINTF("cpu=%d\n", cpu);
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the start event */
sched_note_cpu_start(this_task(), cpu);
#endif
/* Send IPI to CPU(cpu) */
putreg32(1, (uintptr_t)K210_CLINT_MSIP + (cpu * 4));
return 0;
}
#endif /* CONFIG_SMP */

View File

@ -54,9 +54,18 @@
__start:
/* Load mhartid (cpuid) */
csrr a0, mhartid
/* Set stack pointer to the idle thread stack */
la sp, K210_IDLESTACK_TOP
bnez a0, 1f
la sp, K210_IDLESTACK0_TOP
j 2f
1:
la sp, K210_IDLESTACK1_TOP
2:
/* Disable all interrupts (i.e. timer, external) in mie */
@ -67,9 +76,9 @@ __start:
la t0, __trap_vec
csrw mtvec, t0
/* Jump to __k210_start */
/* Jump to __k210_start with mhartid */
jal x1, __k210_start
j __k210_start
/* We shouldn't return from __k210_start */
@ -89,6 +98,16 @@ _fini:
exception_common:
#if 0
csrr gp, mcause /* exception cause */
addi tp, zero, 10 /* 10 = machine ecall */
bgtu gp, tp, normal_irq
ld sp, g_fstack_top /* Set sp to fault stack */
normal_irq:
addi gp, zero, 0 /* clear */
#endif
addi sp, sp, -XCPTCONTEXT_SIZE
sd x1, 1*8(sp) /* ra */
@ -137,9 +156,20 @@ exception_common:
mv a1, sp /* context = sp */
#if CONFIG_ARCH_INTERRUPTSTACK > 3
/* Load mhartid (cpuid) */
csrr s0, mhartid
/* Switch to interrupt stack */
bnez s0, 3f
la sp, g_intstackbase
j 4f
3:
la sp, g_intstackbase
addi sp, sp, -((CONFIG_ARCH_INTERRUPTSTACK) & ~7)
4:
#endif
/* Call interrupt handler in C */
@ -205,9 +235,9 @@ exception_common:
.type g_intstackalloc, object
.type g_intstackbase, object
g_intstackalloc:
.skip ((CONFIG_ARCH_INTERRUPTSTACK & ~7))
.skip (((CONFIG_ARCH_INTERRUPTSTACK * 2) & ~7))
g_intstackbase:
.skip 8
.size g_intstackbase, 8
.size g_intstackalloc, (CONFIG_ARCH_INTERRUPTSTACK & ~7)
.size g_intstackalloc, ((CONFIG_ARCH_INTERRUPTSTACK * 2) & ~7)
#endif
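Summarizing the interrupt-stack change above (comment only, derived from the assembly):

/* g_intstackalloc now reserves (CONFIG_ARCH_INTERRUPTSTACK * 2) & ~7 bytes.
 * exception_common reads mhartid and selects the stack top:
 *
 *   hart 0: sp = g_intstackbase
 *   hart 1: sp = g_intstackbase - (CONFIG_ARCH_INTERRUPTSTACK & ~7)
 *
 * so each hart takes interrupts on its own half of the doubled region.
 */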

View File

@ -51,6 +51,24 @@
#include "k210.h"
/****************************************************************************
* Public Data
****************************************************************************/
#ifdef CONFIG_SMP
/* In configurations with multiple CPUs, there must be one such value
* for each processor that can receive an interrupt.
*/
volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
#else
volatile uint64_t *g_current_regs[1];
#endif
#ifdef CONFIG_SMP
extern int riscv_pause_handler(int irq, void *c, FAR void *arg);
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
@ -98,12 +116,23 @@ void up_irqinitialize(void)
/* CURRENT_REGS is non-NULL only while processing an interrupt */
g_current_regs = NULL;
CURRENT_REGS = NULL;
/* Attach the ecall interrupt handler */
irq_attach(K210_IRQ_ECALLM, up_swint, NULL);
#ifdef CONFIG_SMP
/* Clear MSOFT for CPU0 */
putreg32(0, K210_CLINT_MSIP);
/* Setup MSOFT for CPU0 with pause handler */
irq_attach(K210_IRQ_MSOFT, riscv_pause_handler, NULL);
up_enable_irq(K210_IRQ_MSOFT);
#endif
#ifndef CONFIG_SUPPRESS_INTERRUPTS
/* And finally, enable interrupts */
@ -123,9 +152,15 @@ void up_irqinitialize(void)
void up_disable_irq(int irq)
{
int extirq;
uint32_t oldstat;
uint64_t oldstat;
if (irq == K210_IRQ_MTIMER)
if (irq == K210_IRQ_MSOFT)
{
/* Read mstatus & clear machine software interrupt enable in mie */
asm volatile ("csrrc %0, mie, %1": "=r" (oldstat) : "r"(MIE_MSIE));
}
else if (irq == K210_IRQ_MTIMER)
{
/* Read mstatus & clear machine timer interrupt enable in mie */
@ -160,9 +195,15 @@ void up_disable_irq(int irq)
void up_enable_irq(int irq)
{
int extirq;
uint32_t oldstat;
uint64_t oldstat;
if (irq == K210_IRQ_MTIMER)
if (irq == K210_IRQ_MSOFT)
{
/* Read mstatus & set machine software interrupt enable in mie */
asm volatile ("csrrs %0, mie, %1": "=r" (oldstat) : "r"(MIE_MSIE));
}
else if (irq == K210_IRQ_MTIMER)
{
/* Read mstatus & set machine timer interrupt enable in mie */
@ -225,7 +266,7 @@ void up_ack_irq(int irq)
irqstate_t up_irq_save(void)
{
uint32_t oldstat;
uint64_t oldstat;
/* Read mstatus & clear machine interrupt enable (MIE) in mstatus */
@ -258,7 +299,7 @@ void up_irq_restore(irqstate_t flags)
irqstate_t up_irq_enable(void)
{
uint32_t oldstat;
uint64_t oldstat;
#if 1
/* Enable MEIE (machine external interrupt enable) */

View File

@ -53,7 +53,7 @@
* Public Data
****************************************************************************/
volatile uint64_t * g_current_regs;
extern void up_fault(int irq, uint64_t *regs);
/****************************************************************************
* Public Functions
@ -68,6 +68,13 @@ void *k210_dispatch_irq(uint64_t vector, uint64_t *regs)
uint32_t irq = (vector >> (27 + 32)) | (vector & 0xf);
uint64_t *mepc = regs;
/* Check if fault happened */
if (vector < 11)
{
up_fault((int)irq, regs);
}
/* Firstly, check if the irq is machine external interrupt */
if (K210_IRQ_MEXT == irq)
@ -94,17 +101,22 @@ void *k210_dispatch_irq(uint64_t vector, uint64_t *regs)
PANIC();
#else
/* Current regs non-zero indicates that we are processing an interrupt;
* g_current_regs is also used to manage interrupt level context switches.
* CURRENT_REGS is also used to manage interrupt level context switches.
*
* Nested interrupts are not supported
*/
DEBUGASSERT(g_current_regs == NULL);
g_current_regs = regs;
ASSERT(CURRENT_REGS == NULL);
CURRENT_REGS = regs;
/* Deliver the IRQ */
/* If irq is still exactly K210_IRQ_MEXT, there is no interrupt to deliver */
irq_dispatch(irq, regs);
if (K210_IRQ_MEXT != irq)
{
/* Deliver the IRQ */
irq_dispatch(irq, regs);
}
if (K210_IRQ_MEXT <= irq)
{
@ -115,13 +127,13 @@ void *k210_dispatch_irq(uint64_t vector, uint64_t *regs)
#endif
/* If a context switch occurred while processing the interrupt then
* g_current_regs may have changed value. If we return any value different
* CURRENT_REGS may have changed value. If we return any value different
* from the input regs, then the lower level will know that a context
* switch occurred during interrupt processing.
*/
regs = (uint64_t *)g_current_regs;
g_current_regs = NULL;
regs = (uint64_t *)CURRENT_REGS;
CURRENT_REGS = NULL;
return regs;
}

View File

@ -55,7 +55,11 @@
#endif
#define K210_IDLESTACK_SIZE (CONFIG_IDLETHREAD_STACKSIZE & ~7)
#define K210_IDLESTACK_TOP (K210_IDLESTACK_BASE + K210_IDLESTACK_SIZE)
#define K210_IDLESTACK0_TOP (K210_IDLESTACK_BASE + K210_IDLESTACK_SIZE)
#define K210_IDLESTACK1_TOP (K210_IDLESTACK0_TOP + K210_IDLESTACK_SIZE)
#define K210_IDLESTACK_TOP (K210_IDLESTACK1_TOP)
#endif /* _ARCH_RISCV_SRC_K210_K210_MEMORYMAP_H */
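The resulting idle-stack layout, derived from the macros above: the original region now holds one idle stack per CPU, and __start in k210_head.S selects the top by mhartid.

/* Idle-stack layout (derived from the definitions above):
 *
 *   K210_IDLESTACK_BASE                               bottom of CPU0 stack
 *   K210_IDLESTACK0_TOP = BASE + K210_IDLESTACK_SIZE  initial sp for hart 0
 *   K210_IDLESTACK1_TOP = TOP0 + K210_IDLESTACK_SIZE  initial sp for hart 1
 *   K210_IDLESTACK_TOP  = K210_IDLESTACK1_TOP         top of the whole region
 */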

View File

@ -36,8 +36,10 @@
#include <nuttx/config.h>
#include <nuttx/arch.h>
#include <arch/board/board.h>
#include "up_arch.h"
#include "k210_clockconfig.h"
#include "k210.h"
#include "chip.h"
@ -67,6 +69,9 @@
*/
uintptr_t g_idle_topstack = K210_IDLESTACK_TOP;
volatile bool g_serial_ok = false;
extern void k210_cpu_boot(uint32_t);
/****************************************************************************
* Public Functions
@ -76,11 +81,18 @@ uintptr_t g_idle_topstack = K210_IDLESTACK_TOP;
* Name: k210_start
****************************************************************************/
void __k210_start(void)
void __k210_start(uint32_t mhartid)
{
const uint32_t *src;
uint32_t *dest;
g_serial_ok = false;
if (0 < mhartid)
{
goto cpu1;
}
/* Clear .bss. We'll do this inline (vs. calling memset) just to be
* certain that there are no issues with the state of global variables.
*/
@ -117,6 +129,8 @@ void __k210_start(void)
showprogress('B');
g_serial_ok = true;
/* Do board initialization */
k210_boardinitialize();
@ -127,7 +141,16 @@ void __k210_start(void)
nx_start();
/* Shouldn't get here */
cpu1:
for (; ; );
showprogress('a');
#if defined(CONFIG_SMP) && (CONFIG_SMP_NCPUS == 2)
k210_cpu_boot(mhartid);
#endif
while (true)
{
asm("WFI");
}
}

View File

@ -58,7 +58,11 @@
#define getreg64(a) (*(volatile uint64_t *)(a))
#define putreg64(v,a) (*(volatile uint64_t *)(a) = (v))
#ifdef CONFIG_K210_WITH_QEMU
#define TICK_COUNT (10000000 / TICK_PER_SEC)
#else
#define TICK_COUNT ((k210_get_cpuclk() / 50) / TICK_PER_SEC)
#endif
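To put the two TICK_COUNT formulas in perspective, an illustrative calculation; the 390 MHz CPU clock and 100 Hz system tick are assumptions, not values taken from this commit:

/* Illustrative arithmetic only (assumed 390 MHz cpuclk, 100 Hz tick):
 *
 *   Real K210:  TICK_COUNT = (390000000 / 50) / 100 =  78000 mtime counts
 *   Under QEMU: TICK_COUNT =  10000000 / 100        = 100000 mtime counts
 *
 * i.e. the hardware mtime advances at cpuclk / 50, while QEMU models a
 * fixed 10 MHz timebase, hence the CONFIG_K210_WITH_QEMU split above.
 */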
/****************************************************************************
* Private Data

View File

@ -53,6 +53,8 @@
#include "up_internal.h"
#include "up_arch.h"
#include "irq/irq.h"
/****************************************************************************
* Public Functions
****************************************************************************/
@ -90,10 +92,11 @@
*
****************************************************************************/
#ifndef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
irqstate_t flags;
uint32_t int_ctx;
uint64_t int_ctx;
sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);
@ -109,8 +112,8 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
* being delivered to the currently executing task.
*/
sinfo("rtcb=0x%p g_current_regs=0x%p\n",
this_task(), g_current_regs);
sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n",
this_task(), CURRENT_REGS);
if (tcb == this_task())
{
@ -118,7 +121,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
* a task is signalling itself for some reason.
*/
if (!g_current_regs)
if (!CURRENT_REGS)
{
/* In this case just deliver the signal now. */
@ -134,7 +137,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
* logic would fail in the strange case where we are in an
* interrupt handler, the thread is signalling itself, but
* a context switch to another task has occurred so that
* g_current_regs does not refer to the thread of this_task()!
* CURRENT_REGS does not refer to the thread of this_task()!
*/
else
@ -145,19 +148,19 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
*/
tcb->xcp.sigdeliver = sigdeliver;
tcb->xcp.saved_epc = g_current_regs[REG_EPC];
tcb->xcp.saved_int_ctx = g_current_regs[REG_INT_CTX];
tcb->xcp.saved_epc = CURRENT_REGS[REG_EPC];
tcb->xcp.saved_int_ctx = CURRENT_REGS[REG_INT_CTX];
/* Then set up to vector to the trampoline with interrupts
* disabled
*/
g_current_regs[REG_EPC] = (uintptr_t)up_sigdeliver;
CURRENT_REGS[REG_EPC] = (uintptr_t)up_sigdeliver;
int_ctx = g_current_regs[REG_INT_CTX];
int_ctx = CURRENT_REGS[REG_INT_CTX];
int_ctx &= ~MSTATUS_MIE;
g_current_regs[REG_INT_CTX] = int_ctx;
CURRENT_REGS[REG_INT_CTX] = int_ctx;
/* And make sure that the saved context in the TCB
* is the same as the interrupt return context.
@ -165,9 +168,9 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
up_savestate(tcb->xcp.regs);
sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n",
sinfo("PC/STATUS Saved: %016x/%016x New: %016x/%016x\n",
tcb->xcp.saved_epc, tcb->xcp.saved_status,
g_current_regs[REG_EPC], g_current_regs[REG_INT_CTX]);
CURRENT_REGS[REG_EPC], CURRENT_REGS[REG_INT_CTX]);
}
}
@ -199,7 +202,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
tcb->xcp.regs[REG_INT_CTX] = int_ctx;
sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n",
sinfo("PC/STATUS Saved: %016x/%016x New: %016x/%016x\n",
tcb->xcp.saved_epc, tcb->xcp.saved_status,
tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_INT_CTX]);
}
@ -207,3 +210,192 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
leave_critical_section(flags);
}
#endif /* !CONFIG_SMP */
#ifdef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
irqstate_t flags;
uint64_t int_ctx;
int cpu;
int me;
sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);
/* Make sure that interrupts are disabled */
flags = enter_critical_section();
/* Refuse to handle nested signal actions */
if (!tcb->xcp.sigdeliver)
{
/* First, handle some special cases when the signal is being delivered
* to a task that is currently executing on any CPU.
*/
sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);
if (tcb->task_state == TSTATE_TASK_RUNNING)
{
me = this_cpu();
cpu = tcb->cpu;
/* CASE 1: We are not in an interrupt handler and a task is
* signaling itself for some reason.
*/
if (cpu == me && !CURRENT_REGS)
{
/* In this case just deliver the signal now.
* REVISIT: Signal handler will run in a critical section!
*/
sigdeliver(tcb);
}
/* CASE 2: The task that needs to receive the signal is running.
* This could happen if the task is running on another CPU OR if
* we are in an interrupt handler and the task is running on this
* CPU. In the former case, we will have to PAUSE the other CPU
* first. But in either case, we will have to modify the return
* state as well as the state in the TCB.
*/
else
{
/* If we are signaling a task running on another CPU, we have
* to PAUSE the other CPU.
*/
if (cpu != me)
{
/* Pause the CPU */
up_cpu_pause(cpu);
/* Wait while the pause request is pending */
while (up_cpu_pausereq(cpu))
{
}
/* Now tcb on the other CPU can be accessed safely */
/* Copy tcb->xcp.regs to tcb->xcp.saved. These will be restored
* by the signal trampoline after the signal has been delivered.
*/
tcb->xcp.sigdeliver = (FAR void *)sigdeliver;
tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC];
tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_INT_CTX];
/* Then set up vector to the trampoline with interrupts
* disabled. We must already be in privileged thread mode
* to be here.
*/
tcb->xcp.regs[REG_EPC] = (uintptr_t)up_sigdeliver;
int_ctx = tcb->xcp.regs[REG_INT_CTX];
int_ctx &= ~MSTATUS_MIE;
tcb->xcp.regs[REG_INT_CTX] = int_ctx;
}
else
{
/* tcb is running on the same CPU */
/* Save the return EPC and STATUS registers. These will be
* restored by the signal trampoline after the signal has been
* delivered.
*/
tcb->xcp.sigdeliver = (FAR void *)sigdeliver;
tcb->xcp.saved_epc = CURRENT_REGS[REG_EPC];
tcb->xcp.saved_int_ctx = CURRENT_REGS[REG_INT_CTX];
/* Then set up vector to the trampoline with interrupts
* disabled. The kernel-space trampoline must run in
* privileged thread mode.
*/
CURRENT_REGS[REG_EPC] = (uintptr_t)up_sigdeliver;
int_ctx = CURRENT_REGS[REG_INT_CTX];
int_ctx &= ~MSTATUS_MIE;
CURRENT_REGS[REG_INT_CTX] = int_ctx;
/* And make sure that the saved context in the TCB is the same
* as the interrupt return context.
*/
up_savestate(tcb->xcp.regs);
}
/* Increment the IRQ lock count so that when the task is restarted,
* it will hold the IRQ spinlock.
*/
DEBUGASSERT(tcb->irqcount < INT16_MAX);
tcb->irqcount++;
/* In an SMP configuration, the interrupt disable logic also
* involves spinlocks that are configured per the TCB irqcount
* field. This is logically equivalent to enter_critical_section().
* The matching call to leave_critical_section() will be
* performed in up_sigdeliver().
*/
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
/* RESUME the other CPU if it was PAUSED */
if (cpu != me)
{
up_cpu_resume(cpu);
}
}
}
/* Otherwise, we are (1) signaling a task that is not running from an
* interrupt handler or (2) we are not in an interrupt handler and the
* running task is signaling some other non-running task.
*/
else
{
/* Save the return EPC and STATUS registers. These will be restored
* by the signal trampoline after the signal has been delivered.
*/
tcb->xcp.sigdeliver = (FAR void *)sigdeliver;
tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC];
tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_INT_CTX];
/* Increment the IRQ lock count so that when the task is restarted,
* it will hold the IRQ spinlock.
*/
DEBUGASSERT(tcb->irqcount < INT16_MAX);
tcb->irqcount++;
/* Then set up to vector to the trampoline with interrupts
* disabled. We must already be in privileged thread mode to be
* here.
*/
tcb->xcp.regs[REG_EPC] = (uintptr_t)up_sigdeliver;
int_ctx = tcb->xcp.regs[REG_INT_CTX];
int_ctx &= ~MSTATUS_MIE;
tcb->xcp.regs[REG_INT_CTX] = int_ctx;
}
}
leave_critical_section(flags);
}
#endif /* CONFIG_SMP */

View File

@ -52,8 +52,10 @@
#include <arch/board/board.h>
#include "up_arch.h"
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_arch.h"
#include "up_internal.h"
/****************************************************************************
@ -156,48 +158,48 @@ static inline void up_registerdump(void)
{
/* Are user registers available from interrupt processing? */
if (g_current_regs)
if (CURRENT_REGS)
{
_alert("EPC:%016x \n",
g_current_regs[REG_EPC]);
CURRENT_REGS[REG_EPC]);
_alert("A0:%016x A1:%016x A2:%016x A3:%016x \n",
g_current_regs[REG_A0], g_current_regs[REG_A1],
g_current_regs[REG_A2], g_current_regs[REG_A3]);
CURRENT_REGS[REG_A0], CURRENT_REGS[REG_A1],
CURRENT_REGS[REG_A2], CURRENT_REGS[REG_A3]);
_alert("A4:%016x A5:%016x A6:%016x A7:%016x \n",
g_current_regs[REG_A4], g_current_regs[REG_A5],
g_current_regs[REG_A6], g_current_regs[REG_A7]);
CURRENT_REGS[REG_A4], CURRENT_REGS[REG_A5],
CURRENT_REGS[REG_A6], CURRENT_REGS[REG_A7]);
_alert("T0:%016x T1:%016x T2:%016x T3:%016x \n",
g_current_regs[REG_T0], g_current_regs[REG_T1],
g_current_regs[REG_T2], g_current_regs[REG_T3]);
CURRENT_REGS[REG_T0], CURRENT_REGS[REG_T1],
CURRENT_REGS[REG_T2], CURRENT_REGS[REG_T3]);
_alert("T4:%016x T5:%016x T6:%016x \n",
g_current_regs[REG_T4], g_current_regs[REG_T5],
g_current_regs[REG_T6]);
CURRENT_REGS[REG_T4], CURRENT_REGS[REG_T5],
CURRENT_REGS[REG_T6]);
_alert("S0:%016x S1:%016x S2:%016x S3:%016x \n",
g_current_regs[REG_S0], g_current_regs[REG_S1],
g_current_regs[REG_S2], g_current_regs[REG_S3]);
CURRENT_REGS[REG_S0], CURRENT_REGS[REG_S1],
CURRENT_REGS[REG_S2], CURRENT_REGS[REG_S3]);
_alert("S4:%016x S5:%016x S6:%016x S7:%016x \n",
g_current_regs[REG_S4], g_current_regs[REG_S5],
g_current_regs[REG_S6], g_current_regs[REG_S7]);
CURRENT_REGS[REG_S4], CURRENT_REGS[REG_S5],
CURRENT_REGS[REG_S6], CURRENT_REGS[REG_S7]);
_alert("S8:%016x S9:%016x S10:%016x S11:%016x \n",
g_current_regs[REG_S8], g_current_regs[REG_S9],
g_current_regs[REG_S10], g_current_regs[REG_S11]);
CURRENT_REGS[REG_S8], CURRENT_REGS[REG_S9],
CURRENT_REGS[REG_S10], CURRENT_REGS[REG_S11]);
#ifdef RISCV_SAVE_GP
_alert("GP:%016x SP:%016x FP:%016x TP:%016x RA:%016x \n",
g_current_regs[REG_GP], g_current_regs[REG_SP],
g_current_regs[REG_FP], g_current_regs[REG_TP],
g_current_regs[REG_RA]);
CURRENT_REGS[REG_GP], CURRENT_REGS[REG_SP],
CURRENT_REGS[REG_FP], CURRENT_REGS[REG_TP],
CURRENT_REGS[REG_RA]);
#else
_alert("SP:%016x FP:%016x TP:%016x RA:%016x \n",
g_current_regs[REG_SP], g_current_regs[REG_FP],
g_current_regs[REG_TP], g_current_regs[REG_RA]);
CURRENT_REGS[REG_SP], CURRENT_REGS[REG_FP],
CURRENT_REGS[REG_TP], CURRENT_REGS[REG_RA]);
#endif
}
}
@ -259,10 +261,10 @@ static void up_dumpstate(void)
/* Extract the user stack pointer */
sp = g_current_regs[REG_SP];
sp = CURRENT_REGS[REG_SP];
_alert("sp: %016x\n", sp);
}
else if (g_current_regs)
else if (CURRENT_REGS)
{
_alert("ERROR: Stack pointer is not within the interrupt stack\n");
up_stackdump(istackbase - istacksize, istackbase);
@ -309,11 +311,17 @@ static void _up_assert(int errorcode)
/* Are we in an interrupt handler or the idle task? */
if (g_current_regs || running_task()->flink == NULL)
if (CURRENT_REGS || running_task()->flink == NULL)
{
(void)up_irq_save();
for (; ; )
{
#ifdef CONFIG_SMP
/* Try (again) to stop activity on other CPUs */
(void)spin_trylock(&g_cpu_irqlock);
#endif
#if CONFIG_BOARD_RESET_ON_ASSERT >= 1
board_reset(CONFIG_BOARD_ASSERT_RESET_VALUE);
#endif
@ -379,16 +387,32 @@ void up_assert(const uint8_t *filename, int lineno)
(void)syslog_flush();
#ifdef CONFIG_SMP
#if CONFIG_TASK_NAME_SIZE > 0
_alert("Assertion failed CPU%d at file:%s line: %d task: %s\n",
up_cpu_index(), filename, lineno, rtcb->name);
#else
_alert("Assertion failed CPU%d at file:%s line: %d\n",
up_cpu_index(), filename, lineno);
#endif
#else
#if CONFIG_TASK_NAME_SIZE > 0
_alert("Assertion failed at file:%s line: %d task: %s\n",
filename, lineno, rtcb->name);
#else
_alert("Assertion failed at file:%s line: %d\n",
filename, lineno);
#endif
#endif
up_dumpstate();
#ifdef CONFIG_SMP
/* Show the CPU number */
_alert("CPU%d:\n", up_cpu_index());
#endif
/* Dump the state of all tasks (if available) */
up_showtasks();

View File

@ -117,10 +117,10 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
/* Are we in an interrupt handler? */
if (g_current_regs)
if (CURRENT_REGS)
{
/* Yes, then we have to do things differently.
* Just copy the g_current_regs into the OLD rtcb.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/
up_savestate(rtcb->xcp.regs);

View File

@ -0,0 +1,123 @@
/****************************************************************************
* arch/risc-v/src/rv64gc/up_fault.c
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include <nuttx/syslog/syslog.h>
#include <arch/board/board.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_arch.h"
#include "up_internal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_fault
*
* Description:
* This is the fault exception handler.
*
****************************************************************************/
void up_fault(int irq, uint64_t *regs)
{
CURRENT_REGS = regs;
_alert("EPC:%016x\n",
CURRENT_REGS[REG_EPC]);
_alert("Fault IRQ=%d \n", irq);
/* Dump register info */
_alert("A0:%016x A1:%016x A2:%016x A3:%016x \n",
CURRENT_REGS[REG_A0], CURRENT_REGS[REG_A1],
CURRENT_REGS[REG_A2], CURRENT_REGS[REG_A3]);
_alert("A4:%016x A5:%016x A6:%016x A7:%016x \n",
CURRENT_REGS[REG_A4], CURRENT_REGS[REG_A5],
CURRENT_REGS[REG_A6], CURRENT_REGS[REG_A7]);
_alert("T0:%016x T1:%016x T2:%016x T3:%016x \n",
CURRENT_REGS[REG_T0], CURRENT_REGS[REG_T1],
CURRENT_REGS[REG_T2], CURRENT_REGS[REG_T3]);
_alert("T4:%016x T5:%016x T6:%016x \n",
CURRENT_REGS[REG_T4], CURRENT_REGS[REG_T5],
CURRENT_REGS[REG_T6]);
_alert("S0:%016x S1:%016x S2:%016x S3:%016x \n",
CURRENT_REGS[REG_S0], CURRENT_REGS[REG_S1],
CURRENT_REGS[REG_S2], CURRENT_REGS[REG_S3]);
_alert("S4:%016x S5:%016x S6:%016x S7:%016x \n",
CURRENT_REGS[REG_S4], CURRENT_REGS[REG_S5],
CURRENT_REGS[REG_S6], CURRENT_REGS[REG_S7]);
_alert("S8:%016x S9:%016x S10:%016x S11:%016x \n",
CURRENT_REGS[REG_S8], CURRENT_REGS[REG_S9],
CURRENT_REGS[REG_S10], CURRENT_REGS[REG_S11]);
#ifdef RISCV_SAVE_GP
_alert("GP:%016x SP:%016x FP:%016x TP:%016x RA:%016x \n",
CURRENT_REGS[REG_GP], CURRENT_REGS[REG_SP],
CURRENT_REGS[REG_FP], CURRENT_REGS[REG_TP],
CURRENT_REGS[REG_RA]);
#else
_alert("SP:%016x FP:%016x TP:%016x RA:%016x \n",
CURRENT_REGS[REG_SP], CURRENT_REGS[REG_FP],
CURRENT_REGS[REG_TP], CURRENT_REGS[REG_RA]);
#endif
(void)up_irq_save();
}
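Throughout this change set, direct references to g_current_regs are replaced by a CURRENT_REGS macro so that the saved-register pointer can be selected per CPU in SMP mode. As a point of reference, the macro is typically defined along these lines (a sketch only; the exact definition lives in the architecture's up_internal.h and is not part of this diff):

#ifdef CONFIG_SMP
/* One interrupt-context register save pointer per CPU; pick the entry
 * for the CPU we are running on.
 */

extern volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
#  define CURRENT_REGS (g_current_regs[up_cpu_index()])
#else
extern volatile uint64_t *g_current_regs[1];
#  define CURRENT_REGS (g_current_regs[0])
#endif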

View File

@ -70,7 +70,7 @@
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;
uint32_t regval;
uint64_t regval;
/* Initialize the initial exception register context structure */

View File

@ -87,10 +87,10 @@ void up_release_pending(void)
/* Are we operating in interrupt context? */
if (g_current_regs)
if (CURRENT_REGS)
{
/* Yes, then we have to do things differently.
* Just copy the g_current_regs into the OLD rtcb.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/
up_savestate(rtcb->xcp.regs);

View File

@ -140,10 +140,10 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
/* Are we in an interrupt handler? */
if (g_current_regs)
if (CURRENT_REGS)
{
/* Yes, then we have to do things differently.
* Just copy the g_current_regs into the OLD rtcb.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/
up_savestate(rtcb->xcp.regs);

View File

@ -84,6 +84,15 @@ void up_sigdeliver(void)
int saved_errno = rtcb->pterrno;
#ifdef CONFIG_SMP
/* In the SMP case, we must terminate the critical section while the signal
* handler executes, but we also need to restore the irqcount when we
* resume the main thread of the task.
*/
int16_t saved_irqcount;
#endif
board_autoled_on(LED_SIGNAL);
sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
@ -94,6 +103,26 @@ void up_sigdeliver(void)
up_copystate(regs, rtcb->xcp.regs);
#ifdef CONFIG_SMP
/* In the SMP case, up_schedule_sigaction(0) will have incremented
* 'irqcount' in order to force us into a critical section. Save the
* pre-incremented irqcount.
*/
saved_irqcount = rtcb->irqcount - 1;
DEBUGASSERT(saved_irqcount >= 0);
/* Now we need to call leave_critical_section() repeatedly to get the irqcount
* to zero, freeing all global spinlocks that enforce the critical section.
*/
do
{
leave_critical_section(regs[REG_INT_CTX]);
}
while (rtcb->irqcount > 0);
#endif /* CONFIG_SMP */
#ifndef CONFIG_SUPPRESS_INTERRUPTS
/* Then make sure that interrupts are enabled. Signal handlers must always
* run with interrupts enabled.
@ -111,10 +140,28 @@ void up_sigdeliver(void)
* errno that is needed by the user logic (it is probably EINTR).
*/
sinfo("Resuming EPC: %08x INT_CTX: %08x\n",
sinfo("Resuming EPC: %016x INT_CTX: %016x\n",
regs[REG_EPC], regs[REG_INT_CTX]);
/* Call enter_critical_section() to disable local interrupts before
* restoring local context.
*
* Here, we should not use up_irq_save() in SMP mode.
* For example, if we called up_irq_save() here while another CPU had
* called up_cpu_pause() on this CPU, g_cpu_irqlock would already be
* held by this CPU, and the later call to enter_critical_section()
* that restores irqcount would deadlock. To avoid this situation, we
* call enter_critical_section() instead.
*/
#ifdef CONFIG_SMP
(void)enter_critical_section();
#else
(void)up_irq_save();
#endif
/* Restore the saved errno value */
rtcb->pterrno = saved_errno;
/* Modify the saved return state with the actual saved values in the
@ -131,16 +178,26 @@ void up_sigdeliver(void)
regs[REG_INT_CTX] = rtcb->xcp.saved_int_ctx;
rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */
#ifdef CONFIG_SMP
/* Restore the saved 'irqcount' and recover the critical section
* spinlocks.
*
* REVISIT: irqcount should now be one as a result of the call to
* enter_critical_section() above. Could saved_irqcount be zero? That
* would be a problem.
*/
DEBUGASSERT(rtcb->irqcount == 1);
while (rtcb->irqcount < saved_irqcount)
{
(void)enter_critical_section();
}
#endif
/* Then restore the correct state for this thread of
* execution.
*/
board_autoled_off(LED_SIGNAL);
up_fullcontextrestore(regs);
/* up_fullcontextrestore() should not return, but it could if software
* interrupts are disabled.
*/
DEBUGPANIC();
}
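To make the irqcount bookkeeping above easier to follow, here is a toy model (illustration only; the counter and helper names are hypothetical, not NuttX code) of how the drain and restore loops pair up around the signal handler:

static int irqcount;                  /* Models rtcb->irqcount */

static void enter(void)               /* Models enter_critical_section() */
{
  irqcount++;
}

static void leave(void)               /* Models leave_critical_section() */
{
  irqcount--;
}

static void deliver(void)
{
  int saved_irqcount = irqcount - 1;  /* Value before the scheduling logic's
                                       * extra increment */

  do                                  /* Drain the nesting before the handler */
    {
      leave();
    }
  while (irqcount > 0);

  /* ... the signal handler runs here, outside the critical section ... */

  enter();                            /* Matches the explicit call above */

  while (irqcount < saved_irqcount)   /* Recover the original nesting depth */
    {
      enter();
    }
}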

View File

@ -122,7 +122,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
{
uint64_t *regs = (uint64_t *)context;
DEBUGASSERT(regs && regs == g_current_regs);
DEBUGASSERT(regs && regs == CURRENT_REGS);
/* Software interrupt 0 is invoked with REG_A0 (REG_X10) = system call
* command and REG_A1-6 = variable number of
@ -147,16 +147,16 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
* A0 = SYS_restore_context
* A1 = restoreregs
*
* In this case, we simply need to set g_current_regs to restore register
* area referenced in the saved R1. context == g_current_regs is the normal
* exception return. By setting g_current_regs = context[R1], we force
* In this case, we simply need to set CURRENT_REGS to restore register
* area referenced in the saved R1. context == CURRENT_REGS is the normal
* exception return. By setting CURRENT_REGS = context[R1], we force
* the return to the saved context referenced in $a1.
*/
case SYS_restore_context:
{
DEBUGASSERT(regs[REG_A1] != 0);
g_current_regs = (uint64_t *)regs[REG_A1];
CURRENT_REGS = (uint64_t *)regs[REG_A1];
}
break;
@ -172,7 +172,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
*
* In this case, we save the context registers to the save register
* area referenced by the saved contents of R5 and then set
* g_current_regs to the save register area referenced by the saved
* CURRENT_REGS to the save register area referenced by the saved
* contents of R6.
*/
@ -180,7 +180,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
{
DEBUGASSERT(regs[REG_A1] != 0 && regs[REG_A2] != 0);
up_copystate((uint64_t *)regs[REG_A1], regs);
g_current_regs = (uint64_t *)regs[REG_A2];
CURRENT_REGS = (uint64_t *)regs[REG_A2];
}
break;
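For reference, these two service calls are normally issued from C through thin wrappers that load A0 with the command and A1/A2 with the register-area pointers. A sketch of the usual NuttX pattern (the macro definitions live in up_internal.h and are an assumption here, not part of this diff):

/* Restore the context in 'restoreregs'; this call does not return. */

#define up_fullcontextrestore(restoreregs) \
  sys_call1(SYS_restore_context, (uintptr_t)(restoreregs))

/* Save the current context in 'saveregs', then resume 'restoreregs'. */

#define up_switchcontext(saveregs, restoreregs) \
  sys_call2(SYS_switch_context, (uintptr_t)(saveregs), \
            (uintptr_t)(restoreregs))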
@ -210,7 +210,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
* the original mode.
*/
g_current_regs[REG_EPC] = rtcb->xcp.syscall[index].sysreturn;
CURRENT_REGS[REG_EPC] = rtcb->xcp.syscall[index].sysreturn;
#error "Missing logic -- need to restore the original mode"
rtcb->xcp.nsyscalls = index;
@ -237,7 +237,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
/* Verify that the SYS call number is within range */
DEBUGASSERT(g_current_regs[REG_A0] < SYS_maxsyscall);
DEBUGASSERT(CURRENT_REGS[REG_A0] < SYS_maxsyscall);
/* Make sure that we got here with no saved syscall
* return address. We cannot yet handle nested system calls.
@ -256,7 +256,7 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
/* Offset R0 to account for the reserved values */
g_current_regs[REG_A0] -= CONFIG_SYS_RESERVED;
CURRENT_REGS[REG_A0] -= CONFIG_SYS_RESERVED;
/* Indicate that we are in a syscall handler. */
@ -271,10 +271,10 @@ int up_swint(int irq, FAR void *context, FAR void *arg)
/* Report what happened. That might be difficult in the case of a context switch */
#ifdef CONFIG_DEBUG_SYSCALL_INFO
if (regs != g_current_regs)
if (regs != CURRENT_REGS)
{
svcinfo("SWInt Return: Context switch!\n");
up_registerdump((const uint32_t *)g_current_regs);
up_registerdump((const uint32_t *)CURRENT_REGS);
}
else
{

View File

@ -0,0 +1,123 @@
/****************************************************************************
* arch/risc-v/src/rv64gc/up_testset.S
*
* Copyright (C) 2020 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <masayuki.ishikawa@gmail.com>
*
* Based on arch/arm/src/armv7-m/gnu/up_testset.S
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/spinlock.h>
.file "up_testset.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl up_testset
/****************************************************************************
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
.text
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_testset
*
* Description:
* Perform an atomic test and set operation on the provided spinlock.
*
* This function must be provided via the architecture-specific logic.
*
* Input Parameters:
* lock - The address of the spinlock object (a0).
*
* Returned Value:
* The spinlock is always locked upon return. The previous value of the
* spinlock variable is returned, either SP_LOCKED if the spinlock was
* previously locked (meaning that the test-and-set operation failed to
* obtain the lock) or SP_UNLOCKED if the spinlock was previously unlocked
* (meaning that we successfully obtained the lock).
*
* Modifies: a1, a2
*
****************************************************************************/
.globl up_testset
.type up_testset, %function
up_testset:
li a1, SP_LOCKED
/* Test if the spinlock is locked or not */
retry:
lr.d a2, (a0) /* Load-reserve the current lock value */
beq a2, a1, locked /* Already locked? Go to locked: */
/* Not locked ... attempt to lock it */
sc.d a2, a1, (a0) /* Attempt to set the locked state (a1) to (a0) */
bnez a2, retry /* a2 is non-zero if the sc.d failed; try again */
/* Lock acquired -- return SP_UNLOCKED */
fence /* Required before accessing protected resource */
li a0, SP_UNLOCKED
jr ra
/* Lock not acquired -- return SP_LOCKED */
locked:
li a0, SP_LOCKED
jr ra
.size up_testset, . - up_testset
.end
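On the C side, up_testset() is consumed by the NuttX spinlock logic; a minimal usage sketch, assuming the <nuttx/spinlock.h> definitions of spinlock_t, SP_LOCKED and SP_UNLOCKED (the lock variable and helper names below are hypothetical):

#include <nuttx/spinlock.h>

static volatile spinlock_t g_demo_lock = SP_UNLOCKED;

static void demo_lock(void)
{
  /* up_testset() atomically stores SP_LOCKED and returns the previous
   * value; keep retrying until that previous value is SP_UNLOCKED,
   * i.e. until this CPU is the one that actually acquired the lock.
   */

  while (up_testset(&g_demo_lock) == SP_LOCKED)
    {
    }
}

static void demo_unlock(void)
{
  g_demo_lock = SP_UNLOCKED;  /* Simplified; real code would use spin_unlock() */
}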

View File

@ -100,10 +100,10 @@ void up_unblock_task(struct tcb_s *tcb)
/* Are we in an interrupt handler? */
if (g_current_regs)
if (CURRENT_REGS)
{
/* Yes, then we have to do things differently.
* Just copy the g_current_regs into the OLD rtcb.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/
up_savestate(rtcb->xcp.regs);

View File

@ -0,0 +1,43 @@
1. Download and install toolchain
$ curl -O https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-8.3.0-2019.08.0-x86_64-linux-ubuntu14.tar.gz
$ tar xzf riscv64-unknown-elf-gcc-8.3.0-2019.08.0-x86_64-linux-ubuntu14.tar.gz
$ export PATH=$PATH:$PWD/riscv64-unknown-elf-gcc-8.3.0-2019.08.0-x86_64-linux-ubuntu14/bin
2. Build and install qemu
$ git clone https://github.com/qemu/qemu
$ cd qemu
$ ./configure --target-list=riscv64-softmmu
$ make
$ sudo make install
3. Modify defconfig
--- a/boards/risc-v/k210/maix-bit/configs/nsh/defconfig
+++ b/boards/risc-v/k210/maix-bit/configs/nsh/defconfig
@@ -25,6 +25,7 @@ CONFIG_EXAMPLES_HELLO=y
CONFIG_FS_PROCFS=y
CONFIG_IDLETHREAD_STACKSIZE=2048
CONFIG_INTELHEX_BINARY=y
+CONFIG_K210_WITH_QEMU=y
CONFIG_LIBC_PERROR_STDOUT=y
CONFIG_LIBC_STRERROR=y
CONFIG_MAX_TASKS=64
4. Configure and build NuttX
$ mkdir ./nuttx; cd ./nuttx
$ git clone https://bitbucket.org/nuttx/nuttx.git
$ git clone https://bitbucket.org/nuttx/apps.git
$ cd nuttx
$ make distclean
$ ./tools/configure.sh maix-bit:nsh
$ make V=1
5. Run NuttX with QEMU
$ qemu-system-riscv64 -nographic -machine sifive_u -bios ./nuttx
6. TODO
Support FPU
Support RISC-V User mode

View File

@ -32,4 +32,5 @@
PLL setting (currently CPU clock freq is assumed to be 416MHz)
Boot from SPI-Flash
Support peripherals such as GPIO/SPI/I2C/...
Support FPU
Support RISC-V User mode

View File

@ -57,9 +57,7 @@ CONFIG_TASK_NAME_SIZE=20
CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_GETPRIME_STACKSIZE=2048
CONFIG_TESTING_OSTEST=y
CONFIG_UART0_RXBUFSIZE=8
CONFIG_UART0_SERIAL_CONSOLE=y
CONFIG_UART0_TXBUFSIZE=8
CONFIG_USERMAIN_STACKSIZE=3072
CONFIG_USER_ENTRYPOINT="nsh_main"
CONFIG_WDOG_INTRESERVE=0

View File

@ -0,0 +1,74 @@
#
# This file is autogenerated: PLEASE DO NOT EDIT IT.
#
# You can use "make menuconfig" to make any modifications to the installed .config file.
# You can then do "make savedefconfig" to generate a new defconfig file that includes your
# modifications.
#
# CONFIG_NSH_DISABLE_LOSMART is not set
# CONFIG_STANDARD_SERIAL is not set
CONFIG_ARCH="risc-v"
CONFIG_ARCH_BOARD="maix-bit"
CONFIG_ARCH_BOARD_MAIX_BIT=y
CONFIG_ARCH_CHIP="k210"
CONFIG_ARCH_CHIP_K210=y
CONFIG_ARCH_INTERRUPTSTACK=2048
CONFIG_ARCH_RISCV=y
CONFIG_ARCH_STACKDUMP=y
CONFIG_BINFMT_DISABLE=y
CONFIG_BOARD_LOOPSPERMSEC=15000
CONFIG_BUILTIN=y
CONFIG_BUILTIN_PROXY_STACKSIZE=2048
CONFIG_DEBUG_FULLOPT=y
CONFIG_DEBUG_SYMBOLS=y
CONFIG_DEV_ZERO=y
CONFIG_EXAMPLES_HELLO=y
CONFIG_FS_PROCFS=y
CONFIG_FS_PROCFS_REGISTER=y
CONFIG_IDLETHREAD_STACKSIZE=2048
CONFIG_INTELHEX_BINARY=y
CONFIG_LIBC_PERROR_STDOUT=y
CONFIG_LIBC_STRERROR=y
CONFIG_MAX_TASKS=64
CONFIG_MAX_WDOGPARMS=2
CONFIG_NFILE_DESCRIPTORS=8
CONFIG_NFILE_STREAMS=8
CONFIG_NSH_ARCHINIT=y
CONFIG_NSH_BUILTIN_APPS=y
CONFIG_NSH_DISABLE_IFUPDOWN=y
CONFIG_NSH_DISABLE_MKDIR=y
CONFIG_NSH_DISABLE_RM=y
CONFIG_NSH_DISABLE_RMDIR=y
CONFIG_NSH_DISABLE_UMOUNT=y
CONFIG_NSH_FILEIOSIZE=64
CONFIG_NSH_READLINE=y
CONFIG_NSH_STRERROR=y
CONFIG_POSIX_SPAWN_PROXY_STACKSIZE=2048
CONFIG_PREALLOC_MQ_MSGS=4
CONFIG_PREALLOC_TIMERS=4
CONFIG_PREALLOC_WDOGS=16
CONFIG_RAM_SIZE=2097152
CONFIG_RAM_START=0x80400000
CONFIG_RAW_BINARY=y
CONFIG_READLINE_CMD_HISTORY=y
CONFIG_RR_INTERVAL=200
CONFIG_SCHED_INSTRUMENTATION=y
CONFIG_SCHED_INSTRUMENTATION_BUFFER=y
CONFIG_SCHED_WAITPID=y
CONFIG_SMP=y
CONFIG_SMP_NCPUS=2
CONFIG_SPINLOCK_IRQ=y
CONFIG_STACK_COLORATION=y
CONFIG_START_DAY=8
CONFIG_START_MONTH=1
CONFIG_START_YEAR=2020
CONFIG_SYSTEM_NSH=y
CONFIG_SYSTEM_TASKSET=y
CONFIG_TASK_NAME_SIZE=20
CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_GETPRIME_STACKSIZE=2048
CONFIG_TESTING_OSTEST=y
CONFIG_TESTING_SMP=y
CONFIG_UART0_SERIAL_CONSOLE=y
CONFIG_USERMAIN_STACKSIZE=3072
CONFIG_USER_ENTRYPOINT="nsh_main"