Make some spacing comply better with the coding standard
parent a9c8458458
commit 0ca999e119
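The rules enforced throughout the diff below are spacing conventions from the NuttX coding standard: a space between the type name and '*' in pointer casts, for example (uint32_t *)context rather than (uint32_t*)context; spaces around binary operators, for example (16 * 1024) instead of (16*1024) and EIM_INTEN_CPU_TX | EIM_INTEN_CPU_RX instead of EIM_INTEN_CPU_TX|EIM_INTEN_CPU_RX; /* */ comments instead of //; and wrapping of over-long lines. The standalone sketch below is illustrative only and is not part of the commit; the names DEMO_WAYSIZE, word, and context are hypothetical.

/* Hypothetical example showing the spacing rules this commit applies.
 * It is not NuttX code; it only contrasts the old and new styles.
 */

#include <stdint.h>
#include <stdio.h>

#define DEMO_WAYSIZE (16 * 1024)   /* spaces around '*', not (16*1024) */

int main(void)
{
  uint32_t word = 0xe12fff13;      /* arbitrary value, for illustration */
  void *context = &word;

  /* Space between the type name and '*' in casts:
   * (uint32_t *)context, not (uint32_t*)context.
   */

  uint32_t *regs = (uint32_t *)context;

  /* Spaces around binary operators such as '&' and '|' */

  uint32_t insn = (regs[0] & 0xf000000f) | 0x01a0f000;

  printf("waysize=%d insn=%08x\n", DEMO_WAYSIZE, (unsigned int)insn);
  return 0;
}

Built with any C99 compiler (for example gcc -Wall demo.c), the snippet compiles and prints both values; its only purpose is to show the before/after spacing side by side.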
@@ -285,19 +285,19 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
  flags = irqsave();

  #ifdef CONFIG_ARCH_PGPOOL_MAPPING
- /* Get the virtual address corresponding to the physical page\
- * address.
- */
+ /* Get the virtual address corresponding to the physical page
+ * address.
+ */

- l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
+ l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
  #else
- /* Temporarily map the page into the virtual address space */
+ /* Temporarily map the page into the virtual address space */

- l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
- mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
- MMU_MEMFLAGS);
- l2table = (FAR uint32_t *)
- (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+ l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
+ mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
+ MMU_MEMFLAGS);
+ l2table = (FAR uint32_t *)
+ (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
  #endif

  /* Unmap this virtual page address.

@@ -236,7 +236,7 @@ int arm_allocpage(FAR struct tcb_s *tcb, FAR void **vpage)

  /* Finally, return the virtual address of allocated page */

- *vpage = (void*)(vaddr & ~PAGEMASK);
+ *vpage = (void *)(vaddr & ~PAGEMASK);
  return OK;
  }

@@ -124,7 +124,7 @@ static void up_stackdump(uint32_t sp, uint32_t stack_base)

  for (stack = sp & ~0x1f; stack < stack_base; stack += 32)
  {
- uint32_t *ptr = (uint32_t*)stack;
+ uint32_t *ptr = (uint32_t *)stack;
  lldbg("%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
  stack, ptr[0], ptr[1], ptr[2], ptr[3],
  ptr[4], ptr[5], ptr[6], ptr[7]);

@@ -187,7 +187,7 @@ static inline void up_registerdump(void)

  for (regs = REG_R0; regs <= REG_R15; regs += 8)
  {
- uint32_t *ptr = (uint32_t*)&current_regs[regs];
+ uint32_t *ptr = (uint32_t *)&current_regs[regs];
  lldbg("R%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",
  regs, ptr[0], ptr[1], ptr[2], ptr[3],
  ptr[4], ptr[5], ptr[6], ptr[7]);

@@ -232,7 +232,7 @@ static int assert_tracecallback(FAR struct usbtrace_s *trace, FAR void *arg)
  #ifdef CONFIG_ARCH_STACKDUMP
  static void up_dumpstate(void)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  uint32_t sp = up_getsp();
  uint32_t ustackbase;
  uint32_t ustacksize;

@@ -365,7 +365,7 @@ static void _up_assert(int errorcode)
  {
  /* Are we in an interrupt handler or the idle task? */

- if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
+ if (current_regs || ((struct tcb_s *)g_readytorun.head)->pid == 0)
  {
  (void)irqsave();
  for (;;)

@@ -395,7 +395,7 @@ static void _up_assert(int errorcode)
  void up_assert(const uint8_t *filename, int lineno)
  {
  #ifdef CONFIG_PRINT_TASKNAME
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  #endif
  board_led_on(LED_ASSERTION);

@@ -76,7 +76,7 @@

  void up_block_task(struct tcb_s *tcb, tstate_t task_state)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

@@ -128,7 +128,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Reset scheduler parameters */

@@ -152,16 +152,16 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  #ifdef CONFIG_ARCH_ADDRENV
- /* Make sure that the address environment for the previously
- * running task is closed down gracefully (data caches dump,
- * MMU flushed) and set up the address environment for the new
- * thread at the head of the ready-to-run list.
- */
+ /* Make sure that the address environment for the previously
+ * running task is closed down gracefully (data caches dump,
+ * MMU flushed) and set up the address environment for the new
+ * thread at the head of the ready-to-run list.
+ */

- (void)group_addrenv(rtcb);
+ (void)group_addrenv(rtcb);
  #endif
  /* Reset scheduler parameters */

@@ -112,7 +112,7 @@ uint32_t *arm_dataabort(uint32_t *regs, uint32_t dfar, uint32_t dfsr)
  * for register dumps and possibly context switching.
  */

- savestate = (uint32_t*)current_regs;
+ savestate = (uint32_t *)current_regs;
  current_regs = regs;

  /* In the NuttX on-demand paging implementation, only the read-only, .text

@@ -105,7 +105,7 @@ uint32_t *arm_doirq(int irq, uint32_t *regs)
  #ifdef CONFIG_ARCH_FPU
  /* Restore floating point registers */

- up_restorefpu((uint32_t*)current_regs);
+ up_restorefpu((uint32_t *)current_regs);
  #endif

  #ifdef CONFIG_ARCH_ADDRENV

@@ -175,10 +175,10 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_JUMP24:
  {
  bvdbg("Performing PC24 [%d] link at addr %08lx [%08lx] to sym '%s' st_value=%08lx\n",
- ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t*)addr),
+ ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t *)addr),
  sym, (long)sym->st_value);

- offset = (*(uint32_t*)addr & 0x00ffffff) << 2;
+ offset = (*(uint32_t *)addr & 0x00ffffff) << 2;
  if (offset & 0x02000000)
  {
  offset -= 0x04000000;

@@ -195,8 +195,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,

  offset >>= 2;

- *(uint32_t*)addr &= 0xff000000;
- *(uint32_t*)addr |= offset & 0x00ffffff;
+ *(uint32_t *)addr &= 0xff000000;
+ *(uint32_t *)addr |= offset & 0x00ffffff;
  }
  break;

@@ -204,34 +204,34 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_TARGET1: /* New ABI: TARGET1 always treated as ABS32 */
  {
  bvdbg("Performing ABS32 link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- (long)addr, (long)(*(uint32_t*)addr), sym, (long)sym->st_value);
+ (long)addr, (long)(*(uint32_t *)addr), sym, (long)sym->st_value);

- *(uint32_t*)addr += sym->st_value;
+ *(uint32_t *)addr += sym->st_value;
  }
  break;

  case R_ARM_V4BX:
  {
  bvdbg("Performing V4BX link at addr=%08lx [%08lx]\n",
- (long)addr, (long)(*(uint32_t*)addr));
+ (long)addr, (long)(*(uint32_t *)addr));

  /* Preserve only Rm and the condition code */

- *(uint32_t*)addr &= 0xf000000f;
+ *(uint32_t *)addr &= 0xf000000f;

  /* Change instruction to 'mov pc, Rm' */

- *(uint32_t*)addr |= 0x01a0f000;
+ *(uint32_t *)addr |= 0x01a0f000;
  }
  break;

  case R_ARM_PREL31:
  {
  bvdbg("Performing PREL31 link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- (long)addr, (long)(*(uint32_t*)addr), sym, (long)sym->st_value);
+ (long)addr, (long)(*(uint32_t *)addr), sym, (long)sym->st_value);

- offset = *(uint32_t*)addr + sym->st_value - addr;
- *(uint32_t*)addr = offset & 0x7fffffff;
+ offset = *(uint32_t *)addr + sym->st_value - addr;
+ *(uint32_t *)addr = offset & 0x7fffffff;
  }
  break;

@@ -239,10 +239,10 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_MOVT_ABS:
  {
  bvdbg("Performing MOVx_ABS [%d] link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t*)addr),
+ ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t *)addr),
  sym, (long)sym->st_value);

- offset = *(uint32_t*)addr;
+ offset = *(uint32_t *)addr;
  offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
  offset = (offset ^ 0x8000) - 0x8000;

@@ -252,8 +252,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  offset >>= 16;
  }

- *(uint32_t*)addr &= 0xfff0f000;
- *(uint32_t*)addr |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
+ *(uint32_t *)addr &= 0xfff0f000;
+ *(uint32_t *)addr |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
  }
  break;

@@ -75,17 +75,17 @@
  /* The size of one depends on ARM configuration */

  #if defined(CONFIG_ARMV7A_WAYSIZE_16KB)
- # define PL310_WAYSIZE (16*1024)
+ # define PL310_WAYSIZE (16 * 1024)
  #elif defined(CONFIG_ARMV7A_WAYSIZE_32KB)
- # define PL310_WAYSIZE (32*1024)
+ # define PL310_WAYSIZE (32 * 1024)
  #elif defined(CONFIG_ARMV7A_WAYSIZE_64KB)
- # define PL310_WAYSIZE (64*1024)
+ # define PL310_WAYSIZE (64 * 1024)
  #elif defined(CONFIG_ARMV7A_WAYSIZE_128KB)
- # define PL310_WAYSIZE (128*1024)
+ # define PL310_WAYSIZE (128 * 1024)
  #elif defined(CONFIG_ARMV7A_WAYSIZE_256KB)
- # define PL310_WAYSIZE (256*1024)
+ # define PL310_WAYSIZE (256 * 1024)
  #elif defined(CONFIG_ARMV7A_WAYSIZE_512KB)
- # define PL310_WAYSIZE (512*1024)
+ # define PL310_WAYSIZE (512 * 1024)
  #else
  # error "Way size not selected"
  #endif

@@ -79,7 +79,7 @@
  #ifndef CONFIG_ARCH_ROMPGTABLE
  void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
  {
- uint32_t *l1table = (uint32_t*)PGTABLE_BASE_VADDR;
+ uint32_t *l1table = (uint32_t *)PGTABLE_BASE_VADDR;
  uint32_t index = vaddr >> 20;

  /* Save the page table entry */

@@ -114,7 +114,7 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
  #if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
  void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry)
  {
- uint32_t *l1table = (uint32_t*)PGTABLE_BASE_VADDR;
+ uint32_t *l1table = (uint32_t *)PGTABLE_BASE_VADDR;
  uint32_t index = vaddr >> 20;

  /* Set the encoded page table entry */

@@ -154,7 +154,7 @@ void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry)
  void mmu_l2_setentry(uint32_t l2vaddr, uint32_t paddr, uint32_t vaddr,
  uint32_t mmuflags)
  {
- uint32_t *l2table = (uint32_t*)l2vaddr;
+ uint32_t *l2table = (uint32_t *)l2vaddr;
  uint32_t index;

  /* The table divides a 1Mb address space up into 256 entries, each

@@ -97,7 +97,7 @@ uint32_t *arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr)
  * for register dumps and possibly context switching.
  */

- savestate = (uint32_t*)current_regs;
+ savestate = (uint32_t *)current_regs;
  current_regs = regs;

  /* Get the (virtual) address of instruction that caused the prefetch abort.

@@ -65,7 +65,7 @@

  void up_release_pending(void)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

@@ -96,7 +96,7 @@ void up_release_pending(void)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -121,7 +121,7 @@ void up_release_pending(void)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  #ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously

@@ -92,7 +92,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
  }
  else
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  bool switch_needed;

  slldbg("TCB=%p PRI=%d\n", tcb, priority);

@@ -150,7 +150,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -174,7 +174,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  #ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously

@@ -121,7 +121,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)

  sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

- if (tcb == (struct tcb_s*)g_readytorun.head)
+ if (tcb == (struct tcb_s *)g_readytorun.head)
  {
  /* CASE 1: We are not in an interrupt handler and a task is
  * signalling itself for some reason.

@@ -82,7 +82,7 @@

  void up_sigdeliver(void)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  uint32_t regs[XCPTCONTEXT_REGS];
  sig_deliver_t sigdeliver;

@@ -379,7 +379,7 @@ uint32_t *arm_syscall(uint32_t *regs)
  * parameter will reside at an offset of 4 from the stack pointer.
  */

- regs[REG_R3] = *(uint32_t*)(regs[REG_SP]+4);
+ regs[REG_R3] = *(uint32_t *)(regs[REG_SP]+4);

  #ifdef CONFIG_ARCH_KERNEL_STACK
  /* If we are signalling a user process, then we must be operating

@@ -83,7 +83,7 @@

  void up_unblock_task(struct tcb_s *tcb)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  /* Verify that the context switch can be performed */

@@ -122,7 +122,7 @@ void up_unblock_task(struct tcb_s *tcb)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -148,16 +148,16 @@ void up_unblock_task(struct tcb_s *tcb)
  * g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  #ifdef CONFIG_ARCH_ADDRENV
- /* Make sure that the address environment for the previously
- * running task is closed down gracefully (data caches dump,
- * MMU flushed) and set up the address environment for the new
- * thread at the head of the ready-to-run list.
- */
+ /* Make sure that the address environment for the previously
+ * running task is closed down gracefully (data caches dump,
+ * MMU flushed) and set up the address environment for the new
+ * thread at the head of the ready-to-run list.
+ */

- (void)group_addrenv(rtcb);
+ (void)group_addrenv(rtcb);
  #endif
  /* Update scheduler parameters */

@@ -101,11 +101,11 @@ uint32_t *arm_va2pte(uintptr_t vaddr)

  /* Get the L1 table entry associated with this virtual address */

- L1 = *(uint32_t*)PG_POOL_VA2L1VADDR(vaddr);
+ L1 = *(uint32_t *)PG_POOL_VA2L1VADDR(vaddr);

  /* Get the address of the L2 page table from the L1 entry */

- L2 = (uint32_t*)PG_POOL_L12VPTABLE(L1);
+ L2 = (uint32_t *)PG_POOL_L12VPTABLE(L1);

  /* Get the index into the L2 page table. Each L1 entry maps
  * 256 x 4Kb or 1024 x 1Kb pages.

@@ -124,7 +124,7 @@ static void up_stackdump(uint32_t sp, uint32_t stack_base)

  for (stack = sp & ~0x1f; stack < stack_base; stack += 32)
  {
- uint32_t *ptr = (uint32_t*)stack;
+ uint32_t *ptr = (uint32_t *)stack;
  lldbg("%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
  stack, ptr[0], ptr[1], ptr[2], ptr[3],
  ptr[4], ptr[5], ptr[6], ptr[7]);

@@ -374,7 +374,7 @@ static void _up_assert(int errorcode)
  {
  /* Are we in an interrupt handler or the idle task? */

- if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
+ if (current_regs || ((struct tcb_s *)g_readytorun.head)->pid == 0)
  {
  (void)irqsave();
  for (;;)

@@ -404,7 +404,7 @@ static void _up_assert(int errorcode)
  void up_assert(const uint8_t *filename, int lineno)
  {
  #ifdef CONFIG_PRINT_TASKNAME
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  #endif

  board_led_on(LED_ASSERTION);

@@ -75,7 +75,7 @@

  void up_block_task(struct tcb_s *tcb, tstate_t task_state)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

@@ -127,7 +127,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Reset scheduler parameters */

@@ -142,7 +142,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)

  else
  {
- struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *nexttcb = (struct tcb_s *)g_readytorun.head;

  /* Reset scheduler parameters */

@@ -90,7 +90,7 @@ uint32_t *up_doirq(int irq, uint32_t *regs)
  * current_regs is also used to manage interrupt level context switches.
  */

- savestate = (uint32_t*)current_regs;
+ savestate = (uint32_t *)current_regs;
  current_regs = regs;

  /* Acknowledge the interrupt */

@@ -107,7 +107,7 @@ uint32_t *up_doirq(int irq, uint32_t *regs)
  * switch occurred during interrupt processing.
  */

- regs = (uint32_t*)current_regs;
+ regs = (uint32_t *)current_regs;

  /* Restore the previous value of current_regs. NULL would indicate that
  * we are no longer in an interrupt handler. It will be non-NULL if we

@@ -179,10 +179,10 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_JUMP24:
  {
  bvdbg("Performing PC24 [%d] link at addr %08lx [%08lx] to sym '%s' st_value=%08lx\n",
- ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t*)addr),
+ ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t *)addr),
  sym, (long)sym->st_value);

- offset = (*(uint32_t*)addr & 0x00ffffff) << 2;
+ offset = (*(uint32_t *)addr & 0x00ffffff) << 2;
  if (offset & 0x02000000)
  {
  offset -= 0x04000000;

@@ -199,8 +199,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,

  offset >>= 2;

- *(uint32_t*)addr &= 0xff000000;
- *(uint32_t*)addr |= offset & 0x00ffffff;
+ *(uint32_t *)addr &= 0xff000000;
+ *(uint32_t *)addr |= offset & 0x00ffffff;
  }
  break;

@@ -208,9 +208,9 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_TARGET1: /* New ABI: TARGET1 always treated as ABS32 */
  {
  bvdbg("Performing ABS32 link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- (long)addr, (long)(*(uint32_t*)addr), sym, (long)sym->st_value);
+ (long)addr, (long)(*(uint32_t *)addr), sym, (long)sym->st_value);

- *(uint32_t*)addr += sym->st_value;
+ *(uint32_t *)addr += sym->st_value;
  }
  break;

@@ -219,9 +219,9 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  * performs a self relocation */
  {
  bvdbg("Performing TARGET2 link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- (long)addr, (long)(*(uint32_t*)addr), sym, (long)sym->st_value);
+ (long)addr, (long)(*(uint32_t *)addr), sym, (long)sym->st_value);

- *(uint32_t*)addr += sym->st_value - addr;
+ *(uint32_t *)addr += sym->st_value - addr;
  }
  break;
  #endif

@@ -264,8 +264,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  * J2 = lower_insn[11]
  */

- upper_insn = (uint32_t)(*(uint16_t*)addr);
- lower_insn = (uint32_t)(*(uint16_t*)(addr + 2));
+ upper_insn = (uint32_t)(*(uint16_t *)addr);
+ lower_insn = (uint32_t)(*(uint16_t *)(addr + 2));

  bvdbg("Performing THM_JUMP24 [%d] link at addr=%08lx [%04x %04x] to sym=%p st_value=%08lx\n",
  ELF32_R_TYPE(rel->r_info), (long)addr, (int)upper_insn, (int)lower_insn,

@@ -337,10 +337,10 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  J2 = S ^ (~(offset >> 22) & 1);

  upper_insn = ((upper_insn & 0xf800) | (S << 10) | ((offset >> 12) & 0x03ff));
- *(uint16_t*)addr = (uint16_t)upper_insn;
+ *(uint16_t *)addr = (uint16_t)upper_insn;

  lower_insn = ((lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | ((offset >> 1) & 0x07ff));
- *(uint16_t*)(addr + 2) = (uint16_t)lower_insn;
+ *(uint16_t *)(addr + 2) = (uint16_t)lower_insn;

  bvdbg(" S=%d J1=%d J2=%d insn [%04x %04x]\n",
  S, J1, J2, (int)upper_insn, (int)lower_insn);

@@ -350,25 +350,25 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_V4BX:
  {
  bvdbg("Performing V4BX link at addr=%08lx [%08lx]\n",
- (long)addr, (long)(*(uint32_t*)addr));
+ (long)addr, (long)(*(uint32_t *)addr));

  /* Preserve only Rm and the condition code */

- *(uint32_t*)addr &= 0xf000000f;
+ *(uint32_t *)addr &= 0xf000000f;

  /* Change instruction to 'mov pc, Rm' */

- *(uint32_t*)addr |= 0x01a0f000;
+ *(uint32_t *)addr |= 0x01a0f000;
  }
  break;

  case R_ARM_PREL31:
  {
  bvdbg("Performing PREL31 link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- (long)addr, (long)(*(uint32_t*)addr), sym, (long)sym->st_value);
+ (long)addr, (long)(*(uint32_t *)addr), sym, (long)sym->st_value);

- offset = *(uint32_t*)addr + sym->st_value - addr;
- *(uint32_t*)addr = offset & 0x7fffffff;
+ offset = *(uint32_t *)addr + sym->st_value - addr;
+ *(uint32_t *)addr = offset & 0x7fffffff;
  }
  break;

@@ -376,10 +376,10 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  case R_ARM_MOVT_ABS:
  {
  bvdbg("Performing MOVx_ABS [%d] link at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
- ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t*)addr),
+ ELF32_R_TYPE(rel->r_info), (long)addr, (long)(*(uint32_t *)addr),
  sym, (long)sym->st_value);

- offset = *(uint32_t*)addr;
+ offset = *(uint32_t *)addr;
  offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
  offset = (offset ^ 0x8000) - 0x8000;

@@ -389,8 +389,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  offset >>= 16;
  }

- *(uint32_t*)addr &= 0xfff0f000;
- *(uint32_t*)addr |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
+ *(uint32_t *)addr &= 0xfff0f000;
+ *(uint32_t *)addr |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
  }
  break;

@@ -427,8 +427,8 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  * imm8 = imm16[0:7] = lower_insn[7:0]
  */

- upper_insn = (uint32_t)(*(uint16_t*)addr);
- lower_insn = (uint32_t)(*(uint16_t*)(addr + 2));
+ upper_insn = (uint32_t)(*(uint16_t *)addr);
+ lower_insn = (uint32_t)(*(uint16_t *)(addr + 2));

  bvdbg("Performing THM_MOVx [%d] link at addr=%08lx [%04x %04x] to sym=%p st_value=%08lx\n",
  ELF32_R_TYPE(rel->r_info), (long)addr, (int)upper_insn, (int)lower_insn,

@@ -461,11 +461,13 @@ int up_relocate(FAR const Elf32_Rel *rel, FAR const Elf32_Sym *sym,
  offset >>= 16;
  }

- upper_insn = ((upper_insn & 0xfbf0) | ((offset & 0xf000) >> 12) | ((offset & 0x0800) >> 1));
- *(uint16_t*)addr = (uint16_t)upper_insn;
+ upper_insn = ((upper_insn & 0xfbf0) | ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ *(uint16_t *)addr = (uint16_t)upper_insn;

- lower_insn = ((lower_insn & 0x8f00) | ((offset & 0x0700) << 4) | (offset & 0x00ff));
- *(uint16_t*)(addr + 2) = (uint16_t)lower_insn;
+ lower_insn = ((lower_insn & 0x8f00) | ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(uint16_t *)(addr + 2) = (uint16_t)lower_insn;

  bvdbg(" insn [%04x %04x]\n",
  (int)upper_insn, (int)lower_insn);

@@ -95,13 +95,13 @@
  int up_hardfault(int irq, FAR void *context)
  {
  #if defined(CONFIG_DEBUG_HARDFAULT) || !defined(CONFIG_ARMV7M_USEBASEPRI)
- uint32_t *regs = (uint32_t*)context;
+ uint32_t *regs = (uint32_t *)context;
  #endif

  /* Get the value of the program counter where the fault occurred */

  #ifndef CONFIG_ARMV7M_USEBASEPRI
- uint16_t *pc = (uint16_t*)regs[REG_PC] - 1;
+ uint16_t *pc = (uint16_t *)regs[REG_PC] - 1;

  /* Check if the pc lies in known FLASH memory.
  * REVISIT: What if the PC lies in "unknown" external memory? Best

@@ -147,7 +147,7 @@ void up_initial_state(struct tcb_s *tcb)
  #if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU) && \
  defined(CONFIG_ARCH_FPU)

- xcp->regs[REG_FPSCR] = 0; // XXX initial FPSCR should be configurable
+ xcp->regs[REG_FPSCR] = 0; /* REVISIT: Initial FPSCR should be configurable */
  xcp->regs[REG_FPReserved] = 0;

  #endif /* CONFIG_ARMV7M_CMNVECTOR && !CONFIG_ARMV7M_LAZYFPU && CONFIG_ARCH_FPU */

@@ -64,7 +64,7 @@

  void up_release_pending(void)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

@@ -95,7 +95,7 @@ void up_release_pending(void)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -110,7 +110,7 @@ void up_release_pending(void)

  else
  {
- struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *nexttcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -91,7 +91,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
  }
  else
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  bool switch_needed;

  slldbg("TCB=%p PRI=%d\n", tcb, priority);

@@ -150,7 +150,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -165,7 +165,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)

  else
  {
- struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *nexttcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -123,7 +123,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)

  sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

- if (tcb == (struct tcb_s*)g_readytorun.head)
+ if (tcb == (struct tcb_s *)g_readytorun.head)
  {
  /* CASE 1: We are not in an interrupt handler and a task is
  * signalling itself for some reason.

@@ -82,7 +82,7 @@

  void up_sigdeliver(void)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;
  uint32_t regs[XCPTCONTEXT_REGS];
  sig_deliver_t sigdeliver;

@@ -154,7 +154,7 @@ static void dispatch_syscall(void)

  int up_svcall(int irq, FAR void *context)
  {
- uint32_t *regs = (uint32_t*)context;
+ uint32_t *regs = (uint32_t *)context;
  uint32_t cmd;

  DEBUGASSERT(regs && regs == current_regs);

@@ -205,10 +205,10 @@ int up_svcall(int irq, FAR void *context)
  case SYS_save_context:
  {
  DEBUGASSERT(regs[REG_R1] != 0);
- memcpy((uint32_t*)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
+ memcpy((uint32_t *)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
  #if defined(CONFIG_ARCH_FPU) && \
  (!defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_ARMV7M_LAZYFPU))
- up_savefpu((uint32_t*)regs[REG_R1]);
+ up_savefpu((uint32_t *)regs[REG_R1]);
  #endif
  }
  break;

@@ -231,7 +231,7 @@ int up_svcall(int irq, FAR void *context)
  case SYS_restore_context:
  {
  DEBUGASSERT(regs[REG_R1] != 0);
- current_regs = (uint32_t*)regs[REG_R1];
+ current_regs = (uint32_t *)regs[REG_R1];
  }
  break;

@@ -254,12 +254,12 @@ int up_svcall(int irq, FAR void *context)
  case SYS_switch_context:
  {
  DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0);
- memcpy((uint32_t*)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
+ memcpy((uint32_t *)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
  #if defined(CONFIG_ARCH_FPU) && \
  (!defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_ARMV7M_LAZYFPU))
- up_savefpu((uint32_t*)regs[REG_R1]);
+ up_savefpu((uint32_t *)regs[REG_R1]);
  #endif
- current_regs = (uint32_t*)regs[REG_R2];
+ current_regs = (uint32_t *)regs[REG_R2];
  }
  break;

@@ -409,7 +409,7 @@ int up_svcall(int irq, FAR void *context)
  * parameter will reside at an offset of 4 from the stack pointer.
  */

- regs[REG_R3] = *(uint32_t*)(regs[REG_SP]+4);
+ regs[REG_R3] = *(uint32_t *)(regs[REG_SP]+4);
  }
  break;
  #endif

@@ -70,7 +70,7 @@

  void up_unblock_task(struct tcb_s *tcb)
  {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  /* Verify that the context switch can be performed */

@@ -109,7 +109,7 @@ void up_unblock_task(struct tcb_s *tcb)
  * of the g_readytorun task list.
  */

- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -124,7 +124,7 @@ void up_unblock_task(struct tcb_s *tcb)

  else
  {
- struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
+ struct tcb_s *nexttcb = (struct tcb_s *)g_readytorun.head;

  /* Update scheduler parameters */

@@ -541,7 +541,7 @@ static int c5471_mdrxbit (void)

  /* MDCLK falling edge. */

- putreg32((getreg32(GPIO_IO)&~GPIO_IO_MDCLK), GPIO_IO); /* MDCLK falling edge */
+ putreg32((getreg32(GPIO_IO) & ~GPIO_IO_MDCLK), GPIO_IO); /* MDCLK falling edge */
  if (bit_state)
  {
  return 1;

@@ -909,12 +909,12 @@ static int c5471_transmit(struct c5471_driver_s *c5471)

  /* Words #2 and #3 of descriptor */

- packetmem = (uint16_t*)getreg32(c5471->c_rxcpudesc + sizeof(uint32_t));
+ packetmem = (uint16_t *)getreg32(c5471->c_rxcpudesc + sizeof(uint32_t));
  for (i = 0; i < nshorts; i++, j++)
  {
  /* 16-bits at a time. */

- packetmem[i] = htons(((uint16_t*)dev->d_buf)[j]);
+ packetmem[i] = htons(((uint16_t *)dev->d_buf)[j]);
  }

  putreg32(((getreg32(c5471->c_rxcpudesc) & ~EIM_RXDESC_BYTEMASK) | framelen), c5471->c_rxcpudesc);

@@ -1192,7 +1192,7 @@ static void c5471_receive(struct c5471_driver_s *c5471)
  {
  /* Get the packet memory from words #2 and #3 of descriptor */

- packetmem = (uint16_t*)getreg32(c5471->c_txcpudesc + sizeof(uint32_t));
+ packetmem = (uint16_t *)getreg32(c5471->c_txcpudesc + sizeof(uint32_t));

  /* Divide by 2 with round up to get the number of 16-bit words. */

@@ -1206,7 +1206,7 @@ static void c5471_receive(struct c5471_driver_s *c5471)
  * a time.
  */

- ((uint16_t*)dev->d_buf)[j] = htons(packetmem[i]);
+ ((uint16_t *)dev->d_buf)[j] = htons(packetmem[i]);
  }
  }
  else

@@ -1223,7 +1223,7 @@ static void c5471_receive(struct c5471_driver_s *c5471)
  * the settings of a select few. Can leave descriptor words 2/3 alone.
  */

- putreg32((getreg32(c5471->c_txcpudesc) & (EIM_TXDESC_WRAP_NEXT|EIM_TXDESC_INTRE)),
+ putreg32((getreg32(c5471->c_txcpudesc) & (EIM_TXDESC_WRAP_NEXT | EIM_TXDESC_INTRE)),
  c5471->c_txcpudesc);

  /* Next, Give ownership of now emptied descriptor back to the Ether Module's SWITCH */

@@ -1700,7 +1700,8 @@ static int c5471_ifup(struct net_driver_s *dev)

  /* Enable interrupts going from EIM Module to Interrupt Module. */

- putreg32(((getreg32(EIM_INTEN) | EIM_INTEN_CPU_TX|EIM_INTEN_CPU_RX)), EIM_INTEN);
+ putreg32(((getreg32(EIM_INTEN) | EIM_INTEN_CPU_TX | EIM_INTEN_CPU_RX)),
+ EIM_INTEN);

  /* Next, go on-line. According to the C547X documentation the enables have to
  * occur in this order to insure proper operation; ESM first then the ENET.

@@ -1751,7 +1752,8 @@ static int c5471_ifdown(struct net_driver_s *dev)

  /* Disable interrupts going from EIM Module to Interrupt Module. */

- putreg32((getreg32(EIM_INTEN) & ~(EIM_INTEN_CPU_TX|EIM_INTEN_CPU_RX)), EIM_INTEN);
+ putreg32((getreg32(EIM_INTEN) & ~(EIM_INTEN_CPU_TX | EIM_INTEN_CPU_RX)),
+ EIM_INTEN);

  /* Disable ENET */

@@ -1809,7 +1811,7 @@ static int c5471_txavail(struct net_driver_s *dev)
  */

  if ((EIM_TXDESC_OWN_HOST & getreg32(c5471->c_rxcpudesc)) == 0)
- {
+ {
  /* If so, then poll uIP for new XMIT data */

  (void)devif_poll(&c5471->c_dev, c5471_txpoll);