arch/arm64: Fix error in getting the cache line size when there is no MMU

Signed-off-by: zhangyuan21 <zhangyuan21@xiaomi.com>
zhangyuan21 2023-03-06 18:49:26 +08:00 committed by Xiang Xiao
parent 4f86a62f91
commit 652fc7648e
3 changed files with 83 additions and 73 deletions
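
Summary of the change: before this commit the data cache line size was read from CTR_EL0 once, inside arm64_mmu_init(), and stored in the global g_dcache_line_size. On a configuration that never runs the MMU setup, or that performs cache maintenance before it, the global stayed 0, so arm64_dcache_range() advanced its address by zero bytes and never terminated. The fix makes g_dcache_line_size private to the cache code and initializes it lazily inside up_get_dcache_linesize(); arm64_mmu_init() and the MMU header no longer know about it. Judging by their contents, the three changed files are the arm64 cache maintenance code, the arm64 MMU setup code, and the MMU header that used to export the global. Most of the remaining hunks in the MMU source appear to be declaration reordering and whitespace cleanups, which is why several removed/added pairs look identical in this view (indentation is not preserved here).

A condensed sketch of the lazy query the diff introduces is below. It is simplified: the real code uses NuttX's read_sysreg() helper and the CTR_EL0_DMINLINE_* macros, while the raw mrs and bit masks here are for illustration only. Architecturally, CTR_EL0.DminLine holds log2 of the smallest data cache line in 4-byte words, so the line size in bytes is 4 << DminLine:

#include <stddef.h>
#include <stdint.h>

static size_t g_dcache_line_size;   /* 0 until first use */

size_t up_get_dcache_linesize(void)
{
  uint64_t ctr_el0;

  if (g_dcache_line_size == 0)
    {
      /* CTR_EL0.DminLine (bits [19:16]) is log2(words per line) */

      __asm__ volatile ("mrs %0, ctr_el0" : "=r" (ctr_el0));
      g_dcache_line_size = 4 << ((ctr_el0 >> 16) & 0xf);
    }

  return g_dcache_line_size;
}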


@@ -99,7 +99,7 @@ static inline void __ic_ialluis(void)
   __asm__ volatile ("ic ialluis" : : : "memory");
 }
 
-size_t g_dcache_line_size;
+static size_t g_dcache_line_size;
 
 /****************************************************************************
  * Private Function Prototypes
@@ -110,9 +110,11 @@ size_t g_dcache_line_size;
 static inline int arm64_dcache_range(uintptr_t start_addr,
                                      uintptr_t end_addr, int op)
 {
+  size_t line_size = up_get_dcache_linesize();
+
   /* Align address to line size */
 
-  start_addr = LINE_ALIGN_DOWN(start_addr, g_dcache_line_size);
+  start_addr = LINE_ALIGN_DOWN(start_addr, line_size);
 
   while (start_addr < end_addr)
     {
@@ -141,7 +143,7 @@ static inline int arm64_dcache_range(uintptr_t start_addr,
               DEBUGASSERT(0);
             }
         }
-      start_addr += g_dcache_line_size;
+      start_addr += line_size;
     }
 
   ARM64_DSB();
@@ -283,7 +285,27 @@ static inline int arm64_dcache_all(int op)
 size_t up_get_icache_linesize(void)
 {
-  return g_dcache_line_size;
+  return 64;
 }
 
+/****************************************************************************
+ * Name: up_invalidate_icache_all
+ *
+ * Description:
+ *   Invalidate all instruction caches to PoU, also flushes branch target
+ *   cache
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void up_invalidate_icache_all(void)
+{
+  __ic_ialluis();
+}
+
 /****************************************************************************
@@ -335,26 +357,6 @@ void up_invalidate_dcache_all(void)
   arm64_dcache_all(CACHE_OP_INVD);
 }
 
-/****************************************************************************
- * Name: up_invalidate_icache_all
- *
- * Description:
- *   Invalidate all instruction caches to PoU, also flushes branch target
- *   cache
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-void up_invalidate_icache_all(void)
-{
-  __ic_ialluis();
-}
-
 /****************************************************************************
  * Name: up_get_dcache_linesize
  *
@@ -371,6 +373,20 @@ void up_invalidate_icache_all(void)
 size_t up_get_dcache_linesize(void)
 {
+  uint64_t ctr_el0;
+  uint32_t dminline;
+
+  if (g_dcache_line_size != 0)
+    {
+      return g_dcache_line_size;
+    }
+
+  /* get cache line size */
+
+  ctr_el0 = read_sysreg(CTR_EL0);
+  dminline = (ctr_el0 >> CTR_EL0_DMINLINE_SHIFT) & CTR_EL0_DMINLINE_MASK;
+  g_dcache_line_size = 4 << dminline;
+
   return g_dcache_line_size;
 }
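
Note on the hunk above: g_dcache_line_size == 0 doubles as the "not yet initialized" marker, so the CTR_EL0 read happens only on the first call and every later call returns the cached value. For reference, a quick self-contained check of the decode arithmetic; the DminLine value of 4 is merely a typical example (Cortex-A53-class cores report 64-byte lines):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uint32_t dminline = 4;   /* illustrative CTR_EL0.DminLine value */

  /* DminLine is log2(words per line): 2^4 = 16 words of 4 bytes = 64 bytes,
   * which is exactly what 4 << dminline computes. */

  assert((4u << dminline) == 64u);
  return 0;
}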
@@ -397,7 +413,9 @@ size_t up_get_dcache_linesize(void)
 void up_clean_dcache(uintptr_t start, uintptr_t end)
 {
-  if (g_dcache_line_size < (end - start))
+  size_t cache_line = up_get_dcache_linesize();
+
+  if (cache_line < (end - start))
     {
       arm64_dcache_range(start, end, CACHE_OP_WB);
     }
@@ -458,7 +476,9 @@ void up_clean_dcache_all(void)
 void up_flush_dcache(uintptr_t start, uintptr_t end)
 {
-  if (g_dcache_line_size < (end - start))
+  size_t cache_line = up_get_dcache_linesize();
+
+  if (cache_line < (end - start))
     {
       arm64_dcache_range(start, end, CACHE_OP_WB_INVD);
     }
@@ -494,4 +514,3 @@ void up_flush_dcache_all(void)
 {
   arm64_dcache_all(CACHE_OP_WB_INVD);
 }


@@ -198,9 +198,9 @@ static const struct arm_mmu_config g_mmu_nxrt_config =
 static uint64_t get_tcr(int el)
 {
-  uint64_t tcr;
-  uint64_t va_bits = CONFIG_ARM64_VA_BITS;
-  uint64_t tcr_ps_bits;
+  uint64_t tcr;
+  uint64_t va_bits = CONFIG_ARM64_VA_BITS;
+  uint64_t tcr_ps_bits;
 
   tcr_ps_bits = TCR_PS_BITS;
@@ -237,10 +237,10 @@ static int pte_desc_type(uint64_t *pte)
 static uint64_t *calculate_pte_index(uint64_t addr, int level)
 {
-  int base_level = XLAT_TABLE_BASE_LEVEL;
-  uint64_t *pte;
-  uint64_t idx;
-  unsigned int i;
+  int base_level = XLAT_TABLE_BASE_LEVEL;
+  uint64_t *pte;
+  uint64_t idx;
+  unsigned int i;
 
   /* Walk through all translation tables to find pte index */
@@ -288,8 +288,8 @@ static void set_pte_table_desc(uint64_t *pte, uint64_t *table,
 static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
                                unsigned int attrs, unsigned int level)
 {
-  uint64_t desc = addr_pa;
-  unsigned int mem_type;
+  uint64_t desc = addr_pa;
+  unsigned int mem_type;
 
   desc |= (level == 3) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;
@@ -307,8 +307,8 @@ static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
   /* memory attribute index field */
 
-  mem_type = MT_TYPE(attrs);
-  desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type);
+  mem_type = MT_TYPE(attrs);
+  desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type);
 
   switch (mem_type)
     {
@@ -326,8 +326,8 @@ static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
          /* Map device memory as execute-never */
 
-         desc |= PTE_BLOCK_DESC_PXN;
-         desc |= PTE_BLOCK_DESC_UXN;
+         desc |= PTE_BLOCK_DESC_PXN;
+         desc |= PTE_BLOCK_DESC_UXN;
          break;
        }
@@ -383,9 +383,9 @@ static uint64_t *new_prealloc_table(void)
 static void split_pte_block_desc(uint64_t *pte, int level)
 {
-  uint64_t old_block_desc = *pte;
-  uint64_t *new_table;
-  unsigned int i = 0;
+  uint64_t old_block_desc = *pte;
+  uint64_t *new_table;
+  unsigned int i = 0;
 
   /* get address size shift bits for next level */
@@ -416,14 +416,14 @@ static void split_pte_block_desc(uint64_t *pte, int level)
 static void init_xlat_tables(const struct arm_mmu_region *region)
 {
-  uint64_t *pte;
-  uint64_t virt = region->base_va;
-  uint64_t phys = region->base_pa;
-  uint64_t size = region->size;
-  uint64_t attrs = region->attrs;
-  uint64_t level_size;
-  uint64_t *new_table;
-  unsigned int level = XLAT_TABLE_BASE_LEVEL;
+  unsigned int level = XLAT_TABLE_BASE_LEVEL;
+  uint64_t virt = region->base_va;
+  uint64_t phys = region->base_pa;
+  uint64_t size = region->size;
+  uint64_t attrs = region->attrs;
+  uint64_t *pte;
+  uint64_t *new_table;
+  uint64_t level_size;
 
 #ifdef CONFIG_MMU_DEBUG
   sinfo("mmap: virt %llx phys %llx size %llx\n", virt, phys, size);
@@ -454,9 +454,9 @@ static void init_xlat_tables(const struct arm_mmu_region *region)
           */
 
          set_pte_block_desc(pte, phys, attrs, level);
 
-         virt += level_size;
-         phys += level_size;
-         size -= level_size;
+         virt += level_size;
+         phys += level_size;
+         size -= level_size;
 
          /* Range is mapped, start again for next range */
@@ -484,15 +484,15 @@ static void init_xlat_tables(const struct arm_mmu_region *region)
 static void setup_page_tables(void)
 {
-  unsigned int index;
-  const struct arm_mmu_region *region;
-  uint64_t max_va = 0, max_pa = 0;
+  uint64_t max_va = 0, max_pa = 0;
+  const struct arm_mmu_region *region;
+  unsigned int index;
 
   for (index = 0; index < g_mmu_config.num_regions; index++)
     {
-      region = &g_mmu_config.mmu_regions[index];
-      max_va = MAX(max_va, region->base_va + region->size);
-      max_pa = MAX(max_pa, region->base_pa + region->size);
+      region = &g_mmu_config.mmu_regions[index];
+      max_va = MAX(max_va, region->base_va + region->size);
+      max_pa = MAX(max_pa, region->base_pa + region->size);
     }
 
   __MMU_ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
@@ -557,8 +557,8 @@ static void enable_mmu_el1(unsigned int flags)
 int arm_mmu_set_memregion(const struct arm_mmu_region *region)
 {
-  uint64_t virt = region->base_va;
-  uint64_t size = region->size;
+  uint64_t virt = region->base_va;
+  uint64_t size = region->size;
 
   if (((virt & (PAGE_SIZE - 1)) == 0) &&
       ((size & (PAGE_SIZE - 1)) == 0))
@@ -582,10 +582,8 @@ int arm_mmu_set_memregion(const struct arm_mmu_region *region)
 int arm64_mmu_init(bool is_primary_core)
 {
-  uint64_t val;
-  unsigned flags = 0;
-  uint64_t ctr_el0;
-  uint32_t dminline;
+  uint64_t val;
+  unsigned flags = 0;
 
   /* Current MMU code supports only EL1 */
@@ -618,11 +616,5 @@ int arm64_mmu_init(bool is_primary_core)
   enable_mmu_el1(flags);
 
-  /* get cache line size */
-
-  ctr_el0 = read_sysreg(CTR_EL0);
-  dminline = (ctr_el0 >> CTR_EL0_DMINLINE_SHIFT) & CTR_EL0_DMINLINE_MASK;
-  g_dcache_line_size = 4 << dminline;
-
   return 0;
 }
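
With this hunk arm64_mmu_init() no longer has the side effect of initializing the cache line size, so the relative ordering of MMU bring-up and cache maintenance no longer matters for that value. A hypothetical early-boot fragment under that assumption follows; early_flush_example and g_early_buf are made-up names, while up_flush_dcache and arm64_mmu_init are the real interfaces touched by this commit:

#include <nuttx/cache.h>
#include <stdbool.h>
#include <stdint.h>

int arm64_mmu_init(bool is_primary_core);   /* declared by the arm64 MMU header */

static uint8_t g_early_buf[256];

static void early_flush_example(void)
{
  /* Before this commit the next call could spin forever here, because the
   * line size stayed 0 until arm64_mmu_init() had run.  Now the cache code
   * resolves it on demand from CTR_EL0. */

  up_flush_dcache((uintptr_t)g_early_buf,
                  (uintptr_t)g_early_buf + sizeof(g_early_buf));

  arm64_mmu_init(true);   /* no longer touches the cache line size */
}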


@@ -241,7 +241,6 @@ struct arm_mmu_ptables
  */
 
 extern const struct arm_mmu_config g_mmu_config;
-extern size_t g_dcache_line_size;
 
 /****************************************************************************
  * Public Function Prototypes