add cache module for XiZI_AIOT

This commit is contained in:
sivip 2023-04-04 15:53:31 +08:00
parent 779444f998
commit 19967930ff
4 changed files with 371 additions and 0 deletions

View File

@ -4,5 +4,6 @@ ifeq ($(CONFIG_BOARD_IMX6Q_SABRELITE_EVB),y)
SRC_DIR := cortex-a9
endif
SRC_FILES := cache.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,2 @@
#include "cortex-a9/cortex_a9.h"
#include "cortex-a9/arm_cp_registers.h"

View File

@ -0,0 +1,316 @@
#include "arm_v7.h"
////////////////////////////////////////////////////////////////////////////////
// Code
////////////////////////////////////////////////////////////////////////////////
//! @brief Check if dcache is enabled or disabled
//! @brief Query whether the data cache is currently enabled.
//! @return 1 when SCTLR.C is set (dcache on), 0 otherwise.
int arm_dcache_state_query()
{
    uint32_t sctlr = 0; // System Control Register value
    // read SCTLR (cp15, c1, c0, op2=0)
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // a non-zero C bit means data caching is enabled
    return (sctlr & BM_SCTLR_C) ? 1 : 0;
}
//! @brief Enable the data cache, if it is not already enabled.
void arm_dcache_enable()
{
    uint32_t sctlr = 0; // System Control Register value
    // read current SCTLR
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // nothing to do when the C bit is already set
    if (sctlr & BM_SCTLR_C)
    {
        return;
    }
    // set C bit (data caching enable) and write SCTLR back
    sctlr |= BM_SCTLR_C;
    _ARM_MCR(15, 0, sctlr, 1, 0, 0);
    // barrier: ensure the enable takes effect before following instructions
    _ARM_DSB();
}
//! @brief Disable the data cache unconditionally.
void arm_dcache_disable()
{
    uint32_t sctlr = 0; // System Control Register value
    // read current SCTLR
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // clear the C bit (data caching enable) and write SCTLR back
    sctlr &= ~BM_SCTLR_C;
    _ARM_MCR(15, 0, sctlr, 1, 0, 0);
    // barrier: ensure the disable takes effect before following instructions
    _ARM_DSB();
}
//! @brief Invalidate the entire L1 data cache by set/way.
//!
//! Walks every (set, way) pair reported by the Cache Size ID register and
//! issues DCISW (invalidate data cache line by set/way) for each one.
//! The set/way encoding is hard-coded for a Cortex-A9 L1 cache: set index
//! at bit 5 (32-byte lines), way index at bit 30 (4 ways) — TODO confirm
//! against the selected cache level in CSSELR.
void arm_dcache_invalidate()
{
    uint32_t csid;   // Cache Size ID register (CCSIDR)
    uint32_t wayset; // encoded set/way operand for DCISW
    int num_sets;    // set counter, counts down to 0
    int num_ways;    // way counter, counts down to 0
    _ARM_MRC(15, 1, csid, 0, 0, 0); // read CCSIDR
    // CCSIDR holds (ways - 1) in bits [12:3] and (sets - 1) in bits [27:13];
    // both values are already decremented by 1.
    num_ways = (csid >> 0x03) & 0x3FFu;
    // invalidate all lines (all sets in all ways)
    while (num_ways >= 0)
    {
        num_sets = (csid >> 0x0D) & 0x7FFFu;
        while (num_sets >= 0)
        {
            // Cast before shifting: left-shifting a plain int into bit 30/31
            // is undefined behavior (signed overflow); unsigned shifts wrap.
            wayset = ((uint32_t)num_sets << 5u) | ((uint32_t)num_ways << 30u);
            // DCISW: invalidate data cache line by set/way
            _ARM_MCR(15, 0, wayset, 7, 6, 2);
            num_sets--;
        }
        num_ways--;
    }
    // complete all cache maintenance before subsequent instructions
    _ARM_DSB();
}
//! @brief Invalidate the single data cache line containing @a addr.
//! @param addr Any address inside the line to invalidate.
void arm_dcache_invalidate_line(const void * addr)
{
    uint32_t ccsidr = 0;   // Cache Size ID register
    uint32_t line_size;    // cache line size in bytes
    uint32_t line_addr;    // line-aligned virtual address
    // line size in bytes = 2^(CCSIDR.LineSize + 4)
    _ARM_MRC(15, 1, ccsidr, 0, 0, 0);
    line_size = 1 << ((ccsidr & 0x7) + 4);
    // round the address down to the start of its cache line
    line_addr = (uint32_t) addr & ~(line_size - 1);
    // DCIMVAC: invalidate data cache line by VA to PoC (Point of Coherency)
    _ARM_MCR(15, 0, line_addr, 7, 6, 1);
    // wait for the maintenance operation to complete
    _ARM_DSB();
}
//! @brief Invalidate every data cache line overlapping [addr, addr+length).
//!
//! Lines are discarded without being cleaned (DCIMVAC, invalidate by VA to
//! PoC), so any dirty data within the range is lost.
//!
//! @param addr   Start of the region; need not be line-aligned.
//! @param length Region size in bytes. A zero length is a no-op.
void arm_dcache_invalidate_mlines(const void * addr, size_t length)
{
    uint32_t va;
    uint32_t csidr = 0, line_size = 0;
    // guard: the do/while below would otherwise touch one line for length == 0
    if (length == 0)
    {
        return;
    }
    // get the cache line size in bytes from CCSIDR
    _ARM_MRC(15, 1, csidr, 0, 0, 0);
    line_size = 1 << ((csidr & 0x7) + 4);
    const void * end_addr = (const void *)((uint32_t)addr + length);
    do
    {
        // DCIMVAC: invalidate data cache line by VA to PoC;
        // align the address down to its line first
        va = (uint32_t)addr & (~(line_size - 1));
        _ARM_MCR(15, 0, va, 7, 6, 1);
        // advance to the next cache line
        addr = (const void *) ((uint32_t)addr + line_size);
    } while (addr < end_addr);
    // complete all cache maintenance before subsequent instructions
    _ARM_DSB();
}
//! @brief Clean (flush) the entire L1 data cache by set/way.
//!
//! Walks every (set, way) pair reported by the Cache Size ID register and
//! issues DCCSW (clean data cache line by set/way) for each one, writing
//! dirty lines back to memory. The set/way encoding is hard-coded for a
//! Cortex-A9 L1 cache: set index at bit 5, way index at bit 30.
void arm_dcache_flush()
{
    uint32_t csid;   // Cache Size ID register (CCSIDR)
    uint32_t wayset; // encoded set/way operand for DCCSW
    int num_sets;    // set counter, counts down to 0
    int num_ways;    // way counter, counts down to 0
    _ARM_MRC(15, 1, csid, 0, 0, 0); // read CCSIDR
    // CCSIDR holds (ways - 1) in bits [12:3] and (sets - 1) in bits [27:13];
    // both values are already decremented by 1.
    num_ways = (csid >> 0x03) & 0x3FFu;
    while (num_ways >= 0)
    {
        num_sets = (csid >> 0x0D) & 0x7FFFu;
        while (num_sets >= 0)
        {
            // Cast before shifting: left-shifting a plain int into bit 30/31
            // is undefined behavior (signed overflow); unsigned shifts wrap.
            wayset = ((uint32_t)num_sets << 5u) | ((uint32_t)num_ways << 30u);
            // DCCSW: clean data cache line by set/way
            _ARM_MCR(15, 0, wayset, 7, 10, 2);
            num_sets--;
        }
        num_ways--;
    }
    // complete all cache maintenance before subsequent instructions
    _ARM_DSB();
}
//! @brief Clean (flush) the single data cache line containing @a addr.
//! @param addr Any address inside the line to clean.
void arm_dcache_flush_line(const void * addr)
{
    uint32_t ccsidr = 0;   // Cache Size ID register
    uint32_t line_size;    // cache line size in bytes
    uint32_t line_addr;    // line-aligned virtual address
    // line size in bytes = 2^(CCSIDR.LineSize + 4)
    _ARM_MRC(15, 1, ccsidr, 0, 0, 0);
    line_size = 1 << ((ccsidr & 0x7) + 4);
    // round the address down to the start of its cache line
    line_addr = (uint32_t) addr & ~(line_size - 1);
    // DCCMVAC: clean data cache line by VA to PoC (Point of Coherency)
    _ARM_MCR(15, 0, line_addr, 7, 10, 1);
    // wait for the maintenance operation to complete
    _ARM_DSB();
}
//! @brief Clean (flush) every data cache line overlapping [addr, addr+length).
//!
//! Each line is cleaned by VA to PoC (DCCMVAC), writing dirty data back to
//! memory without invalidating the line.
//!
//! @param addr   Start of the region; need not be line-aligned.
//! @param length Region size in bytes. A zero length is a no-op.
void arm_dcache_flush_mlines(const void * addr, size_t length)
{
    uint32_t va;
    uint32_t csidr = 0, line_size = 0;
    // guard: the do/while below would otherwise touch one line for length == 0
    if (length == 0)
    {
        return;
    }
    const void * end_addr = (const void *)((uint32_t)addr + length);
    // get the cache line size in bytes from CCSIDR
    _ARM_MRC(15, 1, csidr, 0, 0, 0);
    line_size = 1 << ((csidr & 0x7) + 4);
    do
    {
        // DCCMVAC: clean data cache line by VA to PoC;
        // align the address down to its line first
        va = (uint32_t)addr & (~(line_size - 1));
        _ARM_MCR(15, 0, va, 7, 10, 1);
        // advance to the next cache line
        addr = (const void *) ((uint32_t)addr + line_size);
    } while (addr < end_addr);
    // complete all cache maintenance before subsequent instructions
    _ARM_DSB();
}
//! @brief Query whether the instruction cache is currently enabled.
//! @return 1 when SCTLR.I is set (icache on), 0 otherwise.
int arm_icache_state_query()
{
    uint32_t sctlr = 0; // System Control Register value
    // read SCTLR (cp15, c1, c0, op2=0)
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // a non-zero I bit means instruction caching is enabled
    return (sctlr & BM_SCTLR_I) ? 1 : 0;
}
//! @brief Enable the instruction cache, if it is not already enabled.
void arm_icache_enable()
{
    uint32_t sctlr = 0; // System Control Register value
    // read current SCTLR
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // nothing to do when the I bit is already set
    if (sctlr & BM_SCTLR_I)
    {
        return;
    }
    // set I bit (instruction caching enable) and write SCTLR back
    sctlr |= BM_SCTLR_I;
    _ARM_MCR(15, 0, sctlr, 1, 0, 0);
    // synchronize context on this processor
    _ARM_ISB();
}
//! @brief Disable the instruction cache unconditionally.
void arm_icache_disable()
{
    uint32_t sctlr = 0; // System Control Register value
    // read current SCTLR
    _ARM_MRC(15, 0, sctlr, 1, 0, 0);
    // clear the I bit (instruction caching enable) and write SCTLR back
    sctlr &= ~BM_SCTLR_I;
    _ARM_MCR(15, 0, sctlr, 1, 0, 0);
    // synchronize context on this processor
    _ARM_ISB();
}
//! @brief Invalidate the entire instruction cache (ICIALLU).
void arm_icache_invalidate()
{
    uint32_t sbz = 0u; // operand is "should be zero" for ICIALLU
    _ARM_MCR(15, 0, sbz, 7, 5, 0);
    // synchronize context so no stale instructions remain in the pipeline
    _ARM_ISB();
}
//! @brief Invalidate the entire instruction cache Inner Shareable (ICIALLUIS).
void arm_icache_invalidate_is()
{
    uint32_t sbz = 0u; // operand is "should be zero" for ICIALLUIS
    _ARM_MCR(15, 0, sbz, 7, 1, 0);
    // synchronize context on this processor
    _ARM_ISB();
}
//! @brief Invalidate the single instruction cache line containing @a addr.
//! @param addr Any address inside the line to invalidate.
void arm_icache_invalidate_line(const void * addr)
{
    uint32_t ccsidr = 0;   // Cache Size ID register
    uint32_t line_size;    // cache line size in bytes
    uint32_t line_addr;    // line-aligned virtual address
    // line size in bytes = 2^(CCSIDR.LineSize + 4)
    _ARM_MRC(15, 1, ccsidr, 0, 0, 0);
    line_size = 1 << ((ccsidr & 0x7) + 4);
    // round the address down to the start of its cache line
    line_addr = (uint32_t) addr & ~(line_size - 1);
    // ICIMVAU: invalidate instruction cache line by VA to PoU
    _ARM_MCR(15, 0, line_addr, 7, 5, 1);
    // synchronize context on this processor
    _ARM_ISB();
}
//! @brief Invalidate every instruction cache line overlapping
//!        [addr, addr+length).
//!
//! Each line is invalidated by VA to PoU (ICIMVAU). The original header
//! comment said "Clean data cache line to PoC", which did not match the
//! ICIMVAU operation actually issued.
//!
//! @param addr   Start of the region; need not be line-aligned.
//! @param length Region size in bytes. A zero length is a no-op.
void arm_icache_invalidate_mlines(const void * addr, size_t length)
{
    uint32_t va;
    uint32_t csidr = 0, line_size = 0;
    // guard: the do/while below would otherwise touch one line for length == 0
    if (length == 0)
    {
        return;
    }
    const void * end_addr = (const void *)((uint32_t)addr + length);
    // get the cache line size in bytes from CCSIDR
    _ARM_MRC(15, 1, csidr, 0, 0, 0);
    line_size = 1 << ((csidr & 0x7) + 4);
    do
    {
        // ICIMVAU: invalidate instruction cache line by VA to PoU;
        // align the address down to its line first
        va = (uint32_t)addr & (~(line_size - 1));
        _ARM_MCR(15, 0, va, 7, 5, 1);
        // advance to the next cache line
        addr = (const void *) ((uint32_t)addr + line_size);
    } while (addr < end_addr);
    // synchronize context on this processor
    _ARM_ISB();
}
////////////////////////////////////////////////////////////////////////////////
// EOF
////////////////////////////////////////////////////////////////////////////////

View File

@ -0,0 +1,52 @@
/*!
 * @file arm_cp_registers.h
 * @brief Bit-mask and bit-position definitions for ARM (ARMv7-A) coprocessor
 *        registers: ACTLR, DFSR, and SCTLR.
 */
#ifndef __ARM_CP_REGISTERS_H__
#define __ARM_CP_REGISTERS_H__
////////////////////////////////////////////////////////////////////////////////
// Definitions
////////////////////////////////////////////////////////////////////////////////
//! @name ACTLR — Auxiliary Control Register
//@{
#define BM_ACTLR_SMP (1 << 6) //!< SMP mode enable (coherent requests to/from other cores).
//@}
//! @name DFSR — Data Fault Status Register
//@{
#define BM_DFSR_WNR (1 << 11) //!< Write not Read bit. 0=read, 1=write.
#define BM_DFSR_FS4 (0x400) //!< Fault status bit 4.
#define BP_DFSR_FS4 (10) //!< Bit position for FS[4].
#define BM_DFSR_FS (0xf) //!< Fault status bits [3:0].
//@}
//! @name SCTLR — System Control Register
//@{
#define BM_SCTLR_TE (1 << 30) //!< Thumb exception enable.
#define BM_SCTLR_AFE (1 << 29) //!< Access flag enable.
#define BM_SCTLR_TRE (1 << 28) //!< TEX remap enable.
#define BM_SCTLR_NMFI (1 << 27) //!< Non-maskable FIQ support.
#define BM_SCTLR_EE (1 << 25) //!< Exception endianness.
#define BM_SCTLR_VE (1 << 24) //!< Interrupt vectors enable.
#define BM_SCTLR_FI (1 << 21) //!< Fast interrupt configurable enable.
#define BM_SCTLR_RR (1 << 14) //!< Round Robin cache replacement strategy.
#define BM_SCTLR_V (1 << 13) //!< Vectors base (high/low exception vectors).
#define BM_SCTLR_I (1 << 12) //!< Instruction cache enable.
#define BM_SCTLR_Z (1 << 11) //!< Branch prediction enable.
#define BM_SCTLR_SW (1 << 10) //!< SWP and SWPB enable.
#define BM_SCTLR_CP15BEN (1 << 5) //!< CP15 barrier enable.
#define BM_SCTLR_C (1 << 2) //!< Data cache enable.
#define BM_SCTLR_A (1 << 1) //!< Alignment check enable.
#define BM_SCTLR_M (1 << 0) //!< MMU enable.
//@}
//! @}
#endif // __ARM_CP_REGISTERS_H__
////////////////////////////////////////////////////////////////////////////////
// EOF
////////////////////////////////////////////////////////////////////////////////