XuanTie full-series RISC-V core support (E series / R series / C series) (#10343)

Yaochenger 2025-06-10 16:18:07 +08:00 committed by GitHub
parent 24e5954764
commit cec2dbd7e9
318 changed files with 24245 additions and 38420 deletions

View File

@@ -468,7 +468,16 @@
"RTT_BSP": "xuantie",
"RTT_TOOL_CHAIN": "sourcery-Xuantie-900-gcc-elf-newlib",
"SUB_RTT_BSP": [
"xuantie/smartl/e906"
"xuantie/smartl/e902",
"xuantie/smartl/e906",
"xuantie/smartl/e907",
"xuantie/xiaohui/c906",
"xuantie/xiaohui/c907",
"xuantie/xiaohui/c908",
"xuantie/xiaohui/c910",
"xuantie/xiaohui/r908",
"xuantie/xiaohui/r910",
"xuantie/xiaohui/r920"
]
}
]

View File

@@ -21,7 +21,7 @@ MEMORY
DRAM : ORIGIN = 0x50000000, LENGTH = 0x100000 /* on-chip DRAM 1*1MB */
}
__min_heap_size = 0x200;
__min_heap_size = 0x20000;
PROVIDE (__ram_end = 0x50100000 - 0x8);
PROVIDE (__heap_end = __ram_end);
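A note on the new value (the conversion is plain arithmetic; the RT-Thread motivation is an assumption):
/* 0x200 = 512 bytes; 0x20000 = 131072 bytes = 128 KiB: a 256x larger
 * floor for the heap, presumably sized for RT-Thread's default heap. */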

View File

@@ -1,128 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler
j Default_Handler
j Default_Handler
j Default_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable the global interrupt enable to avoid taking interrupts before the first task starts. */
/* The bit is set again when the first task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#else
la sp, g_top_irqstack
#endif
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,274 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
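The non-leaf entries above pack the next-level table's physical page number into bits 10 and up, with only the valid bit set. A minimal sketch of that encoding, assuming the standard Sv32/Sv39 PTE layout (the helper name is hypothetical):
/* Hypothetical helper mirroring the page_table_l2[0] / page_table_l1[0] lines above. */
static inline unsigned long make_nonleaf_pte(unsigned long next_table_pa)
{
    unsigned long ppn = next_table_pa >> 12; /* 4 KiB physical page number */
    return (ppn << 10) | 0x1;                /* V=1, R/W/X=0: pointer to next level */
}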
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
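The 0x8f written to PMP entry 0 decodes as one locked, top-of-range, RWX region. A sketch of the bit breakdown, assuming the standard RISC-V PMP configuration layout (the macro names are illustrative):
#define PMP_R     (1u << 0) /* readable */
#define PMP_W     (1u << 1) /* writable */
#define PMP_X     (1u << 2) /* executable */
#define PMP_A_TOR (1u << 3) /* address-matching mode 01: top-of-range */
#define PMP_L     (1u << 7) /* locked; also enforced in M-mode */
/* 0x8f == PMP_L | PMP_A_TOR | PMP_X | PMP_W | PMP_R: since pmpaddr0 holds
 * the byte address shifted right by 2, this grants RWX over [0x0, 0x90000000). */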
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
__set_MIE(mie);
}
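The final mie write enables the three standard machine-level interrupt sources. Decoded, assuming the standard RISC-V interrupt numbering (the macro names are illustrative):
#define MIE_MSIE (1u << 3)  /* machine software interrupt enable */
#define MIE_MTIE (1u << 7)  /* machine timer interrupt enable */
#define MIE_MEIE (1u << 11) /* machine external (PLIC) interrupt enable */
/* (1 << 11 | 1 << 7 | 1 << 3) == MIE_MEIE | MIE_MTIE | MIE_MSIE */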
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initializes caches, memory sections, PMP, interrupts, and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}
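On the float/vector enables in SystemInit: writing a 1 at the FS/VS shift moves the unit from Off to Initial, so F/V instructions stop trapping. A sketch, assuming the privileged-spec field positions (FS at mstatus[14:13], VS at mstatus[10:9]):
#define MSTATUS_FS_INITIAL (1ul << 13) /* assumption: FS field at bits 14:13 */
#define MSTATUS_VS_INITIAL (1ul << 9)  /* assumption: VS field at bits 10:9 */
/* 0b01 ("Initial") un-traps FP/vector instructions; the
 * CONFIG_CHECK_FPU_DIRTY / CONFIG_CHECK_VECTOR_DIRTY paths in the trap
 * handlers appear to rely on these fields flipping to 0b11 ("Dirty")
 * to decide whether F/V state must be saved. */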

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
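trap_c_callback gives a board one last hook before the hang loop. A usage sketch (the installer and dump functions are hypothetical):
static void board_fault_dump(void)
{
    /* e.g. flush a log buffer or latch a fault LED before the hang loop */
}

void board_install_trap_hook(void) /* hypothetical helper */
{
    extern void (*trap_c_callback)(void);
    trap_c_callback = board_fault_dump;
}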

View File

@@ -1,412 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
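A note on the (N+N) offsets used throughout these handlers: every 4-byte slot offset is written twice, so the same source doubles naturally to 8-byte slots on RV64. The caller-saved frame then decodes as follows (the macro names are illustrative):
#define FRAME_RA     (0 * 8)  /* (0+0) */
#define FRAME_T0     (1 * 8)  /* (4+4); t1, t2 follow */
#define FRAME_A0     (4 * 8)  /* (16+16); a1..a7 follow */
#define FRAME_T3     (12 * 8) /* (48+48); t4..t6 follow */
#define FRAME_EPC    (17 * 8) /* (68+68); slot 16 at (64+64) is padding */
#define FRAME_STATUS (18 * 8) /* (72+72) */
#define FRAME_SIZE   (19 * 8) /* (76+76) bytes popped on exit */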

View File

@@ -19,7 +19,9 @@
#include <csi_config.h>
.globl Reset_Handler
.global __rt_rvstack
.equ Mcoret_Handler, SW_handler
.equ Mirq_Handler, SW_handler
.section .vectors
.align 6
.globl __Vectors
@@ -102,7 +104,7 @@ Reset_Handler:
#endif
#endif
la a0, pre_main
la a0, rtthread_startup
jalr a0
.size Reset_Handler, . - Reset_Handler
@@ -118,6 +120,7 @@ __exit:
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
__rt_rvstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
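The functional change in this startup file: the core-timer and external-interrupt vectors are aliased to RT-Thread's SW_handler via .equ, the boot path jumps to rtthread_startup instead of pre_main, and __rt_rvstack exports the interrupt-stack top to the kernel. rtthread_startup is RT-Thread's standard entry point; a sketch of its canonical flow (the body is illustrative, not this PR's code):
int rtthread_startup(void)
{
    rt_hw_interrupt_disable();   /* board bring-up runs with IRQs masked */
    rt_hw_board_init();          /* clocks, heap, console */
    rt_system_scheduler_init();
    rt_application_init();       /* creates the main thread */
    rt_system_scheduler_start(); /* hands control to the scheduler; no return */
    return 0;
}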

View File

@@ -1,128 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler
j Default_Handler
j Default_Handler
j Default_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable the global interrupt enable to avoid taking interrupts before the first task starts. */
/* The bit is set again when the first task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#else
la sp, g_top_irqstack
#endif
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,274 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check whether the current system is bare-metal or not!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initializes caches, memory sections, PMP, interrupts, and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}
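On the delegation values above: mideleg 0x222 routes the three supervisor interrupt sources to S-mode, and medeleg bit 9 delegates environment calls made from S-mode. Decoded, assuming the standard cause numbering (the macro names are illustrative):
#define MIDELEG_SSIP (1u << 1) /* supervisor software interrupt */
#define MIDELEG_STIP (1u << 5) /* supervisor timer interrupt */
#define MIDELEG_SEIP (1u << 9) /* supervisor external interrupt */
/* 0x222 == MIDELEG_SEIP | MIDELEG_STIP | MIDELEG_SSIP;
 * medeleg bit 9 == exception cause 9, ecall from S-mode. */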

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
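Why regs[31] and regs[32] are mepc and mstatus: the trap stub stores x1..x31 into slots 0..30 (slot 1 holds the pre-trap sp, i.e. x2, recovered from mscratch) and appends the CSRs after them. A C view of that frame on RV64 (the struct is illustrative):
struct trap_frame {        /* matches the (N+N) offsets in the trap stub */
    unsigned long x[31];   /* x1..x31; x[1] is the old sp from mscratch */
    unsigned long mepc;    /* regs[31], offset (124+124) */
    unsigned long mstatus; /* regs[32], offset (128+128) */
    unsigned long mcause;  /* offset (132+132) */
    unsigned long mtval;   /* offset (136+136) */
};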

View File

@@ -1,412 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
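Each def_irq_handler name above is only a weak alias for Default_Handler, so a port overrides a vector simply by providing a strong definition with the same name. A sketch in C:
/* A strong definition anywhere in the image replaces the weak alias. */
void CORET_IRQHandler(void)
{
    /* acknowledge the core timer and drive the OS tick here */
}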

View File

@@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable the global interrupt enable to avoid taking interrupts before the first task starts. */
/* The bit is set again when the first task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif
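Secondary harts spin in secondary_cpu_entry until secondary_boot_flag reads 0xa55a. A sketch of the primary-side release, with an explicit fence so kernel initialization is visible before the flag flips (the function name is hypothetical):
extern volatile unsigned long secondary_boot_flag;

void release_secondary_harts(void) /* hypothetical helper */
{
    __asm__ volatile("fence rw, rw"); /* publish kernel init before the flag */
    secondary_boot_flag = 0xa55a;
}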

View File

@@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check whether the current system is bare-metal or not!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initializes caches, memory sections, PMP, interrupts, and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}
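The L2 ECC path registers ecc_l2_irqhandler through csi_irq_attach, passing the device struct back as the handler argument. A sketch of the handler's expected shape (the body is illustrative; status-register details are SoC-specific):
void ecc_l2_irqhandler(void *arg)
{
    csi_dev_t *dev = (csi_dev_t *)arg; /* the ecc_l2_dev registered above */
    (void)dev;
    /* read and clear the L2 ECC status; log correctable errors,
     * reset or halt on uncorrectable ones */
}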

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
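/* Illustrative example: with CONFIG_ARCH_INTERRUPTSTACK = 4096, hart 0
 * starts with sp = g_base_irqstack + 4096 and hart 1 with + 8192; each
 * hart's stack then grows down through its own 4 KiB slice. */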
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
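/* Build the 35-slot frame that trap_c() reads as regs[]: x1, the
 * trap-time sp (via mscratch), x3..x31, then mepc, mstatus, mcause
 * and mtval. */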
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by other
 * handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler


@ -0,0 +1,13 @@
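# Registers the bare-metal startup sources as the 'sys' group with
# RT-Thread's SCons build system.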
from building import *

cwd = GetCurrentDir()
CPPPATH = [cwd]
src = ['startup.S', 'system.c', 'trap_c.c', 'vectors.S']

group = DefineGroup('sys', src, depend = [''], CPPPATH = CPPPATH)
Return('group')


@ -23,7 +23,9 @@
#endif
.globl Reset_Handler
.global __rt_rvstack
.equ Mcoret_Handler, SW_handler
.equ Mirq_Handler, SW_handler
.section .vectors
.align 6
.globl __Vectors
@ -132,7 +134,7 @@ Reset_Handler:
#endif
#endif
la a0, pre_main
la a0, rtthread_startup
jalr a0
.size Reset_Handler, . - Reset_Handler
@ -169,6 +171,7 @@ hart_out_of_bounds_loop:
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
__rt_rvstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
@ -177,6 +180,7 @@ g_top_irqstack:
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP


@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
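/* mtvec bit 0 selects vectored mode: interrupts are dispatched through
 * the jump table at __Vectors (entry 0 handles synchronous exceptions). */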
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
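/* Secondary harts spin here until the boot hart writes 0xa55a to
 * secondary_boot_flag, then branch to the SMP C entry below. */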
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif


@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
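/* With MAEE enabled, the UPPER_ATTRS() attribute bits in the
 * page-table entries below take effect. */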
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
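/* satp holds the physical page number of the root table; Sv39
 * (3-level) translation is used on RV64 and Sv32 (2-level) on RV32. */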
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
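/* Set mepc to the instruction just past the mret below, presumably
 * 14 bytes = auipc(4) + compressed addi(2) + csrw(4) + mret(4), so
 * that mret lands back here but now running in S-mode. */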
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
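/* pmpaddr holds a physical address shifted right by 2; cfg value
 * 0x8f = L | A=TOR | X | W | R, i.e. a locked top-of-range entry
 * granting full access to [0, 0x90000000). */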
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the psr and vbr.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
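/* exception cause 9 = environment call from S-mode */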
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
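/* 0x222 sets bits 1, 5 and 9: supervisor software, timer and
 * external interrupts */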
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}


@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by other
 * handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler


@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif


@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
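/* mepc = &auipc + 14, i.e. the first instruction after the mret below
   (the offset assumes RVC encodings: auipc 4B + c.addi 2B + csrw 4B + mret 4B) */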
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
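/* 0x8f = L | A=TOR | X | W | R: lock PMP0 as an RWX top-of-range region covering [0x0, 0x90000000) */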
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart thresholds to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
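/* claim and complete every interrupt ID to clear any stale in-service state */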
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* PLIC_PER: permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up CSRs, caches, PMP and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable THEADISAEE (bit 22) and MM unaligned-access (bit 15) */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* delegate ecall-from-S (exception code 9) to S-mode */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* delegate S-mode software/timer/external interrupts (mideleg bits 1, 5, 9) */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
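/* regs[0..30] hold x1..x31 as saved by the trap entry (sp recovered from mscratch); regs[31] is mepc, regs[32] is mstatus */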
printk("CPU Exception(mcause): NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
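/* interrupted sp is parked in sscratch; run the handler on the dedicated IRQ stack */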
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
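/* RV64: registers are 8 bytes wide, so every RV32 frame offset below is doubled */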
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
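/* dump the full integer register file plus mepc/mstatus/mcause/mtval for the exception report */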
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops. It can be
 * overridden by a real handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
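/* install __Vectors with mtvec.MODE = 1 (vectored interrupt dispatch) */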
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id in a0 is within range */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* non-boot harts jump straight to the secondary entry */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
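/* spin until the boot hart publishes the 0xa55a magic in secondary_boot_flag */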
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head extended MMU page attributes */
status |= (1 << 21);
__set_MXSTATUS(status);
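/* non-leaf PTEs: V (bit 0) set, PPN = next-level table address >> 12, stored from bit 10 */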
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
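/* mepc = &auipc + 14, i.e. the first instruction after the mret below
   (the offset assumes RVC encodings: auipc 4B + c.addi 2B + csrw 4B + mret 4B) */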
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
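/* 0x8f = L | A=TOR | X | W | R: lock PMP0 as an RWX top-of-range region covering [0x0, 0x90000000) */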
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart thresholds to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
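/* claim and complete every interrupt ID to clear any stale in-service state */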
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* PLIC_PER: permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up CSRs, caches, PMP and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable THEADISAEE (bit 22) and MM unaligned-access (bit 15) */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* delegate ecall-from-S (exception code 9) to S-mode */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* delegate S-mode software/timer/external interrupts (mideleg bits 1, 5, 9) */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
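/* regs[0..30] hold x1..x31 as saved by the trap entry (sp recovered from mscratch); regs[31] is mepc, regs[32] is mstatus */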
printk("CPU Exception(mcause): NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
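/* interrupted sp is parked in sscratch; run the handler on the dedicated IRQ stack */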
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
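/* RV64: registers are 8 bytes wide, so every RV32 frame offset below is doubled */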
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
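/* dump the full integer register file plus mepc/mstatus/mcause/mtval for the exception report */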
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops. It can be
 * overridden by a real handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
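/* install __Vectors with mtvec.MODE = 1 (vectored interrupt dispatch) */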
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id in a0 is within range */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE (mxstatus[21]) for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
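/* non-leaf PTE: V (bit 0) set, PPN = (table_pa >> 12) shifted to bit 10;
   R/W/X left clear so the entry points at the next-level table */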
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
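/* satp.PPN = physical page number of the root table; MMU_MODE_39 selects Sv39 (3-level, 39-bit VA) */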
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
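/* return trampoline: mepc is pointed just past the mret below (the 14-byte
   offset is the combined size of these four instructions, assuming the addi
   assembles to its compressed form), so mret lands in the following code
   with the privilege dropped to S-mode */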
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
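/* pmpaddr registers hold address bits [XLEN+1:2], hence the >> 2 */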
__set_PMPADDR0(addr);
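/* 0x8f = L | A=TOR | X | W | R: one locked top-of-range region granting
   full access to 0x00000000 ~ 0x8FFFFFFF (standard pmpcfg encoding) */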
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER (permission register) to allow S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the psr and vbr.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* delegate ecall-from-S-mode exceptions (medeleg bit 9) to S-mode */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* delegate S-mode software/timer/external interrupts (SSIP/STIP/SEIP, 0x222) */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
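/* regs[] layout (filled by the trap entry code): regs[0] = x1,
   regs[1] = original sp saved via mscratch, regs[2..30] = x3..x31,
   regs[31] = mepc, regs[32] = mstatus */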
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable the global interrupt enable to avoid taking interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE (mxstatus[21]) for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER (permission register) to allow S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the psr and vbr.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* delegate ecall-from-S-mode exceptions (medeleg bit 9) to S-mode */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* delegate S-mode software/timer/external interrupts (SSIP/STIP/SEIP, 0x222) */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
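Since every def_irq_handler alias is a weak symbol bound to Default_Handler, an application overrides an interrupt simply by defining a strong function of the same name; a minimal C sketch (the handler body is hypothetical):
void CORET_IRQHandler(void)
{
    /* replaces the weak alias above: acknowledge the core timer
       interrupt and run the application's tick work here */
}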

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
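/* mtvec bit0 = 1 selects vectored mode: exceptions enter at __Vectors, interrupt N at __Vectors + 4 * N */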
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id in a0 is within the configured range */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif
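Secondary harts spin in secondary_cpu_entry until the boot flag holds the magic value 0xa55a; a minimal sketch of the release step the primary hart would perform once the kernel is ready (the function name is hypothetical, and a dcache clean may be required if the flag is polled with caches disabled):
extern volatile unsigned long secondary_boot_flag;
void release_secondary_harts(void)
{
    /* spinning harts observe the magic value and fall through
       to secondary_cpu_c_start */
    secondary_boot_flag = 0xa55a;
}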

View File

@ -1,326 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
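/* non-leaf PTE: the next-level table's PPN (PA >> 12) starts at bit 10, only the V flag (0x1) is set, R/W/X stay clear */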
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
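/* satp takes the root table's physical page number; MMU_MODE_39 below selects Sv39, i.e. three levels of 512-entry tables */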
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
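/* pmpaddr holds the address >> 2; 0x8f = L | A=TOR | X | W | R, so locked entry 0 grants R/W/X to every address below 0x90000000 */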
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 to enable all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
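/* writing each id to the claim/complete register finishes any interrupt left claimed before this reset */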
/* set PLIC_PER to permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core control and status registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
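trap_c spins after dumping the registers, but it first invokes trap_c_callback when one is installed; a minimal sketch of hooking it (both function names are hypothetical):
extern void (*trap_c_callback)(void);
static void dump_extra_state(void)
{
    /* log board or peripheral state before the final spin */
}
void board_trap_setup(void)
{
    trap_c_callback = dump_extra_state;
}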

View File

@ -1,842 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id in a0 is within the configured range */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,316 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
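/* pmpaddr holds the address >> 2; 0x8f = L | A=TOR | X | W | R, so locked entry 0 grants R/W/X to every address below 0x90000000 */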
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 to enable all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER to permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core control and status registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,527 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
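/* the pre-trap sp was stashed in mscratch on entry; store it in the x2 slot of the frame */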
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* MIE is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,316 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
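/* non-leaf PTE: valid bit (0x1) plus the next-level table's PPN (pa >> 12) placed at bit 10 */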
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
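/* sets mepc to the address of the auipc plus 14 bytes, i.e. the first instruction after the mret below
   (auipc 4B + addi 2B with RVC + csrw 4B + mret 4B), so the mret resumes execution right here in S-mode */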
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
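    /* 0x8f = L | A=TOR | X | W | R: lock PMP entry 0 as a top-of-range region [0, 0x90000000) with full access; pmpaddr registers hold the address >> 2 */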
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set the hart priority threshold to 0, enabling all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
 * Initialize the core control registers, caches, PMP, and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
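/* regs[] mirrors the frame built by trap: x1..x31 in regs[0..30] (regs[1] holds the pre-trap sp), then mepc and mstatus */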
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,527 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,13 @@
from building import *
import os
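# Register this chip's startup sources (boot code, system init, trap handling, vectors) as the 'sys' group in RT-Thread's SCons build.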
cwd = GetCurrentDir()
CPPPATH = [cwd]
src = ['startup.S']
src += ['system.c']
src += ['trap_c.c']
src += ['vectors.S']
group = DefineGroup('sys', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -23,7 +23,9 @@
#endif
.globl Reset_Handler
.global __rt_rvstack
.equ Mcoret_Handler, SW_handler
.equ Mirq_Handler, SW_handler
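/* alias the CSI M-mode core-timer and external-interrupt vectors to RT-Thread's common SW_handler trap entry */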
.section .vectors
.align 6
.globl __Vectors
@ -132,7 +134,7 @@ Reset_Handler:
#endif
#endif
la a0, pre_main
la a0, rtthread_startup
jalr a0
.size Reset_Handler, . - Reset_Handler
@ -169,6 +171,7 @@ hart_out_of_bounds_loop:
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
__rt_rvstack:
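/* RT-Thread's RISC-V libcpu references __rt_rvstack as the top of the interrupt stack */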
#ifdef CONFIG_KERNEL_NONE
.align 4

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* MIE is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,316 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set the hart priority threshold to 0, enabling all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
 * Initialize the core control registers, caches, PMP, and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,521 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* MIE is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check that the hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,324 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
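The page-table writes above follow the standard RISC-V PTE encoding: the physical page number sits in the bits from PTE[10] upward and bit 0 is the valid flag. A reading aid in C (sketch only):

/* Sketch: a non-leaf entry is V plus the PPN of the next-level table;
 * leaf entries additionally carry the R/W/X/A/D/G bits assembled by
 * LOWER_ATTRS() above. */
static inline unsigned long pte_next_level(unsigned long table_pa)
{
    return 0x1UL | ((table_pa >> 12) << 10);   /* V | PPN */
}
/* e.g. page_table_l2[0] == pte_next_level((unsigned long)page_table_l1) */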
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
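Decoding the two magic numbers in pmp_init() (a reading aid, not new configuration): pmpaddr registers hold the address shifted right by 2, and 0x8f selects a locked top-of-range entry, so entry 0 grants RWX over [0x0, 0x90000000):

/* Sketch: the bits behind __set_PMPxCFG(0, 0x8f) above. */
#define PMP_R    (1u << 0)
#define PMP_W    (1u << 1)
#define PMP_X    (1u << 2)
#define PMP_TOR  (1u << 3)   /* A field (bits 4:3) = 01, top-of-range */
#define PMP_L    (1u << 7)   /* locked; the entry also binds M-mode */
/* 0x8f == (PMP_L | PMP_TOR | PMP_X | PMP_W | PMP_R) */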
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
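interrupt_init() leaves the PLIC with every source at priority 31, both thresholds at 0, and all stale pending bits drained through the claim registers. The matching runtime flow is the usual claim/complete loop; a sketch of its assumed shape (the real do_irq lives elsewhere in the port):

/* Sketch: read the claim register for the highest-priority pending
 * source, service it, then write the ID back to complete it. */
static void plic_dispatch_sketch(void)
{
    unsigned int id;

    while ((id = PLIC->PLIC_H0_MCLAIM) != 0) {
        /* dispatch to the handler attached via csi_irq_attach() */
        PLIC->PLIC_H0_MCLAIM = id;   /* complete */
    }
}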
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core status and vector base registers.
* @param None
* @return None
*/
void SystemInit(void)
{
#if CONFIG_CPU_XUANTIE_C910V3_CP || CONFIG_CPU_XUANTIE_C920V3_CP
/* disable theadisaee & enable MM */
unsigned long status = __get_MXSTATUS();
status &= ~(1 << 22);
status |= (1 << 24 | 1 << 15);
__set_MXSTATUS(status);
#else
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#endif
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
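trap_c_callback gives board code a hook that runs after the register dump and before the final spin. A usage sketch (my_dump and board_trap_hook_init are hypothetical names):

extern void (*trap_c_callback)(void);

/* Hypothetical board hook: dump extra state or flush logs before
 * trap_c() parks the CPU in its while (1). */
static void my_dump(void)
{
    /* board-specific diagnostics */
}

void board_trap_hook_init(void)
{
    trap_c_callback = my_dump;
}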
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,521 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
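The (76+76)-byte frame built by these handlers is easier to follow with the offsets written out; the doubled terms are 32-bit slot offsets scaled up for 64-bit registers. A C view of the layout (a sketch for reading the offsets, not a structure the code uses):

/* Sketch: the 152-byte caller-saved frame, offset = slot * 8. */
struct irq_frame_sketch {
    unsigned long ra;                              /* (0 + 0)            */
    unsigned long t0, t1, t2;                      /* (4+4) .. (12+12)   */
    unsigned long a0, a1, a2, a3, a4, a5, a6, a7;  /* (16+16) .. (44+44) */
    unsigned long t3, t4, t5, t6;                  /* (48+48) .. (60+60) */
    unsigned long pad;                             /* (64+64), unused    */
    unsigned long epc;                             /* (68+68): sepc/mepc */
    unsigned long status;                          /* (72+72): sstatus/mstatus */
};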
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,13 @@
from building import *
import os
cwd = GetCurrentDir()
CPPPATH = [cwd]
src = ['startup.S']
src += ['system.c']
src += ['trap_c.c']
src += ['vectors.S']
group = DefineGroup('sys', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -23,7 +23,9 @@
#endif
.globl Reset_Handler
.global __rt_rvstack
.equ Mcoret_Handler, SW_handler
.equ Mirq_Handler, SW_handler
.section .vectors
.align 6
.globl __Vectors
@ -132,7 +134,7 @@ Reset_Handler:
#endif
#endif
la a0, pre_main
la a0, rtthread_startup
jalr a0
.size Reset_Handler, . - Reset_Handler
@ -169,6 +171,7 @@ hart_out_of_bounds_loop:
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
__rt_rvstack:
#ifdef CONFIG_KERNEL_NONE
.align 4

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
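The OR with 0x1 above selects vectored interrupt mode: mtvec keeps its MODE field in bits [1:0], which is also why __Vectors must be strongly aligned (.align 6 above). A one-line sketch of the encoding:

/* Sketch: MODE = 1 (vectored), so interrupt cause i enters at
 * __Vectors + 4 * i; exceptions still enter at the base. */
static inline unsigned long mtvec_vectored(unsigned long base)
{
    return (base & ~0x3UL) | 0x1UL;
}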
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds; increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,316 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core status and vector base registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -1,521 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds; increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -1,324 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling all interrupts */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core status and vector base registers.
* @param None
* @return None
*/
void SystemInit(void)
{
#if CONFIG_CPU_XUANTIE_C910V3_CP || CONFIG_CPU_XUANTIE_C920V3_CP
/* disable theadisaee & enable MM */
unsigned long status = __get_MXSTATUS();
status &= ~(1 << 22);
status |= (1 << 24 | 1 << 15);
__set_MXSTATUS(status);
#else
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#endif
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,521 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
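/* S-mode tick (CORET) interrupt entry: park sp in sscratch, switch to the IRQ stack, save caller-saved registers plus sepc/sstatus, then call CORET_IRQHandler. */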
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
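/* M-mode tick (CORET) interrupt entry: each hart gets its own IRQ stack slice, sp = g_base_irqstack + (hartid + 1) * CONFIG_ARCH_INTERRUPTSTACK. */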
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
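/* L1-cache ECC error interrupt entry: same per-hart stack switch and context save as Mcoret_Handler, dispatching to ECC_L1_IRQHandler instead. */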
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
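/* S-mode external interrupt entry: save the caller-saved context on the IRQ stack and route the interrupt through the common do_irq() dispatcher. */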
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
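/* M-mode external interrupt entry: optionally spills s0 so perf can walk frame pointers, then switches to the per-hart IRQ stack and calls do_irq(). */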
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
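/* Synchronous exception entry: dump all GPRs plus mepc/mstatus/mcause/mtval into a frame on the trap stack and pass its address to exceptionHandler(). */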
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that simply dead-loops; it can be
* overridden by a real handler elsewhere */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@@ -1,134 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
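/* CLIC mode: mtvec low bits = 0b11 select CLIC with Default_Handler as the common entry; mtvt points at the vector table. */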
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 3
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 3
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,100 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Conflicting configuration: a kernel is selected together with CONFIG_KERNEL_NONE (baremetal)!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* derive CLICCFG.nlbits from the CLICINTCTLBITS field of CLICINFO */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
csi_vic_set_prio(i, 1);
}
#ifndef CONFIG_KERNEL_NONE
/* tspend uses the lowest priority */
csi_vic_set_prio(Machine_Software_IRQn, 0);
/* tspend is positive-edge triggered (and vectored) */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
#endif
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
* @brief initialize the system
* Initialize the core CSRs, caches and the interrupt controller.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22);
__set_MXSTATUS(status);
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = __get_MCAUSE();
printk("CPU Exception(mcause): NO.0x%lx\n", vec);
for (i = 0; i < 15; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[15]);
printk("mstatus: %p\n", (void *)regs[16]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,383 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mret return state: MPP = M-mode, MPIE = 1, so interrupts re-enable on return */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 2
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
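/* Non-nested CLIC IRQ entry: run on the dedicated IRQ stack, call do_irq(), then clear the pending bit in the CLIC (base 0xE0801000) before mret. */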
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
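/* Nested CLIC IRQ entry: irq_nested_level tracks the depth (capped at IRQ_NESTED_MAX); only the outermost interrupt switches stacks, and each level stashes its mcause in irq_nested_mcause. */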
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; something has probably gone wrong */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
csrc mstatus, 8
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved on entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
lw t0, 44(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
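/* Synchronous exception entry (RV32 frame): save x1 and x3-x15 plus mepc/mstatus/mcause/mtval on the trap stack and pass the frame to exceptionHandler(). */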
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -76
sw x1, (0 )(sp)
sw x3, (8 )(sp)
sw x4, (12)(sp)
sw x5, (16)(sp)
sw x6, (20)(sp)
sw x7, (24)(sp)
sw x8, (28)(sp)
sw x9, (32)(sp)
sw x10,(36)(sp)
sw x11,(40)(sp)
sw x12,(44)(sp)
sw x13,(48)(sp)
sw x14,(52)(sp)
sw x15,(56)(sp)
csrr a0, mepc
sw a0, (60)(sp)
csrr a0, mstatus
sw a0, (64)(sp)
csrr a0, mcause
sw a0, (68)(sp)
csrr a0, mtval
sw a0, (72)(sp)
csrr a0, mscratch
sw a0, (4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
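/* Unhandled traps land here: exception code 24 is the NMI and is routed to handle_nmi_exception; anything else falls through to trap. */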
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so save and later restore it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, handle_nmi_exception
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that simply dead-loops; it can be
* overridden by a real handler elsewhere */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler

View File

@@ -1,134 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 3
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 3
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,100 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Conflicting configuration: a kernel is selected together with CONFIG_KERNEL_NONE (baremetal)!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* derive CLICCFG.nlbits from the CLICINTCTLBITS field of CLICINFO */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
csi_vic_set_prio(i, 1);
}
#ifndef CONFIG_KERNEL_NONE
/* tspend uses the lowest priority */
csi_vic_set_prio(Machine_Software_IRQn, 0);
/* tspend is positive-edge triggered (and vectored) */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
#endif
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
* @brief initialize the system
* Initialize the core CSRs, caches and the interrupt controller.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22);
__set_MXSTATUS(status);
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = __get_MCAUSE();
printk("CPU Exception(mcause): NO.0x%lx\n", vec);
for (i = 0; i < 15; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[15]);
printk("mstatus: %p\n", (void *)regs[16]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,383 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mret return state: MPP = M-mode, MPIE = 1, so interrupts re-enable on return */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 2
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; something has probably gone wrong */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
csrc mstatus, 8
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved on entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
lw t0, 44(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -76
sw x1, (0 )(sp)
sw x3, (8 )(sp)
sw x4, (12)(sp)
sw x5, (16)(sp)
sw x6, (20)(sp)
sw x7, (24)(sp)
sw x8, (28)(sp)
sw x9, (32)(sp)
sw x10,(36)(sp)
sw x11,(40)(sp)
sw x12,(44)(sp)
sw x13,(48)(sp)
sw x14,(52)(sp)
sw x15,(56)(sp)
csrr a0, mepc
sw a0, (60)(sp)
csrr a0, mstatus
sw a0, (64)(sp)
csrr a0, mcause
sw a0, (68)(sp)
csrr a0, mtval
sw a0, (72)(sp)
csrr a0, mscratch
sw a0, (4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so save and later restore it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, handle_nmi_exception
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that simply dead-loops; it can be
* overridden by a real handler elsewhere */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler

View File

@@ -1,134 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 3
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 3
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,100 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Conflicting configuration: a kernel is selected together with CONFIG_KERNEL_NONE (baremetal)!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* derive CLICCFG.nlbits from the CLICINTCTLBITS field of CLICINFO */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
csi_vic_set_prio(i, 1);
}
#ifndef CONFIG_KERNEL_NONE
/* tspend uses the lowest priority */
csi_vic_set_prio(Machine_Software_IRQn, 0);
/* tspend is positive-edge triggered (and vectored) */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
#endif
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
* @brief initialize the system
* Initialize the core CSRs, caches and the interrupt controller.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22);
__set_MXSTATUS(status);
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = __get_MCAUSE();
printk("CPU Exception(mcause): NO.0x%lx\n", vec);
for (i = 0; i < 15; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[15]);
printk("mstatus: %p\n", (void *)regs[16]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,383 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mret return state: MPP = M-mode, MPIE = 1, so interrupts re-enable on return */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 2
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; something has probably gone wrong */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, do_irq
jalr t0
csrc mstatus, 8
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved on entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
lw t0, 44(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -76
sw x1, (0 )(sp)
sw x3, (8 )(sp)
sw x4, (12)(sp)
sw x5, (16)(sp)
sw x6, (20)(sp)
sw x7, (24)(sp)
sw x8, (28)(sp)
sw x9, (32)(sp)
sw x10,(36)(sp)
sw x11,(40)(sp)
sw x12,(44)(sp)
sw x13,(48)(sp)
sw x14,(52)(sp)
sw x15,(56)(sp)
csrr a0, mepc
sw a0, (60)(sp)
csrr a0, mstatus
sw a0, (64)(sp)
csrr a0, mcause
sw a0, (68)(sp)
csrr a0, mtval
sw a0, (72)(sp)
csrr a0, mscratch
sw a0, (4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so save and later restore it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -48
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 40(sp)
sw t0, 44(sp)
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
la t0, handle_nmi_exception
jalr t0
lw a1, 40(sp)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
lw t0, 44(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
addi sp, sp, 48
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that simply dead-loops; it can be
* overridden by a real handler elsewhere */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler

View File

@@ -1,168 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.size __Vectors, . - __Vectors
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@@ -1,113 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Conflicting configuration: a kernel is selected together with CONFIG_KERNEL_NONE (baremetal)!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_dcache_enable();
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* derive CLICCFG.nlbits from the CLICINTCTLBITS field of CLICINFO */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
}
/* tspend is positive-edge triggered (and vectored) */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
* @brief initialize the system
* Initialize the core CSRs, caches and the interrupt controller.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
/* enable mexstatus SPUSHEN and disable SPSWAPEN */
#if CONFIG_CPU_XUANTIE_E906 || CONFIG_CPU_XUANTIE_E906F || CONFIG_CPU_XUANTIE_E906FD || CONFIG_CPU_XUANTIE_E906P || CONFIG_CPU_XUANTIE_E906FP || CONFIG_CPU_XUANTIE_E906FDP \
|| CONFIG_CPU_XUANTIE_E907 || CONFIG_CPU_XUANTIE_E907F || CONFIG_CPU_XUANTIE_E907FD || CONFIG_CPU_XUANTIE_E907P || CONFIG_CPU_XUANTIE_E907FP || CONFIG_CPU_XUANTIE_E907FDP
status = __get_MEXSTATUS();
status |= (0x1 << 16);
status &= ~(0x2 << 16);
__set_MEXSTATUS(status);
#endif
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = __get_MCAUSE();
printk("CPU Exception(mcause): NO.0x%lx\n", vec);
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@@ -1,591 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mret return state: MPP = M-mode, MPIE = 1, so interrupts re-enable on return */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
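/* Nested entry: bump irq_nested_level, stash this mcause in
 * irq_nested_mcause[level - 1], and switch to the dedicated IRQ
 * stack only at the outermost level (level == 1). */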
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; an error may have occurred */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
csrc mstatus, 8
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved at entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -140
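/* Trap frame layout (35 words, indexed by trap_c as uintptr_t regs[]):
 * [0] = x1(ra), [1] = original sp (via mscratch), [2..30] = x3..x31,
 * [31] = mepc, [32] = mstatus, [33] = mcause, [34] = mtval */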
sw x1, ( 0 )(sp)
sw x3, ( 8 )(sp)
sw x4, ( 12)(sp)
sw x5, ( 16)(sp)
sw x6, ( 20)(sp)
sw x7, ( 24)(sp)
sw x8, ( 28)(sp)
sw x9, ( 32)(sp)
sw x10,( 36)(sp)
sw x11,( 40)(sp)
sw x12,( 44)(sp)
sw x13,( 48)(sp)
sw x14,( 52)(sp)
sw x15,( 56)(sp)
sw x16,( 60)(sp)
sw x17,( 64)(sp)
sw x18,( 68)(sp)
sw x19,( 72)(sp)
sw x20,( 76)(sp)
sw x21,( 80)(sp)
sw x22,( 84)(sp)
sw x23,( 88)(sp)
sw x24,( 92)(sp)
sw x25,( 96)(sp)
sw x26,(100)(sp)
sw x27,(104)(sp)
sw x28,(108)(sp)
sw x29,(112)(sp)
sw x30,(116)(sp)
sw x31,(120)(sp)
csrr a0, mepc
sw a0, (124)(sp)
csrr a0, mstatus
sw a0, (128)(sp)
csrr a0, mcause
sw a0, (132)(sp)
csrr a0, mtval
sw a0, (136)(sp)
csrr a0, mscratch
sw a0, ( 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
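/* exception code 24 is the NMI vector on these XuanTie cores */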
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so preserve it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, handle_nmi_exception
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
#if CONFIG_CHECK_FPU_DIRTY
lw t0, 72(sp)
csrw mstatus, t0
#endif
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler
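
For reference, the hand-written "clear pending" sequence repeated in the handlers above (li a2, 0xE0801000; ...) is the byte-wise equivalent of the following C sketch, assuming each CLICINT entry is 4 bytes with the pending (IP) bit in bit 0 of its first byte:

#include <stdint.h>

static inline void clic_clear_pending(uint32_t mcause_val)
{
    uint32_t irq = mcause_val & 0x3FFu;  /* low 10 bits: exception code */
    volatile uint8_t *ip = (volatile uint8_t *)(0xE0801000UL + (irq << 2));

    *ip &= (uint8_t)~1u;                 /* clear the pending bit */
}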


@ -1,169 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.global __rt_rvstack
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.size __Vectors, . - __Vectors
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
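/* CLIC vectored mode: or-ing 3 into mtvec selects the CLIC mode in its
 * low two bits, while mtvt below holds the base of __Vectors for
 * vectored dispatch. */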
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal rtthread_startup
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
__rt_rvstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif


@ -1,113 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check whether the current system is bare-metal or not!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_dcache_enable();
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* get interrupt level from info */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
}
/* tspend uses a positive-edge-triggered interrupt */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
}
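/* For readability, the ATTR values written above decompose as below.
 * This is a sketch: the field layout (SHV in bit 0, trigger mode in
 * the next bits) follows the T-Head CLIC convention and is assumed here.
 *
 *   #define CLICINT_ATTR_SHV       (1u << 0)   vectored dispatch
 *   #define CLICINT_ATTR_TRIG_POS  (1u << 1)   positive-edge trigger
 *
 *   ATTR = 0x1 -> vectored, level-triggered   (the loop above)
 *   ATTR = 0x3 -> vectored, positive-edge     (the tspend interrupt)
 */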
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
 * @brief initialize the system
 *        Set up the status CSRs (mxstatus/mstatus) and the interrupt controller.
 * @param None
 * @return None
 */
void SystemInit(void)
{
/* enable theadisaee & MM */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
/* enable mexstatus SPUSHEN and disable SPSWAPEN */
#if CONFIG_CPU_XUANTIE_E906 || CONFIG_CPU_XUANTIE_E906F || CONFIG_CPU_XUANTIE_E906FD || CONFIG_CPU_XUANTIE_E906P || CONFIG_CPU_XUANTIE_E906FP || CONFIG_CPU_XUANTIE_E906FDP \
|| CONFIG_CPU_XUANTIE_E907 || CONFIG_CPU_XUANTIE_E907F || CONFIG_CPU_XUANTIE_E907FD || CONFIG_CPU_XUANTIE_E907P || CONFIG_CPU_XUANTIE_E907FP || CONFIG_CPU_XUANTIE_E907FDP
status = __get_MEXSTATUS();
status |= (0x1 << 16);
status &= ~(0x2 << 16);
__set_MEXSTATUS(status);
#endif
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}


@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -1,591 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mstatus for mret: MPP = machine mode, MPIE = 1, so interrupts are re-enabled when returning from the handler */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; an error may have occurred */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
csrc mstatus, 8
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved at entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -140
sw x1, ( 0 )(sp)
sw x3, ( 8 )(sp)
sw x4, ( 12)(sp)
sw x5, ( 16)(sp)
sw x6, ( 20)(sp)
sw x7, ( 24)(sp)
sw x8, ( 28)(sp)
sw x9, ( 32)(sp)
sw x10,( 36)(sp)
sw x11,( 40)(sp)
sw x12,( 44)(sp)
sw x13,( 48)(sp)
sw x14,( 52)(sp)
sw x15,( 56)(sp)
sw x16,( 60)(sp)
sw x17,( 64)(sp)
sw x18,( 68)(sp)
sw x19,( 72)(sp)
sw x20,( 76)(sp)
sw x21,( 80)(sp)
sw x22,( 84)(sp)
sw x23,( 88)(sp)
sw x24,( 92)(sp)
sw x25,( 96)(sp)
sw x26,(100)(sp)
sw x27,(104)(sp)
sw x28,(108)(sp)
sw x29,(112)(sp)
sw x30,(116)(sp)
sw x31,(120)(sp)
csrr a0, mepc
sw a0, (124)(sp)
csrr a0, mstatus
sw a0, (128)(sp)
csrr a0, mcause
sw a0, (132)(sp)
csrr a0, mtval
sw a0, (136)(sp)
csrr a0, mscratch
sw a0, ( 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so preserve it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, handle_nmi_exception
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
#if CONFIG_CHECK_FPU_DIRTY
lw t0, 72(sp)
csrw mstatus, t0
#endif
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler
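
Since every vector slot resolves through a weak alias of Default_Handler (or the weak Default_IRQHandler), an application overrides one simply by defining a strong symbol with the same name. A minimal sketch; the GCC interrupt attribute is an assumption here, since the stock handlers save context themselves:

void __attribute__((interrupt)) tspend_handler(void)
{
    /* handle the machine software (tspend) interrupt */
}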


@ -1,168 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.size __Vectors, . - __Vectors
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif


@ -1,113 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check whether the current system is bare-metal or not!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_dcache_enable();
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* get interrupt level from info */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
}
/* tspend uses a positive-edge-triggered interrupt */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
 * @brief initialize the system
 *        Set up the status CSRs (mxstatus/mstatus) and the interrupt controller.
 * @param None
 * @return None
 */
void SystemInit(void)
{
/* enable theadisaee & MM */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
/* enable mexstatus SPUSHEN and disable SPSWAPEN */
#if CONFIG_CPU_XUANTIE_E906 || CONFIG_CPU_XUANTIE_E906F || CONFIG_CPU_XUANTIE_E906FD || CONFIG_CPU_XUANTIE_E906P || CONFIG_CPU_XUANTIE_E906FP || CONFIG_CPU_XUANTIE_E906FDP \
|| CONFIG_CPU_XUANTIE_E907 || CONFIG_CPU_XUANTIE_E907F || CONFIG_CPU_XUANTIE_E907FD || CONFIG_CPU_XUANTIE_E907P || CONFIG_CPU_XUANTIE_E907FP || CONFIG_CPU_XUANTIE_E907FDP
status = __get_MEXSTATUS();
status |= (0x1 << 16);
status &= ~(0x2 << 16);
__set_MEXSTATUS(status);
#endif
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}
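
The two mxstatus bits set at the top of SystemInit have documented names; a sketch with the masks spelled out (the macro names are illustrative, not from this source):

#define MXSTATUS_THEADISAEE  (1UL << 22)  /* enable the T-Head ISA extensions */
#define MXSTATUS_MM          (1UL << 15)  /* permit misaligned memory access */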


@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -1,591 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mstatus for mret: MPP = machine mode, MPIE = 1, so interrupts are re-enabled when returning from the handler */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; an error may have occurred */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
csrc mstatus, 8
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved at entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -140
sw x1, ( 0 )(sp)
sw x3, ( 8 )(sp)
sw x4, ( 12)(sp)
sw x5, ( 16)(sp)
sw x6, ( 20)(sp)
sw x7, ( 24)(sp)
sw x8, ( 28)(sp)
sw x9, ( 32)(sp)
sw x10,( 36)(sp)
sw x11,( 40)(sp)
sw x12,( 44)(sp)
sw x13,( 48)(sp)
sw x14,( 52)(sp)
sw x15,( 56)(sp)
sw x16,( 60)(sp)
sw x17,( 64)(sp)
sw x18,( 68)(sp)
sw x19,( 72)(sp)
sw x20,( 76)(sp)
sw x21,( 80)(sp)
sw x22,( 84)(sp)
sw x23,( 88)(sp)
sw x24,( 92)(sp)
sw x25,( 96)(sp)
sw x26,(100)(sp)
sw x27,(104)(sp)
sw x28,(108)(sp)
sw x29,(112)(sp)
sw x30,(116)(sp)
sw x31,(120)(sp)
csrr a0, mepc
sw a0, (124)(sp)
csrr a0, mstatus
sw a0, (128)(sp)
csrr a0, mcause
sw a0, (132)(sp)
csrr a0, mtval
sw a0, (136)(sp)
csrr a0, mscratch
sw a0, ( 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for nmi */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already be in use, so preserve it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, handle_nmi_exception
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
#if CONFIG_CHECK_FPU_DIRTY
lw t0, 72(sp)
csrw mstatus, t0
#endif
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler
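
The magic offsets in these handlers all derive from one frame layout; a sketch of the arithmetic (sizes in bytes, matching the #if blocks above, with the FP save-area sizes presumed from SAVE_FLOAT_REGISTERS):

/*
 * base frame:  72  (ra, t0-t6, a0-a7, mcause at 64, mepc at 68)
 *              +4  when CONFIG_CHECK_FPU_DIRTY also saves mstatus at 72
 * vxsat:       +4  pushed below the frame when __riscv_dsp
 * FP area:    +84  for __riscv_flen == 32, +164 for __riscv_flen == 64
 *
 * Hence "addi t0, sp, 64" plus the conditional adjustments locates the
 * saved mcause after SAVE_FLOAT_REGISTERS has pushed its extra words.
 */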


@ -1,168 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.long Default_IRQHandler
.size __Vectors, . - __Vectors
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif


@ -1,113 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check whether the current system is bare-metal or not!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
csi_dcache_enable();
csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void clic_init(void)
{
int i;
/* get interrupt level from info */
CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);
for (i = 0; i < 64; i++) {
CLIC->CLICINT[i].IP = 0;
CLIC->CLICINT[i].ATTR = 1; /* use vector interrupt */
}
/* tspend uses a positive-edge-triggered interrupt */
CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
csi_irq_enable(Machine_Software_IRQn);
}
static void interrupt_init(void)
{
clic_init();
#ifdef CONFIG_KERNEL_NONE
__enable_excp_irq();
#endif
}
/**
 * @brief initialize the system
 *        Set up the status CSRs (mxstatus/mstatus) and the interrupt controller.
 * @param None
 * @return None
 */
void SystemInit(void)
{
/* enable theadisaee & MM */
uint32_t status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
/* enable mexstatus SPUSHEN and disable SPSWAPEN */
#if CONFIG_CPU_XUANTIE_E906 || CONFIG_CPU_XUANTIE_E906F || CONFIG_CPU_XUANTIE_E906FD || CONFIG_CPU_XUANTIE_E906P || CONFIG_CPU_XUANTIE_E906FP || CONFIG_CPU_XUANTIE_E906FDP \
|| CONFIG_CPU_XUANTIE_E907 || CONFIG_CPU_XUANTIE_E907F || CONFIG_CPU_XUANTIE_E907FD || CONFIG_CPU_XUANTIE_E907P || CONFIG_CPU_XUANTIE_E907FP || CONFIG_CPU_XUANTIE_E907FDP
status = __get_MEXSTATUS();
status |= (0x1 << 16);
status &= ~(0x2 << 16);
__set_MEXSTATUS(status);
#endif
cache_init();
section_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}


@ -1,64 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -1,591 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
/* mstatus for mret: MPP = machine mode, MPIE = 1, so interrupts are re-enabled when returning from the handler */
#define MSTATUS_PRV1 0x1880
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
#if CONFIG_SUPPORT_IRQ_NESTED
#define IRQ_NESTED_MAX (6)
.section .bss
irq_nested_level:
.long 0
irq_nested_mcause:
.long 0, 0, 0, 0, 0, 0
#endif
.text
#if !CONFIG_SUPPORT_IRQ_NESTED
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
csrw mscratch, sp
la sp, g_top_irqstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
#else
.align 2
.weak Default_IRQHandler
.type Default_IRQHandler, %function
Default_IRQHandler:
addi sp, sp, -8
sw t0, 0(sp)
sw t1, 4(sp)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, 1
sw t1, (t0)
li t0, IRQ_NESTED_MAX
/* nested too deeply; an error may have occurred */
bgt t1, t0, Default_Handler
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t0, t0, t1
csrr t1, mcause
sw t1, (t0)
la t0, irq_nested_level
lw t1, (t0)
li t0, 1
bgt t1, t0, .Lnested1
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
csrw mscratch, sp
la sp, g_top_irqstack
j .Lnested2
.Lnested1:
lw t0, 0(sp)
lw t1, 4(sp)
addi sp, sp, 8
.Lnested2:
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
csrs mstatus, 8
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, do_irq
jalr t0
csrc mstatus, 8
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear pending */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
la t0, irq_nested_level
lw t1, (t0)
addi t1, t1, -1
sw t1, (t0)
bgt t1, zero, .Lnested3
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
mret
.Lnested3:
/* keep MPIL from the current mcause and restore the exception code saved at entry */
addi t1, t1, -1
la t0, irq_nested_mcause
slli t1, t1, 2
add t1, t0, t1
lw t0, (t1)
andi t0, t0, 0x3FF
andi a0, a1, 0xFFFFFC00
or t0, a0, t0
csrw mcause, t0
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
li t0, MSTATUS_PRV1
csrs mstatus, t0
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
mret
#endif
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 2
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -140
sw x1, ( 0 )(sp)
sw x3, ( 8 )(sp)
sw x4, ( 12)(sp)
sw x5, ( 16)(sp)
sw x6, ( 20)(sp)
sw x7, ( 24)(sp)
sw x8, ( 28)(sp)
sw x9, ( 32)(sp)
sw x10,( 36)(sp)
sw x11,( 40)(sp)
sw x12,( 44)(sp)
sw x13,( 48)(sp)
sw x14,( 52)(sp)
sw x15,( 56)(sp)
sw x16,( 60)(sp)
sw x17,( 64)(sp)
sw x18,( 68)(sp)
sw x19,( 72)(sp)
sw x20,( 76)(sp)
sw x21,( 80)(sp)
sw x22,( 84)(sp)
sw x23,( 88)(sp)
sw x24,( 92)(sp)
sw x25,( 96)(sp)
sw x26,(100)(sp)
sw x27,(104)(sp)
sw x28,(108)(sp)
sw x29,(112)(sp)
sw x30,(116)(sp)
sw x31,(120)(sp)
csrr a0, mepc
sw a0, (124)(sp)
csrr a0, mstatus
sw a0, (128)(sp)
csrr a0, mcause
sw a0, (132)(sp)
csrr a0, mtval
sw a0, (136)(sp)
csrr a0, mscratch
sw a0, ( 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 6
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
/* Check for NMI (exception code 24) */
addi sp, sp, -8
sw t0, 0x0(sp)
sw t1, 0x4(sp)
csrr t0, mcause
andi t0, t0, 0x3FF
li t1, 24
beq t0, t1, .NMI_Handler
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
j trap
.NMI_Handler:
/* mscratch may already hold a preempted context's stack pointer,
   so save its old value before reusing it */
addi sp, sp, -4
csrr t0, mscratch
sw t0, 0x0(sp)
csrw mscratch, sp
la sp, g_top_trapstack
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, -76
#else
addi sp, sp, -72
#endif
sw t0, 4(sp)
sw t1, 8(sp)
csrr t0, mepc
csrr t1, mcause
sw t1, 64(sp)
sw t0, 68(sp)
#if CONFIG_CHECK_FPU_DIRTY
csrr t0, mstatus
sw t0, 72(sp)
#endif
sw ra, 0(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
#if __riscv_dsp
addi sp, sp, -4
csrr t0, vxsat
sw t0, 0(sp)
#endif /*__riscv_dsp */
#if CONFIG_CHECK_FPU_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
la t0, handle_nmi_exception
jalr t0
/* get mcause from sp */
addi t0, sp, 64
#if __riscv_dsp
addi t0, t0, 4
#endif /*__riscv_dsp */
#if __riscv_flen == 64
addi t0, t0, 164
#elif __riscv_flen == 32
addi t0, t0, 84
#endif
lw a1, (t0)
andi a0, a1, 0x3FF
slli a0, a0, 2
/* clear the CLIC pending bit for this interrupt (same sequence as in Default_IRQHandler) */
li a2, 0xE0801000
add a2, a2, a0
lb a3, 0(a2)
li a4, 1
not a4, a4
and a5, a4, a3
sb a5, 0(a2)
#if CONFIG_CHECK_FPU_DIRTY
RESTORE_MSTATUS
#endif
li t0, MSTATUS_PRV1
csrs mstatus, t0
csrw mcause, a1
RESTORE_FLOAT_REGISTERS
#if __riscv_dsp
lw t0, 0(sp)
csrw vxsat, t0
addi sp, sp, 4
#endif /*__riscv_dsp */
lw t0, 68(sp)
csrw mepc, t0
#if CONFIG_CHECK_FPU_DIRTY
lw t0, 72(sp)
csrw mstatus, t0
#endif
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
#if CONFIG_CHECK_FPU_DIRTY
addi sp, sp, 76
#else
addi sp, sp, 72
#endif
csrr sp, mscratch
/* restore mscratch */
lw t0, 0x0(sp)
csrw mscratch, t0
addi sp, sp, 4
lw t0, 0x0(sp)
lw t1, 0x4(sp)
addi sp, sp, 8
mret
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by defining
 * another handler with the same name. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler tspend_handler


@ -1,168 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
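/*
 * CLIC vectored-mode table: each entry is a handler address that the
 * hardware fetches through mtvt when the corresponding interrupt is
 * taken with hardware vectoring enabled (ATTR bit 0 set).
 */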
.section .vectors, "aw", @progbits
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long tspend_handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_IRQHandler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
.long Default_Handler
/* External interrupts */
.rept 64
.long Default_IRQHandler
.endr
.size __Vectors, . - __Vectors
.globl Reset_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
_start:
.text
.long Reset_Handler
.align 2
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
la gp, __global_pointer$
.option pop
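/* mtvec low bits = 3 select CLIC mode; synchronous exceptions vector
   to Default_Handler, interrupts use the mtvt table below */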
la a0, Default_Handler
ori a0, a0, 3
csrw mtvec, a0
la a0, __Vectors
csrw mtvt, a0
la sp, g_top_irqstack
csrw mscratch, sp
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#endif
#ifndef __NO_SYSTEM_INIT
jal SystemInit
#endif
jal pre_main
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif


@ -1,113 +0,0 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#include <drv/irq.h>
#include <drv/dma.h>
#include <drv/tick.h>
#include <drv/etb.h>
#include <drv/spiflash.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
static void cache_init(void)
{
    csi_dcache_enable();
    csi_icache_enable();
}
static void section_init(void)
{
#if CONFIG_XIP
    section_data_copy();
    section_ram_code_copy();
    csi_dcache_clean();
    csi_icache_invalid();
#endif
    section_bss_clear();
}
static void clic_init(void)
{
    int i;

    /* derive the number of interrupt-level bits from CLICINFO */
    CLIC->CLICCFG = (((CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos) << CLIC_CLICCFG_NLBIT_Pos);

    for (i = 0; i < 64; i++) {
        CLIC->CLICINT[i].IP = 0;
        CLIC->CLICINT[i].ATTR = 1; /* use hardware-vectored interrupts */
    }

    /* tspend (machine software interrupt) is positive-edge triggered and vectored */
    CLIC->CLICINT[Machine_Software_IRQn].ATTR = 0x3;
    csi_irq_enable(Machine_Software_IRQn);
}
static void interrupt_init(void)
{
    clic_init();

#ifdef CONFIG_KERNEL_NONE
    __enable_excp_irq();
#endif
}
/**
 * @brief  initialize the system
 *         Set up mxstatus/mstatus, caches, RAM sections and the CLIC.
 * @param  None
 * @return None
 */
void SystemInit(void)
{
    /* enable THEADISAEE (T-Head extended ISA) and MM (unaligned access) in mxstatus */
    uint32_t status = __get_MXSTATUS();
    status |= (1 << 22 | 1 << 15);
    __set_MXSTATUS(status);

#if __riscv_flen
    /* enable the float ISA (mstatus.FS) */
    status = __get_MSTATUS();
    status |= (1 << MSTATUS_FS_SHIFT);
    __set_MSTATUS(status);
#endif

    /* enable mexstatus SPUSHEN (hardware context push) and disable SPSWAPEN */
#if CONFIG_CPU_XUANTIE_E906 || CONFIG_CPU_XUANTIE_E906F || CONFIG_CPU_XUANTIE_E906FD || CONFIG_CPU_XUANTIE_E906P || CONFIG_CPU_XUANTIE_E906FP || CONFIG_CPU_XUANTIE_E906FDP \
    || CONFIG_CPU_XUANTIE_E907 || CONFIG_CPU_XUANTIE_E907F || CONFIG_CPU_XUANTIE_E907FD || CONFIG_CPU_XUANTIE_E907P || CONFIG_CPU_XUANTIE_E907FP || CONFIG_CPU_XUANTIE_E907FDP
    status = __get_MEXSTATUS();
    status |= (0x1 << 16);
    status &= ~(0x2 << 16);
    __set_MEXSTATUS(status);
#endif

    cache_init();
    section_init();
    interrupt_init();

    soc_set_sys_freq(20000000);
    csi_tick_init();
}

Some files were not shown because too many files have changed in this diff.