[bsp] add xuantie e906 series bsp (#10275)

* [add] add xuantie e906 series

* [ci] add xuantie e906 bsp ci

* remove colored print output from the script
Yaochenger 2025-05-14 23:48:39 +08:00 committed by GitHub
parent d67c02cd33
commit 3775ea0611
334 changed files with 97901 additions and 0 deletions

View File

@@ -455,6 +455,13 @@
"SUB_RTT_BSP": [
"qemu-vexpress-a9"
]
},
{
"RTT_BSP": "xuantie",
"RTT_TOOL_CHAIN": "sourcery-Xuantie-900-gcc-elf-newlib",
"SUB_RTT_BSP": [
"xuantie/smartl/e906"
]
}
]
}

View File

@@ -188,6 +188,14 @@ jobs:
/opt/riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14/bin/riscv64-unknown-elf-gcc --version
echo "RTT_EXEC_PATH=/opt/riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14/bin" >> $GITHUB_ENV
- name: Install Xuantie-900-gcc-elf-newlib Tools
if: ${{ matrix.legs.RTT_TOOL_CHAIN == 'sourcery-Xuantie-900-gcc-elf-newlib' && success() }}
run: |
wget -q https://occ-oss-prod.oss-cn-hangzhou.aliyuncs.com/resource//1744884010580/Xuantie-900-gcc-elf-newlib-x86_64-V3.0.2-20250410.tar.gz
sudo tar -zxvf Xuantie-900-gcc-elf-newlib-x86_64-V3.0.2-20250410.tar.gz -C /opt
/opt/Xuantie-900-gcc-elf-newlib-x86_64-V3.0.2/bin/riscv64-unknown-elf-gcc --version
echo "RTT_EXEC_PATH=/opt/Xuantie-900-gcc-elf-newlib-x86_64-V3.0.2/bin" >> $GITHUB_ENV
- name: Install k230 MUSL ToolChains
if: ${{ matrix.legs.RTT_TOOL_CHAIN == 'riscv64-unknown-linux-musl-' && matrix.legs.RTT_BSP == 'K230' && success() }}
run: |

View File

@@ -0,0 +1,15 @@
# for module compiling
import os
Import('RTT_ROOT')
from building import *
cwd = GetCurrentDir()
objs = []
dirs = os.listdir(cwd)
for d in dirs:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')

View File

@@ -0,0 +1,16 @@
from building import *
import os
cwd = GetCurrentDir()
CPPPATH = [cwd]
src = Split("""
""")
if GetDepend('SOC_XUANTIE'):
if GetDepend('BSP_USING_UART'):
src += ['drv_usart.c']
group = DefineGroup('Drivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,219 @@
/*
* Copyright (c) 2006-2025, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2025-04-27 WangShun the first version
*/
#include "board.h"
#include "rtthread.h"
#include <rtdevice.h>
#include <string.h>
#include <drv_usart.h>
#ifdef RT_USING_SERIAL
#define DRV_DEBUG
#define LOG_TAG "drv.uart"
#ifdef DRV_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* DRV_DEBUG */
#include <rtdbg.h>
#if !defined(BSP_USING_UART0) && !defined(BSP_USING_UART1) && !defined(BSP_USING_UART2) && !defined(BSP_USING_UART3) \
&& !defined(BSP_USING_UART4) && !defined(BSP_USING_UART5) && !defined(BSP_USING_UART6) && !defined(BSP_USING_UART7)
#error "Please define at least one BSP_USING_UARTx"
/* this driver can be disabled at menuconfig -> RT-Thread Components -> Device Drivers */
#endif
static void xuantie_uart0_rx_isr(csi_uart_t *uart_handler, csi_uart_event_t event, void *arg);
enum
{
#ifdef BSP_USING_UART0
UART0_INDEX,
#endif
};
struct xuantie_uart_config uart_config[] =
{
#ifdef BSP_USING_UART0
{
"uart0",
0,
xuantie_uart0_rx_isr
}
#endif
};
struct xuantie_uart uart_obj[sizeof(uart_config) / sizeof(uart_config[0])] = {0};
static rt_err_t xuantie_configure(struct rt_serial_device *serial, struct serial_configure *cfg)
{
struct xuantie_uart *uart = (struct xuantie_uart *)serial->parent.user_data;
csi_error_t ret;
ret = csi_uart_baud(&uart->uart, cfg->baud_rate);
if (ret != CSI_OK)
{
return -RT_ERROR;
}
csi_uart_data_bits_t data_bits;
csi_uart_parity_t parity;
csi_uart_stop_bits_t stop_bits;
switch (cfg->data_bits)
{
case DATA_BITS_5:
data_bits = UART_DATA_BITS_5;
break;
case DATA_BITS_6:
data_bits = UART_DATA_BITS_6;
break;
case DATA_BITS_7:
data_bits = UART_DATA_BITS_7;
break;
case DATA_BITS_8:
data_bits = UART_DATA_BITS_8;
break;
case DATA_BITS_9:
data_bits = UART_DATA_BITS_9;
break;
default:
data_bits = UART_DATA_BITS_8;
break;
}
switch (cfg->parity)
{
case PARITY_NONE:
parity = UART_PARITY_NONE;
break;
case PARITY_ODD:
parity = UART_PARITY_ODD;
break;
case PARITY_EVEN:
parity = UART_PARITY_EVEN;
break;
default:
parity = UART_PARITY_NONE;
break;
}
switch (cfg->stop_bits)
{
case STOP_BITS_1:
stop_bits = UART_STOP_BITS_1;
break;
case STOP_BITS_2:
stop_bits = UART_STOP_BITS_2;
break;
default:
stop_bits = UART_STOP_BITS_1;
break;
}
ret = csi_uart_format(&uart->uart, data_bits, parity, stop_bits);
if (ret != CSI_OK)
{
return -RT_ERROR;
}
return RT_EOK;
}
static rt_err_t xuantie_control(struct rt_serial_device *serial, int cmd, void *arg)
{
switch (cmd)
{
case RT_DEVICE_CTRL_CONFIG:
return xuantie_configure(serial, (struct serial_configure *)arg);
default:
return -RT_ERROR;
}
}
static int xuantie_putc(struct rt_serial_device *serial, char c)
{
struct xuantie_uart *uart = (struct xuantie_uart *)serial->parent.user_data;
int32_t ret;
ret = csi_uart_send(&uart->uart, &c, 1, 50);
if (ret == 1)
return RT_EOK;
return -RT_ERROR;
}
static int xuantie_getc(struct rt_serial_device *serial)
{
    rt_uint8_t ch;
    struct xuantie_uart *uart = (struct xuantie_uart *)serial->parent.user_data;
    dw_uart_regs_t *uart_base = (dw_uart_regs_t *)HANDLE_REG_BASE((&uart->uart));
    /* receive into a byte-sized buffer and report -1 when no data arrived,
       instead of reading one byte into an int, which only worked by accident
       on little-endian targets */
    int32_t recved = csi_uart_receive(&uart->uart, &ch, 1, 0x5);
    dw_uart_enable_recv_irq(uart_base);
    return (recved == 1) ? ch : -1;
}
static const struct rt_uart_ops xuantie_uart_ops =
{
xuantie_configure,
xuantie_control,
xuantie_putc,
xuantie_getc,
RT_NULL,
};
static void xuantie_uart0_rx_isr(csi_uart_t *uart_handler, csi_uart_event_t event, void *arg)
{
rt_interrupt_enter();
rt_hw_serial_isr(&uart_obj[UART0_INDEX].serial, RT_SERIAL_EVENT_RX_IND);
rt_interrupt_leave();
}
int rt_hw_usart_init(void)
{
rt_size_t obj_num = sizeof(uart_obj) / sizeof(struct xuantie_uart);
struct serial_configure config = RT_SERIAL_CONFIG_DEFAULT;
rt_err_t result = 0;
for (int i = 0; i < obj_num; i++)
{
        /* Bind the config entry first so it is valid in the error path below */
        uart_obj[i].config = &uart_config[i];
        /* Initialize YOC UART with the index from the config table */
        result = csi_uart_init(&uart_obj[i].uart, uart_obj[i].config->idx);
        if (result != CSI_OK)
        {
            LOG_E("Failed to initialize UART %d", uart_obj[i].config->idx);
            return -RT_ERROR;
        }
        /* Init UART object */
uart_obj[i].serial.ops = &xuantie_uart_ops;
uart_obj[i].serial.config = config;
csi_uart_attach_callback(&uart_obj[i].uart, uart_obj[i].config->xuantie_uart_rx_isr, NULL);
/* Register UART device */
result = rt_hw_serial_register(&uart_obj[i].serial, uart_obj[i].config->name,
RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX,
&uart_obj[i]);
if (result != RT_EOK)
{
LOG_E("Failed to register UART device %s", uart_obj[i].config->name);
csi_uart_uninit(&uart_obj[i].uart);
return -RT_ERROR;
}
}
return result;
}
#endif /* RT_USING_SERIAL */
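
A minimal usage sketch (not part of this commit) of how an application could drive the "uart0" device registered above, using the standard RT-Thread device API; the device name and the RT_DEVICE_FLAG_INT_RX open flag match the rt_hw_serial_register() call in this driver.

/* hedged sketch: open the UART registered by drv_usart.c and echo input */
#include <rtthread.h>
#include <rtdevice.h>

static rt_err_t demo_rx_ind(rt_device_t dev, rt_size_t size)
{
    char ch;
    /* the indication fires from xuantie_uart0_rx_isr() via rt_hw_serial_isr() */
    while (rt_device_read(dev, 0, &ch, 1) == 1)
        rt_device_write(dev, 0, &ch, 1);
    return RT_EOK;
}

static int uart_demo(void)
{
    rt_device_t dev = rt_device_find("uart0");
    if (dev == RT_NULL)
        return -RT_ERROR;
    rt_device_open(dev, RT_DEVICE_FLAG_INT_RX);
    rt_device_set_rx_indicate(dev, demo_rx_ind);
    rt_device_write(dev, 0, "hello\n", 6);
    return RT_EOK;
}
INIT_APP_EXPORT(uart_demo);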

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2006-2025, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-08 WangShun the first version
*/
#ifndef __DRV_USART_H__
#define __DRV_USART_H__
#include <rtthread.h>
#include "rtdevice.h"
#include <rthw.h>
#include <soc.h>
#include <dw_uart_ll.h>
#include <drv/uart.h>
/* xuantie config class */
struct xuantie_uart_config
{
const char *name;
uint32_t idx;
void (*xuantie_uart_rx_isr)(csi_uart_t *uart_handler, csi_uart_event_t event, void *arg);
};
/* xuantie uart driver class */
struct xuantie_uart
{
struct xuantie_uart_config *config;
struct rt_serial_device serial;
csi_uart_t uart;
};
extern void uart_rx_isr (void *id);
int rt_hw_usart_init(void);
#endif /* __DRV_USART_H__ */

View File

@@ -0,0 +1,15 @@
# for module compiling
import os
Import('RTT_ROOT')
from building import *
cwd = GetCurrentDir()
objs = []
dirs = os.listdir(cwd)
for d in dirs:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')

View File

@@ -0,0 +1,15 @@
# for module compiling
import os
Import('RTT_ROOT')
from building import *
cwd = GetCurrentDir()
objs = []
dirs = os.listdir(cwd)
for d in dirs:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')

View File

@@ -0,0 +1,259 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file gcc_csky.ld
* @brief csky linker file
* @version V1.0
* @date 02. June 2017
******************************************************************************/
MEMORY
{
ISRAM : ORIGIN = 0x00000000 , LENGTH = 0x20000 /* ISRAM 128KB*/
DSRAM : ORIGIN = 0x20000000 , LENGTH = 0x80000 /* DSRAM 512KB*/
SRAM : ORIGIN = 0x60000000 , LENGTH = 0x20000 /* SRAM 128KB, no cacheable*/
}
__min_heap_size = 0x200;
PROVIDE (__ram_end = 0x20080000);
PROVIDE (__heap_end = __ram_end);
REGION_ALIAS("REGION_TEXT", ISRAM);
REGION_ALIAS("REGION_RODATA", ISRAM);
REGION_ALIAS("REGION_DATA", DSRAM);
REGION_ALIAS("REGION_BSS", DSRAM);
ENTRY(Reset_Handler)
SECTIONS
{
.text : {
. = ALIGN(0x4) ;
__stext = . ;
KEEP(*startup.o(*.text))
KEEP(*startup.o(*.vectors))
KEEP(*vectors.o(*.text))
KEEP(*(.text.entry))
*(.text*)
*(.gnu.warning)
*(.stub)
*(.gnu.linkonce.t*)
*(.glue_7t)
*(.glue_7)
*(.jcr)
KEEP (*(.init))
KEEP (*(.fini))
. = ALIGN (0x4) ;
PROVIDE(__ctbp = .);
*(.call_table_data)
*(.call_table_text)
. = ALIGN(0x10) ;
__etext = . ;
} > REGION_TEXT
.rodata : {
. = ALIGN(0x4) ;
__srodata = .;
*(.rdata)
*(.rdata*)
*(.rdata1)
*(.rdata.*)
*(.rodata*)
*(.srodata*)
. = ALIGN(0x4) ;
__init_array_start = .;
__ctors_start__ = .;
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
__init_array_end = .;
__ctors_end__ = .;
__fini_array_start = .;
__dtors_start__ = .;
KEEP (*(SORT(.fini_array.*)))
KEEP (*(.fini_array))
__fini_array_end = .;
__dtors_end__ = .;
. = ALIGN(0x4) ;
__ctor_start__ = .;
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
__ctor_end__ = .;
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
__dtor_end__ = .;
. = ALIGN(0x4) ;
/*****************************************/
/* section information for finsh shell */
. = ALIGN(0x4);
__fsymtab_start = .;
KEEP(*(FSymTab))
__fsymtab_end = .;
. = ALIGN(0x4);
__vsymtab_start = .;
KEEP(*(VSymTab))
__vsymtab_end = .;
. = ALIGN(0x4);
/* section information for initial. */
__rt_init_start = .;
KEEP(*(SORT(.rti_fn*)))
__rt_init_end = .;
. = ALIGN(0x4) ;
/* section information for at utest */
__rt_utest_tc_tab_start = .;
KEEP(*(UtestTcTab))
__rt_utest_tc_tab_end = .;
. = ALIGN(0x4);
/* section information for at server */
. = ALIGN(0x4);
__rtatcmdtab_start = .;
KEEP(*(RtAtCmdTab))
__rtatcmdtab_end = .;
. = ALIGN(0x4);
/* section information for modules */
. = ALIGN(0x4);
__rtmsymtab_start = .;
KEEP(*(RTMSymTab))
__rtmsymtab_end = .;
/* section information for uPRC */
. = ALIGN(0x4);
__uRPCSvcTab_start = .;
KEEP(*(uRPCSvcTab))
__uRPCSvcTab_end = .;
/* section information for var export */
. = ALIGN(0x4);
__ve_table_start = .;
KEEP(*(SORT(*.VarExpTab.*)))
__ve_table_end = .;
/*****************************************/
/************** added drivers **************/
_cli_region_begin = .;
KEEP(*(CliRegion))
. = ALIGN(0x4);
_cli_region_end = .;
__core_driver_start__ = .;
KEEP(*(.core_driver_entry))
. = ALIGN(0x4);
__core_driver_end__ = .;
__bus_driver_start__ = .;
KEEP(*(*.bus_driver_entry))
__bus_driver_end__ = .;
__early_driver_start__ = .;
KEEP(*(*.early_driver_entry))
__early_driver_end__ = .;
__vfs_driver_start__ = .;
KEEP(*(*.vfs_driver_entry))
__vfs_driver_end__ = .;
__level0_driver_start__ = .;
KEEP(*(*.level0_driver_entry))
__level0_driver_end__ = .;
__level1_driver_start__ = .;
KEEP(*(*.level1_driver_entry))
__level1_driver_end__ = .;
__level2_driver_start__ = .;
KEEP(*(*.level2_driver_entry))
__level2_driver_end__ = .;
__level3_driver_start__ = .;
KEEP(*(*.level3_driver_entry))
__level3_driver_end__ = .;
__post_driver_start__ = .;
KEEP(*(*.post_driver_entry))
__post_driver_end__ = .;
/************** end of drivers *********/
. = ALIGN(0x4) ;
__erodata = .;
__rodata_end__ = .;
} > REGION_RODATA
.data : {
. = ALIGN(0x4) ;
__sdata = . ;
__data_start__ = . ;
data_start = . ;
*(.got.plt)
*(.got)
*(.gnu.linkonce.r*)
*(.data*)
*(.gnu.linkonce.d*)
*(.gcc_except_table*)
__start_init_call = .;
*(.initcall.init)
__stop_init_call = .;
__start_cmd = .;
*(.bootloaddata.cmd)
. = ALIGN(0x4) ;
__stop_cmd = .;
__global_pointer$ = .;
*(.sdata)
*(.sdata.*)
*(.sdata2.*)
*(.gnu.linkonce.s.*)
*(__libc_atexit)
*(__libc_subinit)
*(__libc_subfreeres)
*(.note.ABI-tag)
. = ALIGN(0x4) ;
__edata = .;
__data_end__ = .;
} > REGION_DATA AT > REGION_RODATA
._ram_code : {
. = ALIGN(0x4) ;
__ram_code_start__ = .;
*(.ram.code*)
. = ALIGN(0x4) ;
__ram_code_end__ = .;
} > REGION_DATA AT > REGION_RODATA
.bss : {
. = ALIGN(0x4) ;
__sbss = ALIGN(0x4) ;
__bss_start__ = . ;
*(.dynsbss)
*(.sbss)
*(.sbss.*)
*(.scommon)
*(.dynbss)
*(.bss*)
*(COMMON)
. = ALIGN(0x4) ;
__ebss = . ;
__bss_end__ = .;
__end = . ;
end = . ;
} > REGION_BSS AT > REGION_BSS
._user_heap (NOLOAD): {
. = ALIGN(0x4) ;
*(.stack*)
. = ALIGN(0x4) ;
__heap_start = .;
. += __min_heap_size;
. = ALIGN(0x4) ;
} > REGION_BSS AT > REGION_BSS
}
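
The FSymTab/VSymTab/.rti_fn* KEEP blocks above are consumed by RT-Thread at boot, which is why the linker must not garbage-collect them. A simplified, hedged sketch of the table walk behind the __rt_init_start/__rt_init_end pair (the real implementation lives in RT-Thread's components.c; the function and type names below are illustrative):

/* hedged sketch of how the .rti_fn* table defined above is consumed */
typedef int (*init_fn_t)(void);

/* boundary symbols provided by the linker script */
extern const init_fn_t __rt_init_start;
extern const init_fn_t __rt_init_end;

static void run_init_table(void)
{
    /* INIT_*_EXPORT() places function pointers into the .rti_fn* sections,
       sorted by level; they land between the two boundary symbols */
    for (const init_fn_t *fn = &__rt_init_start; fn < &__rt_init_end; fn++)
        (*fn)();
}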

View File

@@ -0,0 +1,268 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
MEMORY
{
DRAM : ORIGIN = 0x50000000, LENGTH = 0x100000 /* on-chip DRAM 1*1MB */
}
__min_heap_size = 0x200;
PROVIDE (__ram_end = 0x50100000 - 0x8);
PROVIDE (__heap_end = __ram_end);
REGION_ALIAS("REGION_TEXT", DRAM);
REGION_ALIAS("REGION_RODATA", DRAM);
REGION_ALIAS("REGION_DATA", DRAM);
REGION_ALIAS("REGION_BSS", DRAM);
ENTRY(Reset_Handler)
SECTIONS
{
.text : {
. = ALIGN(0x4) ;
__stext = . ;
KEEP(*startup.o(*.text))
KEEP(*startup.o(*.vectors))
KEEP(*vectors.o(*.text))
KEEP(*(.text.entry))
*(.text)
*(.text*)
*(.text.*)
*(.gnu.warning)
*(.stub)
*(.gnu.linkonce.t*)
*(.glue_7t)
*(.glue_7)
*(.jcr)
KEEP (*(.init))
KEEP (*(.fini))
. = ALIGN(0x4) ;
PROVIDE(__ctbp = .);
*(.call_table_data)
*(.call_table_text)
. = ALIGN(0x4) ;
__etext = . ;
} > REGION_TEXT
.gcc_except_table : ONLY_IF_RO {
*(.gcc_except_table .gcc_except_table.*)
} > REGION_TEXT
.rodata : {
. = ALIGN(0x4) ;
__srodata = .;
*(.rdata)
*(.rdata*)
*(.rdata1)
*(.rdata.*)
*(.rodata)
*(.rodata1)
*(.rodata*)
*(.rodata.*)
*(.rodata.str1.4)
*(.srodata*)
. = ALIGN(0x8) ;
__init_array_start = .;
__ctors_start__ = .;
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
__init_array_end = .;
__ctors_end__ = .;
. = ALIGN(0x8) ;
__fini_array_start = .;
__dtors_start__ = .;
KEEP (*(SORT(.fini_array.*)))
KEEP (*(.fini_array))
__fini_array_end = .;
__dtors_end__ = .;
. = ALIGN(0x8) ;
__ctor_start__ = .;
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
__ctor_end__ = .;
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
__dtor_end__ = .;
. = ALIGN(0x4) ;
/*****************************************/
/* section information for finsh shell */
. = ALIGN(0x4);
__fsymtab_start = .;
KEEP(*(FSymTab))
__fsymtab_end = .;
. = ALIGN(0x4);
__vsymtab_start = .;
KEEP(*(VSymTab))
__vsymtab_end = .;
. = ALIGN(0x4);
/* section information for initial. */
__rt_init_start = .;
KEEP(*(SORT(.rti_fn*)))
__rt_init_end = .;
. = ALIGN(0x4) ;
/* section information for at utest */
__rt_utest_tc_tab_start = .;
KEEP(*(UtestTcTab))
__rt_utest_tc_tab_end = .;
. = ALIGN(0x4);
/* section information for at server */
. = ALIGN(0x4);
__rtatcmdtab_start = .;
KEEP(*(RtAtCmdTab))
__rtatcmdtab_end = .;
. = ALIGN(0x4);
/* section information for modules */
. = ALIGN(0x4);
__rtmsymtab_start = .;
KEEP(*(RTMSymTab))
__rtmsymtab_end = .;
/* section information for uPRC */
. = ALIGN(0x4);
__uRPCSvcTab_start = .;
KEEP(*(uRPCSvcTab))
__uRPCSvcTab_end = .;
/* section information for var export */
. = ALIGN(0x4);
__ve_table_start = .;
KEEP(*(SORT(*.VarExpTab.*)))
__ve_table_end = .;
/*****************************************/
/************** added drivers **************/
_cli_region_begin = .;
KEEP(*(CliRegion))
. = ALIGN(0x4) ;
_cli_region_end = .;
__core_driver_start__ = .;
KEEP(*(.core_driver_entry))
. = ALIGN(0x4) ;
__core_driver_end__ = .;
__bus_driver_start__ = .;
KEEP(*(*.bus_driver_entry))
__bus_driver_end__ = .;
__early_driver_start__ = .;
KEEP(*(*.early_driver_entry))
__early_driver_end__ = .;
__vfs_driver_start__ = .;
KEEP(*(*.vfs_driver_entry))
__vfs_driver_end__ = .;
__level0_driver_start__ = .;
KEEP(*(*.level0_driver_entry))
__level0_driver_end__ = .;
__level1_driver_start__ = .;
KEEP(*(*.level1_driver_entry))
__level1_driver_end__ = .;
__level2_driver_start__ = .;
KEEP(*(*.level2_driver_entry))
__level2_driver_end__ = .;
__level3_driver_start__ = .;
KEEP(*(*.level3_driver_entry))
__level3_driver_end__ = .;
__post_driver_start__ = .;
KEEP(*(*.post_driver_entry))
__post_driver_end__ = .;
/************** end of drivers *********/
. = ALIGN(0x4) ;
__erodata = .;
__rodata_end__ = .;
} > REGION_RODATA
.data : {
. = ALIGN(0x4) ;
__sdata = . ;
__data_start__ = . ;
data_start = . ;
*(.got.plt)
*(.got)
*(.gnu.linkonce.r*)
*(.data)
*(.data*)
*(.data1)
*(.data.*)
*(.gnu.linkonce.d*)
*(.data1)
*(.gcc_except_table)
*(.gcc_except_table*)
__start_init_call = .;
*(.initcall.init)
__stop_init_call = .;
__start_cmd = .;
*(.bootloaddata.cmd)
. = ALIGN(4) ;
__stop_cmd = .;
__global_pointer$ = .;
*(.sdata)
*(.sdata.*)
*(.sdata2.*)
*(.gnu.linkonce.s.*)
*(__libc_atexit)
*(__libc_subinit)
*(__libc_subfreeres)
*(.note.ABI-tag)
. = ALIGN(0x4) ;
__edata = .;
__data_end__ = .;
} > REGION_DATA
._ram_code : {
. = ALIGN(0x4) ;
__ram_code_start__ = .;
*(.ram.code*)
. = ALIGN(0x4) ;
__ram_code_end__ = .;
} > REGION_DATA
.bss : {
. = ALIGN(0x4) ;
__sbss = ALIGN(0x4) ;
__bss_start__ = . ;
*(.dynsbss)
*(.sbss)
*(.sbss.*)
*(.scommon)
*(.dynbss)
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(0x4) ;
__ebss = . ;
__bss_end__ = .;
__end = . ;
end = . ;
} > REGION_BSS
._user_heap (NOLOAD): {
. = ALIGN(0x4) ;
*(.stack*)
. = ALIGN(0x4) ;
__heap_start = .;
. += __min_heap_size;
. = ALIGN(0x4) ;
} > REGION_BSS
}

View File

@@ -0,0 +1,264 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
MEMORY
{
DRAM : ORIGIN = 0x00000000, LENGTH = 0x100000 /* on-chip SRAM 1*1MB */
}
__min_heap_size = 0x200;
PROVIDE (__ram_end = 0x00100000 - 0x8);
PROVIDE (__heap_end = __ram_end);
REGION_ALIAS("REGION_TEXT", DRAM);
REGION_ALIAS("REGION_RODATA", DRAM);
REGION_ALIAS("REGION_DATA", DRAM);
REGION_ALIAS("REGION_BSS", DRAM);
ENTRY(Reset_Handler)
SECTIONS
{
.text : {
. = ALIGN(0x4) ;
__stext = . ;
KEEP(*startup.o(*.text))
KEEP(*startup.o(*.vectors))
KEEP(*vectors.o(*.text))
KEEP(*(.text.entry))
*(.text)
*(.text*)
*(.text.*)
*(.gnu.warning)
*(.stub)
*(.gnu.linkonce.t*)
*(.glue_7t)
*(.glue_7)
*(.jcr)
KEEP (*(.init))
KEEP (*(.fini))
. = ALIGN(0x4) ;
PROVIDE(__ctbp = .);
*(.call_table_data)
*(.call_table_text)
. = ALIGN(0x4) ;
__etext = . ;
} > REGION_TEXT
.gcc_except_table : ONLY_IF_RO {
*(.gcc_except_table .gcc_except_table.*)
} > REGION_TEXT
.rodata : {
. = ALIGN(0x4) ;
__srodata = .;
*(.rdata)
*(.rdata*)
*(.rdata1)
*(.rdata.*)
*(.rodata)
*(.rodata1)
*(.rodata*)
*(.rodata.*)
*(.rodata.str1.4)
*(.srodata*)
. = ALIGN(0x4) ;
__init_array_start = .;
__ctors_start__ = .;
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
__init_array_end = .;
__ctors_end__ = .;
__fini_array_start = .;
__dtors_start__ = .;
KEEP (*(SORT(.fini_array.*)))
KEEP (*(.fini_array))
__fini_array_end = .;
__dtors_end__ = .;
__ctor_start__ = .;
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
__ctor_end__ = .;
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
__dtor_end__ = .;
. = ALIGN(0x4) ;
/*****************************************/
/* section information for finsh shell */
. = ALIGN(0x4);
__fsymtab_start = .;
KEEP(*(FSymTab))
__fsymtab_end = .;
. = ALIGN(0x4);
__vsymtab_start = .;
KEEP(*(VSymTab))
__vsymtab_end = .;
. = ALIGN(0x4);
/* section information for initial. */
__rt_init_start = .;
KEEP(*(SORT(.rti_fn*)))
__rt_init_end = .;
. = ALIGN(0x4) ;
/* section information for at utest */
__rt_utest_tc_tab_start = .;
KEEP(*(UtestTcTab))
__rt_utest_tc_tab_end = .;
. = ALIGN(0x4);
/* section information for at server */
. = ALIGN(0x4);
__rtatcmdtab_start = .;
KEEP(*(RtAtCmdTab))
__rtatcmdtab_end = .;
. = ALIGN(0x4);
/* section information for modules */
. = ALIGN(0x4);
__rtmsymtab_start = .;
KEEP(*(RTMSymTab))
__rtmsymtab_end = .;
/* section information for uPRC */
. = ALIGN(0x4);
__uRPCSvcTab_start = .;
KEEP(*(uRPCSvcTab))
__uRPCSvcTab_end = .;
/* section information for var export */
. = ALIGN(0x4);
__ve_table_start = .;
KEEP(*(SORT(*.VarExpTab.*)))
__ve_table_end = .;
/*****************************************/
/************** added drivers **************/
_cli_region_begin = .;
KEEP(*(CliRegion))
. = ALIGN(0x4) ;
_cli_region_end = .;
__core_driver_start__ = .;
KEEP(*(.core_driver_entry))
. = ALIGN(0x4) ;
__core_driver_end__ = .;
__bus_driver_start__ = .;
KEEP(*(*.bus_driver_entry))
__bus_driver_end__ = .;
__early_driver_start__ = .;
KEEP(*(*.early_driver_entry))
__early_driver_end__ = .;
__vfs_driver_start__ = .;
KEEP(*(*.vfs_driver_entry))
__vfs_driver_end__ = .;
__level0_driver_start__ = .;
KEEP(*(*.level0_driver_entry))
__level0_driver_end__ = .;
__level1_driver_start__ = .;
KEEP(*(*.level1_driver_entry))
__level1_driver_end__ = .;
__level2_driver_start__ = .;
KEEP(*(*.level2_driver_entry))
__level2_driver_end__ = .;
__level3_driver_start__ = .;
KEEP(*(*.level3_driver_entry))
__level3_driver_end__ = .;
__post_driver_start__ = .;
KEEP(*(*.post_driver_entry))
__post_driver_end__ = .;
/************** end of drivers *********/
. = ALIGN(0x4) ;
__erodata = .;
__rodata_end__ = .;
} > REGION_RODATA
.data : {
. = ALIGN(0x4) ;
__sdata = . ;
__data_start__ = . ;
data_start = . ;
*(.got.plt)
*(.got)
*(.gnu.linkonce.r*)
*(.data)
*(.data*)
*(.data1)
*(.data.*)
*(.gnu.linkonce.d*)
*(.data1)
*(.gcc_except_table)
*(.gcc_except_table*)
__start_init_call = .;
*(.initcall.init)
__stop_init_call = .;
__start_cmd = .;
*(.bootloaddata.cmd)
. = ALIGN(4) ;
__stop_cmd = .;
__global_pointer$ = .;
*(.sdata)
*(.sdata.*)
*(.sdata2.*)
*(.gnu.linkonce.s.*)
*(__libc_atexit)
*(__libc_subinit)
*(__libc_subfreeres)
*(.note.ABI-tag)
__edata = .;
__data_end__ = .;
. = ALIGN(0x4) ;
} > REGION_DATA
.gcc_except_table : ONLY_IF_RW {
*(.gcc_except_table .gcc_except_table.*)
__edata = .;
__data_end__ = .;
} > REGION_DATA
.bss : {
. = ALIGN(0x4) ;
__sbss = ALIGN(0x4) ;
__bss_start__ = . ;
*(.dynsbss)
*(.sbss)
*(.sbss.*)
*(.scommon)
*(.dynbss)
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(0x4) ;
__ebss = . ;
__bss_end__ = .;
__end = . ;
end = . ;
} > REGION_BSS
._user_heap (NOLOAD): {
. = ALIGN(0x4) ;
*(.stack*)
. = ALIGN(0x4) ;
__heap_start = .;
. += __min_heap_size;
. = ALIGN(0x4) ;
} > REGION_BSS
}

View File

@@ -0,0 +1,13 @@
from building import *
import os
cwd = GetCurrentDir()
CPPPATH = [cwd]
CPPPATH += [cwd + '/asm']
CPPPATH += [cwd + '/drv']
src = Split("""
""")
group = DefineGroup('dummy_inc', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,598 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Attention: modifying this file directly is discouraged.
 * Copy it from chip_riscv_dummy/include/asm/riscv_asm_macro.h and keep it up to date.
 * Please contact the xuantie-rtos OS team if you have any questions.
 */
#ifndef __RISCV_ASM_MACRO_H__
#define __RISCV_ASM_MACRO_H__
#if (!defined(__riscv_flen)) && (CONFIG_CHECK_FPU_DIRTY)
#error "this cpu doesn't supprot FPU, but macro 'CONFIG_CHECK_FPU_DIRTY' is defined, please remove it."
#endif
#if (!defined(__riscv_vector)) && (CONFIG_CHECK_VECTOR_DIRTY)
#error "this cpu doesn't supprot vector, but macro 'CONFIG_CHECK_VECTOR_DIRTY' is defined, please remove it."
#endif
#if (!defined(__riscv_matrix) && !defined(__riscv_xtheadmatrix)) && (CONFIG_CHECK_MATRIX_DIRTY)
#error "this cpu doesn't supprot matrix, but macro 'CONFIG_CHECK_MATRIX_DIRTY' is defined, please remove it."
#endif
#include "riscv_csr.h"
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
.macro RESTORE_MSTATUS
/* t0 and t1 are not restored before using */
/* now, sp is at the top of the stack (the lowest address)*/
li t1, 0
#if __riscv_matrix || __riscv_xtheadmatrix /* matrix registers */
#if __riscv_xlen == 64
addi t1, t1, (12 + 12)
#else
addi t1, t1, 12
#endif /*__riscv_xlen */
csrr t0, xmlenb
slli t0, t0, 3
add t1, t1, t0
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
#ifdef __riscv_vector /* vector registers */
csrr t0, vlenb
slli t0, t0, 5
add t1, t1, t0
#if __riscv_xlen == 64
addi t1, t1, (20+20)
#else
addi t1, t1, 20
#endif /* __riscv_xlen */
#endif /* __riscv_vector */
#if __riscv_flen == 64 /* float registers */
#if __riscv_xlen == 64
addi t1, t1, 168
#else
addi t1, t1, 164
#endif /* __riscv_xlen */
#elif __riscv_flen == 32
addi t1, t1, 84
#endif /* __riscv_flen */
#ifdef __riscv_dsp /* vxsat register, 32-bit cpu only */
addi t1, t1, 4
#endif /* __riscv_dsp */
#if __riscv_xlen == 64 /*general purpose registers*/
addi t1, t1, (72 + 72)
#elif __riscv_xlen == 32
addi t1, t1, 72
#endif
add sp, sp, t1
/* now, sp is the position of mstatus */
load_x t3, (0)(sp)
csrw mstatus, t3
sub sp, sp, t1
.endm
.macro RESTORE_SSTATUS
/* t0 and t1 are not restored before using */
/* now, sp is at the top of the stack (the lowest address)*/
li t1, 0
#if __riscv_matrix || __riscv_xtheadmatrix /* matrix registers */
#if __riscv_xlen == 64
addi t1, t1, (12 + 12)
#else
addi t1, t1, 12
#endif /*__riscv_xlen */
csrr t0, xmlenb
slli t0, t0, 3
add t1, t1, t0
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
#ifdef __riscv_vector /* vector registers */
csrr t0, vlenb
slli t0, t0, 5
add t1, t1, t0
#if __riscv_xlen == 64
addi t1, t1, (20+20)
#else
addi t1, t1, 20
#endif /* __riscv_xlen */
#endif /* __riscv_vector */
#if __riscv_flen == 64 /* float registers */
#if __riscv_xlen == 64
addi t1, t1, 168
#else
addi t1, t1, 164
#endif /* __riscv_xlen */
#elif __riscv_flen == 32
addi t1, t1, 84
#endif /* __riscv_flen */
#if __riscv_xlen == 64 /*general purpose registers*/
addi t1, t1, (72 + 72)
#elif __riscv_xlen == 32
addi t1, t1, 72
#endif
add sp, sp, t1
/* now, sp is the position of mstatus */
load_x t3, (0)(sp)
csrw sstatus, t3
sub sp, sp, t1
.endm
#endif /* CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY */
.macro SAVE_VECTOR_REGISTERS
/* t0,t1 saved before using */
/* mstatus->t3 */
#ifdef __riscv_vector
#if CONFIG_CHECK_VECTOR_DIRTY
/* check if VS field of MSTATUS is 'dirty' */
li t1, SR_VS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_VECTOR_DIRTY */
/* if dirty, save vector registers */
#if __riscv_xlen == 64
addi sp, sp, -(20+20)
csrr t0, vl
store_x t0, (0 +0 )(sp)
csrr t0, vtype
store_x t0, (4 +4 )(sp)
csrr t0, vstart
store_x t0, (8 +8 )(sp)
csrr t0, vxsat
store_x t0, (12 +12 )(sp)
csrr t0, vxrm
store_x t0, (16 +16 )(sp)
#else
addi sp, sp, -20
csrr t0, vl
store_x t0, (0)(sp)
csrr t0, vtype
store_x t0, (4)(sp)
csrr t0, vstart
store_x t0, (8)(sp)
csrr t0, vxsat
store_x t0, (12)(sp)
csrr t0, vxrm
store_x t0, (16)(sp)
#endif /*__riscv_xlen */
csrr t0, vlenb
slli t0, t0, 3
slli t1, t0, 2
sub sp, sp, t1
#if (__riscv_v == 7000)
vsetvli zero, zero, e8, m8
vsb.v v0, (sp)
add sp, sp, t0
vsb.v v8, (sp)
add sp, sp, t0
vsb.v v16, (sp)
add sp, sp, t0
vsb.v v24, (sp)
#elif (__riscv_v == 1000000)
vsetvli zero, zero, e8, m8, ta, ma
vs8r.v v0, (sp)
add sp, sp, t0
vs8r.v v8, (sp)
add sp, sp, t0
vs8r.v v16, (sp)
add sp, sp, t0
vs8r.v v24, (sp)
#endif
sub t0, t1, t0
sub sp, sp, t0
#if CONFIG_CHECK_VECTOR_DIRTY
j 2f
1: /* don't need to save vector registers, set sp */
#if __riscv_xlen == 64
addi sp, sp, -(20+20)
#else
addi sp, sp, -20
#endif
csrr t0, vlenb
slli t0, t0, 5
sub sp, sp, t0
2:
#endif /* CONFIG_CHECK_VECTOR_DIRTY */
#endif /*__riscv_vector*/
.endm
.macro RESTORE_VECTOR_REGISTERS
/* t0,t1,t2 not restored before using, mstatus has been restored before using */
#ifdef __riscv_vector
#if CONFIG_CHECK_VECTOR_DIRTY
/* check if VS field of MSTATUS is 'dirty' */
li t1, SR_VS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_VECTOR_DIRTY */
/* get the range of register */
csrr t0, vlenb
slli t0, t0, 3
/* save */
#if (__riscv_v == 7000)
vsetvli zero, zero, e8, m8
vlb.v v0, (sp)
add sp, sp, t0
vlb.v v8, (sp)
add sp, sp, t0
vlb.v v16, (sp)
add sp, sp, t0
vlb.v v24, (sp)
add sp, sp, t0
#elif (__riscv_v == 1000000)
vsetvli zero, zero, e8, m8, ta, ma
vl8r.v v0, (sp)
add sp, sp, t0
vl8r.v v8, (sp)
add sp, sp, t0
vl8r.v v16, (sp)
add sp, sp, t0
vl8r.v v24, (sp)
add sp, sp, t0
#endif
#if __riscv_xlen == 64
load_x t0, (0 +0)(sp)
load_x t1, (4 +4)(sp)
load_x t2, (8 +8)(sp)
vsetvl zero, t0, t1
csrw vstart, t2
load_x t2, (12 +12)(sp)
csrw vxsat, t2
load_x t2, (16 +16)(sp)
csrw vxrm, t2
addi sp, sp, (20+20)
#else
load_x t0, (0)(sp)
load_x t1, (4)(sp)
load_x t2, (8)(sp)
vsetvl zero, t0, t1
csrw vstart, t2
load_x t2, (12)(sp)
csrw vxsat, t2
load_x t2, (16)(sp)
csrw vxrm, t2
addi sp, sp, 20
#endif /*__riscv_xlen */
#if CONFIG_CHECK_VECTOR_DIRTY
j 2f
1:
/* don't restore, move sp only */
#if __riscv_xlen == 64
addi sp, sp, (20+20)
#else
addi sp, sp, (20)
#endif
csrr t0, vlenb
slli t0, t0, 5
add sp, sp, t0
2:
#endif /* CONFIG_CHECK_VECTOR_DIRTY */
#endif /*__riscv_vector*/
.endm
.macro SAVE_FLOAT_REGISTERS
/* t0, t1 saved before using */
#if __riscv_flen == 64
#if CONFIG_CHECK_FPU_DIRTY
/* check if FS field of MSTATUS is 'dirty' */
li t1, SR_FS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /*CONFIG_CHECK_FPU_DIRTY*/
/* save */
#if __riscv_xlen == 64
addi sp, sp, -(4+4)
frcsr t0
store_x t0, (0 +0 )(sp)
#else
addi sp, sp, -4
frcsr t0
store_x t0, 0(sp)
#endif /*__riscv_xlen */
addi sp, sp, -160
fstore_x ft0, (0 +0 )(sp)
fstore_x ft1, (4 +4 )(sp)
fstore_x ft2, (8 +8 )(sp)
fstore_x ft3, (12+12)(sp)
fstore_x ft4, (16+16)(sp)
fstore_x ft5, (20+20)(sp)
fstore_x ft6, (24+24)(sp)
fstore_x ft7, (28+28)(sp)
fstore_x fa0, (32+32)(sp)
fstore_x fa1, (36+36)(sp)
fstore_x fa2, (40+40)(sp)
fstore_x fa3, (44+44)(sp)
fstore_x fa4, (48+48)(sp)
fstore_x fa5, (52+52)(sp)
fstore_x fa6, (56+56)(sp)
fstore_x fa7, (60+60)(sp)
fstore_x ft8, (64+64)(sp)
fstore_x ft9, (68+68)(sp)
fstore_x ft10,(72+72)(sp)
fstore_x ft11,(76+76)(sp)
#elif __riscv_flen == 32
#if CONFIG_CHECK_FPU_DIRTY
/* check if FS field of MSTATUS is 'dirty' */
li t1, SR_FS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_FPU_DIRTY */
addi sp, sp, -4
frcsr t0
store_x t0, 0(sp)
addi sp, sp, -80
fstore_x ft0, 0(sp)
fstore_x ft1, 4(sp)
fstore_x ft2, 8(sp)
fstore_x ft3, 12(sp)
fstore_x ft4, 16(sp)
fstore_x ft5, 20(sp)
fstore_x ft6, 24(sp)
fstore_x ft7, 28(sp)
fstore_x fa0, 32(sp)
fstore_x fa1, 36(sp)
fstore_x fa2, 40(sp)
fstore_x fa3, 44(sp)
fstore_x fa4, 48(sp)
fstore_x fa5, 52(sp)
fstore_x fa6, 56(sp)
fstore_x fa7, 60(sp)
fstore_x ft8, 64(sp)
fstore_x ft9, 68(sp)
fstore_x ft10,72(sp)
fstore_x ft11,76(sp)
#endif /*__riscv_flen */
#if CONFIG_CHECK_FPU_DIRTY
j 2f
1:
/* don't store, move sp only */
#if __riscv_flen == 64
#if __riscv_xlen == 64
addi sp, sp, -168
#else
addi sp, sp, -164
#endif /*__riscv_xlen */
#elif __riscv_flen == 32
addi sp, sp, -84
#endif /* __riscv_xlen */
2:
#endif
.endm
.macro RESTORE_FLOAT_REGISTERS
/* t0 and t1 are not restored before using, mstatus has been restored before using */
#if __riscv_flen == 64
#if CONFIG_CHECK_FPU_DIRTY
/* check if FS field of MSTATUS is 'dirty' */
li t1, SR_FS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_FPU_DIRTY */
/* restore */
fload_x ft0, (0 +0 )(sp)
fload_x ft1, (4 +4 )(sp)
fload_x ft2, (8 +8 )(sp)
fload_x ft3, (12+12)(sp)
fload_x ft4, (16+16)(sp)
fload_x ft5, (20+20)(sp)
fload_x ft6, (24+24)(sp)
fload_x ft7, (28+28)(sp)
fload_x fa0, (32+32)(sp)
fload_x fa1, (36+36)(sp)
fload_x fa2, (40+40)(sp)
fload_x fa3, (44+44)(sp)
fload_x fa4, (48+48)(sp)
fload_x fa5, (52+52)(sp)
fload_x fa6, (56+56)(sp)
fload_x fa7, (60+60)(sp)
fload_x ft8, (64+64)(sp)
fload_x ft9, (68+68)(sp)
fload_x ft10,(72+72)(sp)
fload_x ft11,(76+76)(sp)
addi sp, sp, 160
#if __riscv_xlen == 64
load_x t0, (0 +0)(sp)
fscsr t0
addi sp, sp, (4+4)
#else
load_x t0, 0(sp)
fscsr t0
addi sp, sp, 4
#endif /*__riscv_xlen */
#elif __riscv_flen == 32
#if CONFIG_CHECK_FPU_DIRTY
/* check if FS field of MSTATUS is 'dirty' */
li t1, SR_FS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_FPU_DIRTY */
/* restore */
fload_x ft0, 0(sp)
fload_x ft1, 4(sp)
fload_x ft2, 8(sp)
fload_x ft3, 12(sp)
fload_x ft4, 16(sp)
fload_x ft5, 20(sp)
fload_x ft6, 24(sp)
fload_x ft7, 28(sp)
fload_x fa0, 32(sp)
fload_x fa1, 36(sp)
fload_x fa2, 40(sp)
fload_x fa3, 44(sp)
fload_x fa4, 48(sp)
fload_x fa5, 52(sp)
fload_x fa6, 56(sp)
fload_x fa7, 60(sp)
fload_x ft8, 64(sp)
fload_x ft9, 68(sp)
fload_x ft10,72(sp)
fload_x ft11,76(sp)
addi sp, sp, 80
load_x t0, 0(sp)
fscsr t0
addi sp, sp, 4
#endif /*__riscv_flen */
#if CONFIG_CHECK_FPU_DIRTY
j 2f
1:
/* don't restore, move sp only */
#if __riscv_flen == 64
#if __riscv_xlen == 64
addi sp, sp, 168
#elif __riscv_xlen == 32
addi sp, sp, 164
#endif
#elif __riscv_flen == 32
addi sp, sp, 84
#endif /* __riscv_flen */
2:
#endif /* CONFIG_CHECK_FPU_DIRTY */
.endm
.macro SAVE_MATRIX_REGISTERS
/* t0,t1 saved before using */
#if __riscv_matrix || __riscv_xtheadmatrix
#if CONFIG_CHECK_MATRIX_DIRTY
/* check if MS field of MSTATUS is 'dirty' */
li t1, SR_MS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_MATRIX_DIRTY */
/* store */
#if __riscv_xlen == 64
addi sp, sp, -(12+12)
csrr t0, xmrstart
store_x t0, (0 +0 )(sp)
csrr t0, xmcsr
store_x t0, (4 +4 )(sp)
csrr t0, xmsize
store_x t0, (8 +8 )(sp)
#else
addi sp, sp, -12
csrr t0, xmrstart
store_x t0, (0)(sp)
csrr t0, xmcsr
store_x t0, (4)(sp)
csrr t0, xmsize
store_x t0, (8)(sp)
#endif /*__riscv_xlen */
csrr t0, xmlenb
slli t1, t0, 3
sub sp, sp, t1
csrw xmrstart, x0
mst8mb m0, (sp)
#if CONFIG_CHECK_MATRIX_DIRTY
j 2f
1:
/* don't save, move sp only */
csrr t0, xmlenb
slli t1, t0, 3
sub sp, sp, t1
#if __riscv_xlen == 64
addi sp, sp, -24
#else
addi sp, sp, -12
#endif
2:
#endif /* CONFIG_CHECK_MATRIX_DIRTY */
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
.endm
.macro RESTORE_MATRIX_REGISTERS
/* t0 and t1 are not restored before using, mstatus has been restored before using */
#if __riscv_matrix || __riscv_xtheadmatrix
#if CONFIG_CHECK_MATRIX_DIRTY
/* check if MS field of MSTATUS is 'dirty' */
li t1, SR_MS_DIRTY
and t4, t3, t1
bne t4, t1, 1f
#endif /* CONFIG_CHECK_MATRIX_DIRTY */
/* restore */
csrr t0, xmlenb
slli t1, t0, 3
csrw xmrstart, x0
mld8mb m0, (sp)
add sp, sp, t1
#if __riscv_xlen == 64
load_x t0, (0 + 0)(sp)
csrw xmrstart, t0
load_x t0, (4 + 4)(sp)
csrw xmcsr, t0
load_x t0, (8 + 8)(sp)
csrw xmsize, t0
addi sp, sp, (12+12)
#else
load_x t0, (0)(sp)
csrw xmrstart, t0
load_x t0, (4)(sp)
csrw xmcsr, t0
load_x t0, (8)(sp)
csrw xmsize, t0
addi sp, sp, 12
#endif /*__riscv_xlen */
#if CONFIG_CHECK_MATRIX_DIRTY
j 2f
1:
/* don't restore, move sp only */
csrr t0, xmlenb
slli t1, t0, 3
add sp, sp, t1
#if __riscv_xlen == 64
addi sp, sp, 24
#else
addi sp, sp, 12
#endif
2:
#endif /* CONFIG_CHECK_MATRIX_DIRTY */
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
.endm
#endif /* __RISCV_ASM_MACRO_H__ */
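
The addi/slli chains in RESTORE_MSTATUS above recompute the byte offset from the stack pointer to the saved mstatus word. A hedged C mirror of the same arithmetic, handy for auditing the constants (on hardware, vlenb/xmlenb are CSRs; here they are plain parameters, and the vxsat slot only exists in the M-mode variant):

/* hedged mirror of the RESTORE_MSTATUS offset computation */
#include <stdint.h>

struct ctx_cfg {
    int xlen;        /* 32 or 64 */
    int flen;        /* 0, 32 or 64 */
    int has_dsp;     /* vxsat slot, 32-bit CPUs only */
    uint32_t vlenb;  /* 0 when the V extension is absent */
    uint32_t xmlenb; /* 0 when the matrix extension is absent */
};

static uint32_t mstatus_offset(const struct ctx_cfg *c)
{
    uint32_t off = 0;
    if (c->xmlenb)                         /* 3 matrix CSRs + 8 * xmlenb */
        off += (c->xlen == 64 ? 24 : 12) + (c->xmlenb << 3);
    if (c->vlenb)                          /* 32 * vlenb + 5 vector CSRs */
        off += (c->vlenb << 5) + (c->xlen == 64 ? 40 : 20);
    if (c->flen == 64)                     /* f-registers + fcsr */
        off += (c->xlen == 64 ? 168 : 164);
    else if (c->flen == 32)
        off += 84;
    if (c->has_dsp)                        /* vxsat */
        off += 4;
    off += (c->xlen == 64 ? 144 : 72);     /* general purpose registers */
    return off;
}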

View File

@@ -0,0 +1,177 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Attention: modifying this file directly is discouraged.
 * Copy it from chip_riscv_dummy/include/asm/riscv_csr.h and keep it up to date.
 * Please contact the xuantie-rtos OS team if you have any questions.
 */
#ifndef __RISCV_CSR_H__
#define __RISCV_CSR_H__
#if __riscv_xlen == 64
#define portWORD_SIZE 8
#define store_x sd
#define load_x ld
#elif __riscv_xlen == 32
#define store_x sw
#define load_x lw
#define portWORD_SIZE 4
#else
#error Assembler did not define __riscv_xlen
#endif
#if __riscv_flen == 64
#define fstore_x fsd
#define fload_x fld
#elif __riscv_flen == 32
#define fstore_x fsw
#define fload_x flw
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
#define MODE_PREFIX(suffix) s##suffix
#else
#define MODE_PREFIX(suffix) m##suffix
#endif
/* Status register flags */
#define SR_SIE 0x00000002UL /* Supervisor Interrupt Enable */
#define SR_MIE 0x00000008UL /* Machine Interrupt Enable */
#define SR_SPIE 0x00000020UL /* Previous Supervisor IE */
#define SR_MPIE 0x00000080UL /* Previous Machine IE */
#define SR_SPP_U 0x00000000UL /* Previously User mode */
#define SR_SPP_S 0x00000100UL /* Previously Supervisor mode */
#define SR_MPP_U 0x00000000UL /* Previously User mode */
#define SR_MPP_S 0x00000800UL /* Previously Supervisor mode */
#define SR_MPP_M 0x00001800UL /* Previously Machine mode */
#define SR_SUM 0x00040000UL /* Supervisor User Memory Access */
#define SR_FS 0x00006000UL /* Floating-point Status */
#define SR_FS_OFF 0x00000000UL
#define SR_FS_INITIAL 0x00002000UL
#define SR_FS_CLEAN 0x00004000UL
#define SR_FS_DIRTY 0x00006000UL
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV || CONFIG_CPU_XUANTIE_R920
#define SR_VS 0x01800000
#define SR_VS_OFF 0x00000000
#define SR_VS_INITIAL 0x00800000
#define SR_VS_CLEAN 0x01000000
#define SR_VS_DIRTY 0x01800000
#else
#define SR_VS 0x00000600
#define SR_VS_OFF 0x00000000
#define SR_VS_INITIAL 0x00000200
#define SR_VS_CLEAN 0x00000400
#define SR_VS_DIRTY 0x00000600
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
#define SR_MS 0x06000000
#define SR_MS_OFF 0x00000000
#define SR_MS_INITIAL 0x02000000
#define SR_MS_CLEAN 0x04000000
#define SR_MS_DIRTY 0x06000000
#endif
/* Interrupt-enable Registers */
#define IE_MTIE 0x00000080UL
#define IE_MEIE 0x00000800UL
/* ===== Trap/Exception Causes ===== */
#define CAUSE_MISALIGNED_FETCH 0x0
#define CAUSE_FETCH_ACCESS 0x1
#define CAUSE_ILLEGAL_INSTRUCTION 0x2
#define CAUSE_BREAKPOINT 0x3
#define CAUSE_MISALIGNED_LOAD 0x4
#define CAUSE_LOAD_ACCESS 0x5
#define CAUSE_MISALIGNED_STORE 0x6
#define CAUSE_STORE_ACCESS 0x7
#define CAUSE_USER_ECALL 0x8
#define CAUSE_SUPERVISOR_ECALL 0x9
#define CAUSE_VIRTUAL_SUPERVISOR_ECALL 0xa
#define CAUSE_MACHINE_ECALL 0xb
#define CAUSE_FETCH_PAGE_FAULT 0xc
#define CAUSE_LOAD_PAGE_FAULT 0xd
#define CAUSE_STORE_PAGE_FAULT 0xf
#define PRV_U 0
#define PRV_S 1
#define PRV_M 3
#define MSTATUS_SIE 0x00000002
#define MSTATUS_MIE 0x00000008
#define MSTATUS_SPIE_SHIFT 5
#define MSTATUS_SPIE (1 << MSTATUS_SPIE_SHIFT)
#define MSTATUS_UBE 0x00000040
#define MSTATUS_MPIE 0x00000080
#define MSTATUS_SPP_SHIFT 8
#define MSTATUS_SPP (1 << MSTATUS_SPP_SHIFT)
#define MSTATUS_MPP_SHIFT 11
#define MSTATUS_MPP (3 << MSTATUS_MPP_SHIFT)
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV || CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
#define MSTATUS_VS_SHIFT 23
#else
#define MSTATUS_VS_SHIFT 9
#endif
#define MSTATUS_FS_SHIFT 13
#define MSTATUS_MS_SHIFT 25
#define INSERT_FIELD(val, which, fieldval) (((val) & ~(which)) | ((fieldval) * ((which) & ~((which)-1))))
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV || CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V ||CONFIG_CPU_XUANTIE_C908I || CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
#define ATTR_SO (1ull << 4)
#define ATTR_CA (1ull << 3)
#define ATTR_BU (1ull << 2)
#define ATTR_SH (1ull << 1)
#define ATTR_SE (1ull << 0)
#define UPPER_ATTRS_SHIFT (59)
#define UPPER_ATTRS(x) (((x) & 0x1f) << UPPER_ATTRS_SHIFT)
#else
#if __riscv_xlen == 32
#define PTE_PBMT_SHIFT (30)
#else
#define PTE_PBMT_SHIFT (61)
#endif /* end __riscv_xlen */
#define SVPBMT_PMA ((unsigned long)0x0 << PTE_PBMT_SHIFT)
#define SVPBMT_NC ((unsigned long)0x1 << PTE_PBMT_SHIFT)
#define SVPBMT_IO ((unsigned long)0x2 << PTE_PBMT_SHIFT)
#define SVPBMT_MASK ((unsigned long)0x3 << PTE_PBMT_SHIFT)
#endif
#define DIRTY_FLAG (1 << 6)
#define ACCESS_FLAG (1 << 5)
#define GLOBAL_FLAG (1 << 4)
#define AP_UNPRIV (1 << 3)
#define AP_X (1 << 2)
#define AP_W (1 << 1)
#define AP_R (1 << 0)
#define LOWER_ATTRS_SHIFT 1
#define LOWER_ATTRS(x) (((x) & 0x1ff) << LOWER_ATTRS_SHIFT)
#endif /* __RISCV_CSR_H__ */
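
INSERT_FIELD() needs no per-field shift constant: which & ~(which - 1) isolates the lowest set bit of the mask, so multiplying fieldval by it shifts the value into position. A small self-contained check (hypothetical host-side test, reusing the definitions above):

/* hedged host-side check of the INSERT_FIELD() bit trick */
#include <assert.h>
#include <stdint.h>

#define MSTATUS_MPP_SHIFT 11
#define MSTATUS_MPP       (3 << MSTATUS_MPP_SHIFT)
#define PRV_M             3
#define INSERT_FIELD(val, which, fieldval) (((val) & ~(which)) | ((fieldval) * ((which) & ~((which)-1))))

int main(void)
{
    uint32_t status = 0;
    /* set the previous-privilege field MPP to machine mode */
    status = INSERT_FIELD(status, MSTATUS_MPP, PRV_M);
    assert(status == (3u << MSTATUS_MPP_SHIFT)); /* 0x1800 */
    return 0;
}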

View File

@@ -0,0 +1,102 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file drv/dev_tag.h
* @brief Header File for DEV TAG Driver
* @version V1.0
* @date 31. March 2020
* @model common
******************************************************************************/
#ifndef _DRV_DEV_TAG_H_
#define _DRV_DEV_TAG_H_
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <drv/list.h>
#include <csi_config.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
DEV_BLANK_TAG = 0U,
DEV_DW_UART_TAG,
DEV_DW_DMA_TAG,
DEV_DW_GPIO_TAG,
DEV_DW_IIC_TAG,
DEV_DW_QSPI_TAG,
DEV_DW_SDMMC_TAG,
DEV_DW_SDHCI_TAG,
DEV_DW_SPI_TAG,
DEV_DW_TIMER_TAG,
DEV_DW_WDT_TAG,
DEV_WJ_ADC_TAG,
DEV_WJ_AES_TAG,
DEV_WJ_CODEC_TAG,
DEV_WJ_CRC_TAG,
DEV_WJ_DMA_TAG,
DEV_WJ_EFLASH_TAG,
DEV_WJ_EFUSE_TAG,
DEV_WJ_ETB_TAG,
DEV_WJ_FFT_TAG,
DEV_WJ_I2S_TAG,
DEV_WJ_MBOX_TAG,
DEV_WJ_PADREG_TAG,
DEV_WJ_PDM_TAG,
DEV_WJ_PINMUX_TAG,
DEV_WJ_PMU_TAG,
DEV_WJ_PWM_TAG,
DEV_WJ_RNG_TAG,
DEV_WJ_ROM_TAG,
DEV_WJ_RSA_TAG,
DEV_WJ_RTC_TAG,
DEV_WJ_SASC_TAG,
DEV_WJ_SHA_TAG,
DEV_WJ_SPDIF_TAG,
DEV_WJ_SPIDF_TAG,
DEV_WJ_TDM_TAG,
DEV_WJ_TIPC_TAG,
DEV_WJ_USB_TAG,
DEV_WJ_USI_TAG,
DEV_WJ_VAD_TAG,
DEV_CD_QSPI_TAG,
DEV_DCD_ISO7816_TAG,
DEV_OSR_RNG_TAG,
DEV_QX_RTC_TAG,
DEV_RCHBAND_CODEC_TAG,
DEV_CMSDK_UART_TAG,
DEV_RAMBUS_150B_PKA_TAG,
DEV_RAMBUS_150B_TRNG_TAG,
DEV_RAMBUS_120SI_TAG,
DEV_RAMBUS_120SII_TAG,
DEV_RAMBUS_120SIII_TAG,
DEV_WJ_AVFS_TAG,
DEV_WJ_BMU_TAG,
} csi_dev_tag_t;
#ifdef __cplusplus
}
#endif
#endif /* _DRV_DEV_TAG_H_ */

View File

@@ -0,0 +1,167 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*/
/*******************************************************
* @file dw_timer_ll.h
* @brief header file for timer ll driver
* @version V1.0
* @date 9. April 2020
* ******************************************************/
#ifndef _DW_TIMER_LL_H_
#define _DW_TIMER_LL_H_
#include <stdio.h>
#include <soc.h>
#include <csi_core.h>
#ifdef __cplusplus
extern "C"
{
#endif
/*! Timer1 Control Reg, offset: 0x08 */
#define DW_TIMER_CTL_ENABLE_SEL_Pos (0U)
#define DW_TIMER_CTL_ENABLE_SEL_Msk (0x1U << DW_TIMER_CTL_ENABLE_SEL_Pos)
#define DW_TIMER_CTL_ENABLE_SEL_EN DW_TIMER_CTL_ENABLE_SEL_Msk
#define DW_TIMER_CTL_MODE_SEL_Pos (1U)
#define DW_TIMER_CTL_MODE_SEL_Msk (0x1U << DW_TIMER_CTL_MODE_SEL_Pos)
#define DW_TIMER_CTL_MODE_SEL_EN DW_TIMER_CTL_MODE_SEL_Msk
#define DW_TIMER_CTL_INT_MASK_Pos (2U)
#define DW_TIMER_CTL_INT_MASK_Msk (0x1U << DW_TIMER_CTL_INT_MASK_Pos)
#define DW_TIMER_CTL_INT_MAKS_EN DW_TIMER_CTL_INT_MASK_Msk
#define DW_TIMER_CTL_HARD_TRIG_Pos (4U)
#define DW_TIMER_CTL_HARD_TRIG_Msk (0x1U << DW_TIMER_CTL_HARD_TRIG_Pos)
#define DW_TIMER_CTL_HARD_TRIG_EN DW_TIMER_CTL_HARD_TRIG_Msk
/*! Timer EOI, offset: 0x0c */
#define DW_TIMER_EOI_REG_Pos (0U)
#define DW_TIMER_EOI_REG_Msk (0x1U << DW_TIMER_EOI_REG_Pos)
#define DW_TIMER_EOI_REG_EN DW_TIMER_EOI_REG_Msk
/*! Timer Int Status, offset: 0x10 */
#define DW_TIMER_INT_STATUS_Pos (0U)
#define DW_TIMER_INT_STATUS_Msk (0x1U << DW_TIMER_INT_STATUS_Pos)
#define DW_TIMER_INT_STATUS_EN DW_TIMER_INT_STATUS_Msk
/*! Timers Int Status, offset: 0xa0 */
#define DW_TIMERS_INT_STATUS_Pos (0U)
#define DW_TIMERS_INT_STATUS_Msk (0x2U << DW_TIMERS_INT_STATUS_Pos)
#define DW_TIMERS_INT_STATUS_EN DW_TIMERS_INT_STATUS_Msk
/*! Timers EOI, offset: 0xa4 */
#define DW_TIMERS_EOI_REG_Pos (0U)
#define DW_TIMERS_EOI_REG_Msk (0x2U << DW_TIMERS_EOI_REG_Pos)
#define DW_TIMERS_EOI_REG_EN DW_TIMERS_EOI_REG_Msk
/*! Timers Raw Int Status,offset: 0xa8 */
#define DW_TIMERS_RAW_INT_STA_Pos (0U)
#define DW_TIMERS_RAW_INT_STA_Msk (0x2U << DW_TIMERS_RAW_INT_STA_Pos)
#define DW_TIMERS_RAW_INT_STA_EN DW_TIMERS_RAW_INT_STA_Msk
typedef struct {
__IOM uint32_t TLC; /* Offset: 0x000 (R/W) TimerLoadCount */
__IM uint32_t TCV; /* Offset: 0x004 (R/ ) TimerCurrentValue */
__IOM uint32_t TCR; /* Offset: 0x008 (R/W) TimerControlReg */
__IM uint32_t TEOI; /* Offset: 0x00c (R/ ) TimerEOI */
__IM uint32_t TIS; /* Offset: 0x010 (R/ ) TimerIntStatus */
} dw_timer_regs_t;
typedef struct {
dw_timer_regs_t timer[8];
__IM uint32_t TSIS; /* Offset: 0x0a0 (R/ ) TimersIntStatus */
__IM uint32_t TSEOI; /* Offset: 0x0a4 (R/ ) TimersEOI */
__IM uint32_t TSRIS; /* Offset: 0x0a8 (R/ ) TimersRawIntStatus */
} dw_timer_general_regs_t;
static inline uint32_t dw_timer_read_load(dw_timer_regs_t *timer_base)
{
return (timer_base->TLC);
}
static inline void dw_timer_write_load(dw_timer_regs_t *timer_base, uint32_t value)
{
timer_base->TLC = value;
}
static inline uint32_t dw_timer_get_current(dw_timer_regs_t *timer_base)
{
return (timer_base->TCV);
}
static inline void dw_timer_set_enable(dw_timer_regs_t *timer_base)
{
timer_base->TCR |= (DW_TIMER_CTL_ENABLE_SEL_EN);
}
static inline void dw_timer_set_disable(dw_timer_regs_t *timer_base)
{
timer_base->TCR &= ~(DW_TIMER_CTL_ENABLE_SEL_EN);
}
static inline uint32_t dw_timer_get_enable(dw_timer_regs_t *timer_base)
{
return (((timer_base->TCR) & DW_TIMER_CTL_ENABLE_SEL_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline void dw_timer_set_mode_free(dw_timer_regs_t *timer_base)
{
timer_base->TCR &= ~(DW_TIMER_CTL_MODE_SEL_EN);
}
static inline void dw_timer_set_mode_load(dw_timer_regs_t *timer_base)
{
timer_base->TCR |= (DW_TIMER_CTL_MODE_SEL_EN);
}
static inline uint32_t dw_timer_get_model(dw_timer_regs_t *timer_base)
{
return (((timer_base->TCR) & DW_TIMER_CTL_MODE_SEL_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline void dw_timer_set_mask(dw_timer_regs_t *timer_base)
{
timer_base->TCR |= (DW_TIMER_CTL_INT_MAKS_EN);
}
static inline void dw_timer_set_unmask(dw_timer_regs_t *timer_base)
{
timer_base->TCR &= ~(DW_TIMER_CTL_INT_MAKS_EN);
}
static inline uint32_t dw_timer_get_mask(dw_timer_regs_t *timer_base)
{
return (((timer_base->TCR) & DW_TIMER_CTL_INT_MAKS_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline void dw_timer_set_hardtrigger_en(dw_timer_regs_t *timer_base)
{
timer_base->TCR |= (DW_TIMER_CTL_HARD_TRIG_EN);
}
static inline void dw_timer_set_hardtrigger_dis(dw_timer_regs_t *timer_base)
{
timer_base->TCR &= ~(DW_TIMER_CTL_HARD_TRIG_EN);
}
static inline uint32_t dw_timer_get_hardtrigger(dw_timer_regs_t *timer_base)
{
return (((timer_base->TCR) & DW_TIMER_CTL_HARD_TRIG_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline uint32_t dw_timer_clear_irq(dw_timer_regs_t *timer_base)
{
return (((timer_base->TEOI) & DW_TIMER_EOI_REG_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline uint32_t dw_timer_get_int_status(dw_timer_regs_t *timer_base)
{
return (((timer_base->TIS) & DW_TIMER_INT_STATUS_EN) ? (uint32_t)1 : (uint32_t)0);
}
static inline uint32_t dw_timer_general_active_after_mask(dw_timer_general_regs_t *timer_base)
{
return ((timer_base->TSIS) & DW_TIMERS_INT_STATUS_EN);
}
static inline uint32_t dw_timer_general_clear_irq(dw_timer_general_regs_t *timer_base)
{
return ((timer_base->TSEOI) & DW_TIMERS_EOI_REG_EN);
}
static inline uint32_t dw_timer_general_active_prior_mask(dw_timer_general_regs_t *timer_base)
{
return ((timer_base->TSRIS) & DW_TIMERS_RAW_INT_STA_EN);
}
#ifdef __cplusplus
}
#endif
#endif /* _DW_TIMER_LL_H_ */
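
A hedged sketch of how the inline accessors above chain together to start a periodic (user-defined count) timer. DW_TIMER0_BASE and TIMER_FREQ_HZ are placeholders; the real base address and input clock come from the SoC's soc.h.

/* hedged sketch built on dw_timer_ll.h; base/clock values are placeholders */
#include "dw_timer_ll.h"

#define DW_TIMER0_BASE 0x10011000UL   /* placeholder, see soc.h */
#define TIMER_FREQ_HZ  25000000UL     /* placeholder input clock */

static void timer0_start_periodic(uint32_t period_us)
{
    dw_timer_regs_t *tmr = (dw_timer_regs_t *)DW_TIMER0_BASE;

    dw_timer_set_disable(tmr);                    /* stop while reprogramming */
    dw_timer_write_load(tmr, (uint32_t)((uint64_t)TIMER_FREQ_HZ * period_us / 1000000U));
    dw_timer_set_mode_load(tmr);                  /* user-defined count mode */
    dw_timer_set_unmask(tmr);                     /* let the interrupt through */
    dw_timer_set_enable(tmr);
}

static void timer0_irq_handler(void)
{
    dw_timer_regs_t *tmr = (dw_timer_regs_t *)DW_TIMER0_BASE;
    if (dw_timer_get_int_status(tmr))
        (void)dw_timer_clear_irq(tmr);            /* reading TEOI clears the IRQ */
}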

View File

@@ -0,0 +1,429 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file dw_uart_ll.h
* @brief header file for uart ll driver
* @version V1.0
* @date 18. December 2024
******************************************************************************/
#ifndef _DW_UART_LL_H_
#define _DW_UART_LL_H_
#include <stdio.h>
#include <csi_config.h>
#include <soc.h>
#include <csi_core.h>
#ifdef __cplusplus
extern "C" {
#endif
#if CONFIG_XIP
#define ATTRIBUTE_DATA __attribute__((section(".ram.code")))
#else
#define ATTRIBUTE_DATA
#endif
/*! IER, offset: 0x4 */
#define DW_UART_IER_ERBFI_Pos (0U)
#define DW_UART_IER_ERBFI_Msk (0x1U << DW_UART_IER_ERBFI_Pos)
#define DW_UART_IER_ERBFI_EN DW_UART_IER_ERBFI_Msk
#define DW_UART_IER_ETBEI_Pos (1U)
#define DW_UART_IER_ETBEI_Msk (0x1U << DW_UART_IER_ETBEI_Pos)
#define DW_UART_IER_ETBEI_EN DW_UART_IER_ETBEI_Msk
#define DW_UART_IER_ELSI_Pos (2U)
#define DW_UART_IER_ELSI_Msk (0x1U << DW_UART_IER_ELSI_Pos)
#define DW_UART_IER_ELSI_EN DW_UART_IER_ELSI_Msk
#define DW_UART_IER_EDSSI_Pos (3U)
#define DW_UART_IER_EDSSI_Msk (0x1U << DW_UART_IER_EDSSI_Pos)
#define DW_UART_IER_EDSSI_EN DW_UART_IER_EDSSI_Msk
/*! IIR, offset: 0x8 */
#define DW_UART_IIR_IID_Pos (0U)
#define DW_UART_IIR_IID_Msk (0xFU << DW_UART_IIR_IID_Pos)
#define DW_UART_IIR_IID_MODEM_STATUS (0x0U)
#define DW_UART_IIR_IID_NO_INTERRUPT (0x1U)
#define DW_UART_IIR_IID_THR_EMPTY (0x2U)
#define DW_UART_IIR_IID_RECV_DATA_AVAIL (0x4U)
#define DW_UART_IIR_IID_RECV_LINE_STATUS (0x6U)
#define DW_UART_IIR_IID_BUSY_DETECT (0x7U)
#define DW_UART_IIR_IID_CHARACTER_TIMEOUT (0xCU)
#define DW_UART_IIR_FIFOSE_Pos (6U)
#define DW_UART_IIR_FIFOSE_Msk (0x3U << DW_UART_IIR_FIFOSE_Pos)
#define DW_UART_IIR_FIFOSE_EN DW_UART_IIR_FIFOSE_Msk
/*! FCR, offset: 0x8 */
#define DW_UART_FCR_FIFOE_Pos (0U)
#define DW_UART_FCR_FIFOE_Msk (0x1U << DW_UART_FCR_FIFOE_Pos)
#define DW_UART_FCR_FIFOE_EN DW_UART_FCR_FIFOE_Msk
#define DW_UART_FCR_RFIFOR_Pos (1U)
#define DW_UART_FCR_RFIFOR_Msk (0x1U << DW_UART_FCR_RFIFOR_Pos)
#define DW_UART_FCR_RFIFOR_RESET DW_UART_FCR_RFIFOR_Msk
#define DW_UART_FCR_XFIFOR_Pos (2U)
#define DW_UART_FCR_XFIFOR_Msk (0x1U << DW_UART_FCR_XFIFOR_Pos)
#define DW_UART_FCR_XFIFOR_RESET DW_UART_FCR_XFIFOR_Msk
#define DW_UART_FCR_TET_Pos (4U)
#define DW_UART_FCR_TET_Msk (0x3U << DW_UART_FCR_TET_Pos)
#define DW_UART_FCR_TET_FIFO_EMTPY (0x0U)
#define DW_UART_FCR_TET_FIFO_2_CHAR (0x1U << DW_UART_FCR_TET_Pos)
#define DW_UART_FCR_TET_FIFO_1_4_FULL (0x2U << DW_UART_FCR_TET_Pos)
#define DW_UART_FCR_TET_FIFO_1_2_FULL (0x3U << DW_UART_FCR_TET_Pos)
#define DW_UART_FCR_RT_Pos (6U)
#define DW_UART_FCR_RT_Msk (0x3U << DW_UART_FCR_RT_Pos)
#define DW_UART_FCR_RT_FIFO_1_CHAR (0x0U)
#define DW_UART_FCR_RT_FIFO_1_4_FULL (0x1U << DW_UART_FCR_RT_Pos)
#define DW_UART_FCR_RT_FIFO_1_2_FULL (0x2U << DW_UART_FCR_RT_Pos)
#define DW_UART_FCR_RT_FIFO_2_LESS_FULL (0x3U << DW_UART_FCR_RT_Pos)
/*! LCR, offset: 0xC */
#define DW_UART_LCR_DLS_Pos (0U)
#define DW_UART_LCR_DLS_Msk (0x3U << DW_UART_LCR_DLS_Pos)
#define DW_UART_LCR_DLS_5_BITS (0x0U)
#define DW_UART_LCR_DLS_6_BITS (0x1U << DW_UART_LCR_DLS_Pos)
#define DW_UART_LCR_DLS_7_BITS (0x2U << DW_UART_LCR_DLS_Pos)
#define DW_UART_LCR_DLS_8_BITS (0x3U << DW_UART_LCR_DLS_Pos)
#define DW_UART_LCR_STOP_Pos (2U)
#define DW_UART_LCR_STOP_Msk (0x1U << DW_UART_LCR_STOP_Pos)
#define DW_UART_LCR_STOP_1_BIT (0x0U)
#define DW_UART_LCR_STOP_2_BIT (0x1U << DW_UART_LCR_STOP_Pos)
#define DW_UART_LCR_PEN_Pos (3U)
#define DW_UART_LCR_PEN_Msk (0x1U << DW_UART_LCR_PEN_Pos)
#define DW_UART_LCR_PEN_EN DW_UART_LCR_PEN_Msk
#define DW_UART_LCR_EPS_Pos (4U)
#define DW_UART_LCR_EPS_Msk (0x1U << DW_UART_LCR_EPS_Pos)
#define DW_UART_LCR_EPS_EN DW_UART_LCR_EPS_Msk
#define DW_UART_LCR_BC_Pos (6U)
#define DW_UART_LCR_BC_Msk (0x1U << DW_UART_LCR_BC_Pos)
#define DW_UART_LCR_BC_EN DW_UART_LCR_BC_Msk
#define DW_UART_LCR_DLAB_Pos (7U)
#define DW_UART_LCR_DLAB_Msk (0x1U << DW_UART_LCR_DLAB_Pos)
#define DW_UART_LCR_DLAB_EN DW_UART_LCR_DLAB_Msk
/*! MCR, offset: 0x10 */
#define DW_UART_MCR_RTS_Pos (1U)
#define DW_UART_MCR_RTS_Msk (0x1U << DW_UART_MCR_RTS_Pos)
#define DW_UART_MCR_RTS_EN DW_UART_MCR_RTS_Msk
#define DW_UART_MCR_LB_Pos (4U)
#define DW_UART_MCR_LB_Msk (0x1U << DW_UART_MCR_LB_Pos)
#define DW_UART_MCR_LB_EN DW_UART_MCR_LB_Msk
#define DW_UART_MCR_AFCE_Pos (5U)
#define DW_UART_MCR_AFCE_Msk (0x1U << DW_UART_MCR_AFCE_Pos)
#define DW_UART_MCR_AFCE_EN DW_UART_MCR_AFCE_Msk
/*! LSR, offset: 0x14 */
#define DW_UART_LSR_DR_Pos (0U)
#define DW_UART_LSR_DR_Msk (0x1U << DW_UART_LSR_DR_Pos)
#define DW_UART_LSR_DR_READY DW_UART_LSR_DR_Msk
#define DW_UART_LSR_OE_Pos (1U)
#define DW_UART_LSR_OE_Msk (0x1U << DW_UART_LSR_OE_Pos)
#define DW_UART_LSR_OE_ERROR DW_UART_LSR_OE_Msk
#define DW_UART_LSR_PE_Pos (2U)
#define DW_UART_LSR_PE_Msk (0x1U << DW_UART_LSR_PE_Pos)
#define DW_UART_LSR_PE_ERROR DW_UART_LSR_PE_Msk
#define DW_UART_LSR_FE_Pos (3U)
#define DW_UART_LSR_FE_Msk (0x1U << DW_UART_LSR_FE_Pos)
#define DW_UART_LSR_FE_ERROR DW_UART_LSR_FE_Msk
#define DW_UART_LSR_BI_Pos (4U)
#define DW_UART_LSR_BI_Msk (0x1U << DW_UART_LSR_BI_Pos)
#define DW_UART_LSR_BI_SET DW_UART_LSR_BI_Msk
#define DW_UART_LSR_THRE_Pos (5U)
#define DW_UART_LSR_THRE_Msk (0x1U << DW_UART_LSR_THRE_Pos)
#define DW_UART_LSR_THRE_SET DW_UART_LSR_THRE_Msk
#define DW_UART_LSR_TEMT_Pos (6U)
#define DW_UART_LSR_TEMT_Msk (0x1U << DW_UART_LSR_TEMT_Pos)
#define DW_UART_LSR_TEMT_SET DW_UART_LSR_TEMT_Msk
#define DW_UART_LSR_RFE_Pos (7U)
#define DW_UART_LSR_RFE_Msk (0x1U << DW_UART_LSR_RFE_Pos)
#define DW_UART_LSR_RFE_ERROR DW_UART_LSR_RFE_Msk
/*! MSR, offset: 0x18 */
#define DW_UART_MSR_DCTS_Pos (0U)
#define DW_UART_MSR_DCTS_Msk (0x1U << DW_UART_MSR_DCTS_Pos)
#define DW_UART_MSR_DCTS_CHANGE DW_UART_MSR_DCTS_Msk
#define DW_UART_MSR_DDSR_Pos (1U)
#define DW_UART_MSR_DDSR_Msk (0x1U << DW_UART_MSR_DDSR_Pos)
#define DW_UART_MSR_DDSR_CHANGE DW_UART_MSR_DDSR_Msk
#define DW_UART_MSR_TERI_Pos (2U)
#define DW_UART_MSR_TERI_Msk (0x1U << DW_UART_MSR_TERI_Pos)
#define DW_UART_MSR_TERI_CHANGE DW_UART_MSR_TERI_Msk
#define DW_UART_MSR_DDCD_Pos (3U)
#define DW_UART_MSR_DDCD_Msk (0x1U << DW_UART_MSR_DDCD_Pos)
#define DW_UART_MSR_DDCD_CHANGE DW_UART_MSR_DDCD_Msk
#define DW_UART_MSR_CTS_Pos (4U)
#define DW_UART_MSR_CTS_Msk (0x1U << DW_UART_MSR_CTS_Pos)
#define DW_UART_MSR_CTS_ASSERTED DW_UART_MSR_CTS_Msk
#define DW_UART_MSR_DSR_Pos (5U)
#define DW_UART_MSR_DSR_Msk (0x1U << DW_UART_MSR_DSR_Pos)
#define DW_UART_MSR_DSR_ASSERTED DW_UART_MSR_DSR_Msk
#define DW_UART_MSR_RI_Pos (6U)
#define DW_UART_MSR_RI_Msk (0x1U << DW_UART_MSR_RI_Pos)
#define DW_UART_MSR_RI_ASSERTED DW_UART_MSR_RI_Msk
#define DW_UART_MSR_DCD_Pos (7U)
#define DW_UART_MSR_DCD_Msk (0x1U << DW_UART_MSR_DCD_Pos)
#define DW_UART_MSR_DCD_ASSERTED DW_UART_MSR_DCD_Msk
/*! SCR, offset: 0x1C */
#define DW_UART_SCR_SCRATCHPAD_Pos (0U)
#define DW_UART_SCR_SCRATCHPAD_Msk (0xFFU << DW_UART_SCR_SCRATCHPAD_Pos)
/*! USR, offset: 0x7C */
#define DW_UART_USR_BUSY_Pos (0U)
#define DW_UART_USR_BUSY_Msk (0x1U << DW_UART_USR_BUSY_Pos)
#define DW_UART_USR_BUSY_SET DW_UART_USR_BUSY_Msk
#define DW_UART_USR_TFNF_Pos (1U)
#define DW_UART_USR_TFNF_Msk (0x1U << DW_UART_USR_TFNF_Pos)
#define DW_UART_USR_TFNF_SET DW_UART_USR_TFNF_Msk
#define DW_UART_USR_TFE_Pos (2U)
#define DW_UART_USR_TFE_Msk (0x1U << DW_UART_USR_TFE_Pos)
#define DW_UART_USR_TFE_SET DW_UART_USR_TFE_Msk
#define DW_UART_USR_RFNE_Pos (3U)
#define DW_UART_USR_RFNE_Msk (0x1U << DW_UART_USR_RFNE_Pos)
#define DW_UART_USR_RFNE_SET DW_UART_USR_RFNE_Msk
#define DW_UART_USR_RFF_Pos (4U)
#define DW_UART_USR_RFF_Msk (0x1U << DW_UART_USR_RFF_Pos)
#define DW_UART_USR_RFF_SET DW_UART_USR_RFF_Msk
/*! TFL, offset: 0x80 */
#define DW_UART_TFL_TFIFOL_Pos (0U)
#define DW_UART_TFL_TFIFOL_Msk (0x1FU << DW_UART_TFL_TFIFOL_Pos)
#define DW_UART_TFL_TFIFOL(n) ((uint32_t)(n) << DW_UART_TFL_TFIFOL_Pos)
/*! RFL, offset: 0x84 */
#define DW_UART_RFL_RFIFOL_Pos (0U)
#define DW_UART_RFL_RFIFOL_Msk (0x1FU << DW_UART_RFL_RFIFOL_Pos)
#define DW_UART_RFL_RFIFOL(n) ((uint32_t)(n) << DW_UART_RFL_RFIFOL_Pos)
/*! HTX, offset: 0xA4 */
#define DW_UART_HTX_HALTTX_Pos (0U)
#define DW_UART_HTX_HALTTX_Msk (0x1U << DW_UART_HTX_HALTTX_Pos)
#define DW_UART_HTX_HALTTX_EN DW_UART_HTX_HALTTX_Msk
#define DW_UART_HTX_RX_ETB_FUNC_Pos (6U)
#define DW_UART_HTX_RX_ETB_FUNC_Msk (0x1U << DW_UART_HTX_RX_ETB_FUNC_Pos)
#define DW_UART_HTX_RX_ETB_FUNC_EN DW_UART_HTX_RX_ETB_FUNC_Msk
#define DW_UART_HTX_TX_ETB_FUNC_Pos (7U)
#define DW_UART_HTX_TX_ETB_FUNC_Msk (0x1U << DW_UART_HTX_TX_ETB_FUNC_Pos)
#define DW_UART_HTX_TX_ETB_FUNC_EN DW_UART_HTX_TX_ETB_FUNC_Msk
/*! DMASA, offset: 0xA8 */
#define DW_UART_DMASA_DMASACK_Pos (0U)
#define DW_UART_DMASA_DMASACK_Msk (0x1U << DW_UART_DMASA_DMASACK_Pos)
#define DW_UART_DMASA_DMASACK_SET DW_UART_DMASA_DMASACK_Msk
/* FIFO CONFIG */
#define UART_FIFO_INIT_CONFIG (DW_UART_FCR_FIFOE_EN | DW_UART_FCR_RT_FIFO_1_2_FULL | DW_UART_FCR_RFIFOR_RESET | DW_UART_FCR_XFIFOR_RESET)
/*! UART_RATE, offset: 0x3FC */
#define DW_UART_SUPPORT_RATE 0x10102U
#define UART_BUSY_TIMEOUT 0x70000000U
typedef struct {
union {
__IM uint32_t RBR; /* Offset: 0x000 (R/ ) Receive buffer register */
__OM uint32_t THR; /* Offset: 0x000 ( /W) Transmission hold register */
__IOM uint32_t DLL; /* Offset: 0x000 (R/W) Clock frequency division low section register */
};
union {
__IOM uint32_t DLH; /* Offset: 0x004 (R/W) Clock frequency division high section register */
__IOM uint32_t IER; /* Offset: 0x004 (R/W) Interrupt enable register */
};
union {
__IM uint32_t IIR; /* Offset: 0x008 (R/ ) Interrupt identification register */
__OM uint32_t FCR; /* Offset: 0x008 ( /W) FIFO control register */
};
__IOM uint32_t LCR; /* Offset: 0x00C (R/W) Line control register */
__IOM uint32_t MCR; /* Offset: 0x010 (R/W) Modem control register */
__IM uint32_t LSR; /* Offset: 0x014 (R/ ) Line state register */
__IM uint32_t MSR; /* Offset: 0x018 (R/ ) Modem state register */
uint32_t RESERVED1[21];
__IM uint32_t USR; /* Offset: 0x07c (R/ ) UART state register */
} dw_uart_regs_t;
static inline void dw_uart_enable_recv_irq(dw_uart_regs_t *uart_base)
{
uart_base->IER |= (DW_UART_IER_ERBFI_EN | DW_UART_IER_ELSI_EN);
}
static inline void dw_uart_disable_recv_irq(dw_uart_regs_t *uart_base)
{
uart_base->IER &= ~(DW_UART_IER_ERBFI_EN | DW_UART_IER_ELSI_EN);
}
static inline void dw_uart_enable_trans_irq(dw_uart_regs_t *uart_base)
{
uart_base->IER |= DW_UART_IER_ETBEI_EN;
}
static inline void dw_uart_disable_trans_irq(dw_uart_regs_t *uart_base)
{
uart_base->IER &= ~(DW_UART_IER_ETBEI_EN);
}
static inline void dw_uart_fifo_init(dw_uart_regs_t *uart_base)
{
/* FIFO enable */
uart_base->FCR = UART_FIFO_INIT_CONFIG;
}
static inline void dw_uart_fifo_enable(dw_uart_regs_t *uart_base)
{
uart_base->FCR |= DW_UART_FCR_FIFOE_EN;
}
static inline void dw_uart_fifo_disable(dw_uart_regs_t *uart_base)
{
uart_base->FCR &= ~(DW_UART_FCR_FIFOE_EN);
}
static inline uint32_t dw_uart_putready(dw_uart_regs_t *uart_base)
{
uint32_t status = 0U, ret = 0U;
status = uart_base->LSR & DW_UART_LSR_THRE_SET;
if (status != 0U) {
ret = 1U;
}
return ret;
}
static inline uint32_t dw_uart_getready(dw_uart_regs_t *uart_base)
{
uint32_t status = 0U, ret = 0U;
status = uart_base->LSR & DW_UART_LSR_DR_READY;
if (status != 0U) {
ret = 1U;
}
return ret;
}
static inline uint32_t dw_uart_get_line_status(dw_uart_regs_t *uart_base)
{
return uart_base->LSR;
}
static inline void dw_uart_config_stop_bits_1(dw_uart_regs_t *uart_base)
{
uart_base->LCR &= ~(DW_UART_LCR_STOP_Msk);
}
static inline void dw_uart_config_stop_bits_2(dw_uart_regs_t *uart_base)
{
uart_base->LCR |= DW_UART_LCR_STOP_2_BIT;
}
static inline void dw_uart_putchar(dw_uart_regs_t *uart_base, uint8_t ch)
{
uart_base->THR = ch;
}
static inline uint8_t dw_uart_getchar(dw_uart_regs_t *uart_base)
{
return (uint8_t)(uart_base->RBR);
}
static inline uint32_t dw_uart_get_intr_en_status(dw_uart_regs_t *uart_base)
{
return uart_base->IER;
}
static inline void dw_uart_set_intr_en_status(dw_uart_regs_t *uart_base, uint32_t status)
{
uart_base->IER = status;
}
static inline void dw_uart_set_fcr_reg(dw_uart_regs_t *uart_base, uint32_t value)
{
uart_base->FCR = value;
}
static inline void dw_uart_enable_auto_flow_control(dw_uart_regs_t *uart_base)
{
uart_base->MCR |= DW_UART_MCR_AFCE_EN;
uart_base->MCR |= DW_UART_MCR_RTS_EN;
}
static inline void dw_uart_disable_auto_flow_control(dw_uart_regs_t *uart_base)
{
uart_base->MCR &= ~DW_UART_MCR_AFCE_EN;
uart_base->MCR &= ~DW_UART_MCR_RTS_EN;
}
int32_t dw_uart_wait_timeout(dw_uart_regs_t *uart_base);
int32_t dw_uart_wait_idle(dw_uart_regs_t *uart_base);
int32_t dw_uart_config_baudrate(dw_uart_regs_t *uart_base, uint32_t baud, uint32_t uart_freq);
int32_t dw_uart_config_stop_bits(dw_uart_regs_t *uart_base, uint32_t stop_bits);
int32_t dw_uart_config_parity_none(dw_uart_regs_t *uart_base);
int32_t dw_uart_config_parity_odd(dw_uart_regs_t *uart_base);
int32_t dw_uart_config_parity_even(dw_uart_regs_t *uart_base);
int32_t dw_uart_config_data_bits(dw_uart_regs_t *uart_base, uint32_t data_bits);
#ifdef __cplusplus
}
#endif
#endif /* _DW_UART_LL_H_ */
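
The inline helpers above are enough for a simple polled console. A minimal sketch, assuming DW_UART0_BASE from soc.h and a UART already configured through the CSI driver; this is illustrative only, not part of the BSP:

#include <stdint.h>
#include <soc.h>
#include "dw_uart_ll.h"

/* Polled echo: busy-wait on LSR.DR for a received byte, then on LSR.THRE
 * before writing it back out. */
static void uart0_poll_echo(void)
{
    dw_uart_regs_t *uart = (dw_uart_regs_t *)DW_UART0_BASE;

    for (;;) {
        if (dw_uart_getready(uart)) {
            uint8_t ch = dw_uart_getchar(uart);
            while (!dw_uart_putready(uart));
            dw_uart_putchar(uart, ch);
        }
    }
}

An interrupt-driven receiver would instead call dw_uart_enable_recv_irq() and drain the FIFO in the ISR.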

View File

@ -0,0 +1,322 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _SOC_H_
#define _SOC_H_
#include <stdint.h>
#include <csi_core.h>
#include <sys_clk.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef EHS_VALUE
#define EHS_VALUE 20000000U
#endif
#ifndef ELS_VALUE
#define ELS_VALUE 32768U
#endif
#ifndef IHS_VALUE
#define IHS_VALUE 20000000U
#endif
#ifndef ILS_VALUE
#define ILS_VALUE 32768U
#endif
typedef enum {
DW_UART0_RX_DMAN = 0U,
DW_UART0_TX_DMAN = 1U,
DW_UART1_RX_DMAN = 2U,
DW_UART1_TX_DMAN = 3U,
MEMORY_DMAN = 4U,
} dman_type_t;
typedef enum {
PA0 = 0U,
PA1,
PA2,
PA3,
PA4,
PA5,
PA6,
PA7,
PA8,
PA9,
PA10,
PA11,
PA12,
PA13,
PA14,
PA15,
PA16,
PA17,
PA18,
PA19,
PA20,
PA21,
PA22,
PA23,
PA24,
PA25,
PA26,
PA27,
PA28,
PA29,
PA30,
PA31,
PIN_END = 0xFFFFFFFFU
} pin_name_t;
typedef enum {
PA0_I2S0_SCLK = 0U,
PA0_SPI0_CS = 1U,
PA0_UART0_RX = 2U,
PA0_PWM_CH0 = 3U,
PA1_I2S0_WSCLK = 0U,
PA1_SPI0_SCK = 1U,
PA1_UART0_TX = 2U,
PA1_PWM_CH1 = 3U,
PA2_I2S1_SCLK = 0U,
PA2_IIC0_SCL = 1U,
PA2_SPI1_CS = 2U,
PA2_PWM_CH2 = 3U,
PA2_ADC_A0 = 7U,
PA3_I2S1_WSCLK = 0U,
PA3_IIC0_SDA = 1U,
PA3_SPI1_SCK = 2U,
PA3_PWM_CH3 = 3U,
PA3_ADC_A1 = 8U,
PA4_I2S0_SDA = 0U,
PA4_SPI0_MOSI = 1U,
PA4_UART1_CTS = 2U,
PA4_PWM_CH4 = 3U,
PA4_ADC_A2 = 9U,
PA5_I2S1_SDA = 0U,
PA5_SPI0_MISO = 1U,
PA5_UART1_RTS = 2U,
PA5_PWM_CH5 = 3U,
PA5_ADC_A3 = 10U,
PA6_I2S0_SCLK = 0U,
PA6_UART0_TX = 1U,
PA6_SPI1_MOSI = 2U,
PA6_PWM_CH6 = 3U,
PA6_ADC_A4 = 11U,
PA7_I2S0_WSCLK = 0U,
PA7_PWMR_OUT = 1U,
PA7_SPI1_MISO = 2U,
PA7_PWM_CH7 = 3U,
PA7_ADC_A5 = 12U,
PA8_I2S0_SDA = 0U,
PA8_IIC0_SCL = 1U,
PA8_UART0_RX = 2U,
PA8_PWM_CH8 = 3U,
PA8_ADC_A6 = 13U,
PA9_I2S1_SDA = 0U,
PA9_IIC0_SDA = 1U,
PA9_PWMR_OUT = 2U,
PA9_PWM_CH9 = 3U,
PA9_ADC_A7 = 14U,
PA10_I2S0_MCLK = 0U,
PA10_UART0_TX = 1U,
PA10_SPI1_MOSI = 2U,
PA10_SPI1_MISO = 3U,
PA10_ADC_A8 = 15U,
PA15_IIC0_SCL = 0U,
PA15_SPI0_CS = 1U,
PA15_PWMR_OUT = 2U,
PA15_PWM_CH4 = 3U,
PA15_ADC_A9 = 20U,
PA16_IIC0_SDA = 0U,
PA16_SPI0_SCK = 1U,
PA16_UART1_TX = 2U,
PA16_PWM_CH5 = 3U,
PA16_ADC_A10 = 21U,
PA17_UART0_RX = 0U,
PA17_SPI0_MOSI = 1U,
PA17_I2S0_SCLK = 2U,
PA17_PWM_CH10 = 3U,
PA17_ADC_A11 = 22U,
PA18_UART0_TX = 0U,
PA18_SPI0_MISO = 1U,
PA18_I2S0_WSCLK = 2U,
PA18_PWM_CH11 = 3U,
PA18_ADC_A12 = 23U,
PA19_JTAG_TMS = 0U,
PA19_UART1_RX = 1U,
PA19_I2S1_SCLK = 2U,
PA19_IIC0_SCL = 3U,
PA19_ADC_A13 = 24U,
PA20_JTAG_TCK = 0U,
PA20_UART1_TX = 1U,
PA20_I2S1_WSCLK = 2U,
PA20_IIC0_SDA = 3U,
PA20_ADC_A14 = 25U,
PA21_UART0_CTS = 0U,
PA21_UART1_CTS = 1U,
PA21_I2S0_SDA = 2U,
PA21_IIC0_SCL = 3U,
PA21_ADC_A15 = 26U,
PA22_UART0_RTS = 0U,
PA22_UART1_RTS = 1U,
PA22_I2S1_SDA = 2U,
PA22_IIC0_SDA = 3U,
PA23_IIC0_SCL = 0U,
PA23_UART0_TX = 1U,
PA23_PWM_CH0 = 2U,
PA23_SPI0_CS = 3U,
PA24_IIC0_SDA = 0U,
PA24_UART0_RX = 1U,
PA24_PWM_CH1 = 2U,
PA24_SPI0_SCK = 3U,
PA25_PWMR_OUT = 0U,
PA25_UART0_CTS = 1U,
PA25_PWM_CH2 = 2U,
PA25_SPI0_MOSI = 3U,
PA26_I2S1_MCLK = 0U,
PA26_UART0_RTS = 1U,
PA26_PWM_CH3 = 2U,
PA26_SPI0_MISO = 3U,
PA27_I2S0_SCLK = 0U,
PA27_UART1_RX = 1U,
PA27_PWM_CH4 = 2U,
PA27_SPI1_CS = 3U,
PA28_I2S0_WSCLK = 0U,
PA28_UART1_TX = 1U,
PA28_PWM_CH5 = 2U,
PA28_SPI1_SCK = 3U,
PA29_I2S1_SCLK = 0U,
PA29_UART1_CTS = 1U,
PA29_PWM_CH6 = 2U,
PA29_SPI1_MOSI = 3U,
PA30_I2S1_WSCLK = 0U,
PA30_UART1_RTS = 1U,
PA30_PWM_CH7 = 2U,
PA30_SPI1_MISO = 3U,
PA31_I2S0_SDA = 0U,
PA31_PWMR_OUT = 1U,
PA31_PWM_CH8 = 2U,
PA31_UART0_TX = 3U,
PIN_FUNC_GPIO = 4U,
PIN_FUNC_END = 0xFFFFFFFFU
} pin_func_t;
#if CONFIG_INTC_CLIC_PLIC
#define CONFIG_IRQ_NUM (64U + PLIC_IRQ_OFFSET)
#else
#define CONFIG_IRQ_NUM 64U
#endif
///< AHB
#define SPIFLASH_BASE 0x18000000UL
#define SPIFLASH_SIZE 0x800000U
#define SRAM_BASE 0x20000000UL
#define SRAM_SIZE 0x20000U
#if CONFIG_CPU_XUANTIE_E9XX
typedef enum {
User_Software_IRQn = 0U, /* User software interrupt */
Supervisor_Software_IRQn = 1U, /* Supervisor software interrupt */
Machine_Software_IRQn = 3U, /* Machine software interrupt */
User_Timer_IRQn = 4U, /* User timer interrupt */
Supervisor_Timer_IRQn = 5U, /* Supervisor timer interrupt */
CORET_IRQn = 7U, /* Machine timer interrupt */
Machine_External_IRQn = 11U, /* Machine external interrupt */
DW_UART0_IRQn = 16U,
TIM0_IRQn = 18U, /* timer0 Interrupt */
TIM1_IRQn = 19U, /* timer1 Interrupt */
TIM2_IRQn = 20U, /* timer2 Interrupt */
TIM3_IRQn = 21U, /* timer3 Interrupt */
} irqn_type_t;
#define DW_UART0_BASE 0x40015000UL
#define DW_TIMER0_BASE 0x40011000UL
#define DW_TIMER0_SIZE 0x14U
#define DW_TIMER1_BASE (DW_TIMER0_BASE+DW_TIMER0_SIZE)
#define DW_TIMER1_SIZE DW_TIMER0_SIZE
#define DW_TIMER2_BASE 0x40011028UL
#define DW_TIMER2_SIZE 0x14U
#define DW_TIMER3_BASE (DW_TIMER2_BASE+DW_TIMER2_SIZE)
#define DW_TIMER3_SIZE DW_TIMER2_SIZE
#if CONFIG_SUPPORT_NMI_DEMO
/* The fake IRQ is not a functional interrupt; it exists only for NMI testing on the SmartL FPGA, where TIMER4 is wired to the SoC's NMI exception input. */
#define FAKE_IRQ_TIMER4 (-1)
#define DW_TIMER4_BASE (0x40021000UL)
#endif
#else
/* ------------------------- Interrupt Number Definition ------------------------ */
#define Supervisor_Software_IRQn (1U)
#define Machine_Software_IRQn (3U)
#define Supervisor_Timer_IRQn (5U)
#define CORET_IRQn (7U)
#define Supervisor_External_IRQn (9U)
#define Machine_External_IRQn (11U)
#define L1_CACHE_ECC_IRQn (16U)
#if CONFIG_BOARD_XIAOHUI_EVB
#if CONFIG_INTC_CLIC_PLIC
typedef enum IRQn {
L2_CACHE_ECC_IRQn = 1U + PLIC_IRQ_OFFSET, /* l2 cache ecc Interrupt */
DW_UART0_IRQn = 20U + PLIC_IRQ_OFFSET, /* uart Interrupt */
TIM0_IRQn = 25U, /* timer0 Interrupt for CLIC*/
TIM1_IRQn = 26U, /* timer1 Interrupt for CLIC*/
TIM2_IRQn = 27U + PLIC_IRQ_OFFSET, /* timer2 Interrupt */
TIM3_IRQn = 28U + PLIC_IRQ_OFFSET, /* timer3 Interrupt */
END_IRQn = 1024U + PLIC_IRQ_OFFSET
} irqn_type_t;
#else
/* external IRQ numbers; 1-16 are reserved for the CPU core */
typedef enum IRQn {
L2_CACHE_ECC_IRQn = 1U, /* l2 cache ecc Interrupt */
DW_UART0_IRQn = 20U, /* uart Interrupt */
TIM0_IRQn = 25U, /* timer0 Interrupt */
TIM1_IRQn = 26U, /* timer1 Interrupt */
TIM2_IRQn = 27U, /* timer2 Interrupt */
TIM3_IRQn = 28U, /* timer3 Interrupt */
} irqn_type_t;
#endif /* CONFIG_INTC_CLIC_PLIC */
#define DW_UART0_BASE (0x1900d000UL)
#define DW_TIMER0_BASE (0x19001000UL)
#define DW_TIMER1_BASE (0x19001014UL)
#define DW_TIMER2_BASE (0x19001028UL)
#define DW_TIMER3_BASE (0x1900103CUL)
#else
#error "Not support soc!!!"
#endif /* CONFIG_BOARD_XIAOHUI_EVB */
#endif /* CONFIG_CPU_XUANTIE_E9XX */
#ifdef __cplusplus
}
#endif
#endif /* _SOC_H_ */
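
Each pin_func_t value above is a per-pin alternate-function code rather than an index into a global mux table. A minimal sketch routing UART0 onto PA17/PA18, assuming the generic csi_pin_set_mux() from the CSI pin driver is available on this SoC:

#include <drv/pin.h>
#include <soc.h>

/* Hypothetical board hook: select the UART0 alternate function on PA17/PA18. */
static void board_uart0_pinmux(void)
{
    csi_pin_set_mux(PA17, PA17_UART0_RX);
    csi_pin_set_mux(PA18, PA18_UART0_TX);
}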

View File

@ -0,0 +1,117 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file sys_clk.h
* @brief header file for setting system frequency.
* @version V1.0
* @date 9. April 2020
******************************************************************************/
#ifndef _SYS_CLK_H_
#define _SYS_CLK_H_
#include <stdint.h>
#include <soc.h>
#include <drv/clk.h>
#include <drv/porting.h>
#ifdef __cplusplus
extern "C" {
#endif
#define PMU_REG_BASE (wj_pmu_reg_t *)WJ_PMU_BASE
typedef enum {
IHS_CLK = 0U, /* internal high speed clock */
EHS_CLK, /* external high speed clock */
ILS_CLK, /* internal low speed clock */
ELS_CLK, /* external low speed clock */
PLL_CLK /* PLL clock */
} clk_src_t;
typedef enum {
CPU_24MHZ = 24000000U
} sys_freq_t;
/* pllclkout : ( pllclkin / 2)*( FN + Frac/4096 ) */
typedef struct {
uint32_t pll_is_used; /* pll is used */
uint32_t pll_source; /* select pll input source clock */
uint32_t pll_src_clk_divider; /* ratio between pll_srcclk clock and pll_clkin clock */
uint32_t fn; /* integer value of frequency division */
uint32_t frac; /* decimal value of frequency division */
} pll_config_t;
typedef struct {
uint32_t system_clk; /* system clock */
pll_config_t pll_config; /* pll config struct */
uint32_t sys_clk_source; /* select sysclk source clock */
uint32_t rtc_clk_source; /* select rtcclk source clock */
uint32_t mclk_divider; /* ratio between fs_mclk clock and mclk clock */
uint32_t apb0_clk_divider; /* ratio between mclk clock and apb0 clock */
uint32_t apb1_clk_divider; /* ratio between mclk clock and apb1 clock */
} system_clk_config_t;
typedef enum {
CLK_DIV1 = 0U,
} apb_div_t;
typedef enum {
PLL_FN_18 = 0U,
} pll_fn_t;
typedef enum {
UART0_CLK,
} clk_module_t;
/**
\brief Set the system clock according to the parameter
\param[in] config system clock config.
\return error code
*/
csi_error_t soc_sysclk_config(system_clk_config_t *config);
/**
\brief Reset the IIC controller
\param[in] idx IIC index
\return None
*/
void soc_reset_iic(uint32_t idx);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CLK_H_ */
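
For reference, a sketch of how these structures feed soc_sysclk_config(); the divider values are illustrative and not board-verified:

#include <sys_clk.h>

static csi_error_t board_clock_init(void)
{
    system_clk_config_t cfg = {
        .system_clk       = CPU_24MHZ,
        .pll_config       = { .pll_is_used = 0U }, /* bypass the PLL */
        .sys_clk_source   = EHS_CLK,
        .rtc_clk_source   = ELS_CLK,
        .mclk_divider     = 1U,
        .apb0_clk_divider = CLK_DIV1,
        .apb1_clk_divider = CLK_DIV1,
    };
    return soc_sysclk_config(&cfg);
}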

View File

@ -0,0 +1,15 @@
# for module compiling
import os
Import('RTT_ROOT')
from building import *
cwd = GetCurrentDir()
objs = []
list = os.listdir(cwd)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')

View File

@ -0,0 +1,16 @@
# RT-Thread building script for bridge
import os
from building import *
Import('rtconfig')
cwd = GetCurrentDir()
group = []
list = os.listdir(cwd)
# add startup code files
if rtconfig.CPU in list:
group = group + SConscript(os.path.join(rtconfig.CPU, 'SConscript'))
Return('group')

View File

@ -0,0 +1,128 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler
j Default_Handler
j Default_Handler
j Default_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
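/* mtvec.MODE = 1 selects vectored interrupt mode; this is why __Vectors above is 64-byte aligned (".align 6") */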
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#else
la sp, g_top_irqstack
#endif
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@ -0,0 +1,274 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Invalid configuration: an RTOS kernel and CONFIG_KERNEL_NONE (baremetal) are both selected!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* open MAEE for thead-mmu extension */
status |= (1 << 21);
__set_MXSTATUS(status);
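/* non-leaf PTE: valid bit (0x1) plus the next-level table's physical page number in bits [10+] */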
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
/* a0 = address of this auipc + 14, i.e. the first instruction after the mret below, so mret "returns" there in S-mode */
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared by reset on the FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
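/* 0x8f = L | A=TOR | X | W | R: lock entry 0 and grant R/W/X to the whole range below 0x90000000 */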
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
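/* claim and complete every source once so no stale pending claims survive into normal operation */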
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up caches, memory sections, PMP, the interrupt controller, and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
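
Because trap_c_callback is a plain function pointer, board code can hook a last-chance dump before the fatal spin. A hypothetical example (the hook names are illustrative):

extern void (*trap_c_callback)(void);

static void board_trap_dump(void)
{
    /* e.g. flush a log buffer or light an error LED before the while (1) spin */
}

void board_trap_hook_init(void)
{
    trap_c_callback = board_trap_dump;
}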

View File

@ -0,0 +1,412 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
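/* each frame slot is 8 bytes on RV64; offsets are written as doubled RV32 word offsets, e.g. (4+4) is slot 1 and (68+68) holds sepc */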
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,128 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler
j Default_Handler
j Default_Handler
j Default_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0,a1
csrw mtvec, a0
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#else
la sp, g_top_irqstack
#endif
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif

View File

@ -0,0 +1,274 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Invalid configuration: an RTOS kernel and CONFIG_KERNEL_NONE (baremetal) are both selected!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* open MAEE for thead-mmu extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
/* a0 = address of this auipc + 14, i.e. the first instruction after the mret below, so mret "returns" there in S-mode */
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared by reset on the FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up caches, memory sections, PMP, the interrupt controller, and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,412 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by other handlers. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,128 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler
j Default_Handler
j Default_Handler
j Default_Handler
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
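/* image header: two "CSKY" magic words, nine reserved words, then the reset entry
 * (presumably consumed by XuanTie boot/flash tooling) */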
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Clear MIE so no interrupt can fire before the first task starts. */
/* The bit is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
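/* mtvec bit 0 set selects vectored mode, so traps dispatch through the __Vectors jump table above */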
#ifdef CONFIG_KERNEL_NONE
la sp, g_top_mainstack
#else
la sp, g_top_irqstack
#endif
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK
g_top_mainstack:
#endif


@ -0,0 +1,274 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
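/* non-leaf PTEs: valid bit (0x1) plus the next-level table's PPN shifted into bit 10 */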
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
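/* flush stale TLB entries, then point satp at the root table's PPN and select Sv39 (RV64) or Sv32 (RV32) translation */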
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
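/* hand-built return path: mepc is set to auipc's pc + 14, presumably the address just past
 * the mret below given the exact (partly compressed) instruction encoding, hence the FIXME */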
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
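/* pmpaddr holds a physical address >> 2; cfg 0x8f = L | A=TOR | X | W | R,
 * a locked top-of-range entry granting RWX for addresses below 0x90000000 */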
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
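/* bring the PLIC to a known state: one fixed priority per source, pending and enable
 * bits cleared, thresholds at 0, stale claims drained, then the permission register opened */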
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling interrupts of all priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER (permission register, presumably opening PLIC access to lower privilege modes) */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core status and trap-handling CSRs.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable the T-Head ISA extension (THEADISAEE) and misaligned memory access (MM) */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
}


@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
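/* regs[] mirrors the frame built by the trap entry: x1, the trapped sp (saved via mscratch),
 * x3..x31, then mepc at [31] and mstatus at [32] */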
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -0,0 +1,412 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler


@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Clear MIE so no interrupt can fire before the first task starts. */
/* The bit is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
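/* secondary harts spin in secondary_cpu_entry until this flag reads 0xa55a */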
#endif


@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0, enabling interrupts of all priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER (permission register, presumably opening PLIC access to lower privilege modes) */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the core status and trap-handling CSRs.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable the T-Head ISA extension (THEADISAEE) and misaligned memory access (MM) */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}


@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}


@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
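/* t0/t1 were spilled to the interrupted stack above because they are clobbered next while locating this hart's IRQ stack */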
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler


@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Clear MIE so no interrupt can fire before the first task starts. */
/* The bit is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif


@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
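/* satp.PPN = root page-table physical address >> 12; Sv39 is used on RV64, Sv32 on RV32 */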
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
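/* assumption: pc + 14 is the address of the first instruction after the mret below, so mret resumes there in S-mode */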
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
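/* pmpaddr encodes address >> 2; cfg 0x8f = L | A=TOR | X | W | R, i.e. a locked RWX region covering 0x00000000-0x8FFFFFFF */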
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set the hart threshold to 0, enabling all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
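/* claim and complete every source once to drop any interrupts left pending from before reset */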
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER: permit S-mode access to the PLIC registers */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core status and vector-base registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable THEADISAEE (bit 22) and MM (unaligned-access enable, bit 15) in mxstatus */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
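/* exception code 9: environment call from S-mode */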
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
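/* 0x222 = SEIP (bit 9) | STIP (bit 5) | SSIP (bit 1): hand S-mode interrupts to S-mode */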
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE, CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
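/* frame layout written by the trap entry: regs[0] = x1, regs[1] = x2 (taken from mscratch), regs[2..30] = x3..x31, regs[31] = mepc, regs[32] = mstatus */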
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
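/* S-mode CORET (tick) interrupt: switch to the IRQ stack, save the caller-saved context plus sepc/sstatus, then call CORET_IRQHandler */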
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
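/* M-mode external interrupt entry: stash t0/t1, switch to this hart's IRQ stack, save the caller-saved context plus mepc/mstatus, then call do_irq */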
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
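/* assumption: this label lets the perf backtrace code recognize interrupt frames */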
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
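/* save all integer registers plus mepc/mstatus/mcause/mtval on the trap stack, then call exceptionHandler with a0 pointing at the saved frame */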
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
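/* vectored trap table: when mtvec.MODE = 1, interrupt cause n enters at entry n */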
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid taking interrupts before the first task starts. */
/* The bit is set again when a task's context is restored. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
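/* install the trap vector: mtvec = __Vectors | 1 selects vectored mode */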
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
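/* secondary harts spin here until the boot hart sets secondary_boot_flag to 0xa55a */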
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE (memory attribute extension) for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
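/* pmpaddr encodes address >> 2; cfg 0x8f = L | A=TOR | X | W | R, i.e. a locked RWX region covering 0x00000000-0x8FFFFFFF */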
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set the hart threshold to 0, enabling all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER: permit S-mode access to the PLIC registers */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core status and vector-base registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable THEADISAEE (bit 22) and MM (unaligned-access enable, bit 15) in mxstatus */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
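/* 0x222 = SEIP (bit 9) | STIP (bit 5) | SSIP (bit 1): hand S-mode interrupts to S-mode */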
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE, CBCFE & CBZE at lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
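/* frame layout written by the trap entry: regs[0] = x1, regs[1] = x2 (taken from mscratch), regs[2..30] = x3..x31, regs[31] = mepc, regs[32] = mstatus */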
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that simply dead-loops; it can be
 * overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when the first task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
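/* install the vector table: mtvec[1:0] = 1 selects vectored mode, so
   each interrupt enters through its own slot in __Vectors */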
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* open MAEE for thead-mmu extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
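/* Point mepc at the first instruction after the mret below (auipc reads
 * the current pc; +14 bytes skips the rest of this sequence, assuming a
 * compressed addi), so execution resumes right here in S-mode. */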
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
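/* pmpaddr holds a physical address >> 2; cfg 0x8f = L | A=TOR | X|W|R,
   i.e. a locked TOR region [0, 0x90000000) with full access */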
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
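/* write each source id to claim/complete so no interrupt is left
   stuck in the claimed state from before reset */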
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
 * Initialize ISA-extension state, caches, PMP, and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
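/* the frame is laid out in 4-byte slots; offsets are written as (n+n)
   because every slot widens to 8 bytes when __riscv_xlen == 64 */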
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
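/* trap frame: x1 and x3-x31 in register order, the trapped sp (read
 * back from mscratch) in the x2 slot at offset 4, then
 * mepc/mstatus/mcause/mtval at the end of the frame */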
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a
 * weak symbol that simply dead-loops; it can be overridden
 * by another handler of the same name. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when the first task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
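/* install the vector table: mtvec[1:0] = 1 selects vectored mode, so
   each interrupt enters through its own slot in __Vectors */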
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* open MAEE for thead-mmu extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
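/* Point mepc at the first instruction after the mret below (auipc reads
 * the current pc; +14 bytes skips the rest of this sequence, assuming a
 * compressed addi), so execution resumes right here in S-mode. */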
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
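/* pmpaddr holds a physical address >> 2; cfg 0x8f = L | A=TOR | X|W|R,
   i.e. a locked TOR region [0, 0x90000000) with full access */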
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
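/* write each source id to claim/complete so no interrupt is left
   stuck in the claimed state from before reset */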
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
 * Initialize ISA-extension state, caches, PMP, and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
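/* the frame is laid out in 4-byte slots; offsets are written as (n+n)
   because every slot widens to 8 bytes when __riscv_xlen == 64 */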
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
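/* the interrupted sp is parked in mscratch while the handler runs on the per-hart IRQ stack */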
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
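/* exceptionHandler() does not return: the default trap_c() dumps registers and spins forever */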
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that just dead-loops; it can be
* overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
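/* boot information block below: magic words, reserved fields and the reset entry (layout assumed to match the Xuantie boot ROM) */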
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* clear mie (disable all interrupt enables) and mip (clear pending interrupts) */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
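/* enter vectored mode: mtvec[1:0] = 1, so traps jump through __Vectors (64-byte aligned, see .align 6) */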
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
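/* identity-mapped page tables: page_table_l2 is the Sv39 root on RV64, page_table_l1 the Sv32 root on RV32 (see _mmu_init below) */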
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
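/* set mepc to the instruction just past the mret below (auipc + 14 bytes, encoding-dependent) so mret resumes there in S-mode */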
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
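/* 0x8f = L | A=TOR | X | W | R: locked TOR region covering 0x0 - 0x90000000 with full access */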
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER to allow S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core control and status registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower priviledge */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
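/* optional hook: when set, trap_c() calls it after dumping registers, before spinning */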
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
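/* the interrupted sp is parked in mscratch while the handler runs on the per-hart IRQ stack */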
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
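/* exceptionHandler() does not return: the default trap_c() dumps registers and spins forever */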
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
* is a weak symbol that just dead-loops; it can be
* overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
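/* boot information block below: magic words, reserved fields and the reset entry (layout assumed to match the Xuantie boot ROM) */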
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* clear mie (disable all interrupt enables) and mip (clear pending interrupts) */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
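/* enter vectored mode: mtvec[1:0] = 1, so traps jump through __Vectors (64-byte aligned, see .align 6) */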
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
lw a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
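/* identity-mapped page tables: page_table_l2 is the Sv39 root on RV64, page_table_l1 the Sv32 root on RV32 (see _mmu_init below) */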
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
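/* set mepc to the instruction just past the mret below (auipc + 14 bytes, encoding-dependent) so mret resumes there in S-mode */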
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA, so disable it first */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
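/* 0x8f = L | A=TOR | X | W | R: locked TOR region covering 0x0 - 0x90000000 with full access */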
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold 0, enable all interrupt */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER to allow S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core control and status registers.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower priviledge */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* l2 cache ecc interrupt register */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
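/* optional hook: when set, trap_c() calls it after dumping registers, before spinning */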
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
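/* exported label marking where do_irq returns; assumption: referenced by perf/backtrace code to recognize interrupt frames */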
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
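/* exceptions switch to their own trap stack; the faulting sp is kept in mscratch and saved into the register frame below */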
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
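/* no return path: exceptionHandler() dumps the saved frame and spins forever */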
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
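/* mtvec.MODE = 1 (vectored): interrupts jump to __Vectors + 4 * cause, exceptions to the base entry */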
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
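/* secondary harts spin here until the primary hart publishes 0xa55a in secondary_boot_flag */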
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,326 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
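/* non-leaf PTEs: V (bit 0) plus the next table's PPN shifted into bits 10+, chaining l2 -> l1 -> l0 */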
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
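/* satp holds the root table's physical page number; Sv39 translation is switched on just below */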
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
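/* assumption: the hard-coded +14 bytes points mepc just past the mret below (compressed encodings included),
   so mret resumes in S-mode on the next instruction; the magic offset is why this is flagged FIXME */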
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
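/* 0x8f = L | A=TOR | X | W | R: lock PMP entry 0 and grant RWX to the whole range below 0x90000000 */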
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
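/* writing each ID to the M/S claim registers completes any interrupt left claimed by earlier boot stages */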
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize caches, memory sections, PMP, interrupts and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix */
status &= ~(1ul << 0);
#endif /* __riscv_matrix || __riscv_xtheadmatrix */
__set_MXSTATUS(status);
#if __riscv_flen
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if __riscv_matrix || __riscv_xtheadmatrix
/* enable matrix ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_MS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
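/* 0x222 = SSIP | STIP | SEIP: S-mode software, timer and external interrupts are handled in S-mode */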
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,842 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, sepc
store_x t0, (68+68)(sp)
csrr t0, sstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, sepc
store_x t0, (68)(sp)
csrr t0, sstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw sepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw sstatus, t0
#endif
load_x t0, (68)(sp)
csrw sepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, sscratch
sret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
store_x t0, (0)(sp)
store_x t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
store_x s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#if __riscv_xlen == 64
addi sp, sp, -(76+76)
store_x t0, (4+4)(sp)
store_x t1, (8+8)(sp)
store_x t2, (12+12)(sp)
csrr t0, mepc
store_x t0, (68+68)(sp)
csrr t0, mstatus
store_x t0, (72+72)(sp)
store_x ra, (0 +0 )(sp)
store_x a0, (16+16)(sp)
store_x a1, (20+20)(sp)
store_x a2, (24+24)(sp)
store_x a3, (28+28)(sp)
store_x a4, (32+32)(sp)
store_x a5, (36+36)(sp)
store_x a6, (40+40)(sp)
store_x a7, (44+44)(sp)
store_x t3, (48+48)(sp)
store_x t4, (52+52)(sp)
store_x t5, (56+56)(sp)
store_x t6, (60+60)(sp)
#else
addi sp, sp, -76
store_x t0, (4)(sp)
store_x t1, (8)(sp)
store_x t2, (12)(sp)
csrr t0, mepc
store_x t0, (68)(sp)
csrr t0, mstatus
store_x t0, (72)(sp)
store_x ra, (0)(sp)
store_x a0, (16)(sp)
store_x a1, (20)(sp)
store_x a2, (24)(sp)
store_x a3, (28)(sp)
store_x a4, (32)(sp)
store_x a5, (36)(sp)
store_x a6, (40)(sp)
store_x a7, (44)(sp)
store_x t3, (48)(sp)
store_x t4, (52)(sp)
store_x t5, (56)(sp)
store_x t6, (60)(sp)
#endif
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
SAVE_MATRIX_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY || CONFIG_CHECK_MATRIX_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_MATRIX_REGISTERS
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if __riscv_xlen == 64
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72+72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68+68)(sp)
csrw mepc, t0
load_x ra, (0 +0 )(sp)
load_x t0, (4 +4 )(sp)
load_x t1, (8 +8 )(sp)
load_x t2, (12+12)(sp)
load_x a0, (16+16)(sp)
load_x a1, (20+20)(sp)
load_x a2, (24+24)(sp)
load_x a3, (28+28)(sp)
load_x a4, (32+32)(sp)
load_x a5, (36+36)(sp)
load_x a6, (40+40)(sp)
load_x a7, (44+44)(sp)
load_x t3, (48+48)(sp)
load_x t4, (52+52)(sp)
load_x t5, (56+56)(sp)
load_x t6, (60+60)(sp)
addi sp, sp, (76+76)
#else
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY) && (!CONFIG_CHECK_MATRIX_DIRTY)
load_x t0, (72)(sp)
csrw mstatus, t0
#endif
load_x t0, (68)(sp)
csrw mepc, t0
load_x ra, (0)(sp)
load_x t0, (4)(sp)
load_x t1, (8)(sp)
load_x t2, (12)(sp)
load_x a0, (16)(sp)
load_x a1, (20)(sp)
load_x a2, (24)(sp)
load_x a3, (28)(sp)
load_x a4, (32)(sp)
load_x a5, (36)(sp)
load_x a6, (40)(sp)
load_x a7, (44)(sp)
load_x t3, (48)(sp)
load_x t4, (52)(sp)
load_x t5, (56)(sp)
load_x t6, (60)(sp)
addi sp, sp, (76)
#endif
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
load_x t0, (0)(sp)
load_x t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
#if __riscv_xlen == 64
addi sp, sp, -(140+140)
store_x x1, ( 0 + 0 )(sp)
store_x x3, ( 8 + 8 )(sp)
store_x x4, ( 12+ 12)(sp)
store_x x5, ( 16+ 16)(sp)
store_x x6, ( 20+ 20)(sp)
store_x x7, ( 24+ 24)(sp)
store_x x8, ( 28+ 28)(sp)
store_x x9, ( 32+ 32)(sp)
store_x x10,( 36+ 36)(sp)
store_x x11,( 40+ 40)(sp)
store_x x12,( 44+ 44)(sp)
store_x x13,( 48+ 48)(sp)
store_x x14,( 52+ 52)(sp)
store_x x15,( 56+ 56)(sp)
store_x x16,( 60+ 60)(sp)
store_x x17,( 64+ 64)(sp)
store_x x18,( 68+ 68)(sp)
store_x x19,( 72+ 72)(sp)
store_x x20,( 76+ 76)(sp)
store_x x21,( 80+ 80)(sp)
store_x x22,( 84+ 84)(sp)
store_x x23,( 88+ 88)(sp)
store_x x24,( 92+ 92)(sp)
store_x x25,( 96+ 96)(sp)
store_x x26,(100+100)(sp)
store_x x27,(104+104)(sp)
store_x x28,(108+108)(sp)
store_x x29,(112+112)(sp)
store_x x30,(116+116)(sp)
store_x x31,(120+120)(sp)
csrr a0, mepc
store_x a0, (124+124)(sp)
csrr a0, mstatus
store_x a0, (128+128)(sp)
csrr a0, mcause
store_x a0, (132+132)(sp)
csrr a0, mtval
store_x a0, (136+136)(sp)
csrr a0, mscratch
store_x a0, ( 4 + 4 )(sp)
#else
addi sp, sp, -140
store_x x1, ( 0 )(sp)
store_x x3, ( 8 )(sp)
store_x x4, ( 12)(sp)
store_x x5, ( 16)(sp)
store_x x6, ( 20)(sp)
store_x x7, ( 24)(sp)
store_x x8, ( 28)(sp)
store_x x9, ( 32)(sp)
store_x x10,( 36)(sp)
store_x x11,( 40)(sp)
store_x x12,( 44)(sp)
store_x x13,( 48)(sp)
store_x x14,( 52)(sp)
store_x x15,( 56)(sp)
store_x x16,( 60)(sp)
store_x x17,( 64)(sp)
store_x x18,( 68)(sp)
store_x x19,( 72)(sp)
store_x x20,( 76)(sp)
store_x x21,( 80)(sp)
store_x x22,( 84)(sp)
store_x x23,( 88)(sp)
store_x x24,( 92)(sp)
store_x x25,( 96)(sp)
store_x x26,(100)(sp)
store_x x27,(104)(sp)
store_x x28,(108)(sp)
store_x x29,(112)(sp)
store_x x30,(116)(sp)
store_x x31,(120)(sp)
csrr a0, mepc
store_x a0, (124)(sp)
csrr a0, mstatus
store_x a0, (128)(sp)
csrr a0, mcause
store_x a0, (132)(sp)
csrr a0, mtval
store_x a0, (136)(sp)
csrr a0, mscratch
store_x a0, ( 4 )(sp)
#endif
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart id is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,316 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize caches, memory sections, PMP, interrupts and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
    /* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
    /* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
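    /* regs layout, filled by the trap entry code: regs[0] = x1,
       regs[1] = original sp (from mscratch), regs[2..30] = x3..x31,
       regs[31] = mepc, regs[32] = mstatus */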
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,527 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
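    /* offsets are written as (n+n): the frame is laid out in 4-byte slots and doubled for the 8-byte registers of RV64 */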
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
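    /* 280-byte frame, 35 x 8-byte slots: x1, sp (via mscratch), x3..x31, mepc, mstatus, mcause, mtval */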
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a
 * weak symbol that simply dead-loops; it can be overridden
 * by providing a strong handler with the same name */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
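    /* nine reserved words, then the Reset_Handler entry address */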
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
    /* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
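    /* mtvec.MODE = 1 selects vectored mode: traps dispatch through the __Vectors jump table */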
li a1, 0x1
    or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
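/* expected to be set to 0xa55a (e.g. by the boot core) to release secondary harts; polled in secondary_cpu_entry */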
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,316 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
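/* page tables must be 4 KiB aligned: satp and non-leaf PTEs hold page-frame numbers, not full addresses */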
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
    /* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
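    /* non-leaf PTE: valid bit (0x1) plus the next-level table's PPN in bits 10 and up */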
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
    /* the MMU may not be cleared after reset on FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
    /* set the hart threshold to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the psr and vbr.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
    /* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
    /* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,527 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a
 * weak symbol that simply dead-loops; it can be overridden
 * by providing a strong handler with the same name */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
    /* disable all interrupt enables and clear pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
    or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,316 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
    /* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
    /* the MMU may not be cleared after reset on FPGA, so disable it explicitly */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
    /* set the hart threshold to 0 so that all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize the psr and vbr.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
    /* enable CBIE & CBCFE & CBZE for lower privilege levels */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
    /* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,527 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
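The frame offsets are written as doubled pairs such as (36+36), likely so the 4-byte slot numbering of the RV32 port stays visible while the doubled constant yields the 8-byte slots used here with sd/ld. An assumed C view of the 152-byte, (76+76), M-mode frame:
/* Assumed view of the frame built by the M-mode stubs above; one
 * 8-byte slot per doubled offset. */
struct irq_frame {
    unsigned long ra;                 /* (0 +0 )           */
    unsigned long t0, t1, t2;         /* (4+4)  .. (12+12) */
    unsigned long a0, a1, a2, a3;     /* (16+16).. (28+28) */
    unsigned long a4, a5, a6, a7;     /* (32+32).. (44+44) */
    unsigned long t3, t4, t5, t6;     /* (48+48).. (60+60) */
    unsigned long unused;             /* (64+64), not written */
    unsigned long mepc;               /* (68+68)           */
    unsigned long mstatus;            /* (72+72)           */
};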
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by a strong
 * handler of the same name. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
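Because every vector entry ultimately resolves to Default_Handler through these weak aliases, a port hooks an interrupt simply by providing a strong symbol of the same name. A hedged example in C:
/* A strong definition overrides the weak alias created by
 * def_irq_handler, and the vector table dispatches here instead.
 * The body is illustrative only. */
void CORET_IRQHandler(void)
{
    /* acknowledge the core timer and drive the system tick here */
}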
View File
@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable interrupt enables and clear all pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
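mtvec is loaded with the table address OR 1: the low MODE bit selects vectored dispatch, so interrupt cause N enters __Vectors + 4*N (synchronous exceptions use the base entry), and the .align 6 on the table provides the base alignment the mode requires. The same value in C:
/* Sketch of the mtvec value composed in Reset_Handler above. */
extern char __Vectors[];
static inline unsigned long mtvec_vectored(void)
{
    return (unsigned long)__Vectors | 0x1UL;  /* bit0 = vectored mode */
}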
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
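Secondary harts spin in secondary_cpu_entry until secondary_boot_flag holds the magic value 0xa55a. The writer is not part of this file; a hypothetical sketch of the release side the primary hart would run once the kernel is ready:
/* Hypothetical release side (the real writer lives elsewhere in the SDK):
 * publish the flag only after all shared initialization is visible. */
extern volatile unsigned long secondary_boot_flag;
void release_secondary_harts(void)
{
    __sync_synchronize();          /* order prior initialization first */
    secondary_boot_flag = 0xa55a;
}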
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif
View File
@ -0,0 +1,316 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
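All three branches pack page-table entries the same way: the physical page number (PA >> 12) lands in bits 10 and up, the attribute bits sit below it, and bit 0 marks the entry valid. A sketch of the leaf encoding, with attrs standing for whichever flag set (MAEE or Svpbmt) the active branch uses:
/* Leaf-PTE packing used throughout _mmu_init(): PPN in bits [*:10],
 * attribute/permission flags in [9:1], valid bit in [0]. */
static inline unsigned long make_leaf_pte(unsigned long pa, unsigned long attrs)
{
    return ((pa >> 12) << 10) | attrs | 0x1UL;  /* 0x1 = V (valid) */
}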
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
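pmpaddr0 holds the region limit shifted right by 2, and the 0x8f config byte decodes as L = 1 (bit 7), A = TOR (bits 4:3 = 01) and R/W/X all set, i.e. one locked read/write/execute region covering [0, 0x90000000). The decode, spelled out:
/* Worked decode of the 0x8f written by pmp_init() above. */
enum { PMP_R = 1 << 0, PMP_W = 1 << 1, PMP_X = 1 << 2,
       PMP_A_TOR = 1 << 3, PMP_L = 1 << 7 };
/* 0x8f == PMP_L | PMP_A_TOR | PMP_X | PMP_W | PMP_R,
 * and TOR entry 0 spans [0, pmpaddr0 << 2) == [0, 0x90000000). */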
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart thresholds to 0 so all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* PLIC_PER: permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
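interrupt_init() only establishes a clean baseline: every priority at 31, all enables cleared, every source claimed and completed once, thresholds at 0. Drivers then enable their own sources; a hedged sketch against the same register block, assuming 32 sources per enable word:
/* Sketch: enable one source for hart 0's M-mode context, mirroring the
 * PLIC_H0_MIE layout used by interrupt_init() above. */
static void plic_enable_m(int irq)
{
    PLIC->PLIC_H0_MIE[irq / 32] |= 1U << (irq % 32);
}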
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core status registers, caches, PMP and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower priviledge */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}
View File
@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
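The regs array handed to trap_c() mirrors the frame that trap builds: regs[0] is x1, regs[1] is the faulting sp recovered from mscratch, regs[2..30] are x3..x31, and the CSRs follow, which is why the dump prints x%d with i + 1 and reads mepc from regs[31]. As an index map:
/* Assumed index map for the frame passed to trap_c() (8-byte slots):
 *   regs[0]       x1 (ra)
 *   regs[1]       x2 (sp, recovered from mscratch)
 *   regs[2..30]   x3 .. x31
 *   regs[31]      mepc        regs[32]  mstatus
 *   regs[33]      mcause      regs[34]  mtval        */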
View File
@ -0,0 +1,521 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by a strong
 * handler of the same name. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
View File
@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable interrupt enables and clear all pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
View File

@ -0,0 +1,324 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart thresholds to 0 so all interrupt priorities are enabled */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* PLIC_PER: permit S-mode access to the PLIC */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Initialize core status registers, caches, PMP and interrupts.
* @param None
* @return None
*/
void SystemInit(void)
{
#if CONFIG_CPU_XUANTIE_C910V3_CP || CONFIG_CPU_XUANTIE_C920V3_CP
/* disable theadisaee & enable MM */
unsigned long status = __get_MXSTATUS();
status &= ~(1 << 22);
status |= (1 << 24 | 1 << 15);
__set_MXSTATUS(status);
#else
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#endif
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower priviledge */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}
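Unlike the earlier copy of this file, this SystemInit clears MXSTATUS bit 22 and sets bit 24 for the C910V3/C920V3 co-processor parts instead of enabling the T-Head ISA extension bit. In outline (bit numbers taken from the code, helper name hypothetical):
/* Hypothetical helper summarizing the two MXSTATUS settings above. */
static void mxstatus_setup(int is_cp_core)
{
    unsigned long s = __get_MXSTATUS();
    if (is_cp_core)
        s = (s & ~(1UL << 22)) | (1UL << 24) | (1UL << 15);  /* CP parts */
    else
        s |= (1UL << 22) | (1UL << 15);  /* enable theadisaee & MM */
    __set_MXSTATUS(s);
}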
View File
@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}
View File
@ -0,0 +1,521 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler is a weak
 * symbol that simply dead-loops; it can be overridden by a strong
 * handler of the same name. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
View File
@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable interrupt enables and clear all pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* The bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other CPU cores jump to the secondary entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif
View File
@ -0,0 +1,324 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE for the T-Head MMU extension */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
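/* leaf PTE layout (a sketch): V = bit 0, PPN starts at bit 10 (so physical page i
 * becomes i << 10), permission bits come from LOWER_ATTRS, and the MAEE
 * cacheability/shareability attributes sit in the high bits via UPPER_ATTRS */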
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
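/* 0x8f = L | A=TOR | X | W | R: a locked TOR region spanning 0x0 ~ 0x90000000
 * (pmpaddr registers hold the address shifted right by 2) */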
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 to enable all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
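/* claim and complete each source once to flush any stale pending interrupts */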
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
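/* bit 16: platform-defined interrupt enable, used here for L1-cache ECC reporting */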
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up caches, memory sections, PMP, the interrupt controller and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
#if CONFIG_CPU_XUANTIE_C910V3_CP || CONFIG_CPU_XUANTIE_C920V3_CP
/* disable theadisaee & enable MM */
unsigned long status = __get_MXSTATUS();
status &= ~(1 << 22);
status |= (1 << 24 | 1 << 15);
__set_MXSTATUS(status);
#else
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#endif
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
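/* cause 9: environment call from S-mode */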
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
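/* 0x222 delegates supervisor software (bit 1), timer (bit 5) and external (bit 9) interrupts */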
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege */
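/* menvcfg fields: CBIE = bits 5:4, CBCFE = bit 6, CBZE = bit 7 */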
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
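/* assumes a 20 MHz reference clock; boards with a different clock should adjust this */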
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
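/* optional hook: board code may set this to dump extra state before trap_c() hangs */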
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,521 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
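/* offsets written as (n+n) are doubled RV32 word offsets, giving the 8-byte slot
 * spacing RV64 needs; the frame holds ra, t0-t6, a0-a7 plus sepc and sstatus */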
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
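/* stash the interrupted sp in mscratch, then switch to this hart's IRQ stack */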
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
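/* 280-byte trap frame: x1 and x3-x31, then mepc/mstatus/mcause/mtval; the
 * interrupted sp (x2) is saved from mscratch into slot (4+4) below */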
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Each default handler
 * is a weak symbol that spins in a dead loop; it can be
 * overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable interrupt enables and clear all pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid taking interrupts before the first task starts; */
/* the bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
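/* mtvec MODE bit = 1: vectored dispatch through the __Vectors table */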
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,316 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE (the T-Head MMU attribute extension) */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 to enable all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up caches, memory sections, PMP, the interrupt controller and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,521 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. Each default handler
 * is a weak symbol that spins in a dead loop; it can be
 * overridden by another handler. */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable interrupt enables and clear all pending interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid taking interrupts before the first task starts; */
/* the bit is set again when a task restores its context. */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* a0: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

View File

@ -0,0 +1,324 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <soc.h>
#include <csi_core.h>
#include <drv/tick.h>
#include <drv/porting.h>
#include <drv/irq.h>
#include "riscv_csr.h"
#if (defined(CONFIG_KERNEL_RHINO) || defined(CONFIG_KERNEL_FREERTOS) || defined(CONFIG_KERNEL_RTTHREAD)) && defined(CONFIG_KERNEL_NONE)
#error "Please check the current system is baremetal or not!!!"
#endif
#if !defined(CONFIG_SMP) || (defined(CONFIG_SMP) && !CONFIG_SMP)
#if CONFIG_NR_CPUS > 1
#error "Please define CONFIG_NR_CPUS as 1 or do not need define."
#endif
#endif
#if CONFIG_ECC_L2_ENABLE
static csi_dev_t ecc_l2_dev;
#endif
extern void section_data_copy(void);
extern void section_ram_code_copy(void);
extern void section_bss_clear(void);
#ifdef CONFIG_RISCV_SMODE
extern unsigned long __Vectors;
unsigned long page_table_l2[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l1[512] __attribute__ ((aligned(4096)));
unsigned long page_table_l0[512] __attribute__ ((aligned(4096)));
void _mmu_init(void) __attribute__((noinline));
void _mmu_init(void)
{
#if CONFIG_CPU_XUANTIE_C906 || CONFIG_CPU_XUANTIE_C906FD || CONFIG_CPU_XUANTIE_C906FDV \
|| CONFIG_CPU_XUANTIE_C908 || CONFIG_CPU_XUANTIE_C908V || CONFIG_CPU_XUANTIE_C908I \
|| CONFIG_CPU_XUANTIE_R910 || CONFIG_CPU_XUANTIE_R920
unsigned long status = __get_MXSTATUS();
/* enable MAEE (the T-Head MMU attribute extension) */
status |= (1 << 21);
__set_MXSTATUS(status);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | i << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (UPPER_ATTRS(ATTR_SO | ATTR_SH) | i << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (UPPER_ATTRS(ATTR_CA | ATTR_SH) | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#elif CONFIG_CPU_XUANTIE_C907_RV32 || CONFIG_CPU_XUANTIE_C907FD_RV32 || CONFIG_CPU_XUANTIE_C907FDV_RV32 || CONFIG_CPU_XUANTIE_C907FDVM_RV32
unsigned long envcfgh = __get_MENVCFGH();
/* enable svpbmt */
envcfgh |= (1 << 30);
__set_MENVCFGH(envcfgh);
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(4M ~ 1G-1) <==> PA(4M ~ 1G-1) */
for (unsigned long i = 1; i < 256; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
for (unsigned long i = 256; i < 512; i++) {
page_table_l1[i] = (SVPBMT_PMA | (i) << 20 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
#else
unsigned long envcfg = __get_MENVCFG();
/* enable svpbmt */
envcfg |= (1ull << 62);
__set_MENVCFG(envcfg);
page_table_l2[0] = 0x1 | ((unsigned long)page_table_l1 >> 12) << 10;
page_table_l1[0] = 0x1 | ((unsigned long)page_table_l0 >> 12) << 10;
/* setup mmu VA(0M ~ 1M-1) <==> PA(0M ~ 1M-1) */
for (unsigned long i = 0; i < 256; i++) {
page_table_l0[i] = (SVPBMT_PMA | (i) << 10 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(2M ~ 1G-1) <==> PA(2M ~ 1G-1) */
for (unsigned long i = 1; i < 512; i++) {
page_table_l1[i] = (SVPBMT_IO | (i) << 19 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
}
/* setup mmu VA(1G ~ 2G-1) <==> PA(1G ~ 2G-1) */
page_table_l2[1] = (SVPBMT_PMA | (1) << 28 | LOWER_ATTRS(DIRTY_FLAG | ACCESS_FLAG | AP_X | AP_W | AP_R | GLOBAL_FLAG)) | 0x1;
#endif
#if __riscv_xlen == 64
csi_dcache_clean_range((unsigned long *)&page_table_l2, sizeof(page_table_l2));
#endif
csi_dcache_clean_range((unsigned long *)&page_table_l1, sizeof(page_table_l1));
csi_dcache_clean_range((unsigned long *)&page_table_l0, sizeof(page_table_l0));
csi_mmu_invalid_tlb_all();
#if __riscv_xlen == 64
__set_SATP(((unsigned long)&page_table_l2 >> 12));
csi_mmu_set_mode(MMU_MODE_39);
csi_mmu_enable();
#else
__set_SATP(((unsigned long)&page_table_l1 >> 12));
csi_mmu_set_mode(MMU_MODE_32);
csi_mmu_enable();
#endif
}
void _system_switchto_smode(void)
{
unsigned long m_status = __get_MSTATUS();
m_status &= ~MSTATUS_TVM_MASK;
m_status &= ~MSTATUS_MPP_MASK;
m_status |= MSTATUS_MPP_S;
__set_MSTATUS(m_status);
/* setup S-Mode csr regs */
__set_STVEC((unsigned long)(&__Vectors) | 0x1);
//FIXME:
__ASM("auipc a0, 0");
__ASM("addi a0, a0, 14");
__ASM("csrw mepc, a0");
__ASM("mret");
}
void _system_init_for_smode(void)
{
_system_switchto_smode();
}
void smode_init(void)
{
/* the MMU state may not be cleared after reset on FPGA */
csi_mmu_disable();
_mmu_init();
_system_init_for_smode();
}
#endif
/**
* @brief initialize pmp
* @param None
* @return None
*/
static void pmp_init(void)
{
long addr;
addr = 0x90000000UL >> 2;
__set_PMPADDR0(addr);
__set_PMPxCFG(0, 0x8f);
}
static void interrupt_init(void)
{
int i;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_PRIO[i] = 31;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_IP[i] = 0;
}
for (i = 0; i < (CONFIG_IRQ_NUM + 32) / 32; i++) {
PLIC->PLIC_H0_MIE[i] = 0;
PLIC->PLIC_H0_SIE[i] = 0;
}
/* set hart threshold to 0 to enable all interrupt priorities */
PLIC->PLIC_H0_MTH = 0;
PLIC->PLIC_H0_STH = 0;
for (i = 0; i < CONFIG_IRQ_NUM; i++) {
PLIC->PLIC_H0_MCLAIM = i;
PLIC->PLIC_H0_SCLAIM = i;
}
/* set PLIC_PER */
PLIC->PLIC_PER = 0x1;
/* enable MEIE & MTIE & MSIE */
uint32_t mie = __get_MIE();
mie |= (1 << 11 | 1 << 7 | 1 << 3);
#if CONFIG_ECC_L1_ENABLE
mie |= (1 << 16);
#endif
__set_MIE(mie);
}
static void section_init(void)
{
#if CONFIG_XIP
section_data_copy();
section_ram_code_copy();
csi_dcache_clean();
csi_icache_invalid();
#endif
section_bss_clear();
}
static void cache_init(void)
{
/* enable cache */
csi_dcache_enable();
csi_icache_enable();
}
/**
* @brief initialize the system
* Set up caches, memory sections, PMP, the interrupt controller and the system tick.
* @param None
* @return None
*/
void SystemInit(void)
{
#if CONFIG_CPU_XUANTIE_C910V3_CP || CONFIG_CPU_XUANTIE_C920V3_CP
/* disable theadisaee & enable MM */
unsigned long status = __get_MXSTATUS();
status &= ~(1 << 22);
status |= (1 << 24 | 1 << 15);
__set_MXSTATUS(status);
#else
/* enable theadisaee & MM */
unsigned long status = __get_MXSTATUS();
status |= (1 << 22 | 1 << 15);
__set_MXSTATUS(status);
#endif
#if __riscv_flen == 64
/* enable float ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_FS_SHIFT);
__set_MSTATUS(status);
#endif
#ifdef __riscv_vector
/* enable vector ISA */
status = __get_MSTATUS();
status |= (1 << MSTATUS_VS_SHIFT);
__set_MSTATUS(status);
#endif
#if CONFIG_ECC_L1_ENABLE
/* enable L1 cache ecc */
uint64_t mhint = __get_MHINT();
mhint |= (0x1 << 19);
__set_MHINT(mhint);
#endif
#if CONFIG_ECC_L2_ENABLE
/* enable L2 cache ecc */
uint64_t mccr2 = __get_MCCR2();
mccr2 |= (0x1 << 1);
__set_MCCR2(mccr2);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable ecall delegate */
unsigned long medeleg = __get_MEDELEG();
medeleg |= (1 << 9);
__set_MEDELEG(medeleg);
/* enable interrupt delegate */
unsigned long mideleg = __get_MIDELEG();
mideleg |= 0x222;
__set_MIDELEG(mideleg);
#endif
#ifdef CONFIG_RISCV_SMODE
/* enable mcounteren for s-mode */
__set_MCOUNTEREN(0xffffffff);
#if CBO_INSN_SUPPORT
unsigned long envcfg = __get_MENVCFG();
/* enable CBIE & CBCFE & CBZE on lower privilege */
envcfg |= (3 << 4 | 1 << 6 | 1 << 7);
__set_MENVCFG(envcfg);
#endif
#endif
cache_init();
section_init();
pmp_init();
interrupt_init();
soc_set_sys_freq(20000000);
csi_tick_init();
#if CONFIG_ECC_L2_ENABLE
extern void ecc_l2_irqhandler(void *arg);
/* register the L2 cache ECC interrupt handler */
ecc_l2_dev.irq_num = L2_CACHE_ECC_IRQn;
csi_irq_attach(ecc_l2_dev.irq_num, ecc_l2_irqhandler, &ecc_l2_dev);
csi_irq_enable(ecc_l2_dev.irq_num);
#endif
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <csi_core.h>
#if defined(AOS_COMP_DEBUG) && (AOS_COMP_DEBUG > 0)
#include <debug/dbg.h>
#else
#define printk printf
#endif
void (*trap_c_callback)(void);
void trap_c(uintptr_t *regs)
{
int i;
unsigned long vec = 0;
vec = __get_MCAUSE();
printk("CPU Exception(mcause);: NO.0x%lx", vec);
printk("\n");
for (i = 0; i < 31; i++) {
printk("x%d: %p\t", i + 1, (void *)regs[i]);
if ((i % 4) == 3) {
printk("\n");
}
}
printk("\n");
printk("mepc : %p\n", (void *)regs[31]);
printk("mstatus: %p\n", (void *)regs[32]);
if (trap_c_callback) {
trap_c_callback();
}
while (1);
}
__attribute__((weak)) void exceptionHandler(void *context)
{
trap_c((uintptr_t *)context);
}

View File

@ -0,0 +1,521 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "riscv_asm_macro.h"
.section .stack
.align 4
.global g_trapstackbase
.global g_top_trapstack
g_trapstackbase:
.space CONFIG_ARCH_INTERRUPTSTACK
g_top_trapstack:
.text
.global _interrupt_return_address
.align 3
.weak Scoret_Handler
.type Scoret_Handler, %function
Scoret_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
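/* 152-byte frame: offsets are written as doubled 4-byte values
   because every slot holds one 64-bit (8-byte) register */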
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mcoret_Handler
.type Mcoret_Handler, %function
Mcoret_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, CORET_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#if CONFIG_ECC_L1_ENABLE
.align 3
.weak ECC_L1_Handler
.type ECC_L1_Handler, %function
ECC_L1_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, ECC_L1_IRQHandler
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
#endif
.align 3
.weak Sirq_Handler
.type Sirq_Handler, %function
Sirq_Handler:
csrw sscratch, sp
la sp, g_top_irqstack
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, sepc
sd t0, (68+68)(sp)
csrr t0, sstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, sstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_SSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw sstatus, t0
#endif
ld t0, (68+68)(sp)
csrw sepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, sscratch
sret
.align 3
.weak Mirq_Handler
.type Mirq_Handler, %function
Mirq_Handler:
addi sp, sp, -16
sd t0, (0)(sp)
sd t1, (8)(sp)
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, -8
sd s0, (sp)
#endif
csrw mscratch, sp
csrr t0, mhartid
la sp, g_base_irqstack
addi t1, t0, 1
li t0, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t0
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
addi sp, sp, -(76+76)
sd t0, (4+4)(sp)
sd t1, (8+8)(sp)
sd t2, (12+12)(sp)
csrr t0, mepc
sd t0, (68+68)(sp)
csrr t0, mstatus
sd t0, (72+72)(sp)
sd ra, (0 +0 )(sp)
sd a0, (16+16)(sp)
sd a1, (20+20)(sp)
sd a2, (24+24)(sp)
sd a3, (28+28)(sp)
sd a4, (32+32)(sp)
sd a5, (36+36)(sp)
sd a6, (40+40)(sp)
sd a7, (44+44)(sp)
sd t3, (48+48)(sp)
sd t4, (52+52)(sp)
sd t5, (56+56)(sp)
sd t6, (60+60)(sp)
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
csrr t3, mstatus
#endif
SAVE_FLOAT_REGISTERS
SAVE_VECTOR_REGISTERS
la t2, do_irq
jalr t2
_interrupt_return_address:
#if CONFIG_CHECK_FPU_DIRTY || CONFIG_CHECK_VECTOR_DIRTY
RESTORE_MSTATUS
#endif
RESTORE_VECTOR_REGISTERS
RESTORE_FLOAT_REGISTERS
#if (!CONFIG_CHECK_FPU_DIRTY) && (!CONFIG_CHECK_VECTOR_DIRTY)
ld t0, (72+72)(sp)
csrw mstatus, t0
#endif
ld t0, (68+68)(sp)
csrw mepc, t0
ld ra, (0 +0 )(sp)
ld t0, (4 +4 )(sp)
ld t1, (8 +8 )(sp)
ld t2, (12+12)(sp)
ld a0, (16+16)(sp)
ld a1, (20+20)(sp)
ld a2, (24+24)(sp)
ld a3, (28+28)(sp)
ld a4, (32+32)(sp)
ld a5, (36+36)(sp)
ld a6, (40+40)(sp)
ld a7, (44+44)(sp)
ld t3, (48+48)(sp)
ld t4, (52+52)(sp)
ld t5, (56+56)(sp)
ld t6, (60+60)(sp)
addi sp, sp, (76+76)
csrr sp, mscratch
#if CONFIG_PROFILING_PERF && CONFIG_PERF_BACKTRACE_USE_FP
addi sp, sp, 8
#endif
ld t0, (0)(sp)
ld t1, (8)(sp)
addi sp, sp, 16
mret
/******************************************************************************
* Functions:
* void trap(void);
* default exception handler
******************************************************************************/
.align 3
.global trap
.type trap, %function
trap:
csrw mscratch, sp
la sp, g_top_trapstack
addi sp, sp, -(140+140)
sd x1, ( 0 + 0 )(sp)
sd x3, ( 8 + 8 )(sp)
sd x4, ( 12+ 12)(sp)
sd x5, ( 16+ 16)(sp)
sd x6, ( 20+ 20)(sp)
sd x7, ( 24+ 24)(sp)
sd x8, ( 28+ 28)(sp)
sd x9, ( 32+ 32)(sp)
sd x10,( 36+ 36)(sp)
sd x11,( 40+ 40)(sp)
sd x12,( 44+ 44)(sp)
sd x13,( 48+ 48)(sp)
sd x14,( 52+ 52)(sp)
sd x15,( 56+ 56)(sp)
sd x16,( 60+ 60)(sp)
sd x17,( 64+ 64)(sp)
sd x18,( 68+ 68)(sp)
sd x19,( 72+ 72)(sp)
sd x20,( 76+ 76)(sp)
sd x21,( 80+ 80)(sp)
sd x22,( 84+ 84)(sp)
sd x23,( 88+ 88)(sp)
sd x24,( 92+ 92)(sp)
sd x25,( 96+ 96)(sp)
sd x26,(100+100)(sp)
sd x27,(104+104)(sp)
sd x28,(108+108)(sp)
sd x29,(112+112)(sp)
sd x30,(116+116)(sp)
sd x31,(120+120)(sp)
csrr a0, mepc
sd a0, (124+124)(sp)
csrr a0, mstatus
sd a0, (128+128)(sp)
csrr a0, mcause
sd a0, (132+132)(sp)
csrr a0, mtval
sd a0, (136+136)(sp)
csrr a0, mscratch
sd a0, ( 4 + 4 )(sp)
mv a0, sp
la a1, exceptionHandler
jalr a1
.align 3
.weak Default_Handler
.type Default_Handler, %function
Default_Handler:
j trap
.size Default_Handler, . - Default_Handler
/* Macro to define default handlers. A default handler
 * is a weak symbol that just dead-loops; it can be
 * overridden by a strong handler of the same name */
.macro def_irq_handler handler_name
.weak \handler_name
.set \handler_name, Default_Handler
.endm
def_irq_handler Stspend_Handler
def_irq_handler Mtspend_Handler
def_irq_handler CORET_IRQHandler
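/*
 * Usage note (assumption): because the handlers above are weak aliases
 * of Default_Handler, an application overrides one simply by defining
 * a strong symbol with the same name, e.g. in C:
 *
 *     void CORET_IRQHandler(void) { ... }
 */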

View File

@ -0,0 +1,188 @@
/*
* Copyright (C) 2017-2024 Alibaba Group Holding Limited
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <csi_config.h>
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif
.globl Reset_Handler
.section .vectors
.align 6
.globl __Vectors
.type __Vectors, @object
__Vectors:
j Default_Handler /* 0 */
j Stspend_Handler /* 1 */
j Default_Handler /* 2 */
j Mtspend_Handler /* 3 */
j Default_Handler /* 4 */
j Scoret_Handler /* 5 */
j Default_Handler /* 6 */
j Mcoret_Handler /* 7 */
j Default_Handler /* 8 */
j Sirq_Handler /* 9 */
j Default_Handler /* 10 */
j Mirq_Handler /* 11 */
j Default_Handler /* 12 */
j Default_Handler /* 13 */
j Default_Handler /* 14 */
j Default_Handler /* 15 */
#if CONFIG_ECC_L1_ENABLE
j ECC_L1_Handler /* 16 */
#else
j Default_Handler /* 16 */
#endif
.text
.align 2
j Reset_Handler
.align 2
.long 0x594B5343 /* CSKY ASCII */
.long 0x594B5343 /* CSKY ASCII */
.align 2
.rept 9
.long 0
.endr
.long Reset_Handler
_start:
.type Reset_Handler, %function
Reset_Handler:
.option push
.option norelax
/* disable ie and clear all interrupts */
csrw mie, zero
csrw mip, zero
/* Disable MIE to avoid triggering interrupts before the first task starts. */
/* This bit is set when a task recovers context. */
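/* bit 1 = mstatus.SIE (S-mode), bit 3 = mstatus.MIE (M-mode) */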
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrc mstatus, (1 << 1)
#else
csrc mstatus, (1 << 3)
#endif
la gp, __global_pointer$
.option pop
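/* install vector table: mtvec = __Vectors | 1 (MODE = 1, vectored) */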
la a0, __Vectors
li a1, 0x1
or a0, a0, a1
csrw mtvec, a0
/* get cpu id */
csrr a0, mhartid
#if defined(CONFIG_SMP) && CONFIG_SMP
/* check if hart is within range */
/* tp: hart id */
li t0, CONFIG_NR_CPUS
bge a0, t0, hart_out_of_bounds_loop
#endif
#ifdef CONFIG_KERNEL_NONE
la sp, g_base_mainstack
addi t1, a0, 1
li t2, CONFIG_ARCH_MAINSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_MAINSTACK + g_base_mainstack */
#else
la sp, g_base_irqstack
addi t1, a0, 1
li t2, CONFIG_ARCH_INTERRUPTSTACK
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * CONFIG_ARCH_INTERRUPTSTACK + g_base_irqstack */
#endif
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
#ifndef __NO_SYSTEM_INIT
la a0, SystemInit
jalr a0
#endif
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
la a0, smode_init
jalr a0
#endif
#ifdef CONFIG_KERNEL_NONE
/* Enable interrupt */
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
csrs sstatus, (1 << 1)
#else
csrs mstatus, (1 << 3)
#endif
#endif
la a0, pre_main
jalr a0
.size Reset_Handler, . - Reset_Handler
__exit:
j __exit
.type secondary_cpu_entry, %function
secondary_cpu_entry:
#if defined(CONFIG_SMP) && CONFIG_SMP
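/* spin until the boot core publishes the 0xa55a magic in secondary_boot_flag */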
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#if defined(CONFIG_SMP) && CONFIG_SMP
1:
jal secondary_cpu_c_start
.size secondary_cpu_entry, . - secondary_cpu_entry
hart_out_of_bounds_loop:
/* Harts in this loop are out of bounds, increase CONFIG_NR_CPUS. */
wfi
j hart_out_of_bounds_loop
#endif
.section .stack
.align 4
.global g_base_irqstack
.global g_top_irqstack
g_base_irqstack:
.space CONFIG_ARCH_INTERRUPTSTACK * CONFIG_NR_CPUS
g_top_irqstack:
#ifdef CONFIG_KERNEL_NONE
.align 4
.global g_base_mainstack
.global g_top_mainstack
g_base_mainstack:
.space CONFIG_ARCH_MAINSTACK * CONFIG_NR_CPUS
g_top_mainstack:
#endif
#if defined(CONFIG_SMP) && CONFIG_SMP
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif

Some files were not shown because too many files have changed in this diff