Haoc: Support pointer protection for ARM64 IEE (IEE_PTRP).

Support pointer protection for ARM64 IEE (IEE_PTRP) in the
deepin kernel. Each task_struct is paired with a task_token kept in
IEE memory, recording the task's pgd and a validity flag. Tokens are
written only through the IEE gate (set pgd / init / validate /
invalidate operations); token pages are allocated together with the
task_struct slab, mapped at the IEE alias of the task address, and
their linear mappings are made read-only.

Signed-off-by: Liu Zhehui <liuzhh@zgclab.edu.cn>
lvjinglin 2025-04-20 20:10:59 +08:00 committed by Avenger-285714
parent 416542dded
commit 2955188452
15 changed files with 497 additions and 3 deletions

View File

@@ -13,6 +13,12 @@
/* Place the enum entries in the order corresponding to iee_funcs array. */
enum {
IEE_OP_MEMSET,
#ifdef CONFIG_IEE_PTRP
IEE_OP_SET_TOKEN_PGD,
IEE_OP_INIT_TOKEN,
IEE_OP_INVALIDATE_TOKEN,
IEE_OP_VALIDATE_TOKEN,
#endif
IEE_FLAG_END
};
@@ -22,5 +28,6 @@ enum {
extern unsigned long long iee_rw_gate(int flag, ...);
#define __iee_code __section(".iee.text")
#define __iee_data __section(".iee.data")
#endif
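
The comment above ties the enum order to the iee_funcs[] dispatch table that this commit extends later, so adding an operation touches three places: the enum, the table, and a caller-side wrapper. A purely illustrative sketch of that convention, using an invented IEE_OP_EXAMPLE op and _iee_example() handler that are not part of this patch:

/* Illustrative only: IEE_OP_EXAMPLE and _iee_example() are invented names. */
/* 1. enum entry, placed before IEE_FLAG_END to keep the array order:
 *        IEE_OP_EXAMPLE,
 * 2. handler entry in iee_funcs[], at the matching position:
 *        (iee_func)_iee_example,
 * 3. caller-side wrapper that enters the IEE through the gate:
 */
static inline void iee_example(struct task_struct *tsk)
{
	iee_rw_gate(IEE_OP_EXAMPLE, tsk);
}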

View File

@@ -14,5 +14,12 @@
#include <linux/mm.h>
void _iee_memset(unsigned long __unused, void *ptr, int data, size_t n);
#ifdef CONFIG_IEE_PTRP
void _iee_set_token_pgd(unsigned long __unused, struct task_struct *tsk,
pgd_t *pgd);
void _iee_init_token(unsigned long __unused, struct task_struct *tsk);
void _iee_invalidate_token(unsigned long __unused, struct task_struct *tsk);
void _iee_validate_token(unsigned long __unused, struct task_struct *tsk);
#endif
#endif

View File

@@ -10,6 +10,7 @@
#ifndef _LINUX_IEE_ASM_FUNC_H
#define _LINUX_IEE_ASM_FUNC_H
extern void iee_set_logical_mem(unsigned long addr, unsigned int order, bool prot);
extern void put_pages_into_iee(unsigned long addr, int order);
extern void set_iee_page(unsigned long addr, int order);
extern void unset_iee_page(unsigned long addr, int order);

View File

@@ -11,6 +11,7 @@
#define _LINUX_IEE_MMU_H
extern phys_addr_t __init early_iee_stack_alloc(int order);
extern phys_addr_t __init early_iee_data_alloc(int shift);
extern void __iee_create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,

View File

@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IEE_TOKEN_H
#define _LINUX_IEE_TOKEN_H
#include <asm/haoc/haoc-def.h>
extern struct kmem_cache *task_struct_cachep;
extern void __init iee_prepare_init_task_token(void);
extern void iee_set_token_page_valid(unsigned long token, unsigned long new,
unsigned int order);
extern void iee_set_token_page_invalid(unsigned long token_addr,
unsigned long token_page, unsigned int order);
extern struct slab *iee_alloc_task_token_slab(struct kmem_cache *s,
struct slab *slab, unsigned int order);
struct task_token {
pgd_t *pgd; /* Logical VA */
bool valid;
};
#ifndef CONFIG_IEE_SIP
#include <asm/haoc/iee.h>
static inline void iee_verify_token_pgd(struct task_struct *tsk)
{
struct task_token *token;
if (tsk == &init_task)
return;
token = (struct task_token *)__addr_to_iee(tsk);
if (token->pgd != tsk->mm->pgd)
panic("IEE Pgd Error: tsk_pgd: 0x%lx, token_pgd: 0x%lx",
(unsigned long)tsk->mm->pgd, (unsigned long)token->pgd);
}
#else
static inline void iee_verify_token_pgd(struct task_struct *tsk)
{
}
#endif
static inline void iee_set_token_pgd(struct task_struct *tsk, pgd_t *pgd)
{
iee_rw_gate(IEE_OP_SET_TOKEN_PGD, tsk, pgd);
}
static inline void iee_init_token(struct task_struct *tsk)
{
iee_rw_gate(IEE_OP_INIT_TOKEN, tsk);
}
static inline void iee_invalidate_token(struct task_struct *tsk)
{
iee_rw_gate(IEE_OP_INVALIDATE_TOKEN, tsk);
}
static inline void iee_validate_token(struct task_struct *tsk)
{
iee_rw_gate(IEE_OP_VALIDATE_TOKEN, tsk);
}
#endif
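
The header only defines the wrappers; the call sites belong to other parts of the HAOC series and are not shown in this commit. A hypothetical sketch of how the token API is meant to be used around a task's lifetime (the surrounding function names are illustrative, not from the patch):

/* Hypothetical call sites, for illustration only. */
static void example_task_bringup(struct task_struct *tsk, struct mm_struct *mm)
{
	iee_init_token(tsk);              /* currently a no-op placeholder     */
	iee_set_token_pgd(tsk, mm->pgd);  /* record the pgd inside IEE memory  */
	iee_validate_token(tsk);          /* mark the token as live            */
	iee_verify_token_pgd(tsk);        /* cheap check: token pgd == mm->pgd */
}

static void example_task_teardown(struct task_struct *tsk)
{
	iee_invalidate_token(tsk);        /* clears the pgd and the valid flag */
}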

View File

@@ -26,6 +26,7 @@ extern bool haoc_enabled;
#define __virt_to_iee(x) (((u64)x) | IEE_OFFSET)
#define __kimg_to_iee(x) (__phys_to_iee(__pa_symbol(x)))
#define __page_to_iee(x) (__phys_to_iee(page_to_phys(x)))
#define __slab_to_iee(x) (__page_to_iee(folio_page(slab_folio(x), 0)))
#define __iee_to_virt(x) (((u64)x) & ~IEE_OFFSET)
#define __iee_to_phys(x) (__pa(__iee_to_virt(x)))
@@ -40,6 +41,15 @@ extern bool haoc_enabled;
__val; \
})
#define __addr_to_iee(x) ({ \
u64 __val; \
if (__is_lm_address((u64)x)) \
__val = __virt_to_iee((u64)x); \
else \
__val = __kimg_to_iee((u64)x); \
__val; \
})
#define SET_UPAGE(x) __pgprot(pgprot_val(x) | PTE_USER)
#define SET_PPAGE(x) __pgprot(pgprot_val(x) & (~PTE_USER))
#define SET_INVALID(x) __pgprot(pgprot_val(x) & (~PTE_VALID))
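
__addr_to_iee() picks the right translation depending on whether the object lives in the linear map or in the kernel image; the token handlers added later in this commit rely on it to reach a task's token. A minimal sketch of that use, with tsk_to_token() as an invented helper name (the handlers in iee-token.c open-code this cast):

/* Sketch only: a task's token lives at the IEE alias of the task_struct
 * address, so a plain cast after __addr_to_iee() is all the lookup needed.
 */
static inline struct task_token *tsk_to_token(struct task_struct *tsk)
{
	return (struct task_token *)__addr_to_iee(tsk);
}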

View File

@@ -25,6 +25,10 @@
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_IEE
#include <asm/haoc/iee.h>
#include <asm/haoc/iee-asm.h>
#endif
extern bool rodata_full;
static inline void contextidr_thread_switch(struct task_struct *next)
@@ -173,6 +177,10 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
*/
ttbr1 |= TTBR_CNP_BIT;
}
#ifdef CONFIG_IEE
if (iee_init_done)
ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, IEE_ASID);
#endif
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

View File

@@ -14,4 +14,14 @@ config IEE
verified in IEE APIs.
Needs hardware support FEAT_HPDS.
config IEE_PTRP
bool "Pointer Protection for IEE(IEE_PTRP)"
help
Provide IEE metadata, called a task_token, for each process to allow
verifying pointers inside task_struct, such as the struct cred * that
determines the capabilities of a process.
Can serve as an enhancement for other sub-modules of HAOC.
depends on IEE
def_bool y
endmenu # HAOC

View File

@@ -20,5 +20,11 @@ typedef void (*iee_func)(void);
*/
iee_func iee_funcs[] = {
(iee_func)_iee_memset,
#ifdef CONFIG_IEE_PTRP
(iee_func)_iee_set_token_pgd,
(iee_func)_iee_init_token,
(iee_func)_iee_invalidate_token,
(iee_func)_iee_validate_token,
#endif
NULL
};

View File

@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IEE) += iee.o iee-gate.o iee-init.o iee-func.o iee-mmu.o
obj-$(CONFIG_IEE_PTRP) += iee-token.o
ccflags-y += -I$(srctree)/mm

View File

@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/system_misc.h>
#include <asm/daifflags.h>
#include "slab.h"
static inline bool iee_support_pmd_block(unsigned long addr, unsigned int order)
{
@@ -87,6 +88,128 @@ static void iee_may_split_pmd(pud_t *pudp, unsigned long addr, unsigned int orde
}
}
/*
* Used to enforce or remove RO protection on the linear addresses of IEE
* objects. Block descriptors other than pmd blocks are not handled when
* changing page tables, so DO NOT use larger blocks on kernel linear
* mappings.
*/
void iee_set_logical_mem(unsigned long addr, unsigned int order, bool prot)
{
pgd_t *pgdir = swapper_pg_dir;
pgd_t *pgdp = pgd_offset_pgd(pgdir, addr);
p4d_t *p4dp = p4d_offset(pgdp, addr);
pud_t *pudp = pud_offset(p4dp, addr);
pmd_t *pmdp;
/* Split pmd block if needed. */
iee_may_split_pmd(pudp, addr, order);
pmdp = pmd_offset(pudp, addr);
if (pmd_leaf(READ_ONCE(*pmdp))) {
pmd_t pmd = READ_ONCE(*pmdp);
/* Only writing a single pmd block is permitted right now. */
if (order != (PMD_SHIFT - PAGE_SHIFT))
panic("%s: error on linear mappings.", __func__);
if (prot)
pmd = __pmd((pmd_val(pmd) | PMD_SECT_RDONLY) & ~PTE_DBM);
else
pmd = __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY);
set_pmd(pmdp, pmd);
} else {
pte_t *ptep = pte_offset_kernel(pmdp, addr);
pte_t pte;
/* Protect addresses one by one in this pte table. */
for (int i = 0; i < (1UL << order); i++) {
/* Clear continuous bits first. */
if ((pte_val(*ptep) & PTE_CONT) && !iee_support_cont_pte(addr, order)) {
pte_t *cont_ptep = pte_offset_kernel(pmdp, addr & CONT_PTE_MASK);
for (int j = 0; j < CONT_PTES; j++) {
set_pte(cont_ptep, __pte(pte_val(*cont_ptep) & ~PTE_CONT));
cont_ptep++;
}
}
pte = READ_ONCE(*ptep);
if (prot)
pte = __pte((pte_val(pte) | PTE_RDONLY) & ~PTE_DBM);
else
pte = __pte(pte_val(pte) & ~PTE_RDONLY);
set_pte(ptep, pte);
ptep++;
}
}
}
void iee_set_logical_mem_ro(unsigned long addr)
{
iee_set_logical_mem(addr, 0, true);
__flush_tlb_kernel_pgtable(addr);
isb();
}
void iee_set_logical_mem_rw(unsigned long addr)
{
iee_set_logical_mem(addr, 0, false);
__flush_tlb_kernel_pgtable(addr);
isb();
}
/*
* IEE addresses are mapped at page granularity, so we only need to set the
* pte entries of these mappings valid or invalid to mark whether a physical
* page is inside IEE.
*/
void set_iee_address(unsigned long addr, unsigned int order, bool valid)
{
pgd_t *pgdir = swapper_pg_dir;
pgd_t *pgdp = pgd_offset_pgd(pgdir, addr);
p4d_t *p4dp = p4d_offset(pgdp, addr);
pud_t *pudp = pud_offset(p4dp, addr);
pmd_t *pmdp = pmd_offset(pudp, addr);
pte_t *ptep = pte_offset_kernel(pmdp, addr);
unsigned long end_addr = addr + (PAGE_SIZE << order);
if ((addr < (PAGE_OFFSET + IEE_OFFSET)) ||
(addr > (PAGE_OFFSET + BIT(vabits_actual - 1)))) {
pr_err("IEE: invalid address to validate in IEE.");
return;
}
if (addr != ALIGN(addr, PAGE_SIZE)
|| end_addr > ALIGN(addr + 1, PMD_SIZE))
panic("%s: invalid input address range 0x%lx-0x%lx.", __func__,
addr, end_addr);
for (int i = 0; i < (1UL << order); i++) {
pte_t pte = READ_ONCE(*ptep);
if (valid)
pte = __pte(pte_val(pte) | PTE_VALID);
else
pte = __pte(pte_val(pte) & ~PTE_VALID);
set_pte(ptep, pte);
ptep++;
}
}
// TODO: Delete when page tables are allocated from a pool
void set_iee_address_valid(unsigned long lm_addr, unsigned int order)
{
set_iee_address(__virt_to_iee(lm_addr), order, true);
}
void set_iee_address_invalid(unsigned long lm_addr, unsigned int order)
{
set_iee_address(__virt_to_iee(lm_addr), order, false);
}
// TODO END
/* Modify linear and IEE mappings of each address at the same time to avoid
* synchronization problems.
*/
@@ -282,6 +405,23 @@ void unset_iee_page(unsigned long addr, int order)
remove_pages_from_iee(addr, order);
}
unsigned int iee_calculate_order(struct kmem_cache *s, unsigned int order)
{
#ifdef CONFIG_IEE_PTRP
if (strcmp(s->name, "task_struct") == 0)
return IEE_DATA_ORDER;
#endif
return order;
}
void iee_set_min_partial(struct kmem_cache *s)
{
#ifdef CONFIG_IEE_PTRP
if (strcmp(s->name, "task_struct") == 0)
s->min_partial *= 16;
#endif
}
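/*
 * These two helpers override the __weak stubs added to mm/slub.c later in
 * this commit: task_struct slabs are forced to IEE_DATA_ORDER and the cache
 * keeps more partial slabs, presumably so that each slab maps onto whole
 * token pages and slabs are recycled less often.
 */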
static char *handler[] = {
"ELR_EL1",
"TCR_EL1",

View File

@@ -11,12 +11,15 @@
#include <linux/memblock.h>
#include <asm/cpufeature.h>
#include <asm/haoc/iee-mmu.h>
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
#include <asm/haoc/iee-asm.h>
__aligned(PAGE_SIZE) DEFINE_PER_CPU(u64*[(PAGE_SIZE/8)],
iee_cpu_stack_ptr);
bool __ro_after_init iee_init_done;
bool __ro_after_init __aligned(8) iee_init_done;
bool __ro_after_init haoc_enabled;
/* Allocate pages from IEE data pool to use as per-cpu IEE stack. */
@@ -59,7 +62,12 @@ void __init iee_init_post(void)
return;
iee_setup_asid();
/* Flush tlb to enable IEE. */
flush_tlb_all();
#ifdef CONFIG_IEE_PTRP
iee_prepare_init_task_token();
#endif
iee_init_done = true;
}

View File

@@ -33,6 +33,11 @@ struct iee_early_alloc {
char *name;
};
static struct iee_early_alloc iee_data = {
.name = "iee_early_data",
.curr_block_nr = -1
};
static struct iee_early_alloc iee_stack = {
.name = "iee_stack",
.curr_block_nr = -1
@@ -55,7 +60,7 @@ void __init iee_init_tcr(void)
__set_fixmap(FIX_PTE, __pa_symbol(&kernel_tcr), FIXMAP_PAGE_NORMAL);
ptr += (unsigned long)(&kernel_tcr) & (PAGE_SIZE - 1);
*((u64 *)ptr) = read_sysreg(tcr_el1) & IEE_TCR_MASK;
*((u64 *)ptr) = read_sysreg(tcr_el1) & IEE_TCR_MASK & ~(TCR_HPD1 | TCR_A1);
clear_fixmap(FIX_PTE);
ptr = (unsigned long)(fix_to_virt(FIX_PTE));
__set_fixmap(FIX_PTE, __pa_symbol(&iee_tcr), FIXMAP_PAGE_NORMAL);
@@ -124,13 +129,34 @@ static phys_addr_t __init iee_mem_pool_early_alloc(struct iee_early_alloc *cache
return phys;
}
/* Calculate the allocation order reserved for early IEE data. */
static unsigned int get_iee_alloc_order(int shift)
{
phys_addr_t start, end;
u64 i = 0, size_order = 0;
unsigned long size = 0;
for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
size += (end - start);
}
size = size >> 36;
while (size >> size_order)
size_order++;
return IEE_DATA_ORDER + (size_order + shift);
}
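/*
 * Worked example for get_iee_alloc_order() (not from the patch): with
 * 128 GiB of usable memory and shift == 1, size >> 36 == 2, the loop sets
 * size_order to 2, and the function returns IEE_DATA_ORDER + 3.
 */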
/* Prepare one block for each early page pool. */
void __init early_iee_data_cache_init(void)
{
if (!haoc_enabled)
return;
/* Allocate the first block for the IEE stack pool. */
iee_mem_pool_early_alloc(&iee_stack, IEE_DATA_ORDER);
/* Allocate the first block for the IEE data pool. */
iee_mem_pool_early_alloc(&iee_data, get_iee_alloc_order(1));
}
phys_addr_t __init iee_early_alloc(struct iee_early_alloc *cache,
@@ -170,6 +196,11 @@ phys_addr_t __init early_iee_stack_alloc(int order)
return iee_early_alloc(&iee_stack, order);
}
phys_addr_t __init early_iee_data_alloc(int shift)
{
return iee_early_alloc(&iee_data, 0);
}
static phys_addr_t __init early_pgtable_alloc(int shift)
{
phys_addr_t phys;

View File

@@ -0,0 +1,196 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/haoc/iee.h>
#include <asm/haoc/iee-mmu.h>
#include <asm/haoc/iee-token.h>
#include "slab.h"
void __init iee_prepare_init_task_token(void)
{
struct task_token *init_token = (struct task_token *)__kimg_to_iee(&init_task);
u64 init_token_addr = (u64)init_token;
phys_addr_t init_token_page;
int order = 0;
/*
* If the init token crosses a page boundary, one extra token page must
* be allocated.
*/
if (ALIGN((init_token_addr + sizeof(struct task_token)), PAGE_SIZE)
!= ALIGN(init_token_addr + 1, PAGE_SIZE))
order = 1;
/* Allocate from IEE data pool. */
init_token_page = early_iee_data_alloc(order);
/* Map the IEE token address of init_task. */
for (int i = 0; i < 1UL << order; i++) {
pgd_t *pgdir = swapper_pg_dir;
pgd_t *pgdp = pgd_offset_pgd(pgdir, init_token_addr);
p4d_t *p4dp = p4d_offset(pgdp, init_token_addr);
pud_t *pudp = pud_offset(p4dp, init_token_addr);
pmd_t *pmdp = pmd_offset(pudp, init_token_addr);
pte_t *ptep = pte_offset_kernel(pmdp, init_token_addr);
pte_t pte = READ_ONCE(*ptep);
pte = __pte(((pte_val(pte) | PTE_VALID) & ~PTE_ADDR_MASK)
| __phys_to_pte_val(init_token_page));
/* Manually go through IEE gates to bypass PTP checks. */
#ifdef CONFIG_PTP
write_sysreg(read_sysreg(TCR_EL1) | TCR_HPD1 | TCR_A1, tcr_el1);
isb();
WRITE_ONCE(*__ptr_to_iee(ptep), pte);
write_sysreg(read_sysreg(TCR_EL1) & ~(TCR_HPD1 | TCR_A1), tcr_el1);
isb();
#else
set_pte(ptep, pte);
#endif
init_token_addr += PAGE_SIZE;
}
/* The init token mapping is now in place, so mark it valid through the gate. */
iee_validate_token(&init_task);
pr_info("IEE: CONFIG_PTRP enabled.");
}
/* Map or unmap token pages when task_struct slabs are allocated or freed. */
static inline void iee_set_token(unsigned long token_addr, unsigned long token_pages,
unsigned int order, bool prot)
{
phys_addr_t token_phys = __pa(token_pages);
unsigned long end_addr = token_addr + (PAGE_SIZE << order);
u64 curr_addr = token_addr;
/*
* IEE mappings are always 4-level, so we only need to look up the pte of
* the start address.
*/
pgd_t *pgdir = swapper_pg_dir;
pgd_t *pgdp = pgd_offset_pgd(pgdir, token_addr);
p4d_t *p4dp = p4d_offset(pgdp, token_addr);
pud_t *pudp = pud_offset(p4dp, token_addr);
pmd_t *pmdp = pmd_offset(pudp, token_addr);
pte_t *ptep = pte_offset_kernel(pmdp, token_addr);
/*
* We assume the input address range never crosses a pmd block boundary,
* so this function can simply use ptep++ to iterate within a single pte
* table.
*/
if (token_addr != ALIGN(token_addr, PAGE_SIZE)
|| end_addr > ALIGN(token_addr + 1, PMD_SIZE))
panic("%s: invalid input address range 0x%lx-0x%lx.", __func__,
token_addr, end_addr);
/* Map new pages to IEE addresses one by one, or clear the mappings. */
for (int i = 0; i < (1UL << order); i++) {
pte_t pte = READ_ONCE(*ptep);
if (prot) {
/* Rewrite the physical address in the pte. */
pte = __pte(((pte_val(pte) | PTE_VALID) & ~PTE_ADDR_MASK)
| __phys_to_pte_val(token_phys));
token_phys += PAGE_SIZE;
} else {
/* Restore the 1:1 mapping with physical addresses. */
pte = __pte(((pte_val(pte) & ~PTE_VALID) & ~PTE_ADDR_MASK)
| __phys_to_pte_val(__iee_to_phys(curr_addr)));
curr_addr += PAGE_SIZE;
}
set_pte(ptep, pte);
ptep++;
}
/* Apply or remove RO protection on linear mappings. */
iee_set_logical_mem(token_pages, order, prot);
flush_tlb_kernel_range(token_addr, end_addr);
flush_tlb_kernel_range(token_pages, (token_pages + (PAGE_SIZE << order)));
}
/**
* iee_set_token_page_valid() - Map newly allocated task token pages to their
* corresponding IEE addresses and enforce RO protection on their linear
* mappings.
*
* @token_addr: the start IEE address of the task tokens.
* @token_pages: virtual address of the allocated token pages.
* @order: allocation order of the address range.
*/
void iee_set_token_page_valid(unsigned long token_addr, unsigned long token_pages,
unsigned int order)
{
#ifdef DEBUG
pr_info("IEE: Set token addr 0x%lx order %d for page 0x%lx", token_addr,
order, token_pages);
#endif
iee_set_token(token_addr, token_pages, order, true);
}
void iee_set_token_page_invalid(unsigned long token_addr, unsigned long token_pages,
unsigned int order)
{
#ifdef DEBUG
pr_info("IEE: Unset token addr 0x%lx order %d for page 0x%lx", token_addr,
order, token_pages);
#endif
iee_set_token(token_addr, token_pages, order, false);
}
struct slab *iee_alloc_task_token_slab(struct kmem_cache *s,
struct slab *slab, unsigned int order)
{
struct folio *folio;
unsigned long token_addr;
unsigned long alloc_token;
if (!slab || s != task_struct_cachep)
return slab;
folio = slab_folio(slab);
token_addr = __slab_to_iee(slab);
alloc_token = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
/* Allocation of task_struct and token pages must be done at the same time. */
if (!alloc_token) {
/* Token page allocation failed. Free the slab pages already allocated,
* return NULL and let the caller retry with a smaller order.
*/
__slab_clear_pfmemalloc(slab);
folio->mapping = NULL;
/* Make the mapping reset visible before clearing the flag */
smp_wmb();
__folio_clear_slab(folio);
__free_pages((struct page *)folio, order);
return NULL;
}
/* Map allocated token pages to token addresses. */
iee_set_token_page_valid(token_addr, alloc_token, order);
return slab;
}
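/*
 * Expected caller (not visible in this diff): the slab page allocation path
 * is assumed to call iee_alloc_task_token_slab() right after a new slab for
 * task_struct_cachep has been set up, roughly:
 *
 *	slab = iee_alloc_task_token_slab(s, slab, order);
 *	if (!slab)
 *		return NULL;	/* fall back to a smaller order */
 *
 * so that token pages always exist before any task_struct from the slab is
 * handed out. The call site sketched here is illustrative only.
 */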
void __iee_code _iee_init_token(unsigned long __unused, struct task_struct *tsk)
{
/* Do nothing for now. Wait for later update. */
}
void __iee_code _iee_set_token_pgd(unsigned long __unused, struct task_struct *tsk,
pgd_t *pgd)
{
struct task_token *token = (struct task_token *)__addr_to_iee(tsk);
token->pgd = pgd;
}
void __iee_code _iee_validate_token(unsigned long __unused, struct task_struct *tsk)
{
struct task_token *token = (struct task_token *)__addr_to_iee(tsk);
if (token->valid)
pr_err("IEE: validate token for multiple times.");
token->valid = true;
}
void __iee_code _iee_invalidate_token(unsigned long __unused, struct task_struct *tsk)
{
struct task_token *token = (struct task_token *)__addr_to_iee(tsk);
token->pgd = NULL;
token->valid = false;
}

View File

@@ -184,6 +184,7 @@ unsigned int __weak iee_calculate_order(struct kmem_cache *s, unsigned int order
{
return order;
}
void __weak iee_set_min_partial(struct kmem_cache *s) { }
#endif
/*
* We could simply use migrate_disable()/enable() but as long as it's a
@@ -4690,6 +4691,9 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
#ifdef CONFIG_IEE
iee_set_min_partial(s);
#endif
set_cpu_partial(s);
#ifdef CONFIG_NUMA