HAOC: Support pointer protection for x86 IEE (IEE_PTRP)

Support pointer protection (IEE_PTRP) for the x86 IEE in the
deepin kernel.

Signed-off-by: Liu Zhehui <liuzhh@zgclab.edu.cn>
This commit is contained in:
0bluewhale0 2025-04-18 14:38:20 +08:00 committed by Avenger-285714
parent 7d1832b640
commit 7806e1cc71
13 changed files with 181 additions and 3 deletions

View File

@ -1553,6 +1553,10 @@ config IEE_SIP
depends on IEE
def_bool y
config IEE_PTRP
depends on IEE
def_bool y
# Common NUMA Features
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"

View File

@ -15,6 +15,11 @@ enum {
IEE_OP_MEMSET,
IEE_OP_SET_FREEPTR,
IEE_OP_TEST_CLEAR_BIT,
#ifdef CONFIG_IEE_PTRP
IEE_OP_SET_TOKEN_PGD,
IEE_OP_INVALIDATE_TOKEN,
IEE_OP_VALIDATE_TOKEN,
#endif
IEE_FLAG_END
};

View File

@ -11,10 +11,17 @@
#define _LINUX_HAOC_H
#include <linux/types.h>
#include <linux/sched.h>
void _iee_memcpy(unsigned long __unused, void *dst, void *src, size_t n);
void _iee_memset(unsigned long __unused, void *ptr, int data, size_t n);
void _iee_set_freeptr(unsigned long __unused, void **pptr, void *ptr);
unsigned long _iee_test_and_clear_bit(unsigned long __unused,
long nr, unsigned long *addr);
#ifdef CONFIG_IEE_PTRP
void _iee_set_token_pgd(unsigned long __unused, struct task_struct *tsk,
pgd_t *pgd);
void _iee_invalidate_token(unsigned long __unused, struct task_struct *tsk);
void _iee_validate_token(unsigned long __unused, struct task_struct *tsk);
#endif
#endif

View File

@ -10,7 +10,9 @@
#ifndef _LINUX_IEE_FUNC_H
#define _LINUX_IEE_FUNC_H
#include <linux/slab.h>
extern void set_iee_page(unsigned long addr, unsigned int order);
extern void unset_iee_page(unsigned long addr, unsigned int order);
extern bool iee_free_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order);
extern unsigned int iee_calculate_order(struct kmem_cache *s, unsigned int order);
#endif /* _LINUX_IEE_FUNC_H */

View File

@ -15,5 +15,10 @@ iee_func iee_funcs[] = {
(iee_func)_iee_memset,
(iee_func)_iee_set_freeptr,
(iee_func)_iee_test_and_clear_bit,
#ifdef CONFIG_IEE_PTRP
(iee_func)_iee_set_token_pgd,
(iee_func)_iee_invalidate_token,
(iee_func)_iee_validate_token,
#endif
NULL
};

View File

@ -1,3 +1,4 @@
obj-y += iee-gate.o iee-init.o iee.o iee-func.o
obj-$(CONFIG_IEE_SIP) += iee-si.o
obj-$(CONFIG_IEE_PTRP) += iee-token.o
ccflags-y += -I$(srctree)/mm

View File

@ -8,6 +8,11 @@
*/
#include <linux/set_memory.h>
#include <asm/haoc/iee-func.h>
#include "slab.h"
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
void set_iee_page(unsigned long addr, unsigned int order)
{
@ -18,3 +23,58 @@ void unset_iee_page(unsigned long addr, unsigned int order)
{
set_memory_rw(addr, 1 << order);
}
/*
 * Deferred-free request: bundles the workqueue item with the cache/slab
 * pair that the worker callback must release.  Allocated in
 * iee_free_slab() and kfree()d by the callback once the slab is gone.
 */
struct iee_free_slab_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct kmem_cache *s;		/* cache the slab belongs to */
	struct slab *slab;		/* slab being torn down */
};
/*
 * iee_free_slab - defer freeing of an IEE-protected slab to a workqueue.
 * @s:            cache the slab belongs to
 * @slab:         slab to be released
 * @do_free_slab: worker callback performing the actual teardown; it must
 *                kfree() the struct iee_free_slab_work it receives
 *
 * Packages the request and hands it to the system workqueue via
 * schedule_work(), so the teardown runs in process context.
 */
void iee_free_slab(struct kmem_cache *s, struct slab *slab,
		   void (*do_free_slab)(struct work_struct *work))
{
	struct iee_free_slab_work *iee_free_slab_work =
		kmalloc(sizeof(struct iee_free_slab_work), GFP_ATOMIC);

	/*
	 * GFP_ATOMIC allocations can fail; the previous code dereferenced
	 * the result unconditionally.  Bail out instead of crashing — the
	 * slab is leaked in that rare case, which is the lesser evil.
	 */
	if (!iee_free_slab_work)
		return;

	iee_free_slab_work->s = s;
	iee_free_slab_work->slab = slab;
	INIT_WORK(&iee_free_slab_work->work, do_free_slab);
	schedule_work(&iee_free_slab_work->work);
}
#ifdef CONFIG_IEE_PTRP
/*
 * Workqueue callback that tears down a task_struct slab: the IEE token
 * mapping is invalidated first, then the backing pages are returned to
 * the page allocator, and finally the request itself is freed.
 */
static void iee_free_task_struct_slab(struct work_struct *work)
{
	struct iee_free_slab_work *req =
		container_of(work, struct iee_free_slab_work, work);
	struct folio *folio = slab_folio(req->slab);
	unsigned int order = folio_order(folio);
	unsigned long token = __slab_to_iee(req->slab);

	/* Invalidate the token pages before the slab pages go away. */
	iee_set_token_page_invalid(token, 0, order);
	__free_pages(&folio->page, order);
	kfree(req);
}
#endif
/*
 * Decide whether slab teardown for @s must be taken over by IEE.
 * Returns true when the free has been deferred to a worker (caller must
 * not touch the slab afterwards), false when the caller should free the
 * slab pages itself.  @order is unused here but part of the hook's
 * signature.
 */
bool iee_free_slab_data(struct kmem_cache *s, struct slab *slab,
			unsigned int order)
{
#ifdef CONFIG_IEE_PTRP
	if (s != task_struct_cachep)
		return false;
	/* task_struct slabs carry IEE tokens; defer their teardown. */
	iee_free_slab(s, slab, iee_free_task_struct_slab);
	return true;
#else
	return false;
#endif
}
/*
 * Override the computed slab order for the task_struct cache so its
 * slabs match the fixed IEE token layout (IEE_DATA_ORDER); every other
 * cache keeps the order the allocator calculated.  The cache is matched
 * by name because this runs during cache creation.
 */
unsigned int iee_calculate_order(struct kmem_cache *s, unsigned int order)
{
#ifdef CONFIG_IEE_PTRP
	if (!strcmp(s->name, "task_struct"))
		order = IEE_DATA_ORDER;
#endif
	return order;
}

View File

@ -78,6 +78,10 @@
#include <trace/events/sched.h>
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
static int bprm_creds_from_file(struct linux_binprm *bprm);
int suid_dumpable = 0;
@ -1035,6 +1039,10 @@ static int exec_mmap(struct mm_struct *mm)
if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
activate_mm(active_mm, mm);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(tsk, mm->pgd);
#endif
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
lru_gen_add_mm(mm);

View File

@ -73,6 +73,9 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
/*
* The default value should be high enough to not crash a system that randomly
@ -560,6 +563,10 @@ static void exit_mm(void)
smp_mb__after_spinlock();
local_irq_disable();
current->mm = NULL;
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(current, NULL);
#endif
membarrier_update_current_mm(NULL);
enter_lazy_tlb(mm, current);
local_irq_enable();

View File

@ -111,6 +111,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
/*
* Minimum number of threads to boot the kernel
@ -167,7 +170,11 @@ void __weak arch_release_task_struct(struct task_struct *tsk)
}
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifdef CONFIG_IEE_PTRP
struct kmem_cache *task_struct_cachep;
#else
static struct kmem_cache *task_struct_cachep;
#endif
static inline struct task_struct *alloc_task_struct_node(int node)
{
@ -629,6 +636,10 @@ void free_task(struct task_struct *tsk)
if (tsk->flags & PF_KTHREAD)
free_kthread_struct(tsk);
bpf_task_storage_free(tsk);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_invalidate_token(tsk);
#endif
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@ -1750,6 +1761,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
#endif
tsk->mm = NULL;
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(tsk, NULL);
#endif
tsk->active_mm = NULL;
/*
@ -1771,6 +1786,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
}
tsk->mm = mm;
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(tsk, mm->pgd);
#endif
tsk->active_mm = mm;
sched_mm_cid_fork(tsk);
return 0;
@ -2358,6 +2377,10 @@ __latent_entropy struct task_struct *copy_process(
p = dup_task_struct(current, node);
if (!p)
goto fork_out;
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_validate_token(p);
#endif
p->flags &= ~PF_KTHREAD;
if (args->kthread)
p->flags |= PF_KTHREAD;

View File

@ -29,6 +29,9 @@
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
static DEFINE_SPINLOCK(kthread_create_lock);
@ -1457,6 +1460,10 @@ void kthread_use_mm(struct mm_struct *mm)
tsk->active_mm = mm;
tsk->mm = mm;
membarrier_update_current_mm(mm);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(tsk, mm->pgd);
#endif
switch_mm_irqs_off(active_mm, mm, tsk);
local_irq_enable();
task_unlock(tsk);
@ -1501,6 +1508,10 @@ void kthread_unuse_mm(struct mm_struct *mm)
local_irq_disable();
tsk->mm = NULL;
membarrier_update_current_mm(NULL);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_set_token_pgd(tsk, NULL);
#endif
mmgrab_lazy_tlb(mm);
/* active_mm is still 'mm' */
enter_lazy_tlb(mm, tsk);

View File

@ -95,6 +95,9 @@
#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
@ -5362,6 +5365,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
* case 'prev->active_mm == next->mm' through
* finish_task_switch()'s mmdrop().
*/
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
iee_verify_token_pgd(next);
#endif
switch_mm_irqs_off(prev->active_mm, next->mm, next);
lru_gen_use_mm(next->mm);

View File

@ -44,6 +44,12 @@
#include <linux/debugfs.h>
#include <trace/events/kmem.h>
#ifdef CONFIG_IEE
#include <asm/haoc/iee-func.h>
#endif
#ifdef CONFIG_IEE_PTRP
#include <asm/haoc/iee-token.h>
#endif
#include "internal.h"
@ -165,7 +171,17 @@
* options set. This moves slab handling out of
* the fast path and disables lockless freelists.
*/
#ifdef CONFIG_IEE
/*
 * Weak no-op defaults for the IEE slab hooks.  The real implementations
 * (mm/haoc iee-func.c) override these when that code is built in; when
 * it is not, slub behaves exactly as before.
 */
void __weak iee_allocate_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order) {}

/* Default: never take over a slab free; the caller frees the pages. */
bool __weak iee_free_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order)
{
	return false;
}

/* Default: leave the computed slab order untouched. */
unsigned int __weak iee_calculate_order(struct kmem_cache *s, unsigned int order)
{
	return order;
}
#endif
/*
* We could simply use migrate_disable()/enable() but as long as it's a
* function call even on !PREEMPT_RT, use inline preempt_disable() there.
@ -2021,6 +2037,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
slab = alloc_slab_page(alloc_gfp, node, oo);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
slab = iee_alloc_task_token_slab(s, slab, oo_order(oo));
#endif
if (unlikely(!slab)) {
oo = s->min;
alloc_gfp = flags;
@ -2029,6 +2049,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* Try a lower order alloc if possible
*/
slab = alloc_slab_page(alloc_gfp, node, oo);
#ifdef CONFIG_IEE_PTRP
if(haoc_enabled)
slab = iee_alloc_task_token_slab(s, slab, oo_order(oo));
#endif
if (unlikely(!slab))
return NULL;
stat(s, ORDER_FALLBACK);
@ -2038,6 +2062,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
slab->inuse = 0;
slab->frozen = 0;
#ifdef CONFIG_IEE
if(haoc_enabled)
iee_allocate_slab_data(s, slab, oo_order(oo));
#endif
account_slab(slab, oo_order(oo), s, flags);
slab->slab_cache = s;
@ -2090,6 +2118,13 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
__folio_clear_slab(folio);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
#ifdef CONFIG_IEE
if(haoc_enabled)
{
if (iee_free_slab_data(s, slab, order))
return;
}
#endif
__free_pages(&folio->page, order);
}
@ -4487,7 +4522,10 @@ static int calculate_sizes(struct kmem_cache *s)
s->size = size;
s->reciprocal_size = reciprocal_value(size);
order = calculate_order(size);
#ifdef CONFIG_IEE
if(haoc_enabled)
order = iee_calculate_order(s, order);
#endif
if ((int)order < 0)
return 0;