diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d641efeef34a..fb938ab51f85 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1553,6 +1553,10 @@ config IEE_SIP depends on IEE def_bool y +config IEE_PTRP + depends on IEE + def_bool y + # Common NUMA Features config NUMA bool "NUMA Memory Allocation and Scheduler Support" diff --git a/arch/x86/include/asm/haoc/haoc-def.h b/arch/x86/include/asm/haoc/haoc-def.h index 9abddba274c9..1eb801149521 100644 --- a/arch/x86/include/asm/haoc/haoc-def.h +++ b/arch/x86/include/asm/haoc/haoc-def.h @@ -15,6 +15,11 @@ enum { IEE_OP_MEMSET, IEE_OP_SET_FREEPTR, IEE_OP_TEST_CLEAR_BIT, +#ifdef CONFIG_IEE_PTRP + IEE_OP_SET_TOKEN_PGD, + IEE_OP_INVALIDATE_TOKEN, + IEE_OP_VALIDATE_TOKEN, +#endif IEE_FLAG_END }; diff --git a/arch/x86/include/asm/haoc/haoc.h b/arch/x86/include/asm/haoc/haoc.h index e6d1193c4f1a..8b24a71ec67c 100644 --- a/arch/x86/include/asm/haoc/haoc.h +++ b/arch/x86/include/asm/haoc/haoc.h @@ -11,10 +11,17 @@ #define _LINUX_HAOC_H #include +#include void _iee_memcpy(unsigned long __unused, void *dst, void *src, size_t n); void _iee_memset(unsigned long __unused, void *ptr, int data, size_t n); void _iee_set_freeptr(unsigned long __unused, void **pptr, void *ptr); unsigned long _iee_test_and_clear_bit(unsigned long __unused, long nr, unsigned long *addr); +#ifdef CONFIG_IEE_PTRP +void _iee_set_token_pgd(unsigned long __unused, struct task_struct *tsk, + pgd_t *pgd); +void _iee_invalidate_token(unsigned long __unused, struct task_struct *tsk); +void _iee_validate_token(unsigned long __unused, struct task_struct *tsk); +#endif #endif diff --git a/arch/x86/include/asm/haoc/iee-func.h b/arch/x86/include/asm/haoc/iee-func.h index 42455aa11615..523ebecf7f38 100644 --- a/arch/x86/include/asm/haoc/iee-func.h +++ b/arch/x86/include/asm/haoc/iee-func.h @@ -10,7 +10,9 @@ #ifndef _LINUX_IEE_FUNC_H #define _LINUX_IEE_FUNC_H +#include extern void set_iee_page(unsigned long addr, unsigned int order); extern void 
unset_iee_page(unsigned long addr, unsigned int order); - +extern bool iee_free_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order); +extern unsigned int iee_calculate_order(struct kmem_cache *s, unsigned int order); #endif /* _LINUX_IEE_FUNC_H */ diff --git a/arch/x86/kernel/haoc/haoc.c b/arch/x86/kernel/haoc/haoc.c index 4676f3c0454e..79ebc303ae99 100644 --- a/arch/x86/kernel/haoc/haoc.c +++ b/arch/x86/kernel/haoc/haoc.c @@ -15,5 +15,10 @@ iee_func iee_funcs[] = { (iee_func)_iee_memset, (iee_func)_iee_set_freeptr, (iee_func)_iee_test_and_clear_bit, +#ifdef CONFIG_IEE_PTRP + (iee_func)_iee_set_token_pgd, + (iee_func)_iee_invalidate_token, + (iee_func)_iee_validate_token, +#endif NULL }; diff --git a/arch/x86/kernel/haoc/iee/Makefile b/arch/x86/kernel/haoc/iee/Makefile index 33c902b314ca..6c13071f54d3 100644 --- a/arch/x86/kernel/haoc/iee/Makefile +++ b/arch/x86/kernel/haoc/iee/Makefile @@ -1,3 +1,4 @@ obj-y += iee-gate.o iee-init.o iee.o iee-func.o obj-$(CONFIG_IEE_SIP) += iee-si.o +obj-$(CONFIG_IEE_PTRP) += iee-token.o ccflags-y += -I$(srctree)/mm diff --git a/arch/x86/kernel/haoc/iee/iee-func.c b/arch/x86/kernel/haoc/iee/iee-func.c index 7b669401c624..c18dbecb696d 100644 --- a/arch/x86/kernel/haoc/iee/iee-func.c +++ b/arch/x86/kernel/haoc/iee/iee-func.c @@ -8,6 +8,11 @@ */ #include +#include +#include "slab.h" +#ifdef CONFIG_IEE_PTRP +#include +#endif void set_iee_page(unsigned long addr, unsigned int order) { @@ -18,3 +23,58 @@ void unset_iee_page(unsigned long addr, unsigned int order) { set_memory_rw(addr, 1 << order); } + +struct iee_free_slab_work { + struct work_struct work; + struct kmem_cache *s; + struct slab *slab; +}; + +void iee_free_slab(struct kmem_cache *s, struct slab *slab, + void (*do_free_slab)(struct work_struct *work)) +{ + struct iee_free_slab_work *iee_free_slab_work = + kmalloc(sizeof(struct iee_free_slab_work), GFP_ATOMIC); + + iee_free_slab_work->s = s; + iee_free_slab_work->slab = slab; + 
INIT_WORK(&iee_free_slab_work->work, do_free_slab); + schedule_work(&iee_free_slab_work->work); +} + +#ifdef CONFIG_IEE_PTRP +static void iee_free_task_struct_slab(struct work_struct *work) +{ + struct iee_free_slab_work *iee_free_slab_work = + container_of(work, struct iee_free_slab_work, work); + struct slab *slab = iee_free_slab_work->slab; + struct folio *folio = slab_folio(slab); + unsigned int order = folio_order(folio); + unsigned long token = __slab_to_iee(slab); + // Free token. + iee_set_token_page_invalid(token, 0, order); + __free_pages(&folio->page, order); + kfree(iee_free_slab_work); +} +#endif + +bool iee_free_slab_data(struct kmem_cache *s, struct slab *slab, + unsigned int order) +{ +#ifdef CONFIG_IEE_PTRP + if (s == task_struct_cachep) { + iee_free_slab(s, slab, iee_free_task_struct_slab); + return true; + } +#endif + return false; +} + +unsigned int iee_calculate_order(struct kmem_cache *s, unsigned int order) +{ +#ifdef CONFIG_IEE_PTRP + if (strcmp(s->name, "task_struct") == 0) + return IEE_DATA_ORDER; +#endif + return order; +} \ No newline at end of file diff --git a/fs/exec.c b/fs/exec.c index a4efccc95160..fbc7d9e0135f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -78,6 +78,10 @@ #include +#ifdef CONFIG_IEE_PTRP +#include +#endif + static int bprm_creds_from_file(struct linux_binprm *bprm); int suid_dumpable = 0; @@ -1035,6 +1039,10 @@ static int exec_mmap(struct mm_struct *mm) if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) local_irq_enable(); activate_mm(active_mm, mm); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(tsk, mm->pgd); +#endif if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) local_irq_enable(); lru_gen_add_mm(mm); diff --git a/kernel/exit.c b/kernel/exit.c index 3540b2c9b1b6..bf76cda53b51 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -73,6 +73,9 @@ #include #include #include +#ifdef CONFIG_IEE_PTRP +#include +#endif /* * The default value should be high enough to not crash a system that randomly 
@@ -560,6 +563,10 @@ static void exit_mm(void) smp_mb__after_spinlock(); local_irq_disable(); current->mm = NULL; +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(current, NULL); +#endif membarrier_update_current_mm(NULL); enter_lazy_tlb(mm, current); local_irq_enable(); diff --git a/kernel/fork.c b/kernel/fork.c index e90ba9d72974..61c5ab7a4370 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -111,6 +111,9 @@ #define CREATE_TRACE_POINTS #include +#ifdef CONFIG_IEE_PTRP +#include +#endif /* * Minimum number of threads to boot the kernel @@ -167,7 +170,11 @@ void __weak arch_release_task_struct(struct task_struct *tsk) } #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR +#ifdef CONFIG_IEE_PTRP +struct kmem_cache *task_struct_cachep; +#else static struct kmem_cache *task_struct_cachep; +#endif static inline struct task_struct *alloc_task_struct_node(int node) { @@ -629,6 +636,10 @@ void free_task(struct task_struct *tsk) if (tsk->flags & PF_KTHREAD) free_kthread_struct(tsk); bpf_task_storage_free(tsk); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_invalidate_token(tsk); +#endif free_task_struct(tsk); } EXPORT_SYMBOL(free_task); @@ -1750,6 +1761,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) #endif tsk->mm = NULL; +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(tsk, NULL); +#endif tsk->active_mm = NULL; /* @@ -1771,6 +1786,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) } tsk->mm = mm; +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(tsk, mm->pgd); +#endif tsk->active_mm = mm; sched_mm_cid_fork(tsk); return 0; @@ -2358,6 +2377,10 @@ __latent_entropy struct task_struct *copy_process( p = dup_task_struct(current, node); if (!p) goto fork_out; +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_validate_token(p); +#endif p->flags &= ~PF_KTHREAD; if (args->kthread) p->flags |= PF_KTHREAD; diff --git a/kernel/kthread.c b/kernel/kthread.c index 980e6b325b7d..0caf7249eda5 100644 
--- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -29,6 +29,9 @@ #include #include #include +#ifdef CONFIG_IEE_PTRP +#include +#endif static DEFINE_SPINLOCK(kthread_create_lock); @@ -1457,6 +1460,10 @@ void kthread_use_mm(struct mm_struct *mm) tsk->active_mm = mm; tsk->mm = mm; membarrier_update_current_mm(mm); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(tsk, mm->pgd); +#endif switch_mm_irqs_off(active_mm, mm, tsk); local_irq_enable(); task_unlock(tsk); @@ -1501,6 +1508,10 @@ void kthread_unuse_mm(struct mm_struct *mm) local_irq_disable(); tsk->mm = NULL; membarrier_update_current_mm(NULL); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_set_token_pgd(tsk, NULL); +#endif mmgrab_lazy_tlb(mm); /* active_mm is still 'mm' */ enter_lazy_tlb(mm, tsk); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2c160c530995..99cc19591a7c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -95,6 +95,9 @@ #include "../workqueue_internal.h" #include "../../io_uring/io-wq.h" #include "../smpboot.h" +#ifdef CONFIG_IEE_PTRP +#include +#endif EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu); EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask); @@ -5362,6 +5365,10 @@ context_switch(struct rq *rq, struct task_struct *prev, * case 'prev->active_mm == next->mm' through * finish_task_switch()'s mmdrop(). */ +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + iee_verify_token_pgd(next); +#endif switch_mm_irqs_off(prev->active_mm, next->mm, next); lru_gen_use_mm(next->mm); diff --git a/mm/slub.c b/mm/slub.c index d2544c88a5c4..6985535d02eb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -44,6 +44,12 @@ #include #include +#ifdef CONFIG_IEE +#include +#endif +#ifdef CONFIG_IEE_PTRP +#include +#endif #include "internal.h" @@ -165,7 +171,17 @@ * options set. This moves slab handling out of * the fast path and disables lockless freelists. 
*/ - +#ifdef CONFIG_IEE +void __weak iee_allocate_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order) {} +bool __weak iee_free_slab_data(struct kmem_cache *s, struct slab *slab, unsigned int order) +{ + return false; +} +unsigned int __weak iee_calculate_order(struct kmem_cache *s, unsigned int order) +{ + return order; +} +#endif /* * We could simply use migrate_disable()/enable() but as long as it's a * function call even on !PREEMPT_RT, use inline preempt_disable() there. @@ -2021,6 +2037,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; slab = alloc_slab_page(alloc_gfp, node, oo); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + slab = iee_alloc_task_token_slab(s, slab, oo_order(oo)); +#endif if (unlikely(!slab)) { oo = s->min; alloc_gfp = flags; @@ -2029,6 +2049,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) * Try a lower order alloc if possible */ slab = alloc_slab_page(alloc_gfp, node, oo); +#ifdef CONFIG_IEE_PTRP + if(haoc_enabled) + slab = iee_alloc_task_token_slab(s, slab, oo_order(oo)); +#endif if (unlikely(!slab)) return NULL; stat(s, ORDER_FALLBACK); @@ -2038,6 +2062,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) slab->inuse = 0; slab->frozen = 0; +#ifdef CONFIG_IEE + if(haoc_enabled) + iee_allocate_slab_data(s, slab, oo_order(oo)); +#endif account_slab(slab, oo_order(oo), s, flags); slab->slab_cache = s; @@ -2090,6 +2118,13 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab) __folio_clear_slab(folio); mm_account_reclaimed_pages(pages); unaccount_slab(slab, order, s); +#ifdef CONFIG_IEE + if(haoc_enabled) + { + if (iee_free_slab_data(s, slab, order)) + return; + } +#endif __free_pages(&folio->page, order); } @@ -4487,7 +4522,10 @@ static int calculate_sizes(struct kmem_cache *s) s->size = size; s->reciprocal_size = reciprocal_value(size); order = 
calculate_order(size); - + #ifdef CONFIG_IEE + if (haoc_enabled) + order = iee_calculate_order(s, order); + #endif if ((int)order < 0) return 0;