anolis: kabi: revert all kabi_use
ANBZ: #9320

Because we don't maintain kABI consistency between major versions for now, revert all kabi_use changes; the reserved space is intended to ensure kABI compatibility between minor versions and their corresponding major version.

Signed-off-by: Guixin Liu <kanie@linux.alibaba.com>
Signed-off-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Reviewed-by: Yi Tao <escape@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/3344
parent 79967fc6e3
commit 3e6454be7f
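For reference, the CK_KABI_* macros this commit reverts follow the familiar RH_KABI reserve/use pattern: a reserved slot is a placeholder member that pins the struct layout, and "using" a slot overlays the new field on the placeholder through an anonymous union so member offsets and struct size stay unchanged. The sketch below is illustrative only, with assumed macro shapes and an invented struct ck_example; the cloud-kernel's actual definitions live in its kabi header and also provide the CK_KABI_USE_SPLIT, CK_KABI_EXTEND, CK_KABI_REPLACE and CK_KABI_DEPRECATE variants seen in the hunks below, plus size checking.

/*
 * Illustrative sketch only; assumed shapes, not the cloud-kernel's
 * actual CK_KABI_* definitions.
 */
#define _CK_KABI_RESERVE(n)     unsigned long ck_reserved##n

/* An unused slot: padding that pins the offsets of later members. */
#define CK_KABI_RESERVE(n)      _CK_KABI_RESERVE(n);

/*
 * A used slot: the new member shares storage with the placeholder via
 * an anonymous union, so sizeof() and member offsets stay unchanged.
 */
#define CK_KABI_USE(n, _new)            \
        union {                         \
                _new;                   \
                _CK_KABI_RESERVE(n);    \
        };

/* Hypothetical example, not taken from the kernel sources. */
struct ck_example {
        int orig_field;
        CK_KABI_USE(1, void *added_ptr) /* minor-version addition */
        CK_KABI_RESERVE(2)              /* still free */
};

Reverting CK_KABI_USE(n, field) is therefore mechanical: declare field as a plain member again and put CK_KABI_RESERVE(n) back, which is the shape of every hunk below.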
@@ -363,12 +363,13 @@ struct cgroup_rstat_cpu {
         */
        struct u64_stats_sync bsync;
        struct cgroup_base_stat bstat;
+       struct cgroup_base_stat_task bstat_task;
 
        /*
         * Snapshots at the last reading. These are used to calculate the
         * deltas to propagate to the global counters.
         */
        struct cgroup_base_stat last_bstat;
+       struct cgroup_base_stat_task last_bstat_task;
 
        /*
         * Child cgroups with stat updates on this cpu since the last read
@@ -383,8 +384,11 @@ struct cgroup_rstat_cpu {
         */
        struct cgroup *updated_children; /* terminated by self cgroup */
        struct cgroup *updated_next; /* NULL iff not on the list */
-       CK_KABI_EXTEND(struct cgroup_base_stat_task bstat_task)
-       CK_KABI_EXTEND(struct cgroup_base_stat_task last_bstat_task)
+
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
+       CK_KABI_RESERVE(3)
+       CK_KABI_RESERVE(4)
 };
 
 struct cgroup_freezer_state {
@@ -507,7 +511,9 @@ struct cgroup {
 
        /* cgroup basic resource statistics */
        struct cgroup_base_stat last_bstat;
+       struct cgroup_base_stat_task last_bstat_task;
        struct cgroup_base_stat bstat;
+       struct cgroup_base_stat_task bstat_task;
        struct prev_cputime prev_cputime; /* for printing out cputime */
 
        /*
@@ -544,8 +550,10 @@ struct cgroup {
        struct kernfs_root *hidden_place; /* tree to hide cgroup in pool. */
        struct delayed_work supply_pool_work;
 
-       CK_KABI_USE(1, 2, struct cgroup_base_stat_task last_bstat_task)
-       CK_KABI_USE(3, 4, struct cgroup_base_stat_task bstat_task)
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
+       CK_KABI_RESERVE(3)
+       CK_KABI_RESERVE(4)
 
        /* ids of the ancestors at each level including self */
        u64 ancestor_ids[];
@@ -488,9 +488,9 @@ struct address_space {
        struct list_head private_list;
        void *private_data;
 
-       /* CK_KABI_RESERVE(1) */
-       CK_KABI_USE(1, struct fast_reflink_work *fast_reflink_work);
+       struct fast_reflink_work *fast_reflink_work;
 
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -1915,7 +1915,6 @@ struct file_operations {
        int (*fadvise)(struct file *, loff_t, loff_t, int);
        int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
        int (*uring_cmd_iopoll)(struct io_uring_cmd *ioucmd);
-       CK_KABI_DEPRECATE(bool, may_pollfree)
 
        CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
@@ -214,8 +214,9 @@ struct iommu_iotlb_gather {
        unsigned long end;
        size_t pgsize;
        struct page *freelist;
+       bool queued;
 
-       CK_KABI_USE(1, bool queued)
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
 };
 
@@ -68,7 +68,7 @@ struct irqstat {
 struct irq_desc {
        struct irq_common_data irq_common_data;
        struct irq_data irq_data;
-       CK_KABI_REPLACE(unsigned int __percpu *kstat_irqs, struct irqstat __percpu *kstat_irqs);
+       struct irqstat __percpu *kstat_irqs;
        irq_flow_handler_t handle_irq;
        struct irqaction *action; /* IRQ action list */
        unsigned int status_use_accessors;
@@ -104,7 +104,9 @@ struct kernfs_elem_dir {
         * Monotonic revision counter, used to identify if a directory
         * node has changed during negative dentry revalidation.
         */
-       CK_KABI_USE(1, unsigned long rev)
+       unsigned long rev;
+
+       CK_KABI_RESERVE(1)
 };
 
 struct kernfs_elem_symlink {
@@ -530,12 +530,12 @@ struct mem_cgroup {
 #endif
 
 #ifdef CONFIG_LRU_GEN
-       CK_KABI_USE(1, unsigned long mglru_batch_size)
-       CK_KABI_USE(2, unsigned long mglru_reclaim_pages)
-#else
+       unsigned long mglru_batch_size;
+       unsigned long mglru_reclaim_pages;
+#endif
+
        CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
-#endif
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
        CK_KABI_RESERVE(5)
@@ -101,9 +101,10 @@ struct dev_pagemap_ops {
         * When this is not implemented, or it returns -EOPNOTSUPP, the caller
         * will fall back to a common handler called mf_generic_kill_procs().
         */
-       CK_KABI_USE(1,
-               int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
-                       unsigned long nr_pages, int mf_flags));
+       int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+                       unsigned long nr_pages, int mf_flags);
+
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
 };
 
@@ -563,10 +563,10 @@ struct vm_fault {
         * atomic context.
         */
 #ifdef CONFIG_PAGETABLE_SHARE
-       CK_KABI_USE(1, struct vm_area_struct *orig_vma) /* Original VMA */
-#else
-       CK_KABI_RESERVE(1)
+       struct vm_area_struct *orig_vma; /* Original VMA */
 #endif
+
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -394,13 +394,12 @@ struct vm_area_struct {
 #endif
 
 #ifdef CONFIG_PAGETABLE_SHARE
-       CK_KABI_USE(1, struct pgtable_share_struct *pgtable_share_data)
-#else
-       CK_KABI_RESERVE(1)
+       struct pgtable_share_struct *pgtable_share_data;
 #endif
+       bool fast_reflink;
 
-       CK_KABI_USE_SPLIT(2, bool fast_reflink);
-
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
 } __randomize_layout;
@@ -858,7 +858,9 @@ struct zone {
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
        atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
 
-       CK_KABI_USE(1, unsigned long reported_pages)
+       unsigned long reported_pages;
+
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -2177,6 +2177,8 @@ struct net_device {
        void *ml_priv;
        enum netdev_ml_priv_type ml_priv_type;
 
+       enum netdev_stat_type pcpu_stat_type:8;
+
        union {
                struct pcpu_lstats __percpu *lstats;
                struct pcpu_sw_netstats __percpu *tstats;
@@ -2234,9 +2236,10 @@ struct net_device {
        /* protected by rtnl_lock */
        struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
 
-       CK_KABI_USE_SPLIT(1, enum netdev_stat_type pcpu_stat_type:8)
        /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
-       CK_KABI_USE(2, struct dim_irq_moder *irq_moder)
+       struct dim_irq_moder *irq_moder;
 
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -666,6 +666,7 @@ struct perf_event {
        /* The cumulative AND of all event_caps for events in this group. */
        int group_caps;
 
+       unsigned int group_generation;
        struct perf_event *group_leader;
        struct pmu *pmu;
        void *pmu_private;
@@ -776,7 +777,7 @@ struct perf_event {
        struct list_head sb_list;
 #endif /* CONFIG_PERF_EVENTS */
 
-       CK_KABI_USE_SPLIT(1, unsigned int group_generation)
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -1025,6 +1026,7 @@ struct perf_sample_data {
        u64 addr;
        struct perf_raw_record *raw;
        struct perf_branch_stack *br_stack;
+       u64 *br_stack_cntr;
        u64 period;
        union perf_sample_weight weight;
        u64 txn;
@@ -1058,7 +1060,6 @@ struct perf_sample_data {
        u64 cgroup;
        u64 data_page_size;
        u64 code_page_size;
-       CK_KABI_EXTEND(u64 *br_stack_cntr);
 } ____cacheline_aligned;
 
 /* default value for data source */
@@ -498,29 +498,25 @@ struct sched_statistics {
 
 #ifdef CONFIG_SCHED_CORE
        u64 core_forceidle_sum;
+       u64 core_forceidle_task_sum;
+       u64 forceidled_sum;
+       u64 forceidled_sum_base;
 #endif
 
 #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
        u64 core_sibidle_sum;
+       u64 core_sibidle_task_sum;
 #endif
 
-       CK_KABI_USE(1, unsigned long forceidled_sum)
-       CK_KABI_USE(2, unsigned long forceidled_sum_base)
-#ifdef CONFIG_SCHED_CORE
-       CK_KABI_USE(3, unsigned long core_forceidle_task_sum)
-#else
        CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
-#endif
-#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
-       CK_KABI_USE(4, unsigned long core_sibidle_task_sum)
-#else
        CK_KABI_RESERVE(4)
-#endif
        CK_KABI_RESERVE(5)
        CK_KABI_RESERVE(6)
        CK_KABI_RESERVE(7)
        CK_KABI_RESERVE(8)
 #endif
 #endif /* CONFIG_SCHEDSTATS */
 };
@@ -546,7 +542,7 @@ struct sched_entity {
        u64 cg_iowait_start;
        u64 cg_ineffective_sum;
        u64 cg_ineffective_start;
-       CK_KABI_REPLACE_SPLIT(seqlock_t idle_seqlock, seqcount_t idle_seqcount);
+       seqcount_t idle_seqcount;
        spinlock_t iowait_lock;
 
        u64 nr_migrations;
@@ -592,7 +588,9 @@ struct sched_entity {
 #endif
 #endif
 
-       CK_KABI_USE(1, long priority)
+       long priority;
+
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -1032,6 +1030,9 @@ struct task_struct {
        /* CLONE_CHILD_CLEARTID: */
        int __user *clear_child_tid;
 
+       /* PF_IO_WORKER */
+       void *pf_io_worker;
+
        u64 utime;
        u64 stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -1513,8 +1514,8 @@ struct task_struct {
        struct cpumask cpus_allowed_alt;
        int soft_cpus_version;
 #endif
-       /* PF_IO_WORKER */
-       CK_KABI_USE(1, void *pf_io_worker)
+
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -129,8 +129,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
        PREZERO_HW_CLEAR,
        PREZERO_HW_CLEAR_PAGES,
 #endif
-       CK_KABI_EXTEND_ENUM(ALLOC_REPORTED_PAGE)
-       CK_KABI_EXTEND_ENUM(REPORT_PAGE)
+       ALLOC_REPORTED_PAGE,
+       REPORT_PAGE,
        NR_VM_EVENT_ITEMS,
 };
 
@@ -96,7 +96,6 @@ struct net {
        struct list_head dev_base_head;
        struct proc_dir_entry *proc_net;
        struct proc_dir_entry *proc_net_stat;
-       CK_KABI_DEPRECATE(struct proc_dir_entry *, proc_net_smc)
 
 #ifdef CONFIG_SYSCTL
        struct ctl_table_set sysctls;
@@ -28,8 +28,10 @@ struct netns_smc {
        int sysctl_wmem;
        int sysctl_rmem;
        int sysctl_tcp2smc;
-       CK_KABI_USE_SPLIT(1, int sysctl_max_links_per_lgr, int sysctl_max_conns_per_lgr)
+       int sysctl_max_links_per_lgr;
+       int sysctl_max_conns_per_lgr;
 
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -44,9 +44,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd);
-#endif
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
@@ -7130,7 +7130,7 @@ static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
        first = list_empty(&rq->cfsb_csd_list);
        list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
        if (first)
-               smp_call_function_single_async(cpu_of(rq), cpu_cfsb_csd(cpu_of(rq)));
+               smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
 }
 #else
 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
@@ -14334,7 +14334,7 @@ __init void init_sched_fair_class(void)
 
        for_each_possible_cpu(i) {
 #ifdef CONFIG_CFS_BANDWIDTH
-               INIT_CSD(cpu_cfsb_csd(i), __cfsb_csd_unthrottle, cpu_rq(i));
+               INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
                INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
 #endif
        }
@@ -554,8 +554,9 @@ struct task_group {
        int specs_ratio;
        bool group_balancer;
 #endif
+       long priority;
 
-       CK_KABI_USE(1, long priority)
+       CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
@@ -695,11 +696,11 @@ struct cfs_rq {
 
 #ifdef CONFIG_GROUP_IDENTITY
        unsigned int nr_tasks;
+       unsigned int h_nr_expel_immune;
        u64 min_under_vruntime;
-#ifdef CONFIG_SCHED_SMT
        u64 expel_spread;
        u64 expel_start;
-       unsigned int h_nr_expel_immune;
+#ifdef CONFIG_SCHED_SMT
        struct list_head expel_list;
 #endif
        struct rb_root_cached under_timeline;
@@ -785,20 +786,14 @@ struct cfs_rq {
        unsigned long nr_uninterruptible;
 
 #ifdef CONFIG_SMP
-       CK_KABI_USE(1, 2, struct list_head throttled_csd_list)
-#else
-       CK_KABI_RESERVE(1)
-       CK_KABI_RESERVE(2)
+       struct list_head throttled_csd_list;
 #endif
-#if defined(CONFIG_GROUP_IDENTITY) && !defined(CONFIG_SCHED_SMT)
-       CK_KABI_USE(3, unsigned int h_nr_expel_immune)
-       CK_KABI_USE(4, u64 expel_spread)
-       CK_KABI_USE(5, u64 expel_start)
-#else
+
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
        CK_KABI_RESERVE(3)
        CK_KABI_RESERVE(4)
        CK_KABI_RESERVE(5)
-#endif
        CK_KABI_RESERVE(6)
        CK_KABI_RESERVE(7)
        CK_KABI_RESERVE(8)
@@ -1332,6 +1327,7 @@ struct rq {
        unsigned int core_forceidle_seq;
        unsigned int core_sibidle_occupation;
        u64 core_sibidle_start;
+       u64 core_sibidle_start_task;
        unsigned int core_id;
        unsigned int core_sibidle_count;
        bool in_forceidle;
@@ -1339,35 +1335,28 @@ struct rq {
 #endif
 
 #ifdef CONFIG_SCHED_ACPU
        u64 acpu_idle_sum;
        u64 sibidle_sum;
        u64 last_acpu_update_time;
+       u64 sibidle_task_sum;
+       u64 last_acpu_update_time_task;
 #endif
 
 #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-       CK_KABI_USE(1, 2, struct list_head cfsb_csd_list)
-#else
-       CK_KABI_RESERVE(1)
-       CK_KABI_RESERVE(2)
+       call_single_data_t cfsb_csd;
+       struct list_head cfsb_csd_list;
 #endif
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) && defined(CONFIG_ARM64)
-       CK_KABI_USE(3, u64 prev_irq_time);
-#else
-       CK_KABI_RESERVE(3)
+       u64 prev_irq_time;
 #endif
-#ifdef CONFIG_SCHED_CORE
-       CK_KABI_USE(4, u64 core_sibidle_start_task)
-#else
-       CK_KABI_RESERVE(4)
-#endif
-#ifdef CONFIG_SCHED_ACPU
-       CK_KABI_USE(5, u64 sibidle_task_sum)
-       CK_KABI_USE(6, u64 last_acpu_update_time_task)
-#else
-       CK_KABI_RESERVE(5)
-       CK_KABI_RESERVE(6)
-#endif
+
+       CK_KABI_RESERVE(1)
+       CK_KABI_RESERVE(2)
+       CK_KABI_RESERVE(3)
+       CK_KABI_RESERVE(4)
+       CK_KABI_RESERVE(5)
+       CK_KABI_RESERVE(6)
        CK_KABI_RESERVE(7)
        CK_KABI_RESERVE(8)
 };
@@ -1422,11 +1411,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 #define raw_rq()               raw_cpu_ptr(&runqueues)
 
-#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-DECLARE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd);
-#define cpu_cfsb_csd(cpu)      (&per_cpu(cfsb_csd, (cpu)))
-#endif
-
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);