anolis: sched: introduce simplified proxy execution for jbd2_lock
ANBZ: #20446

When an underclass task holds the jbd2 lock, it may voluntarily
reschedule, and because of its low priority it may not get back on a
CPU for a while, leaving other tasks waiting on the lock for a long
time. To let underclass tasks release the jbd2 lock as soon as
possible, we use simplified proxy execution for the jbd2 lock: when an
underclass task takes the jbd2 lock, we move it to the root task group
temporarily, and move it back to its original task group when it
releases the lock.

We also introduce switches to control whether proxy exec is enabled for
a given jbd2 lock, and whether it is enabled for highclass tasks (it
applies to normal and underclass tasks by default):
 - /proc/fs/jbd2/$dev/proxy_exec
 - /proc/fs/jbd2/$dev/proxy_exec_for_highclass

Signed-off-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Reviewed-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Cruz Zhao <cruzzhao@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/5086
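For illustration only, a minimal userspace sketch of flipping these switches; set_jbd2_proxy_exec() and the device name are hypothetical, not part of the patch, and the actual directory name under /proc/fs/jbd2/ depends on the journal device:

/* Hypothetical helper, not part of this patch: writes "0" or "1" to one
 * of the per-journal proxy-exec switches under /proc/fs/jbd2/. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_jbd2_proxy_exec(const char *dev, const char *knob, int val)
{
        char path[256];
        char buf = val ? '1' : '0';
        int fd, ret = 0;

        /* e.g. dev = "sda2-8", knob = "proxy_exec" or "proxy_exec_for_highclass" */
        snprintf(path, sizeof(path), "/proc/fs/jbd2/%s/%s", dev, knob);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        if (write(fd, &buf, 1) != 1)
                ret = -1;
        close(fd);
        return ret;
}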
@@ -1518,6 +1518,158 @@ static const struct proc_ops jbd2_stall_thresh_proc_ops = {
        .proc_release   = single_release,
};

static atomic_t jbd2_proxy_exec_count;
static DEFINE_MUTEX(jbd2_proxy_exec_mutex);
DEFINE_STATIC_KEY_FALSE(__jbd2_proxy_exec_enabled);

/*
 * When we enable proxy exec for a journal, we call jbd2_proxy_exec_get().
 * If we are the first one to enable it, we'll also enable __jbd2_proxy_exec_enabled.
 */
void jbd2_proxy_exec_get(journal_t *journal)
{
        atomic_inc(&journal->proxy_exec_refcount);

        if (atomic_inc_not_zero(&jbd2_proxy_exec_count))
                return;

        mutex_lock(&jbd2_proxy_exec_mutex);
        if (!atomic_read(&jbd2_proxy_exec_count))
                static_branch_enable(&__jbd2_proxy_exec_enabled);

        /* Ensure the static branch is enabled before we increase jbd2_proxy_exec_count. */
        smp_mb__before_atomic();
        atomic_inc(&jbd2_proxy_exec_count);
        mutex_unlock(&jbd2_proxy_exec_mutex);
}

/*
 * When we disable proxy exec for a journal or release the lock, we call
 * jbd2_proxy_exec_put(). If we are the last one to disable proxy exec,
 * we'll also disable __jbd2_proxy_exec_enabled.
 */
void jbd2_proxy_exec_put(journal_t *journal)
{
        if (!atomic_dec_and_test(&journal->proxy_exec_refcount))
                return;

        if (atomic_dec_and_mutex_lock(&jbd2_proxy_exec_count, &jbd2_proxy_exec_mutex)) {
                static_branch_disable(&__jbd2_proxy_exec_enabled);
                mutex_unlock(&jbd2_proxy_exec_mutex);
        }
}

static int jbd2_seq_proxy_exec_show(struct seq_file *m, void *v)
{
        journal_t *journal = m->private;

        seq_printf(m, "%d\n", journal->proxy_exec);
        return 0;
}

static int jbd2_seq_proxy_exec_open(struct inode *inode, struct file *filp)
{
        journal_t *journal = PDE_DATA(inode);

        return single_open(filp, jbd2_seq_proxy_exec_show, journal);
}

static ssize_t jbd2_seq_proxy_exec_write(struct file *file,
                const char __user *buf, size_t count, loff_t *offset)
{
        struct inode *inode = file_inode(file);
        journal_t *journal = PDE_DATA(inode);
        char buffer[PROC_NUMBUF];
        unsigned long long proxy_exec;
        int err;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count)) {
                err = -EFAULT;
                goto out;
        }

        err = kstrtoull(strstrip(buffer), 0, &proxy_exec);
        if (err)
                goto out;
        if (proxy_exec != 0 && proxy_exec != 1) {
                err = -EINVAL;
                goto out;
        }

        if (!journal->proxy_exec && proxy_exec)
                jbd2_proxy_exec_get(journal);
        else if (journal->proxy_exec && !proxy_exec)
                jbd2_proxy_exec_put(journal);

        WRITE_ONCE(journal->proxy_exec, proxy_exec);

out:
        return err < 0 ? err : count;
}

static const struct proc_ops jbd2_proxy_exec_proc_ops = {
        .proc_open      = jbd2_seq_proxy_exec_open,
        .proc_read      = seq_read,
        .proc_write     = jbd2_seq_proxy_exec_write,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

static int jbd2_seq_proxy_exec_for_highclass_show(struct seq_file *m, void *v)
{
        journal_t *journal = m->private;

        seq_printf(m, "%d\n", journal->proxy_exec_for_highclass);
        return 0;
}

static int jbd2_seq_proxy_exec_for_highclass_open(struct inode *inode, struct file *filp)
{
        journal_t *journal = PDE_DATA(inode);

        return single_open(filp, jbd2_seq_proxy_exec_for_highclass_show, journal);
}

static ssize_t jbd2_seq_proxy_exec_for_highclass_write(struct file *file,
                const char __user *buf, size_t count, loff_t *offset)
{
        struct inode *inode = file_inode(file);
        journal_t *journal = PDE_DATA(inode);
        char buffer[PROC_NUMBUF];
        unsigned long long proxy_exec_for_highclass;
        int err;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count)) {
                err = -EFAULT;
                goto out;
        }

        err = kstrtoull(strstrip(buffer), 0, &proxy_exec_for_highclass);
        if (err)
                goto out;
        if (proxy_exec_for_highclass != 0 && proxy_exec_for_highclass != 1) {
                err = -EINVAL;
                goto out;
        }
        WRITE_ONCE(journal->proxy_exec_for_highclass, proxy_exec_for_highclass);
out:
        return err < 0 ? err : count;
}

static const struct proc_ops jbd2_proxy_exec_for_highclass_proc_ops = {
        .proc_open      = jbd2_seq_proxy_exec_for_highclass_open,
        .proc_read      = seq_read,
        .proc_write     = jbd2_seq_proxy_exec_for_highclass_write,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

static struct proc_dir_entry *proc_jbd2_stats;

static void jbd2_stats_proc_init(journal_t *journal)
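To make the enable/disable pattern above easier to follow, here is a userspace miniature of it; this is a sketch only, where C11 atomics and a pthread mutex stand in for the kernel's atomic_t, mutex and static key, the seq_cst atomics take the place of smp_mb__before_atomic(), and the put side simply always takes the mutex instead of mirroring atomic_dec_and_mutex_lock():

/* Userspace miniature of the pattern above; all names are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int enable_count;
static pthread_mutex_t enable_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool fast_path_enabled;   /* stands in for the static key */

/* Like the kernel's atomic_inc_not_zero(): only bump a non-zero count. */
static bool inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        }
        return false;
}

void feature_get(void)
{
        /* Fast path: somebody else already made the count non-zero. */
        if (inc_not_zero(&enable_count))
                return;

        /* Slow path: serialize the 0 -> 1 transition so the flag is set
         * before the count becomes visible as non-zero. */
        pthread_mutex_lock(&enable_mutex);
        if (atomic_load(&enable_count) == 0)
                atomic_store(&fast_path_enabled, true);
        atomic_fetch_add(&enable_count, 1);
        pthread_mutex_unlock(&enable_mutex);
}

void feature_put(void)
{
        pthread_mutex_lock(&enable_mutex);
        if (atomic_fetch_sub(&enable_count, 1) == 1)
                atomic_store(&fast_path_enabled, false);
        pthread_mutex_unlock(&enable_mutex);
}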
@@ -1532,6 +1684,10 @@ static void jbd2_stats_proc_init(journal_t *journal)
                                 &jbd2_stats_proc_ops, journal);
                proc_create_data("stall_thresh", 0644, journal->j_proc_entry,
                                 &jbd2_stall_thresh_proc_ops, journal);
                proc_create_data("proxy_exec", 0644, journal->j_proc_entry,
                                 &jbd2_proxy_exec_proc_ops, journal);
                proc_create_data("proxy_exec_for_highclass", 0644, journal->j_proc_entry,
                                 &jbd2_proxy_exec_for_highclass_proc_ops, journal);
        }
}
@@ -1723,6 +1879,9 @@ journal_t *jbd2_journal_init_inode(struct inode *inode)
        p = strreplace(journal->j_devname, '/', '!');
        sprintf(p, "-%lu", journal->j_inode->i_ino);
        jbd2_stats_proc_init(journal);
        atomic_set(&journal->proxy_exec_refcount, 0);
        journal->proxy_exec = false;
        journal->proxy_exec_for_highclass = false;

        return journal;
}
@@ -440,6 +440,8 @@ repeat:
        handle->h_revoke_credits_requested = handle->h_revoke_credits;
        handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        if (jbd2_proxy_exec_enabled(journal, true) && jbd2_proxy_exec_try_get(journal))
                sched_move_task_to_root_task_group(current, journal->proxy_exec_for_highclass);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
                  handle, blocks,
@@ -746,6 +748,10 @@ static void stop_this_handle(handle_t *handle)
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);

        if (jbd2_proxy_exec_enabled(journal, false)) {
                sched_move_task_to_origin_task_group(current);
                jbd2_proxy_exec_put(journal);
        }
        rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
        /*
         * Scope of the GFP_NOFS context is over here and so we can restore the
@@ -1287,6 +1287,9 @@ struct journal_s
         */
        struct lockdep_map      j_trans_commit_map;
#endif
        atomic_t                proxy_exec_refcount;
        bool                    proxy_exec;
        bool                    proxy_exec_for_highclass;

        /**
         * @j_fc_cleanup_callback:
@@ -1817,4 +1820,34 @@ static inline int jbd2_handle_buffer_credits(handle_t *handle)
#define EFSBADCRC       EBADMSG         /* Bad CRC detected */
#define EFSCORRUPTED    EUCLEAN         /* Filesystem is corrupted */

DECLARE_STATIC_KEY_FALSE(__jbd2_proxy_exec_enabled);
/*
 * When we take the lock, we decide whether the proxy exec path should be
 * taken based on the user's configuration.
 * When we release the lock, we decide whether the proxy exec path should be
 * taken based on whether current->proxy_exec is true, to prevent some tasks
 * from staying in the root task group forever.
 */
static inline bool jbd2_proxy_exec_enabled(journal_t *journal, bool lock)
{
        if (!static_branch_unlikely(&__jbd2_proxy_exec_enabled))
                return false;

        if (lock)
                return journal->proxy_exec;

        return current->proxy_exec;
}

/*
 * When we take the lock, we call jbd2_proxy_exec_try_get().
 * To avoid the race window between the read and the add, we use
 * atomic_inc_not_zero(), and if journal->proxy_exec_refcount is zero,
 * we return false.
 */
static inline bool jbd2_proxy_exec_try_get(journal_t *journal)
{
        return atomic_inc_not_zero(&journal->proxy_exec_refcount);
}

extern void jbd2_proxy_exec_put(journal_t *journal);
#endif  /* _LINUX_JBD2_H */
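Putting the pieces together, the two transaction-side hunks earlier pair up as sketched below; handle_start/handle_stop are illustrative names for the hooks in start_this_handle() and stop_this_handle(), with unrelated bookkeeping omitted:

/* Condensed sketch of how the helpers pair at the call sites. */
static void handle_start(journal_t *journal)
{
        /*
         * Lock side: gated on the per-journal switch. The reference taken
         * here keeps the proxy-exec machinery alive until the matching
         * put, even if the switch is cleared in between.
         */
        if (jbd2_proxy_exec_enabled(journal, true) &&
            jbd2_proxy_exec_try_get(journal))
                sched_move_task_to_root_task_group(current,
                                journal->proxy_exec_for_highclass);
}

static void handle_stop(journal_t *journal)
{
        /*
         * Unlock side: gated on current->proxy_exec rather than on the
         * switch, so a task boosted before "echo 0 > proxy_exec" is still
         * moved back and its reference is still dropped.
         */
        if (jbd2_proxy_exec_enabled(journal, false)) {
                sched_move_task_to_origin_task_group(current);
                jbd2_proxy_exec_put(journal);
        }
}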
@@ -1511,6 +1511,7 @@ struct task_struct {
        struct cpumask                  cpus_allowed_alt;
        int                             soft_cpus_version;
#endif
        bool                            proxy_exec;

        CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
@@ -2410,5 +2411,12 @@ static inline void sched_check_group_identity_unlock(void) { }
static inline bool sched_check_sched_core_lock(void) { return true; }
static inline void sched_check_sched_core_unlock(void) { }
#endif

extern void sched_move_task_to_root_task_group(struct task_struct *tsk,
                                               bool proxy_exec_for_highclass);
extern void sched_move_task_to_origin_task_group(struct task_struct *tsk);
DECLARE_STATIC_KEY_FALSE(__jbd2_proxy_exec_enabled);
static inline bool jbd2_proxy_exec_disabled(void)
{
        return !static_branch_unlikely(&__jbd2_proxy_exec_enabled);
}
#endif
@@ -9053,6 +9053,15 @@ void sched_offline_group(struct task_group *tg)
        spin_unlock_irqrestore(&task_group_lock, flags);
}

/*
 * Proxy exec currently only supports the jbd2 lock; when other locks are
 * supported, we'll add the corresponding checks to proxy_exec_enabled().
 */
static inline bool proxy_exec_enabled(void)
{
        return !jbd2_proxy_exec_disabled();
}

static void sched_change_group(struct task_struct *tsk, int type)
{
        struct task_group *tg;
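As the comment suggests, a second proxy-exec-aware lock would simply OR its own static-key check in here; a hypothetical sketch, where foo_proxy_exec_disabled() is made up for illustration:

/* Hypothetical extension, not part of the patch: a second lock class
 * with its own static key would be checked alongside jbd2's. */
static inline bool proxy_exec_enabled(void)
{
        return !jbd2_proxy_exec_disabled() || !foo_proxy_exec_disabled();
}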
@@ -9065,7 +9074,11 @@ static void sched_change_group(struct task_struct *tsk, int type)
        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;

        if (proxy_exec_enabled() && unlikely(tsk->proxy_exec))
                tsk->sched_task_group = &root_task_group;
        else
                tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_change_group)
@@ -9131,6 +9144,37 @@ void sched_move_task(struct task_struct *tsk)
        task_rq_unlock(rq, tsk, &rf);
}

void sched_move_task_to_root_task_group(struct task_struct *tsk, bool proxy_exec_for_highclass)
{
        if (tsk->sched_task_group == &root_task_group)
                return;

        if (tsk->sched_class != &fair_sched_class)
                return;

        if (!proxy_exec_for_highclass && is_highclass_task(tsk))
                return;

        tsk->proxy_exec = true;
        sched_move_task(tsk);
}

void sched_move_task_to_origin_task_group(struct task_struct *tsk)
{
        struct task_group *orig_tg;

        if (!tsk->proxy_exec)
                return;

        tsk->proxy_exec = false;
        orig_tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                               struct task_group, css);
        orig_tg = autogroup_task_group(tsk, orig_tg);

        if (orig_tg != tsk->sched_task_group)
                sched_move_task(tsk);
}

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct task_group, css) : NULL;
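Note the design choice here: the origin group is re-resolved from the task's cgroup css rather than cached, so a cgroup migration that happened while the task was boosted is honored on the way back, while sched_change_group() keeps a boosted task pinned to root_task_group in the meantime. Any other short critical section could in principle be wrapped with the same pair; a hypothetical sketch with a made-up lock type, not from the patch:

/* Hypothetical caller, illustration only: boost around a short
 * critical section the same way the jbd2 handle code does. */
struct my_lock {
        spinlock_t lock;
};

static void my_lock_acquire(struct my_lock *l, bool boost_highclass)
{
        spin_lock(&l->lock);
        /* Run as a root-group task while holding the lock. */
        sched_move_task_to_root_task_group(current, boost_highclass);
}

static void my_lock_release(struct my_lock *l)
{
        /* Move back to the original group when releasing. */
        sched_move_task_to_origin_task_group(current);
        spin_unlock(&l->lock);
}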
@@ -625,53 +625,7 @@ enum {
        TYPE_STRICT,
};

#define ID_NORMAL               0x0000
#define ID_UNDERCLASS           0x0001
#define ID_HIGHCLASS            0x0002
#define ID_SMT_EXPELLER         0x0004
#define ID_IDLE_SAVER           0x0008
#define ID_IDLE_SEEKER          0x0010
#define IDENTITY_FLAGS_MASK     0x00ff

static DEFINE_MUTEX(identity_mutex);
/*
 * When we talk about identity, there are two viewpoints: the
 * level-view and the top-view.
 *
 * Helpers like is_xxx() are for the level-view, usually for the
 * comparison of two se from the same level; their identity depends on
 * the task group they stand for, and a normal task never gets an
 * identity.
 *
 * Helpers like is_xxx_task() are for the top-view, usually for task
 * scheduling decisions, to estimate the cpu situation for tasks.
 *
 * To be noticed, the identity of a task in the top-view depends on the
 * identity of its group; for example, we consider a task from an
 * underclass group as an underclass task, despite the fact that it may
 * be the descendant of a highclass group.
 */
static inline bool test_identity(struct sched_entity *se, int flags)
{
        return se->id_flags & flags;
}

static inline bool __is_underclass(struct sched_entity *se)
{
        return test_identity(se, ID_UNDERCLASS);
}

inline bool is_underclass(struct sched_entity *se)
{
        if (group_identity_disabled())
                return false;
        return __is_underclass(se);
}

static inline bool __is_highclass(struct sched_entity *se)
{
        return test_identity(se, ID_HIGHCLASS);
}

static inline bool is_highclass(struct sched_entity *se)
{
        if (group_identity_disabled())
@@ -933,26 +887,6 @@ static inline unsigned long expel_score(struct rq *rq)
}
#endif

static inline bool __is_highclass_task(struct task_struct *p)
{
        struct sched_entity *se = p->se.parent ? : &p->se;
        bool ret;

        rcu_read_lock();
        ret = __is_highclass(se);
        rcu_read_unlock();

        return ret;
}

static inline bool is_highclass_task(struct task_struct *p)
{
        if (group_identity_disabled())
                return false;

        return __is_highclass_task(p);
}

/*
 * Underclass tasks should be hidden from the rq when the cpu is on
 * expel; this helper returns true when the se needs expelling on the rq.
@@ -978,17 +912,6 @@ static inline bool task_is_expeller(struct task_struct *p)
        return ret;
}

inline bool is_underclass_task(struct task_struct *p)
{
        struct sched_entity *se = p->se.parent ? : &p->se;
        bool ret;

        rcu_read_lock();
        ret = __is_underclass(se);
        rcu_read_unlock();
        return ret;
}

static inline bool is_idle_saver_task(struct task_struct *p)
{
        struct sched_entity *se = p->se.parent ? : &p->se;
@@ -2304,7 +2227,7 @@ unsigned int id_nr_invalid(struct rq *rq)

#else

static inline bool is_underclass(struct sched_entity *curr)
static inline bool underclass_only(struct rq *rq)
{
        return false;
}
@@ -2314,16 +2237,6 @@ static inline bool is_highclass(struct sched_entity *se)
        return true;
}

static inline bool underclass_only(struct rq *rq)
{
        return false;
}

static inline bool is_highclass_task(struct task_struct *p)
{
        return false;
}

static inline bool is_idle_seeker_task(struct task_struct *p)
{
        return false;
@@ -726,15 +726,98 @@ extern int clear_identity(struct task_group *tg);
extern void notify_smt_expeller(struct rq *rq, struct task_struct *p);
extern unsigned int id_nr_invalid(struct rq *rq);
extern void update_id_idle_avg(struct rq *rq, u64 delta);
extern bool is_underclass(struct sched_entity *se);
extern bool is_underclass_task(struct task_struct *p);
static inline bool group_identity_disabled(void);

#define ID_NORMAL               0x0000
#define ID_UNDERCLASS           0x0001
#define ID_HIGHCLASS            0x0002
#define ID_SMT_EXPELLER         0x0004
#define ID_IDLE_SAVER           0x0008
#define ID_IDLE_SEEKER          0x0010
#define IDENTITY_FLAGS_MASK     0x00ff

/*
 * When we talk about identity, there are two viewpoints: the
 * level-view and the top-view.
 *
 * Helpers like is_xxx() are for the level-view, usually for the
 * comparison of two se from the same level; their identity depends on
 * the task group they stand for, and a normal task never gets an
 * identity.
 *
 * Helpers like is_xxx_task() are for the top-view, usually for task
 * scheduling decisions, to estimate the cpu situation for tasks.
 *
 * To be noticed, the identity of a task in the top-view depends on the
 * identity of its group; for example, we consider a task from an
 * underclass group as an underclass task, despite the fact that it may
 * be the descendant of a highclass group.
 */
static inline bool test_identity(struct sched_entity *se, int flags)
{
        return se->id_flags & flags;
}

static inline bool __is_underclass(struct sched_entity *se)
{
        return test_identity(se, ID_UNDERCLASS);
}

static inline bool is_underclass(struct sched_entity *se)
{
        if (group_identity_disabled())
                return false;
        return __is_underclass(se);
}

static inline bool __is_highclass(struct sched_entity *se)
{
        return test_identity(se, ID_HIGHCLASS);
}

static inline bool __is_underclass_task(struct task_struct *p)
{
        struct sched_entity *se = p->se.parent ? : &p->se;
        bool ret;

        rcu_read_lock();
        ret = __is_underclass(se);
        rcu_read_unlock();
        return ret;
}

static inline bool __is_highclass_task(struct task_struct *p)
{
        struct sched_entity *se = p->se.parent ? : &p->se;
        bool ret;

        rcu_read_lock();
        ret = __is_highclass(se);
        rcu_read_unlock();

        return ret;
}

static inline bool is_underclass_task(struct task_struct *p)
{
        if (group_identity_disabled())
                return false;

        return __is_underclass_task(p);
}

static inline bool is_highclass_task(struct task_struct *p)
{
        if (group_identity_disabled())
                return false;

        return __is_highclass_task(p);
}

static inline void clear_task_identity(struct task_struct *p)
{
        if (unlikely(p->se.id_flags))
                update_identity(NULL, p, 0);
}

#ifdef CONFIG_SCHED_SMT
extern bool rq_on_expel(struct rq *rq);
extern void task_tick_gi(struct rq *rq);
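The top-view helpers resolve the flags through the immediate parent entity: p->se.parent is the owning group's se at the task's level when the task is in a cgroup, and NULL for a task in the root group, so the GNU "a ?: b" expression falls back to the task's own flag-free se. A userspace miniature of just that lookup, with illustrative names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

#define ID_HIGHCLASS 0x0002

struct entity {
        struct entity *parent;
        int id_flags;
};

static bool is_highclass_entity(struct entity *e)
{
        struct entity *se = e->parent ?: e;     /* same GNU ?: as above */

        return se->id_flags & ID_HIGHCLASS;
}

int main(void)
{
        struct entity group = { .parent = NULL, .id_flags = ID_HIGHCLASS };
        struct entity task_in_group = { .parent = &group, .id_flags = 0 };
        struct entity root_task = { .parent = NULL, .id_flags = 0 };

        /* Prints "1 0": the grouped task inherits its group's identity,
         * while the root task has none. */
        printf("%d %d\n", is_highclass_entity(&task_in_group),
                          is_highclass_entity(&root_task));
        return 0;
}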
@@ -748,6 +831,9 @@ static inline void notify_smt_expeller(struct rq *rq, struct task_struct *p) {}
static inline unsigned int id_nr_invalid(struct rq *rq) { return 0; }
static inline void update_id_idle_avg(struct rq *rq, u64 delta) {}
static inline void task_tick_gi(struct rq *rq) { }
static inline bool is_underclass(struct sched_entity *curr) { return false; }
static inline bool is_highclass_task(struct task_struct *p) { return false; }
static inline bool is_underclass_task(struct task_struct *p) { return false; }
#endif

/* CFS-related fields in a runqueue */