sched: Change nr_uninterruptible type to unsigned long
commit 36569780b0d64de283f9d6c2195fd1a43e221ee8 upstream.

The commit e6fe3f422b ("sched: Make multiple runqueue task counters
32-bit") changed nr_uninterruptible to an unsigned int. But the
nr_uninterruptible values for each of the CPU runqueues can grow to
large numbers, sometimes exceeding INT_MAX. This is valid, if, over
time, a large number of tasks are migrated off of one CPU after going
into an uninterruptible state. Only the sum of all nr_uninterruptible
values across all CPUs yields the correct result, as explained in a
comment in kernel/sched/loadavg.c.

Change the type of nr_uninterruptible back to unsigned long to prevent
overflows, and thus the miscalculation of load average.

Fixes: e6fe3f422b ("sched: Make multiple runqueue task counters 32-bit")
Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250709173328.606794-1-aruna.ramakrishna@oracle.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 496efa228f0dd58980d301e379e5561a9b612eaa)
parent 7ab7c1113e
commit 4544b4d4a6
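The failure mode is easy to reproduce outside the kernel. Below is a
minimal standalone sketch (plain userspace C with illustrative numbers,
not kernel code; it assumes the usual two's-complement behavior of the
(int) cast and a 64-bit long): once individual per-CPU counters exceed
INT_MAX, summing their (int)-cast values can land a multiple of 2^32
away from the true total, while summing (long)-cast 64-bit counters
preserves it.

	#include <stdio.h>

	int main(void)
	{
		/*
		 * Illustrative per-CPU nr_uninterruptible snapshots: CPU0 and
		 * CPU1 each put ~3 billion tasks to sleep locally, CPU2 woke
		 * most of them up, so its counter wrapped "negative". The true
		 * global count is 3000000000 + 3000000000 - 5999999990 = 10.
		 */
		unsigned int u32[3] = {
			3000000000u,			/* exceeds INT_MAX */
			3000000000u,			/* exceeds INT_MAX */
			(unsigned int)-5999999990LL,	/* wrapped negative delta */
		};
		unsigned long u64[3] = {
			3000000000ul,
			3000000000ul,
			(unsigned long)-5999999990LL,
		};
		long sum32 = 0, sum64 = 0;
		int i;

		for (i = 0; i < 3; i++) {
			sum32 += (int)u32[i];	/* pre-patch cast: wraps past INT_MAX */
			sum64 += (long)u64[i];	/* patched cast: value preserved */
		}

		printf("(int)  sum: %ld\n", sum32);	/* 10 - 2^32 = -4294967286 */
		printf("(long) sum: %ld\n", sum64);	/* 10, the correct count */
		return 0;
	}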
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
 	long nr_active, delta = 0;
 
 	nr_active = this_rq->nr_running - adjust;
-	nr_active += (int)this_rq->nr_uninterruptible;
+	nr_active += (long)this_rq->nr_uninterruptible;
 
 	if (nr_active != this_rq->calc_load_active) {
 		delta = nr_active - this_rq->calc_load_active;
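For context on how these per-CPU values feed the load average: each
runqueue periodically folds the change in its active-task count into a
global counter, and only that folded sum is sampled for the load
average, which is why only the cross-CPU sum of nr_uninterruptible has
to be meaningful. The sketch below models this in userspace; struct rq
is pared down to the fields used here, and calc_load_tasks stands in
for the kernel's atomic_long_t global of the same name.

	#include <stdio.h>

	/* Simplified stand-in for the scheduler runqueue fields used here. */
	struct rq {
		unsigned int  nr_running;
		unsigned long nr_uninterruptible;	/* patched type */
		long          calc_load_active;		/* last folded value */
	};

	static long calc_load_tasks;	/* global sum; atomic_long_t in the kernel */

	/* Mirrors calc_load_fold_active(): return the change since last fold. */
	static long calc_load_fold_active(struct rq *this_rq, long adjust)
	{
		long nr_active, delta = 0;

		nr_active = this_rq->nr_running - adjust;
		nr_active += (long)this_rq->nr_uninterruptible;

		if (nr_active != this_rq->calc_load_active) {
			delta = nr_active - this_rq->calc_load_active;
			this_rq->calc_load_active = nr_active;
		}
		return delta;
	}

	int main(void)
	{
		/* Two CPUs whose individual counters are huge but cancel out. */
		struct rq cpu0 = { 1, 3000000000ul, 0 };
		struct rq cpu1 = { 0, (unsigned long)-2999999999L, 0 };

		calc_load_tasks += calc_load_fold_active(&cpu0, 0);
		calc_load_tasks += calc_load_fold_active(&cpu1, 0);

		/* 1 running + (3000000000 - 2999999999) uninterruptible = 2 */
		printf("calc_load_tasks = %ld\n", calc_load_tasks);
		return 0;
	}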
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1033,7 +1033,7 @@ struct rq {
 	 * one CPU and if it got migrated afterwards it may decrease
 	 * it on another CPU. Always updated under the runqueue lock:
 	 */
-	unsigned int nr_uninterruptible;
+	unsigned long nr_uninterruptible;
 
 	struct task_struct __rcu *curr;
 	struct task_struct *idle;