diff -drupN a/kernel/sched/deadline.c b/kernel/sched/deadline.c --- a/kernel/sched/deadline.c 2018-08-06 17:23:04.000000000 +0300 +++ b/kernel/sched/deadline.c 2022-06-12 05:28:14.000000000 +0300 @@ -18,6 +18,8 @@ #include <linux/slab.h> +#include "walt.h" + struct dl_bandwidth def_dl_bandwidth; static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) @@ -947,6 +949,7 @@ void inc_dl_tasks(struct sched_dl_entity WARN_ON(!dl_prio(prio)); dl_rq->dl_nr_running++; add_nr_running(rq_of_dl_rq(dl_rq), 1); + walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); inc_dl_deadline(dl_rq, deadline); inc_dl_migration(dl_se, dl_rq); @@ -961,6 +964,7 @@ void dec_dl_tasks(struct sched_dl_entity WARN_ON(!dl_rq->dl_nr_running); dl_rq->dl_nr_running--; sub_nr_running(rq_of_dl_rq(dl_rq), 1); + walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); dec_dl_deadline(dl_rq, dl_se->deadline); dec_dl_migration(dl_se, dl_rq); @@ -1628,7 +1632,9 @@ retry: } deactivate_task(rq, next_task, 0); + next_task->on_rq = TASK_ON_RQ_MIGRATING; set_task_cpu(next_task, later_rq->cpu); + next_task->on_rq = TASK_ON_RQ_QUEUED; activate_task(later_rq, next_task, 0); ret = 1; @@ -1716,7 +1722,9 @@ static void pull_dl_task(struct rq *this resched = true; deactivate_task(src_rq, p, 0); + p->on_rq = TASK_ON_RQ_MIGRATING; set_task_cpu(p, this_cpu); + p->on_rq = TASK_ON_RQ_QUEUED; activate_task(this_rq, p, 0); dmin = p->dl.deadline;