mirror of https://github.com/OpenIPC/firmware.git
diff -drupN a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c	2018-08-06 17:23:04.000000000 +0300
+++ b/kernel/sched/rt.c	2022-06-12 05:28:14.000000000 +0300
@@ -8,6 +8,8 @@
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 
+#include "walt.h"
+
 int sched_rr_timeslice = RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -888,6 +890,51 @@ static inline int rt_se_prio(struct sche
 	return rt_task_of(rt_se)->prio;
 }
 
+static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
+{
+	struct rt_prio_array *array = &rt_rq->active;
+	struct sched_rt_entity *rt_se;
+	char buf[500];
+	char *pos = buf;
+	char *end = buf + sizeof(buf);
+	int idx;
+
+	pos += snprintf(pos, sizeof(buf),
+		"sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+		rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+
+	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
+		goto out;
+
+	pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+	idx = sched_find_first_bit(array->bitmap);
+	while (idx < MAX_RT_PRIO) {
+		list_for_each_entry(rt_se, array->queue + idx, run_list) {
+			struct task_struct *p;
+
+			if (!rt_entity_is_task(rt_se))
+				continue;
+
+			p = rt_task_of(rt_se);
+			if (pos < end)
+				pos += snprintf(pos, end - pos, "\t%s (%d)\n",
+					p->comm, p->pid);
+		}
+		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
+	}
+out:
+#ifdef CONFIG_PANIC_ON_RT_THROTTLING
+	/*
+	 * Use pr_err() in the BUG() case since printk_sched() will
+	 * not get flushed and deadlock is not a concern.
+	 */
+	pr_err("%s", buf);
+	BUG();
+#else
+	printk_deferred("%s", buf);
+#endif
+}
+
 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
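(Note on the hunk above: the report is assembled with the bounded-snprintf accumulation idiom: each call appends at pos and advances it, and end - pos caps every write to the space left in buf, so an oversized task list is truncated rather than overflowed. The CONFIG_PANIC_ON_RT_THROTTLING branch relies on a Kconfig symbol that is defined elsewhere, not in this file. Below is a minimal user-space sketch of the same buffer-building pattern; names, PIDs and the buffer size are invented for illustration, only the pointer arithmetic matches the kernel code.)

#include <stdio.h>

/* Illustrative sketch (not from the patch): append formatted entries
 * into a fixed buffer, advancing 'pos' and never writing past 'end'.
 * snprintf() returns the length it wanted to write, so after a
 * truncated call 'pos' can move past 'end'; the 'pos < end' check
 * below mirrors the guard used in dump_throttled_rt_tasks(). */
int main(void)
{
	char buf[64];
	char *pos = buf;
	char *end = buf + sizeof(buf);
	const char *tasks[] = { "irq/42-demo", "rt-spinner" };
	int pids[] = { 131, 2077 };

	pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
	for (int i = 0; i < 2; i++) {
		if (pos < end)
			pos += snprintf(pos, end - pos, "\t%s (%d)\n",
					tasks[i], pids[i]);
	}
	fputs(buf, stdout);
	return 0;
}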
@@ -911,8 +958,14 @@ static int sched_rt_runtime_exceeded(str
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
			rt_rq->rt_throttled = 1;
-			printk_deferred_once("sched: RT throttling activated\n");
+
+			if (!once) {
+				once = true;
+				dump_throttled_rt_tasks(rt_rq);
+			}
		} else {
			/*
			 * In case we did anyway, make it go away,
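(On the hunk above: the printk_deferred_once() call is replaced with an explicit static flag, presumably because the *_once helpers wrap only a single print statement while here a whole reporting function must run exactly once. A standalone sketch of that one-shot pattern, with purely illustrative names:)

#include <stdbool.h>
#include <stdio.h>

/* Illustrative one-shot guard, mirroring the 'static bool once' above:
 * the expensive diagnostic body runs on the first trigger only.
 * There is no locking here, so strictly concurrent callers could in
 * principle both pass the check before the flag is set. */
static void report_throttling(int cpu)
{
	static bool once = false;

	if (!once) {
		once = true;
		printf("RT throttling first hit on cpu %d\n", cpu);
	}
}

int main(void)
{
	report_throttling(0);
	report_throttling(1);	/* already reported, silently ignored */
	return 0;
}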
@@ -1313,6 +1366,7 @@ enqueue_task_rt(struct rq *rq, struct ta
	rt_se->timeout = 0;
 
	enqueue_rt_entity(rt_se, flags);
+	walt_inc_cumulative_runnable_avg(rq, p);
 
	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_task(rq, p);
@@ -1324,6 +1378,7 @@ static void dequeue_task_rt(struct rq *r
 
	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);
+	walt_dec_cumulative_runnable_avg(rq, p);
 
	dequeue_pushable_task(rq, p);
 }
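(On the two WALT hooks above: they keep a per-runqueue total of runnable task demand in step with RT enqueue and dequeue, which WALT-enabled kernels broadly use for load reporting and frequency selection. The toy model below only illustrates that bookkeeping; the struct and field names are invented and far simpler than the real walt_inc/walt_dec_cumulative_runnable_avg() in walt.c.)

#include <stdio.h>

/* Hypothetical, heavily simplified stand-ins for the WALT bookkeeping:
 * every enqueue adds the task's demand to a per-runqueue total and
 * every dequeue removes it, so the total tracks what is currently
 * runnable on that CPU. */
struct toy_rq   { unsigned long long cumulative_runnable_avg; };
struct toy_task { unsigned long long demand; };

static void toy_inc_runnable(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg += p->demand;
}

static void toy_dec_runnable(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg -= p->demand;
}

int main(void)
{
	struct toy_rq rq = { 0 };
	struct toy_task a = { .demand = 300 }, b = { .demand = 120 };

	toy_inc_runnable(&rq, &a);	/* enqueue_task_rt() path */
	toy_inc_runnable(&rq, &b);
	toy_dec_runnable(&rq, &a);	/* dequeue_task_rt() path */
	printf("runnable demand now: %llu\n", rq.cumulative_runnable_avg);
	return 0;
}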
@@ -1833,7 +1888,9 @@ retry:
	}
 
	deactivate_task(rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(next_task, lowest_rq->cpu);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
	activate_task(lowest_rq, next_task, 0);
	ret = 1;
 
@@ -2105,7 +2162,9 @@ static void pull_rt_task(struct rq *this
			resched = true;
 
			deactivate_task(src_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_MIGRATING;
			set_task_cpu(p, this_cpu);
+			p->on_rq = TASK_ON_RQ_QUEUED;
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
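(On the last two hunks: while an RT task is detached for a push or pull migration it is briefly marked TASK_ON_RQ_MIGRATING around set_task_cpu(), then set back to TASK_ON_RQ_QUEUED before activation on the destination runqueue, presumably so that set_task_cpu() and any load-tracking fixups hooked into it treat the move as a real inter-CPU migration rather than a plain dequeue. A hypothetical helper, not part of the patch, capturing the shared pattern:)

/* Hypothetical kernel-context sketch (not in the patch): detach the
 * task, flag it as migrating so set_task_cpu() sees a genuine
 * migration, switch its CPU, then re-queue it on the destination
 * runqueue. Both hunks above open-code exactly this sequence. */
static void rt_migrate_task(struct rq *src_rq, struct rq *dst_rq,
			    struct task_struct *p)
{
	deactivate_task(src_rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(p, cpu_of(dst_rq));
	p->on_rq = TASK_ON_RQ_QUEUED;
	activate_task(dst_rq, p, 0);
}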