@@ -1,57 +1,3 @@
From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Wed, 22 Feb 2023 19:24:36 +0100
Subject: [PATCH] prjc

Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
.../admin-guide/kernel-parameters.txt | 6 +
Documentation/admin-guide/sysctl/kernel.rst | 10 +
Documentation/scheduler/sched-BMQ.txt | 110 +
fs/proc/base.c | 2 +-
include/asm-generic/resource.h | 2 +-
include/linux/sched.h | 33 +-
include/linux/sched/deadline.h | 20 +
include/linux/sched/prio.h | 26 +
include/linux/sched/rt.h | 2 +
include/linux/sched/topology.h | 3 +-
init/Kconfig | 34 +
init/init_task.c | 18 +
kernel/Kconfig.preempt | 2 +-
kernel/cgroup/cpuset.c | 4 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 4 +-
kernel/locking/rtmutex.c | 16 +-
kernel/sched/Makefile | 5 +
kernel/sched/alt_core.c | 8111 +++++++++++++++++
kernel/sched/alt_debug.c | 31 +
kernel/sched/alt_sched.h | 671 ++
kernel/sched/bmq.h | 110 +
kernel/sched/build_policy.c | 8 +-
kernel/sched/build_utility.c | 2 +
kernel/sched/cpufreq_schedutil.c | 10 +
kernel/sched/cputime.c | 10 +-
kernel/sched/debug.c | 10 +
kernel/sched/idle.c | 2 +
kernel/sched/pds.h | 127 +
kernel/sched/pelt.c | 4 +-
kernel/sched/pelt.h | 8 +-
kernel/sched/sched.h | 9 +
kernel/sched/stats.c | 4 +
kernel/sched/stats.h | 2 +
kernel/sched/topology.c | 17 +
kernel/sysctl.c | 15 +
kernel/time/hrtimer.c | 2 +
kernel/time/posix-cpu-timers.c | 10 +-
kernel/trace/trace_selftest.c | 5 +
39 files changed, 9445 insertions(+), 22 deletions(-)
create mode 100644 Documentation/scheduler/sched-BMQ.txt
create mode 100644 kernel/sched/alt_core.c
create mode 100644 kernel/sched/alt_debug.c
create mode 100644 kernel/sched/alt_sched.h
create mode 100644 kernel/sched/bmq.h
create mode 100644 kernel/sched/pds.h

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3996cf..1b6a407213da 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
@@ -686,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
index 000000000000..f5e9c01f9382
index 000000000000..a122b1f8678e
--- /dev/null
+++ b/kernel/sched/alt_core.c
@@ -0,0 +1,8111 @@
@@ -0,0 +1,8120 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -759,7 +705,12 @@ index 000000000000..f5e9c01f9382
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
+#define ALT_SCHED_VERSION "v6.2-r0"
+#define ALT_SCHED_VERSION "v6.2-r2"
+
+/*
+ * Compile time debug macro
+ * #define ALT_SCHED_DEBUG
+ */
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -814,9 +765,9 @@ index 000000000000..f5e9c01f9382
+#ifdef CONFIG_SMP
+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
+
+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
+DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
+
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
@@ -910,13 +861,13 @@ index 000000000000..f5e9c01f9382
+
+ if (prio < last_prio) {
+ if (IDLE_TASK_SCHED_PRIO == last_prio) {
+ cpumask_clear_cpu(cpu, sched_idle_mask);
+ last_prio -= 2;
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present))
+ cpumask_andnot(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+#endif
+ cpumask_clear_cpu(cpu, sched_idle_mask);
+ last_prio -= 2;
+ }
+ clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
+
@@ -924,18 +875,14 @@ index 000000000000..f5e9c01f9382
+ }
+ /* last_prio < prio */
+ if (IDLE_TASK_SCHED_PRIO == prio) {
+ cpumask_set_cpu(cpu, sched_idle_mask);
+ prio -= 2;
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present)) {
+ cpumask_t tmp;
+
+ cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
+ if (static_branch_likely(&sched_smt_present) &&
+ cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
+ cpumask_or(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+ }
+#endif
+ cpumask_set_cpu(cpu, sched_idle_mask);
+ prio -= 2;
+ }
+ set_recorded_preempt_mask(pr, last_prio, prio, cpu);
+}
@@ -1476,11 +1423,13 @@ index 000000000000..f5e9c01f9382
+
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+#endif
+
+ __SCHED_DEQUEUE_TASK(p, rq, flags);
+ --rq->nr_running;
@@ -1494,11 +1443,13 @@ index 000000000000..f5e9c01f9382
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+#endif
+
+ __SCHED_ENQUEUE_TASK(p, rq, flags);
+ update_sched_preempt_mask(rq);
@@ -1513,10 +1464,12 @@ index 000000000000..f5e9c01f9382
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
+ cpu_of(rq), task_cpu(p));
+#endif
+
+ list_del(&p->sq_node);
+ list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
@@ -2035,8 +1988,8 @@ index 000000000000..f5e9c01f9382
+ */
+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
+{
+ dequeue_task(p, rq, DEQUEUE_SLEEP);
+ p->on_rq = 0;
+ dequeue_task(p, rq, DEQUEUE_SLEEP);
+ cpufreq_update_util(rq, 0);
+}
+
@@ -2253,7 +2206,7 @@ index 000000000000..f5e9c01f9382
+{
+ lockdep_assert_held(&rq->lock);
+
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ dequeue_task(p, rq, 0);
+ update_sched_preempt_mask(rq);
+ set_task_cpu(p, new_cpu);
@@ -4849,10 +4802,9 @@ index 000000000000..f5e9c01f9382
+/*
+ * sg_balance - slibing group balance check for run queue @rq
+ */
+static inline void sg_balance(struct rq *rq)
+static inline void sg_balance(struct rq *rq, int cpu)
+{
+ cpumask_t chk;
+ int cpu = cpu_of(rq);
+
+ /* exit when cpu is offline */
+ if (unlikely(!rq->online))
@@ -5166,11 +5118,6 @@ index 000000000000..f5e9c01f9382
+ schedstat_inc(this_rq()->sched_count);
+}
+
+/*
+ * Compile time debug macro
+ * #define ALT_SCHED_DEBUG
+ */
+
+#ifdef ALT_SCHED_DEBUG
+void alt_sched_debug(void)
+{
@@ -5207,10 +5154,12 @@ index 000000000000..f5e9c01f9382
+ (p = sched_rq_next_task(skip, rq)) != rq->idle) {
+ skip = sched_rq_next_task(p, rq);
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ __SCHED_DEQUEUE_TASK(p, rq, 0);
+ set_task_cpu(p, dest_cpu);
+ sched_task_sanity_check(p, dest_rq);
+ __SCHED_ENQUEUE_TASK(p, dest_rq, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ nr_migrated++;
+ }
+ nr_tries--;
@@ -5507,19 +5456,21 @@ index 000000000000..f5e9c01f9382
+ */
+ ++*switch_count;
+
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+ psi_sched_switch(prev, next, deactivated);
+
+ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next);
+
+ cpu = cpu_of(rq);
+ } else {
+ __balance_callbacks(rq);
+ raw_spin_unlock_irq(&rq->lock);
+ }
+
+#ifdef CONFIG_SCHED_SMT
+ sg_balance(rq);
+ sg_balance(rq, cpu);
+#endif
+}
+
@@ -5844,14 +5795,18 @@ index 000000000000..f5e9c01f9382
+
+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
+{
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p)) {
+ int idx;
+
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
+ update_rq_clock(rq);
+ idx = task_sched_prio_idx(p, rq);
+ if (idx != p->sq_idx) {
+ requeue_task(p, rq, idx);
+ check_preempt_curr(rq);
+ }
+ }
+}
+
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
@@ -5902,7 +5857,6 @@ index 000000000000..f5e9c01f9382
+ return;
+
+ rq = __task_access_lock(p, &lock);
+ update_rq_clock(rq);
+ /*
+ * Set under pi_lock && rq->lock, such that the value can be used under
+ * either lock.
@@ -8275,7 +8229,8 @@ index 000000000000..f5e9c01f9382
+ int i;
+ struct rq *rq;
+
+ printk(KERN_INFO ALT_SCHED_VERSION_MSG);
+ printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
+ " by Alfred Chen.\n");
+
+ wait_bit_init();
+
@@ -8840,10 +8795,10 @@ index 000000000000..1212a031700e
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
index 000000000000..0b563999d4c1
index 000000000000..e9b93e63406a
--- /dev/null
+++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,671 @@
@@ -0,0 +1,672 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -8965,6 +8920,8 @@ index 000000000000..0b563999d4c1
+};
+
+struct rq;
+struct cpuidle_state;
+
+struct balance_callback {
+ struct balance_callback *next;
+ void (*func)(struct rq *rq);
@@ -9136,8 +9093,7 @@ index 000000000000..0b563999d4c1
+ NR_CPU_AFFINITY_LEVELS
+};
+
+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
+DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+
+static inline int
+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
@@ -9517,11 +9473,11 @@ index 000000000000..0b563999d4c1
+#endif /* ALT_SCHED_H */
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
index 000000000000..66b77291b9d0
index 000000000000..f29b8f3aa786
--- /dev/null
+++ b/kernel/sched/bmq.h
@@ -0,0 +1,110 @@
+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+#define ALT_SCHED_NAME "BMQ"
+
+/*
+ * BMQ only routines
@@ -9860,14 +9816,15 @@ index f26ab2675f7d..480d4ad16d45 100644
+#endif
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
new file mode 100644
index 000000000000..56a649d02e49
index 000000000000..27e09b4feb8c
--- /dev/null
+++ b/kernel/sched/pds.h
@@ -0,0 +1,127 @@
+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
@@ -0,0 +1,133 @@
+#define ALT_SCHED_NAME "PDS"
+
+static int sched_timeslice_shift = 22;
+
+/* PDS assume NORMAL_PRIO_NUM is power of 2 */
+#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+
+/*
@@ -9884,38 +9841,43 @@ index 000000000000..56a649d02e49
+{
+ s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+
+#ifdef ALT_SCHED_DEBUG
+ if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
+ "pds: task_sched_prio_normal() delta %lld\n", delta))
+ return NORMAL_PRIO_NUM - 1;
+#endif
+
+ return (delta < 0) ? 0 : delta;
+ return max(0LL, delta);
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio :
+ return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
+ MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+}
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
+ u64 idx;
+
+ if (p->prio < MAX_RT_PRIO)
+ return p->prio;
+
+ idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
+ return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
+}
+
+static inline int sched_prio2idx(int prio, struct rq *rq)
+{
+ return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
+ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
+ rq->time_edge);
+ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
+}
+
+static inline int sched_idx2prio(int idx, struct rq *rq)
+{
+ return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
+ NORMAL_PRIO_MOD(rq->time_edge));
+ NORMAL_PRIO_MOD(idx - rq->time_edge);
+}
+
+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
@@ -9940,6 +9902,7 @@ index 000000000000..56a649d02e49
+ if (now == old)
+ return;
+
+ rq->time_edge = now;
+ delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
+ INIT_LIST_HEAD(&head);
+
@@ -9949,10 +9912,9 @@ index 000000000000..56a649d02e49
+
+ rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
+ rq->queue.bitmap[2] >> delta;
+ rq->time_edge = now;
+ if (!list_empty(&head)) {
+ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+ struct task_struct *p;
+ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+
+ list_for_each_entry(p, &head, sq_node)
+ p->sq_idx = idx;
@@ -10322,6 +10284,363 @@ index ff0536cea968..ce266990006d 100644
};
struct wakeup_test_data *x = data;

--
2.39.2
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
index a122b1f8678e..78748ebb1d71 100644
--- a/kernel/sched/alt_core.c
+++ b/kernel/sched/alt_core.c
@@ -254,8 +254,7 @@ static inline void update_sched_preempt_mask(struct rq *rq)
*/
static inline struct task_struct *sched_rq_first_task(struct rq *rq)
{
- unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
- const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
+ const struct list_head *head = &rq->queue.heads[sched_prio2idx(rq->prio, rq)];

return list_first_entry(head, struct task_struct, sq_node);
}
@@ -767,13 +766,15 @@ unsigned long get_wchan(struct task_struct *p)
* Add/Remove/Requeue task to/from the runqueue routines
* Context: rq->lock
*/
-#define __SCHED_DEQUEUE_TASK(p, rq, flags) \
+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
sched_info_dequeue(rq, p); \
psi_dequeue(p, flags & DEQUEUE_SLEEP); \
\
list_del(&p->sq_node); \
- if (list_empty(&rq->queue.heads[p->sq_idx])) \
- clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+ if (list_empty(&rq->queue.heads[p->sq_idx])) { \
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
+ func; \
+ }

#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
sched_info_enqueue(rq, p); \
@@ -788,12 +789,12 @@ static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);

- /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
task_cpu(p), cpu_of(rq));
#endif

- __SCHED_DEQUEUE_TASK(p, rq, flags);
+ __SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
--rq->nr_running;
#ifdef CONFIG_SMP
if (1 == rq->nr_running)
@@ -808,7 +809,7 @@ static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);

- /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
task_cpu(p), cpu_of(rq));
#endif
@@ -828,7 +829,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
{
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);
- /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
cpu_of(rq), task_cpu(p));
#endif
@@ -837,8 +838,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
if (idx != p->sq_idx) {
if (list_empty(&rq->queue.heads[p->sq_idx]))
- clear_bit(sched_idx2prio(p->sq_idx, rq),
- rq->queue.bitmap);
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
p->sq_idx = idx;
set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
update_sched_preempt_mask(rq);
@@ -1350,8 +1350,8 @@ static void activate_task(struct task_struct *p, struct rq *rq)
*/
static inline void deactivate_task(struct task_struct *p, struct rq *rq)
{
- p->on_rq = 0;
dequeue_task(p, rq, DEQUEUE_SLEEP);
+ p->on_rq = 0;
cpufreq_update_util(rq, 0);
}

@@ -1568,9 +1568,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
{
lockdep_assert_held(&rq->lock);

- p->on_rq = TASK_ON_RQ_MIGRATING;
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
dequeue_task(p, rq, 0);
- update_sched_preempt_mask(rq);
set_task_cpu(p, new_cpu);
raw_spin_unlock(&rq->lock);

@@ -4516,12 +4515,10 @@ migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
(p = sched_rq_next_task(skip, rq)) != rq->idle) {
skip = sched_rq_next_task(p, rq);
if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
- p->on_rq = TASK_ON_RQ_MIGRATING;
- __SCHED_DEQUEUE_TASK(p, rq, 0);
+ __SCHED_DEQUEUE_TASK(p, rq, 0, );
set_task_cpu(p, dest_cpu);
sched_task_sanity_check(p, dest_rq);
__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
- p->on_rq = TASK_ON_RQ_QUEUED;
nr_migrated++;
}
nr_tries--;
@@ -4566,6 +4563,7 @@ static inline int take_other_rq_tasks(struct rq *rq, int cpu)
if (rq->nr_running > 1)
cpumask_set_cpu(cpu, &sched_rq_pending_mask);

+ update_sched_preempt_mask(rq);
cpufreq_update_util(rq, 0);

return 1;
@@ -4637,8 +4635,7 @@ choose_next_task(struct rq *rq, int cpu)
#ifdef CONFIG_HIGH_RES_TIMERS
hrtick_start(rq, next->time_slice);
#endif
- /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
- * next);*/
+ /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
return next;
}

@@ -4706,7 +4703,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
unsigned long prev_state;
struct rq *rq;
int cpu;
- int deactivated = 0;

cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -4771,7 +4767,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*/
sched_task_deactivate(prev, rq);
deactivate_task(prev, rq);
- deactivated = 1;

if (prev->in_iowait) {
atomic_inc(&rq->nr_iowait);
@@ -4791,11 +4786,10 @@ static void __sched notrace __schedule(unsigned int sched_mode)
#endif

if (likely(prev != next)) {
- if (deactivated)
- update_sched_preempt_mask(rq);
next->last_ran = rq->clock_task;
rq->last_ts_switch = rq->clock;

+ /*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
rq->nr_switches++;
/*
* RCU users of rcu_dereference(rq->curr) may not see
@@ -4818,7 +4812,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*/
++*switch_count;

- psi_sched_switch(prev, next, deactivated);
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));

trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);

diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
index e9b93e63406a..60bbb4583d16 100644
--- a/kernel/sched/alt_sched.h
+++ b/kernel/sched/alt_sched.h
@@ -22,8 +22,8 @@
#endif

#ifdef CONFIG_SCHED_PDS
-/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
-#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
+/* bits: RT(0-24), reserved(25-31), SCHED_NORMAL_PRIO_NUM(32), cpu idle task(1) */
+#define SCHED_BITS (64 + 1)
#endif /* CONFIG_SCHED_PDS */

#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
@@ -142,7 +142,7 @@ struct rq {
#ifdef CONFIG_SCHED_PDS
u64 time_edge;
#endif
- unsigned long prio;
+ unsigned long prio;

/* switch count */
u64 nr_switches;
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
index 27e09b4feb8c..5a94a98e19af 100644
--- a/kernel/sched/pds.h
+++ b/kernel/sched/pds.h
@@ -1,9 +1,15 @@
#define ALT_SCHED_NAME "PDS"

-static int sched_timeslice_shift = 22;
+#define MIN_SCHED_NORMAL_PRIO (32)
+#define SCHED_NORMAL_PRIO_NUM (32)
+#define SCHED_EDGE_DELTA (SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)

/* PDS assume NORMAL_PRIO_NUM is power of 2 */
#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+#define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))
+
+/* 4ms -> shift 22, 2 time slice slots -> shift 23 */
+static int sched_timeslice_shift = 23;

/*
* Common interfaces
@@ -11,18 +17,18 @@ static int sched_timeslice_shift = 22;
static inline void sched_timeslice_imp(const int timeslice_ms)
{
if (2 == timeslice_ms)
- sched_timeslice_shift = 21;
+ sched_timeslice_shift = 22;
}

static inline int
task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
{
- s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+ s64 delta = p->deadline - rq->time_edge + SCHED_EDGE_DELTA;

#ifdef ALT_SCHED_DEBUG
if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
"pds: task_sched_prio_normal() delta %lld\n", delta))
- return NORMAL_PRIO_NUM - 1;
+ return SCHED_NORMAL_PRIO_NUM - 1;
#endif

return max(0LL, delta);
@@ -30,8 +36,8 @@ task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)

static inline int task_sched_prio(const struct task_struct *p)
{
- return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
- MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+ return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
+ MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
}

static inline int
@@ -39,30 +45,35 @@ task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
{
u64 idx;

- if (p->prio < MAX_RT_PRIO)
- return p->prio;
+ if (p->prio < MIN_NORMAL_PRIO)
+ return p->prio >> 2;

- idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
- return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
+ idx = max(p->deadline + SCHED_EDGE_DELTA, rq->time_edge);
+ /*printk(KERN_INFO "sched: task_sched_prio_idx edge:%llu, deadline=%llu idx=%llu\n", rq->time_edge, p->deadline, idx);*/
+ return MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx);
}

-static inline int sched_prio2idx(int prio, struct rq *rq)
+static inline int sched_prio2idx(int sched_prio, struct rq *rq)
{
- return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
- MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
+ return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
+ sched_prio :
+ MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
}

-static inline int sched_idx2prio(int idx, struct rq *rq)
+static inline int sched_idx2prio(int sched_idx, struct rq *rq)
{
- return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
- NORMAL_PRIO_MOD(idx - rq->time_edge);
+ int ret;
+ ret = (sched_idx < MIN_SCHED_NORMAL_PRIO) ? sched_idx :
+ MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
+ /*printk(KERN_INFO "sched: sched_idx2prio edge:%llu, %d -> %d\n", rq->time_edge, sched_idx, ret);*/
+
+ return ret;
}

static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
{
- if (p->prio >= MAX_RT_PRIO)
- p->deadline = (rq->clock >> sched_timeslice_shift) +
- p->static_prio - (MAX_PRIO - NICE_WIDTH);
+ if (p->prio >= MIN_NORMAL_PRIO)
+ p->deadline = rq->time_edge + (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
}

int task_running_nice(struct task_struct *p)
@@ -70,36 +81,48 @@ int task_running_nice(struct task_struct *p)
return (p->prio > DEFAULT_PRIO);
}

+const u64 RT_MASK = 0xffffffffULL;
+
static inline void update_rq_time_edge(struct rq *rq)
{
struct list_head head;
u64 old = rq->time_edge;
u64 now = rq->clock >> sched_timeslice_shift;
u64 prio, delta;
+ DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);

if (now == old)
return;

rq->time_edge = now;
- delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
+ delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
INIT_LIST_HEAD(&head);

- for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
- list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
- NORMAL_PRIO_MOD(prio + old), &head);
+ /*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx %llu\n", rq->queue.bitmap[0], delta);*/
+ prio = MIN_SCHED_NORMAL_PRIO;
+ for_each_set_bit_from(prio, &rq->queue.bitmap[0], MIN_SCHED_NORMAL_PRIO + delta)
+ list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
+ SCHED_NORMAL_PRIO_MOD(prio + old), &head);

- rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
- rq->queue.bitmap[2] >> delta;
+ bitmap_shift_right(&normal[0], &rq->queue.bitmap[0], delta, SCHED_QUEUE_BITS);
if (!list_empty(&head)) {
struct task_struct *p;
- u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+ u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);

list_for_each_entry(p, &head, sq_node)
p->sq_idx = idx;

list_splice(&head, rq->queue.heads + idx);
- rq->queue.bitmap[2] |= 1UL;
+ set_bit(MIN_SCHED_NORMAL_PRIO, &normal[0]);
}
+ bitmap_replace(&rq->queue.bitmap[0], &normal[0], &rq->queue.bitmap[0],
+ (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
+ /*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx 0x%016lx", rq->queue.bitmap[0], normal);*/
+ if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
+ return;
+
+ rq->prio = (rq->prio < MIN_SCHED_NORMAL_PRIO + delta) ?
+ MIN_SCHED_NORMAL_PRIO : rq->prio - delta;
}

static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
@@ -112,7 +135,7 @@ static inline void time_slice_expired(struct task_struct *p, struct rq *rq)

static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
{
- u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
+ u64 max_dl = rq->time_edge + NICE_WIDTH / 2 - 1;
if (unlikely(p->deadline > max_dl))
p->deadline = max_dl;
}