Tk-Glitch 2024-04-19 16:39:41 +02:00
parent d9efb6e894
commit 4d2d24eef1
2 changed files with 117 additions and 132 deletions

View File

@@ -767,7 +767,7 @@ _tkg_srcprep() {
# prjc/bmq patch rev
if [ "$_kver" = "508" ] || [ "$_kver" = "507" ] || [ "$_kver" = "509" ] || [ "$_kver" = "511" ] || [ "$_kver" = "513" ] || [ "$_kver" = "514" ]; then
rev=3
elif [ "$_kver" = "510" ]; then
elif [ "$_kver" = "510" ] || [ "$_kver" = "608" ]; then
rev=5
elif [ "$_kver" = "512" ] || [ "$_kver" = "515" ] || [ "$_kver" = "516" ] || [ "$_kver" = "601" ] || [ "$_kver" = "603" ] || [ "$_kver" = "608" ]; then
rev=1

View File

@@ -158,7 +158,7 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ffe8f618ab86..3c17bfa142b7 100644
index ffe8f618ab86..18058ad9ddae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -771,8 +771,14 @@ struct task_struct {
@@ -185,14 +185,13 @@ index ffe8f618ab86..3c17bfa142b7 100644
#endif
int on_rq;
@@ -794,6 +801,20 @@ struct task_struct {
@@ -794,6 +801,19 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_ALT
+ u64 last_ran;
+ s64 time_slice;
+ int sq_idx;
+ struct list_head sq_node;
+#ifdef CONFIG_SCHED_BMQ
+ int boost_prio;
@@ -206,7 +205,7 @@ index ffe8f618ab86..3c17bfa142b7 100644
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
@@ -805,6 +826,7 @@ struct task_struct {
@@ -805,6 +825,7 @@ struct task_struct {
unsigned long core_cookie;
unsigned int core_occupation;
#endif
@@ -214,7 +213,7 @@ index ffe8f618ab86..3c17bfa142b7 100644
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
@@ -1564,6 +1586,15 @@ struct task_struct {
@@ -1564,6 +1585,15 @@ struct task_struct {
*/
};
@@ -269,10 +268,10 @@ index df3aca89d4f5..1df1f7635188 100644
static inline bool dl_time_before(u64 a, u64 b)
{
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..a9a1dfa99140 100644
index ab83d85e1183..e66dfb553bc5 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -18,6 +18,32 @@
@@ -18,6 +18,28 @@
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
@@ -285,20 +284,16 @@ index ab83d85e1183..a9a1dfa99140 100644
+/* +/- priority levels from the base priority */
+#ifdef CONFIG_SCHED_BMQ
+#define MAX_PRIORITY_ADJ (12)
+
+#define MIN_NORMAL_PRIO (MAX_RT_PRIO)
+#define MAX_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH)
+#define DEFAULT_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH / 2)
+#endif
+
+#ifdef CONFIG_SCHED_PDS
+#define MAX_PRIORITY_ADJ (0)
+#endif
+
+#define MIN_NORMAL_PRIO (128)
+#define NORMAL_PRIO_NUM (64)
+#define MAX_PRIO (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
+#define DEFAULT_PRIO (MAX_PRIO - NICE_WIDTH / 2)
+#endif
+#define DEFAULT_PRIO (MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)
+
+#endif /* CONFIG_SCHED_ALT */
+
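
The prio.h hunk above unifies the BMQ and PDS priority layouts: both flavours now share the 64-level normal range starting at 128, with the per-flavour MAX_PRIORITY_ADJ folded into DEFAULT_PRIO instead of a separate BMQ layout anchored at MAX_RT_PRIO. A minimal userspace sketch of the resulting numbers, assuming the mainline NICE_WIDTH of 40 (illustrative only, not part of the patch):

    /* Sketch only: checks the unified CONFIG_SCHED_ALT priority layout.
     * NICE_WIDTH = 40 is assumed from mainline include/linux/sched/prio.h. */
    #include <stdio.h>

    #define NICE_WIDTH       40
    #define MIN_NORMAL_PRIO  128
    #define NORMAL_PRIO_NUM  64
    #define MAX_PRIO         (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)

    static int default_prio(int max_priority_adj)
    {
        return MAX_PRIO - max_priority_adj - NICE_WIDTH / 2;
    }

    int main(void)
    {
        printf("MAX_PRIO         = %d\n", MAX_PRIO);         /* 192 */
        printf("BMQ DEFAULT_PRIO = %d\n", default_prio(12)); /* 160 */
        printf("PDS DEFAULT_PRIO = %d\n", default_prio(0));  /* 172 */
        return 0;
    }

BMQ's DEFAULT_PRIO of 160 keeps its +/-12 boost range inside the shared window, which is also why the init_task hunk further down drops the explicit "+ MAX_PRIORITY_ADJ" terms.
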
@@ -423,7 +418,7 @@ index bee58f7468c3..81f568107a6b 100644
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
index 7ecb458eb3da..9530a7173b37 100644
index 7ecb458eb3da..e8f8be4f23ba 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -70,9 +70,15 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -431,9 +426,9 @@ index 7ecb458eb3da..9530a7173b37 100644
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
+#ifdef CONFIG_SCHED_ALT
+ .prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
+ .prio = DEFAULT_PRIO,
+ .static_prio = DEFAULT_PRIO,
+ .normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
+ .normal_prio = DEFAULT_PRIO,
+#else
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
@@ -442,7 +437,7 @@ index 7ecb458eb3da..9530a7173b37 100644
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.user_cpus_ptr = NULL,
@@ -84,6 +90,17 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -84,6 +90,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
.restart_block = {
.fn = do_no_restart_syscall,
},
@@ -450,7 +445,6 @@ index 7ecb458eb3da..9530a7173b37 100644
+ .sq_node = LIST_HEAD_INIT(init_task.sq_node),
+#ifdef CONFIG_SCHED_BMQ
+ .boost_prio = 0,
+ .sq_idx = 15,
+#endif
+#ifdef CONFIG_SCHED_PDS
+ .deadline = 0,
@@ -460,7 +454,7 @@ index 7ecb458eb3da..9530a7173b37 100644
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
@@ -91,6 +108,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -91,6 +107,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
@@ -665,10 +659,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
index 000000000000..a8ee1063097d
index 000000000000..2f280340d55f
--- /dev/null
+++ b/kernel/sched/alt_core.c
@@ -0,0 +1,8940 @@
@@ -0,0 +1,8941 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -747,7 +741,7 @@ index 000000000000..a8ee1063097d
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
+#define ALT_SCHED_VERSION "v6.8-r1"
+#define ALT_SCHED_VERSION "v6.8-r5"
+
+/*
+ * Compile time debug macro
@@ -853,7 +847,6 @@ index 000000000000..a8ee1063097d
+static inline void sched_queue_init_idle(struct sched_queue *q,
+ struct task_struct *idle)
+{
+ idle->sq_idx = IDLE_TASK_SCHED_PRIO;
+ INIT_LIST_HEAD(&q->heads[IDLE_TASK_SCHED_PRIO]);
+ list_add_tail(&idle->sq_node, &q->heads[IDLE_TASK_SCHED_PRIO]);
+ idle->on_rq = TASK_ON_RQ_QUEUED;
@@ -886,6 +879,9 @@ index 000000000000..a8ee1063097d
+ return;
+
+ rq->prio = prio;
+#ifdef CONFIG_SCHED_PDS
+ rq->prio_idx = sched_prio2idx(rq->prio, rq);
+#endif
+ cpu = cpu_of(rq);
+ pr = atomic_read(&sched_prio_record);
+
@@ -922,7 +918,7 @@ index 000000000000..a8ee1063097d
+ */
+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
+{
+ const struct list_head *head = &rq->queue.heads[sched_prio2idx(rq->prio, rq)];
+ const struct list_head *head = &rq->queue.heads[sched_rq_prio_idx(rq)];
+
+ return list_first_entry(head, struct task_struct, sq_node);
+}
@@ -930,12 +926,13 @@ index 000000000000..a8ee1063097d
+static inline struct task_struct *
+sched_rq_next_task(struct task_struct *p, struct rq *rq)
+{
+ unsigned long idx = p->sq_idx;
+ struct list_head *head = &rq->queue.heads[idx];
+ struct list_head *next = p->sq_node.next;
+
+ if (list_is_last(&p->sq_node, head)) {
+ idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
+ sched_idx2prio(idx, rq) + 1);
+ if (&rq->queue.heads[0] <= next && next < &rq->queue.heads[SCHED_LEVELS]) {
+ struct list_head *head;
+ unsigned long idx = next - &rq->queue.heads[0];
+
+ idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS, sched_idx2prio(idx, rq) + 1);
+ head = &rq->queue.heads[sched_prio2idx(idx, rq)];
+
+ return list_first_entry(head, struct task_struct, sq_node);
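
The rework above is the heart of the r5 queue changes: the cached p->sq_idx field is gone (see the task_struct and init_task hunks earlier), and a task's slot is recovered from its embedded list node instead. When sq_node.next points inside rq->queue.heads[], the task was the tail of its slot's list and the slot index falls out of pointer subtraction; the dequeue macro and requeue_task() below use the same derivation. A self-contained userspace sketch of the pointer trick, with hypothetical names (not the kernel code):

    /* Sketch only: derive a queue-slot index from a list node whose
     * ->next points back into the array of list heads. */
    #include <stdio.h>

    #define SCHED_LEVELS 4

    struct list_head { struct list_head *prev, *next; };

    static struct list_head heads[SCHED_LEVELS];

    int main(void)
    {
        struct list_head node;

        /* queue 'node' as the only entry of slot 2 */
        heads[2].next = &node;
        heads[2].prev = &node;
        node.next = &heads[2];
        node.prev = &heads[2];

        struct list_head *next = node.next;

        if (&heads[0] <= next && next < &heads[SCHED_LEVELS]) {
            unsigned long idx = (unsigned long)(next - &heads[0]);
            printf("node sits in queue slot %lu\n", idx); /* prints 2 */
        }
        return 0;
    }
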
@@ -1434,18 +1431,21 @@ index 000000000000..a8ee1063097d
+ \
+ __list_del_entry(&p->sq_node); \
+ if (p->sq_node.prev == p->sq_node.next) { \
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
+ clear_bit(sched_idx2prio(p->sq_node.next - &rq->queue.heads[0], rq), \
+ rq->queue.bitmap); \
+ func; \
+ }
+
+#define __SCHED_ENQUEUE_TASK(p, rq, flags, func) \
+ sched_info_enqueue(rq, p); \
+ \
+ p->sq_idx = task_sched_prio_idx(p, rq); \
+ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
+ if (list_is_first(&p->sq_node, &rq->queue.heads[p->sq_idx])) { \
+ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
+ { \
+ int idx, prio; \
+ TASK_SCHED_PRIO_IDX(p, rq, idx, prio); \
+ list_add_tail(&p->sq_node, &rq->queue.heads[idx]); \
+ if (list_is_first(&p->sq_node, &rq->queue.heads[idx])) { \
+ set_bit(prio, rq->queue.bitmap); \
+ func; \
+ } \
+ }
+
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
@@ -1488,10 +1488,12 @@ index 000000000000..a8ee1063097d
+ sched_update_tick_dependency(rq);
+}
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
+static inline void requeue_task(struct task_struct *p, struct rq *rq)
+{
+ struct list_head *node = &p->sq_node;
+ int deq_idx, idx, prio;
+
+ TASK_SCHED_PRIO_IDX(p, rq, idx, prio);
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
@@ -1502,14 +1504,12 @@ index 000000000000..a8ee1063097d
+ return;
+
+ __list_del_entry(node);
+ if (node->prev == node->next && idx != p->sq_idx)
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+
+ p->sq_idx = idx;
+ if (node->prev == node->next && (deq_idx = node->next - &rq->queue.heads[0]) != idx)
+ clear_bit(sched_idx2prio(deq_idx, rq), rq->queue.bitmap);
+
+ list_add_tail(node, &rq->queue.heads[idx]);
+ if (list_is_first(node, &rq->queue.heads[idx]))
+ set_bit(sched_idx2prio(idx, rq), rq->queue.bitmap);
+ set_bit(prio, rq->queue.bitmap);
+ update_sched_preempt_mask(rq);
+}
+
@@ -2113,8 +2113,7 @@ index 000000000000..a8ee1063097d
+
+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
+{
+ return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
+ static_prio + MAX_PRIORITY_ADJ;
+ return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : static_prio;
+}
+
+/*
@@ -4916,8 +4915,8 @@ index 000000000000..a8ee1063097d
+ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, p,
+ &rq->active_balance_work);
+
+ raw_spin_lock(&src_rq->lock);
+ preempt_enable();
+ raw_spin_lock(&src_rq->lock);
+ }
+
+ return res;
@@ -5345,7 +5344,7 @@ index 000000000000..a8ee1063097d
+ sched_task_renew(p, rq);
+
+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
+ requeue_task(p, rq);
+}
+
+/*
@@ -5540,9 +5539,6 @@ index 000000000000..a8ee1063097d
+#endif
+
+ if (likely(prev != next)) {
+#ifdef CONFIG_SCHED_BMQ
+ rq->last_ts_switch = rq->clock;
+#endif
+ next->last_ran = rq->clock_task;
+
+ /*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
@@ -5913,16 +5909,11 @@ index 000000000000..a8ee1063097d
+{
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p)) {
+ int idx;
+
+ update_rq_clock(rq);
+ idx = task_sched_prio_idx(p, rq);
+ if (idx != p->sq_idx) {
+ requeue_task(p, rq, idx);
+ requeue_task(p, rq);
+ wakeup_preempt(rq);
+ }
+}
+}
+
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
@@ -6044,6 +6035,7 @@ index 000000000000..a8ee1063097d
+ /* Avoid rq from going away on us: */
+ preempt_disable();
+
+ if (task_on_rq_queued(p))
+ __balance_callbacks(rq);
+ __task_access_unlock(p, lock);
+
@@ -7075,11 +7067,11 @@ index 000000000000..a8ee1063097d
+ p = current;
+ if (rt_task(p)) {
+ if (task_on_rq_queued(p))
+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
+ requeue_task(p, rq);
+ } else if (rq->nr_running > 1) {
+ do_sched_yield_type_1(p, rq);
+ if (task_on_rq_queued(p))
+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
+ requeue_task(p, rq);
+ }
+
+ preempt_disable();
@@ -8377,6 +8369,9 @@ index 000000000000..a8ee1063097d
+
+ sched_queue_init(&rq->queue);
+ rq->prio = IDLE_TASK_SCHED_PRIO;
+#ifdef CONFIG_SCHED_PDS
+ rq->prio_idx = rq->prio;
+#endif
+
+ raw_spin_lock_init(&rq->lock);
+ rq->nr_running = rq->nr_uninterruptible = 0;
@@ -8778,7 +8773,7 @@ index 000000000000..a8ee1063097d
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static DEFINE_MUTEX(shares_mutex);
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+ /*
+ * We can't change the weight of the root cgroup.
@@ -9649,10 +9644,10 @@ index 000000000000..1dbd7eb6a434
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
index 000000000000..3f8e156d768e
index 000000000000..81c3ed9ebb7d
--- /dev/null
+++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,974 @@
@@ -0,0 +1,972 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -9827,6 +9822,7 @@ index 000000000000..3f8e156d768e
+ struct sched_queue queue ____cacheline_aligned;
+#ifdef CONFIG_SCHED_PDS
+ u64 time_edge;
+ unsigned long prio_idx;
+#endif
+ unsigned long prio;
+
@@ -9889,9 +9885,6 @@ index 000000000000..3f8e156d768e
+ /* Ensure that all clocks are in the same cache line */
+ u64 clock ____cacheline_aligned;
+ u64 clock_task;
+#ifdef CONFIG_SCHED_BMQ
+ u64 last_ts_switch;
+#endif
+
+ unsigned int nr_running;
+ unsigned long nr_uninterruptible;
@@ -10629,19 +10622,16 @@ index 000000000000..3f8e156d768e
+#endif /* ALT_SCHED_H */
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
index 000000000000..840009dc1e8d
index 000000000000..bdc34ad3cc8c
--- /dev/null
+++ b/kernel/sched/bmq.h
@@ -0,0 +1,99 @@
@@ -0,0 +1,98 @@
+#define ALT_SCHED_NAME "BMQ"
+
+/*
+ * BMQ only routines
+ */
+#define rq_switch_time(rq) ((rq)->clock - (rq)->last_ts_switch)
+#define boost_threshold(p) (sysctl_sched_base_slice >> ((14 - (p)->boost_prio) / 2))
+
+static inline void boost_task(struct task_struct *p)
+static inline void boost_task(struct task_struct *p, int n)
+{
+ int limit;
+
@@ -10650,15 +10640,13 @@ index 000000000000..840009dc1e8d
+ limit = -MAX_PRIORITY_ADJ;
+ break;
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ limit = 0;
+ break;
+ default:
+ return;
+ }
+
+ if (p->boost_prio > limit)
+ p->boost_prio--;
+ p->boost_prio = max(limit, p->boost_prio - n);
+}
+
+static inline void deboost_task(struct task_struct *p)
@@ -10672,23 +10660,22 @@ index 000000000000..840009dc1e8d
+ */
+static inline void sched_timeslice_imp(const int timeslice_ms) {}
+
+/* This API is used in task_prio(), return value readed by human users */
+static inline int
+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
+ return p->prio + p->boost_prio - MAX_RT_PRIO;
+ return p->prio + p->boost_prio - MIN_NORMAL_PRIO;
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO)? (p->prio >> 2) :
+ MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MAX_RT_PRIO) / 2;
+ return (p->prio < MIN_NORMAL_PRIO)? (p->prio >> 2) :
+ MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MIN_NORMAL_PRIO) / 2;
+}
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ return task_sched_prio(p);
+}
+#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio) \
+ prio = task_sched_prio(p); \
+ idx = prio;
+
+static inline int sched_prio2idx(int prio, struct rq *rq)
+{
@@ -10700,16 +10687,20 @@ index 000000000000..840009dc1e8d
+ return idx;
+}
+
+static inline int sched_rq_prio_idx(struct rq *rq)
+{
+ return rq->prio;
+}
+
+inline int task_running_nice(struct task_struct *p)
+{
+ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
+ return (p->prio + p->boost_prio > DEFAULT_PRIO);
+}
+
+static inline void sched_update_rq_clock(struct rq *rq) {}
+
+static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
+{
+ if (rq_switch_time(rq) > sysctl_sched_base_slice)
+ deboost_task(p);
+}
+
@@ -10723,14 +10714,15 @@ index 000000000000..840009dc1e8d
+
+static inline void sched_task_ttwu(struct task_struct *p)
+{
+ if(this_rq()->clock_task - p->last_ran > sysctl_sched_base_slice)
+ boost_task(p);
+ s64 delta = this_rq()->clock_task > p->last_ran;
+
+ if (likely(delta > 0))
+ boost_task(p, delta >> 22);
+}
+
+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
+{
+ if (rq_switch_time(rq) < boost_threshold(p))
+ boost_task(p);
+ boost_task(p, 1);
+}
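
Boosting also changed shape in r5: boost_task() now takes a step count and clamps with max() instead of stepping one level per call, and sched_task_ttwu() feeds it a wakeup delta shifted right by 22 bits, on the order of one boost level per ~4.2 ms (2^22 ns) a task spent off the CPU. A rough userspace sketch of that arithmetic, with simplified stand-in names (not the patch code):

    /* Sketch only: nanoseconds-to-boost-steps conversion via >> 22,
     * with a max()-style clamp like the new boost_task(). */
    #include <stdio.h>

    static int boost_prio; /* stand-in for p->boost_prio, 0 = no boost */

    static void boost_task(int n, int limit)
    {
        /* more negative boost_prio = higher effective priority */
        int boosted = boost_prio - n;

        boost_prio = boosted > limit ? boosted : limit;
    }

    int main(void)
    {
        long long slept_ns = 10LL * 1000 * 1000; /* task slept 10 ms */
        int steps = (int)(slept_ns >> 22);       /* = 2 boost levels */

        boost_task(steps, -12);                  /* -MAX_PRIORITY_ADJ */
        printf("steps = %d, boost_prio = %d\n", steps, boost_prio);
        return 0;
    }
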
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index d9dc9ab3773f..71a25540d65e 100644
@@ -10956,10 +10948,10 @@ index 31231925f1ec..c087311032f2 100644
+#endif
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
new file mode 100644
index 000000000000..b2232a2cbb6d
index 000000000000..f9de95e0ec0e
--- /dev/null
+++ b/kernel/sched/pds.h
@@ -0,0 +1,141 @@
@@ -0,0 +1,134 @@
+#define ALT_SCHED_NAME "PDS"
+
+static const u64 RT_MASK = ((1ULL << MIN_SCHED_NORMAL_PRIO) - 1);
@@ -10967,7 +10959,7 @@ index 000000000000..b2232a2cbb6d
+#define SCHED_NORMAL_PRIO_NUM (32)
+#define SCHED_EDGE_DELTA (SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
+
+/* PDS assume NORMAL_PRIO_NUM is power of 2 */
+/* PDS assume SCHED_NORMAL_PRIO_NUM is power of 2 */
+#define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))
+
+/* default time slice 4ms -> shift 22, 2 time slice slots -> shift 23 */
@@ -10976,24 +10968,18 @@ index 000000000000..b2232a2cbb6d
+/*
+ * Common interfaces
+ */
+static inline void sched_timeslice_imp(const int timeslice_ms)
+{
+ if (2 == timeslice_ms)
+ sched_timeslice_shift = 22;
+}
+
+static inline int
+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
+ s64 delta = p->deadline - rq->time_edge + SCHED_EDGE_DELTA;
+ u64 sched_dl = max(p->deadline, rq->time_edge);
+
+#ifdef ALT_SCHED_DEBUG
+ if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
+ "pds: task_sched_prio_normal() delta %lld\n", delta))
+ if (WARN_ONCE(sched_dl - rq->time_edge > NORMAL_PRIO_NUM - 1,
+ "pds: task_sched_prio_normal() delta %lld\n", sched_dl - rq->time_edge))
+ return SCHED_NORMAL_PRIO_NUM - 1;
+#endif
+
+ return max(0LL, delta);
+ return sched_dl - rq->time_edge;
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
@ -11002,17 +10988,14 @@ index 000000000000..b2232a2cbb6d
+ MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+}
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ u64 idx;
+
+ if (p->prio < MIN_NORMAL_PRIO)
+ return p->prio >> 2;
+
+ idx = max(p->deadline + SCHED_EDGE_DELTA, rq->time_edge);
+ /*printk(KERN_INFO "sched: task_sched_prio_idx edge:%llu, deadline=%llu idx=%llu\n", rq->time_edge, p->deadline, idx);*/
+ return MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx);
+#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio) \
+ if (p->prio < MIN_NORMAL_PRIO) { \
+ prio = p->prio >> 2; \
+ idx = prio; \
+ } else { \
+ u64 sched_dl = max(p->deadline, rq->time_edge); \
+ prio = MIN_SCHED_NORMAL_PRIO + sched_dl - rq->time_edge; \
+ idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_dl); \
+ }
+
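
The PDS replacement macro yields two distinct values where BMQ uses one: prio is the preemption level, i.e. the task's clamped deadline distance from rq->time_edge, while idx is the physical list slot, the deadline folded into the rotating normal-priority window. A userspace sketch of the mapping; MIN_SCHED_NORMAL_PRIO = 32 and the 32-slot window are assumptions taken from the surrounding patch context:

    /* Sketch only: PDS deadline-to-(prio, idx) mapping. */
    #include <stdio.h>

    #define MIN_SCHED_NORMAL_PRIO    32  /* assumed */
    #define SCHED_NORMAL_PRIO_NUM    32
    #define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))

    int main(void)
    {
        unsigned long long time_edge = 100; /* rq->time_edge */
        unsigned long long deadline  = 107; /* p->deadline   */
        unsigned long long sched_dl  =
            deadline > time_edge ? deadline : time_edge;

        /* preemption level: distance from the current time edge */
        unsigned long long prio =
            MIN_SCHED_NORMAL_PRIO + sched_dl - time_edge;
        /* list slot: deadline folded into the rotating window */
        unsigned long long idx =
            MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_dl);

        printf("prio = %llu, idx = %llu\n", prio, idx); /* 39, 43 */
        return 0;
    }

Because the list slot no longer equals the preemption level, the rq gains a cached prio_idx field (alt_sched.h hunk above) that alt_core.c refreshes whenever rq->prio changes, and sched_rq_prio_idx() below simply returns it.
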
+static inline int sched_prio2idx(int sched_prio, struct rq *rq)
@ -11029,6 +11012,11 @@ index 000000000000..b2232a2cbb6d
+ MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
+}
+
+static inline int sched_rq_prio_idx(struct rq *rq)
+{
+ return rq->prio_idx;
+}
+
+int task_running_nice(struct task_struct *p)
+{
+ return (p->prio > DEFAULT_PRIO);
@@ -11056,12 +11044,8 @@ index 000000000000..b2232a2cbb6d
+
+ bitmap_shift_right(normal, rq->queue.bitmap, delta, SCHED_QUEUE_BITS);
+ if (!list_empty(&head)) {
+ struct task_struct *p;
+ u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
+
+ list_for_each_entry(p, &head, sq_node)
+ p->sq_idx = idx;
+
+ __list_splice(&head, rq->queue.heads + idx, rq->queue.heads[idx].next);
+ set_bit(MIN_SCHED_NORMAL_PRIO, normal);
+ }
@@ -11071,19 +11055,20 @@ index 000000000000..b2232a2cbb6d
+ if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
+ return;
+
+ rq->prio = (rq->prio < MIN_SCHED_NORMAL_PRIO + delta) ?
+ MIN_SCHED_NORMAL_PRIO : rq->prio - delta;
+ rq->prio = max_t(u64, MIN_SCHED_NORMAL_PRIO, rq->prio - delta);
+ rq->prio_idx = sched_prio2idx(rq->prio, rq);
+}
+
+static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
+{
+ if (p->prio >= MIN_NORMAL_PRIO)
+ p->deadline = rq->time_edge + (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
+ p->deadline = rq->time_edge + SCHED_EDGE_DELTA +
+ (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
+}
+
+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
+{
+ u64 max_dl = rq->time_edge + NICE_WIDTH / 2 - 1;
+ u64 max_dl = rq->time_edge + SCHED_EDGE_DELTA + NICE_WIDTH / 2 - 1;
+ if (unlikely(p->deadline > max_dl))
+ p->deadline = max_dl;
+}