6.8: Update Project C to v6.8-r1 - https://gitlab.com/alfredchen/projectc/-/blob/master/6.8/prjc_v6.8-r1.patch?ref_type=heads
@@ -764,33 +764,13 @@ _tkg_srcprep() {
 	tkgpatch="$srcdir/0002-mm-Support-soft-dirty-flag-read-with-reset.patch" && _tkg_patcher

 	# prjc/bmq patch rev
-	if [ "$_kver" = "508" ] || [ "$_kver" = "507" ]; then
-	  rev=3
-	elif [ "$_kver" = "509" ]; then
+	if [ "$_kver" = "508" ] || [ "$_kver" = "507" ] || [ "$_kver" = "509" ] || [ "$_kver" = "511" ] || [ "$_kver" = "513" ] || [ "$_kver" = "514" ]; then
 	  rev=3
 	elif [ "$_kver" = "510" ]; then
 	  rev=5
-	elif [ "$_kver" = "511" ]; then
-	  rev=3
-	elif [ "$_kver" = "512" ]; then
+	elif [ "$_kver" = "512" ] || [ "$_kver" = "515" ] || [ "$_kver" = "516" ] || [ "$_kver" = "601" ] || [ "$_kver" = "603" ] || [ "$_kver" = "608" ]; then
 	  rev=1
-	elif [ "$_kver" = "513" ]; then
-	  rev=3
-	elif [ "$_kver" = "514" ]; then
-	  rev=3
-	elif [ "$_kver" = "515" ]; then
-	  rev=1
-	elif [ "$_kver" = "516" ]; then
-	  rev=1
-	elif [ "$_kver" = "518" ]; then
+	elif [ "$_kver" = "518" ] || [ "$_kver" = "602" ] || [ "$_kver" = "607" ]; then
 	  rev=2
-	elif [ "$_kver" = "601" ]; then
-	  rev=1
-	elif [ "$_kver" = "602" ]; then
-	  rev=2
-	elif [ "$_kver" = "603" ]; then
-	  rev=1
-	elif [ "$_kver" = "607" ]; then
-	  rev=2
 	else
 	  rev=0
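For quick reference, the rev selection above collapses to the mapping below. This is a paraphrase of the updated shell logic in the hunk, not code from the commit; the helper name prjc_rev is made up for illustration.

#!/usr/bin/env bash
# Sketch: Project C / BMQ patch revision per _kver after this change.
prjc_rev() {
  case "$1" in
    508|507|509|511|513|514) echo 3 ;;   # 5.7-5.9, 5.11, 5.13, 5.14 -> r3
    510)                     echo 5 ;;   # 5.10 -> r5
    512|515|516|601|603|608) echo 1 ;;   # 5.12, 5.15, 5.16, 6.1, 6.3, 6.8 -> r1
    518|602|607)             echo 2 ;;   # 5.18, 6.2, 6.7 -> r2
    *)                       echo 0 ;;   # everything else -> r0
  esac
}

prjc_rev 608   # prints 1, i.e. prjc_v6.8-r1.patch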
@@ -1,8 +1,8 @@
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 6584a1f9bfe3..226c79dd34cc 100644
+index 6584a1f9bfe3..1ac9e0850c9c 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1646,3 +1646,13 @@ is 10 seconds.
+@@ -1646,3 +1646,12 @@ is 10 seconds.

 The softlockup threshold is (``2 * watchdog_thresh``). Setting this
 tunable to zero will disable lockup detection altogether.
@@ -15,7 +15,6 @@ index 6584a1f9bfe3..226c79dd34cc 100644
 +
 + 0 - No yield.
 + 1 - Requeue task. (default)
-+ 2 - Set run queue skip task. Same as CFS.
 diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
 new file mode 100644
 index 000000000000..05c84eec0f31
@@ -666,10 +665,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..2745abb36a20
+index 000000000000..a8ee1063097d
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8960 @@
+@@ -0,0 +1,8940 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -748,7 +747,7 @@ index 000000000000..2745abb36a20
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.8-r0"
++#define ALT_SCHED_VERSION "v6.8-r1"
 +
 +/*
 + * Compile time debug macro
@@ -768,8 +767,6 @@ index 000000000000..2745abb36a20
 + */
 +unsigned int sysctl_sched_base_slice __read_mostly = (4 << 20);
 +
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
-+
 +#ifdef CONFIG_SCHED_BMQ
 +#include "bmq.h"
 +#endif
@@ -790,7 +787,6 @@ index 000000000000..2745abb36a20
 + * sched_yield_type - Type of sched_yield() will be performed.
 + * 0: No yield.
 + * 1: Requeue task. (default)
-+ * 2: Set rq skip task. (Same as mainline)
 + */
 +int sched_yield_type __read_mostly = 1;
 +
@@ -825,11 +821,12 @@ index 000000000000..2745abb36a20
 +# define finish_arch_post_lock_switch() do { } while (0)
 +#endif
 +
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS + 1] ____cacheline_aligned_in_smp;
++
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS - 1];
 +#ifdef CONFIG_SCHED_SMT
-+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_sg_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
 +#endif
-+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
-+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
 +
 +/* task function */
 +static inline const struct cpumask *task_user_cpus(struct task_struct *p)
@@ -857,22 +854,23 @@ index 000000000000..2745abb36a20
 + struct task_struct *idle)
 +{
 + idle->sq_idx = IDLE_TASK_SCHED_PRIO;
-+ INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
-+ list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++ INIT_LIST_HEAD(&q->heads[IDLE_TASK_SCHED_PRIO]);
++ list_add_tail(&idle->sq_node, &q->heads[IDLE_TASK_SCHED_PRIO]);
++ idle->on_rq = TASK_ON_RQ_QUEUED;
 +}
 +
 +static inline void
 +clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
 +{
 + if (low < pr && pr <= high)
-+ cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++ cpumask_clear_cpu(cpu, sched_preempt_mask + pr);
 +}
 +
 +static inline void
 +set_recorded_preempt_mask(int pr, int low, int high, int cpu)
 +{
 + if (low < pr && pr <= high)
-+ cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++ cpumask_set_cpu(cpu, sched_preempt_mask + pr);
 +}
 +
 +static atomic_t sched_prio_record = ATOMIC_INIT(0);
@@ -895,8 +893,8 @@ index 000000000000..2745abb36a20
 + if (IDLE_TASK_SCHED_PRIO == last_prio) {
 +#ifdef CONFIG_SCHED_SMT
 + if (static_branch_likely(&sched_smt_present))
-+ cpumask_andnot(&sched_sg_idle_mask,
-+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
++ cpumask_andnot(sched_sg_idle_mask,
++ sched_sg_idle_mask, cpu_smt_mask(cpu));
 +#endif
 + cpumask_clear_cpu(cpu, sched_idle_mask);
 + last_prio -= 2;
@@ -910,8 +908,8 @@ index 000000000000..2745abb36a20
 +#ifdef CONFIG_SCHED_SMT
 + if (static_branch_likely(&sched_smt_present) &&
 + cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
-+ cpumask_or(&sched_sg_idle_mask,
-+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
++ cpumask_or(sched_sg_idle_mask,
++ sched_sg_idle_mask, cpu_smt_mask(cpu));
 +#endif
 + cpumask_set_cpu(cpu, sched_idle_mask);
 + prio -= 2;
@@ -946,16 +944,6 @@ index 000000000000..2745abb36a20
 + return list_next_entry(p, sq_node);
 +}
 +
-+static inline struct task_struct *rq_runnable_task(struct rq *rq)
-+{
-+ struct task_struct *next = sched_rq_first_task(rq);
-+
-+ if (unlikely(next == rq->skip))
-+ next = sched_rq_next_task(next, rq);
-+
-+ return next;
-+}
-+
 +/*
 + * Serialization rules:
 + *
@@ -1444,18 +1432,21 @@ index 000000000000..2745abb36a20
 +#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
 + sched_info_dequeue(rq, p); \
 + \
-+ list_del(&p->sq_node); \
-+ if (list_empty(&rq->queue.heads[p->sq_idx])) { \
++ __list_del_entry(&p->sq_node); \
++ if (p->sq_node.prev == p->sq_node.next) { \
 + clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
 + func; \
 + }
 +
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
++#define __SCHED_ENQUEUE_TASK(p, rq, flags, func) \
 + sched_info_enqueue(rq, p); \
 + \
 + p->sq_idx = task_sched_prio_idx(p, rq); \
 + list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
-+ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++ if (list_is_first(&p->sq_node, &rq->queue.heads[p->sq_idx])) { \
++ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
++ func; \
++ }
 +
 +static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
 +{
@@ -1487,8 +1478,7 @@ index 000000000000..2745abb36a20
 + task_cpu(p), cpu_of(rq));
 +#endif
 +
-+ __SCHED_ENQUEUE_TASK(p, rq, flags);
-+ update_sched_preempt_mask(rq);
++ __SCHED_ENQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
 + ++rq->nr_running;
 +#ifdef CONFIG_SMP
 + if (2 == rq->nr_running)
@@ -1500,22 +1490,27 @@ index 000000000000..2745abb36a20
 +
 +static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
 +{
++ struct list_head *node = &p->sq_node;
++
 +#ifdef ALT_SCHED_DEBUG
 + lockdep_assert_held(&rq->lock);
 + /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
 + WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
 + cpu_of(rq), task_cpu(p));
 +#endif
++ if (list_is_last(node, &rq->queue.heads[idx]))
++ return;
 +
-+ list_del(&p->sq_node);
-+ list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
-+ if (idx != p->sq_idx) {
-+ if (list_empty(&rq->queue.heads[p->sq_idx]))
-+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+ p->sq_idx = idx;
-+ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+ update_sched_preempt_mask(rq);
-+ }
++ __list_del_entry(node);
++ if (node->prev == node->next && idx != p->sq_idx)
++ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++ p->sq_idx = idx;
++
++ list_add_tail(node, &rq->queue.heads[idx]);
++ if (list_is_first(node, &rq->queue.heads[idx]))
++ set_bit(sched_idx2prio(idx, rq), rq->queue.bitmap);
++ update_sched_preempt_mask(rq);
 +}
 +
 +/*
@@ -1737,7 +1732,7 @@ index 000000000000..2745abb36a20
 +
 + hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
 +
-+ for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++ for (mask = per_cpu(sched_cpu_topo_masks, cpu);
 + mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
 + for_each_cpu_and(i, mask, hk_mask)
 + if (!idle_cpu(i))
@@ -2731,28 +2726,33 @@ index 000000000000..2745abb36a20
 +}
 +
 +static inline void
-+sched_preempt_mask_flush(cpumask_t *mask, int prio)
++sched_preempt_mask_flush(cpumask_t *mask, int prio, int ref)
 +{
 + int cpu;
 +
-+ cpumask_copy(mask, sched_idle_mask);
-+
++ cpumask_copy(mask, sched_preempt_mask + ref);
++ if (prio < ref) {
 + for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits) {
 + if (prio < cpu_rq(cpu)->prio)
 + cpumask_set_cpu(cpu, mask);
++ }
++ } else {
++ for_each_cpu_andnot(cpu, mask, sched_idle_mask) {
++ if (prio >= cpu_rq(cpu)->prio)
++ cpumask_clear_cpu(cpu, mask);
++ }
 + }
 +}
 +
 +static inline int
-+preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++preempt_mask_check(cpumask_t *preempt_mask, cpumask_t *allow_mask, int prio)
 +{
-+ int task_prio = task_sched_prio(p);
-+ cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++ cpumask_t *mask = sched_preempt_mask + prio;
 + int pr = atomic_read(&sched_prio_record);
 +
-+ if (pr != task_prio) {
-+ sched_preempt_mask_flush(mask, task_prio);
-+ atomic_set(&sched_prio_record, task_prio);
++ if (pr != prio && SCHED_QUEUE_BITS - 1 != prio) {
++ sched_preempt_mask_flush(mask, prio, pr);
++ atomic_set(&sched_prio_record, prio);
 + }
 +
 + return cpumask_and(preempt_mask, allow_mask, mask);
@@ -2767,10 +2767,10 @@ index 000000000000..2745abb36a20
 +
 + if (
 +#ifdef CONFIG_SCHED_SMT
-+ cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
++ cpumask_and(&mask, &allow_mask, sched_sg_idle_mask) ||
 +#endif
 + cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
-+ preempt_mask_check(p, &allow_mask, &mask))
++ preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
 + return best_mask_cpu(task_cpu(p), &mask);
 +
 + return best_mask_cpu(task_cpu(p), &allow_mask);
@@ -4874,7 +4874,7 @@ index 000000000000..2745abb36a20
 + rq->active_balance = 0;
 + /* _something_ may have changed the task, double check again */
 + if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++ cpumask_and(&tmp, p->cpus_ptr, sched_sg_idle_mask) &&
 + !is_migration_disabled(p)) {
 + int cpu = cpu_of(rq);
 + int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
@@ -4890,58 +4890,61 @@ index 000000000000..2745abb36a20
 +}
 +
 +/* sg_balance_trigger - trigger slibing group balance for @cpu */
-+static inline int sg_balance_trigger(const int cpu)
++static inline int sg_balance_trigger(struct rq *src_rq, const int cpu)
 +{
 + struct rq *rq= cpu_rq(cpu);
 + unsigned long flags;
-+ struct task_struct *curr;
++ struct task_struct *p;
 + int res;
 +
 + if (!raw_spin_trylock_irqsave(&rq->lock, flags))
 + return 0;
-+ curr = rq->curr;
-+ res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\
-+ cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\
-+ !is_migration_disabled(curr) && (!rq->active_balance);
 +
++ res = (1 == rq->nr_running) && \
++ !is_migration_disabled((p = sched_rq_first_task(rq))) && \
++ cpumask_intersects(p->cpus_ptr, sched_sg_idle_mask) && \
++ !rq->active_balance;
 + if (res)
 + rq->active_balance = 1;
 +
 + raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
-+ if (res)
-+ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++ if (res) {
++ preempt_disable();
++ raw_spin_unlock(&src_rq->lock);
++
++ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, p,
 + &rq->active_balance_work);
++
++ raw_spin_lock(&src_rq->lock);
++ preempt_enable();
++ }
++
 + return res;
 +}
 +
 +/*
 + * sg_balance - slibing group balance check for run queue @rq
 + */
-+static inline void sg_balance(struct rq *rq, int cpu)
++static inline void sg_balance(struct rq *rq)
 +{
 + cpumask_t chk;
 +
-+ /* exit when cpu is offline */
-+ if (unlikely(!rq->online))
-+ return;
-+
-+ /*
-+ * Only cpu in slibing idle group will do the checking and then
-+ * find potential cpus which can migrate the current running task
-+ */
-+ if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
-+ cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
++ if (cpumask_andnot(&chk, cpu_active_mask, sched_idle_mask) &&
 + cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
-+ int i;
++ int i, cpu = cpu_of(rq);
 +
 + for_each_cpu_wrap(i, &chk, cpu) {
-+ if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\
-+ sg_balance_trigger(i))
++ if (cpumask_subset(cpu_smt_mask(i), &chk) &&\
++ sg_balance_trigger(rq, i))
 + return;
 + }
 + }
 +}
++
++static DEFINE_PER_CPU(struct balance_callback, sg_balance_head) = {
++ .func = sg_balance,
++};
 +#endif /* CONFIG_SCHED_SMT */
 +
 +#ifdef CONFIG_NO_HZ_FULL
@@ -5271,7 +5274,7 @@ index 000000000000..2745abb36a20
 + set_task_cpu(p, dest_cpu);
 + sched_task_sanity_check(p, dest_rq);
 + sched_mm_cid_migrate_to(dest_rq, p, cpu_of(rq));
-+ __SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++ __SCHED_ENQUEUE_TASK(p, dest_rq, 0, );
 + nr_migrated++;
 + }
 + nr_tries--;
@@ -5282,7 +5285,7 @@ index 000000000000..2745abb36a20
 +
 +static inline int take_other_rq_tasks(struct rq *rq, int cpu)
 +{
-+ struct cpumask *topo_mask, *end_mask;
++ cpumask_t *topo_mask, *end_mask, chk;
 +
 + if (unlikely(!rq->online))
 + return 0;
@@ -5290,11 +5293,15 @@ index 000000000000..2745abb36a20
 + if (cpumask_empty(&sched_rq_pending_mask))
 + return 0;
 +
-+ topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++ topo_mask = per_cpu(sched_cpu_topo_masks, cpu);
 + end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
 + do {
 + int i;
-+ for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++
++ if (!cpumask_and(&chk, &sched_rq_pending_mask, topo_mask))
++ continue;
++
++ for_each_cpu_wrap(i, &chk, cpu) {
 + int nr_migrated;
 + struct rq *src_rq;
 +
@@ -5359,34 +5366,18 @@ index 000000000000..2745abb36a20
 +static inline struct task_struct *
 +choose_next_task(struct rq *rq, int cpu)
 +{
-+ struct task_struct *next;
++ struct task_struct *next = sched_rq_first_task(rq);
 +
-+ if (unlikely(rq->skip)) {
-+ next = rq_runnable_task(rq);
-+ if (next == rq->idle) {
-+#ifdef CONFIG_SMP
-+ if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+ rq->skip = NULL;
-+ schedstat_inc(rq->sched_goidle);
-+ return next;
-+#ifdef CONFIG_SMP
-+ }
-+ next = rq_runnable_task(rq);
-+#endif
-+ }
-+ rq->skip = NULL;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ hrtick_start(rq, next->time_slice);
-+#endif
-+ return next;
-+ }
-+
-+ next = sched_rq_first_task(rq);
 + if (next == rq->idle) {
 +#ifdef CONFIG_SMP
 + if (!take_other_rq_tasks(rq, cpu)) {
 +#endif
++
++#ifdef CONFIG_SCHED_SMT
++ if (likely(rq->online) &&
++ cpumask_test_cpu(cpu, sched_sg_idle_mask))
++ __queue_balance_callback(rq, &per_cpu(sg_balance_head, cpu));
++#endif
 + schedstat_inc(rq->sched_goidle);
 + /*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
 + return next;
@@ -5587,10 +5578,6 @@ index 000000000000..2745abb36a20
 + __balance_callbacks(rq);
 + raw_spin_unlock_irq(&rq->lock);
 + }
-+
-+#ifdef CONFIG_SCHED_SMT
-+ sg_balance(rq, cpu);
-+#endif
 +}
 +
 +void __noreturn do_task_dead(void)
@@ -7090,13 +7077,9 @@ index 000000000000..2745abb36a20
 + if (task_on_rq_queued(p))
 + requeue_task(p, rq, task_sched_prio_idx(p, rq));
 + } else if (rq->nr_running > 1) {
-+ if (1 == sched_yield_type) {
-+ do_sched_yield_type_1(p, rq);
-+ if (task_on_rq_queued(p))
-+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+ } else if (2 == sched_yield_type) {
-+ rq->skip = p;
-+ }
++ do_sched_yield_type_1(p, rq);
++ if (task_on_rq_queued(p))
++ requeue_task(p, rq, task_sched_prio_idx(p, rq));
 + }
 +
 + preempt_disable();
@@ -8156,7 +8139,7 @@ index 000000000000..2745abb36a20
 + if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
 + static_branch_dec_cpuslocked(&sched_smt_present);
 + if (!static_branch_likely(&sched_smt_present))
-+ cpumask_clear(&sched_sg_idle_mask);
++ cpumask_clear(sched_sg_idle_mask);
 + }
 +#endif
 +
@@ -8273,12 +8256,9 @@ index 000000000000..2745abb36a20
 + /* init topo masks */
 + tmp = per_cpu(sched_cpu_topo_masks, cpu);
 +
-+ cpumask_copy(tmp, cpumask_of(cpu));
-+ tmp++;
 + cpumask_copy(tmp, cpu_possible_mask);
 + per_cpu(sched_cpu_llc_mask, cpu) = tmp;
 + per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
-+ /*per_cpu(sd_llc_id, cpu) = cpu;*/
 + }
 +}
 +
@@ -8301,7 +8281,7 @@ index 000000000000..2745abb36a20
 + /* take chance to reset time slice for idle tasks */
 + cpu_rq(cpu)->idle->time_slice = sysctl_sched_base_slice;
 +
-+ topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++ topo = per_cpu(sched_cpu_topo_masks, cpu);
 +
 + bitmap_complement(cpumask_bits(topo), cpumask_bits(cpumask_of(cpu)),
 + nr_cpumask_bits);
@@ -8397,7 +8377,6 @@ index 000000000000..2745abb36a20
 +
 + sched_queue_init(&rq->queue);
 + rq->prio = IDLE_TASK_SCHED_PRIO;
-+ rq->skip = NULL;
 +
 + raw_spin_lock_init(&rq->lock);
 + rq->nr_running = rq->nr_uninterruptible = 0;
@@ -9670,10 +9649,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..e9bfc67e8165
+index 000000000000..3f8e156d768e
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,954 @@
+@@ -0,0 +1,974 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -9843,10 +9822,9 @@ index 000000000000..e9bfc67e8165
 + struct task_struct __rcu *curr;
 + struct task_struct *idle;
 + struct task_struct *stop;
-+ struct task_struct *skip;
 + struct mm_struct *prev_mm;
 +
-+ struct sched_queue queue;
++ struct sched_queue queue ____cacheline_aligned;
 +#ifdef CONFIG_SCHED_PDS
 + u64 time_edge;
 +#endif
@@ -9995,7 +9973,6 @@ index 000000000000..e9bfc67e8165
 +extern bool sched_smp_initialized;
 +
 +enum {
-+ ITSELF_LEVEL_SPACE_HOLDER,
 +#ifdef CONFIG_SCHED_SMT
 + SMT_LEVEL_SPACE_HOLDER,
 +#endif
@@ -10627,6 +10604,28 @@ index 000000000000..e9bfc67e8165
 +static inline void init_sched_mm_cid(struct task_struct *t) { }
 +#endif
 +
++#ifdef CONFIG_SMP
++extern struct balance_callback balance_push_callback;
++
++static inline void
++__queue_balance_callback(struct rq *rq,
++ struct balance_callback *head)
++{
++ lockdep_assert_rq_held(rq);
++
++ /*
++ * Don't (re)queue an already queued item; nor queue anything when
++ * balance_push() is active, see the comment with
++ * balance_push_callback.
++ */
++ if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
++ return;
++
++ head->next = rq->balance_callback;
++ rq->balance_callback = head;
++}
++#endif /* CONFIG_SMP */
++
 +#endif /* ALT_SCHED_H */
 diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
@@ -10957,7 +10956,7 @@ index 31231925f1ec..c087311032f2 100644
 +#endif
 diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
 new file mode 100644
-index 000000000000..c35dfb909f23
+index 000000000000..b2232a2cbb6d
 --- /dev/null
 +++ b/kernel/sched/pds.h
 @@ -0,0 +1,141 @@
@@ -11063,7 +11062,7 @@ index 000000000000..c35dfb909f23
 + list_for_each_entry(p, &head, sq_node)
 + p->sq_idx = idx;
 +
-+ list_splice(&head, rq->queue.heads + idx);
++ __list_splice(&head, rq->queue.heads + idx, rq->queue.heads[idx].next);
 + set_bit(MIN_SCHED_NORMAL_PRIO, normal);
 + }
 + bitmap_replace(rq->queue.bitmap, normal, rq->queue.bitmap,