linux516/517: Update Project C patchsets with upstream rebases from Alfred - https://gitlab.com/alfredchen/linux-prjc

author Tk-Glitch
date 2022-04-04 02:26:44 +02:00
parent 3cd937c080
commit e2d0ff4d6a
4 changed files with 262 additions and 244 deletions

View File

@@ -59,7 +59,7 @@ else
fi fi
pkgname=("${pkgbase}" "${pkgbase}-headers") pkgname=("${pkgbase}" "${pkgbase}-headers")
pkgver="${_basekernel}"."${_sub}" pkgver="${_basekernel}"."${_sub}"
pkgrel=250 pkgrel=251
pkgdesc='Linux-tkg' pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here arch=('x86_64') # no i686 in here
url="http://www.kernel.org/" url="http://www.kernel.org/"
@@ -657,7 +657,7 @@ case $_basever in
#0008-5.14-bcachefs.patch #0008-5.14-bcachefs.patch
0009-glitched-ondemand-bmq.patch 0009-glitched-ondemand-bmq.patch
0009-glitched-bmq.patch 0009-glitched-bmq.patch
0009-prjc_v5.16-r0.patch 0009-prjc_v5.16-r1.patch
#0012-linux-hardened.patch #0012-linux-hardened.patch
0012-misc-additions.patch 0012-misc-additions.patch
# MM Dirty Soft for WRITE_WATCH support in Wine # MM Dirty Soft for WRITE_WATCH support in Wine
@@ -681,7 +681,7 @@ case $_basever in
'f91223f98f132602a4fa525917a1f27afe30bdb55a1ac863e739c536188417b3' 'f91223f98f132602a4fa525917a1f27afe30bdb55a1ac863e739c536188417b3'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177' '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911' 'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
'7bd99d10ec9f834de95424d033f940f9531beb3a7b4d9711448f0ed66832c03d' 'ccf8d7dc78e92577f826f3e4d76453b1a873d41eb0df15528d117b25925b3f77'
#'decd4a55c0d47b1eb808733490cdfea1207a2022d46f06d04a3cc60fdcb3f32c' #'decd4a55c0d47b1eb808733490cdfea1207a2022d46f06d04a3cc60fdcb3f32c'
'1aa0a172e1e27fb8171053f3047dcf4a61bd2eda5ea18f02b2bb391741a69887' '1aa0a172e1e27fb8171053f3047dcf4a61bd2eda5ea18f02b2bb391741a69887'
'1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313' '1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313'
@@ -733,7 +733,7 @@ case $_basever in
'f91223f98f132602a4fa525917a1f27afe30bdb55a1ac863e739c536188417b3' 'f91223f98f132602a4fa525917a1f27afe30bdb55a1ac863e739c536188417b3'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177' '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911' 'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
'5d8aa3d707982e324d3ce8fcc5f832035d8155dc703f0125bbaa21cd87ce26f3' 'c62c73dac6bdb437b1b8e2153b10437fd6924bffca7cff2f8f3eb145e555d9d5'
#'decd4a55c0d47b1eb808733490cdfea1207a2022d46f06d04a3cc60fdcb3f32c' #'decd4a55c0d47b1eb808733490cdfea1207a2022d46f06d04a3cc60fdcb3f32c'
'1aa0a172e1e27fb8171053f3047dcf4a61bd2eda5ea18f02b2bb391741a69887' '1aa0a172e1e27fb8171053f3047dcf4a61bd2eda5ea18f02b2bb391741a69887'
'1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313' '1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313'

View File

@@ -517,6 +517,8 @@ _tkg_srcprep() {
rev=3 rev=3
elif [ "$_basever" = "515" ]; then elif [ "$_basever" = "515" ]; then
rev=1 rev=1
elif [ "$_basever" = "516" ]; then
rev=1
else else
rev=0 rev=0
fi fi

View File

@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2fba82431efb..654a29d94696 100644 index 391b3f9055fe..5d0e76e5a815 100644
--- a/Documentation/admin-guide/kernel-parameters.txt --- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5027,6 +5027,12 @@ @@ -5027,6 +5027,12 @@
@@ -176,34 +176,23 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
} }
diff --git a/include/linux/sched.h b/include/linux/sched.h diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78c351e35fec..c6746f5ec3f5 100644 index ee5ed8821963..61ee2514329a 100644
--- a/include/linux/sched.h --- a/include/linux/sched.h
+++ b/include/linux/sched.h +++ b/include/linux/sched.h
@@ -748,8 +748,14 @@ struct task_struct { @@ -748,7 +748,12 @@ struct task_struct {
unsigned int ptrace; unsigned int ptrace;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
- int on_cpu; + struct __call_single_node wake_entry;
struct __call_single_node wake_entry;
+#endif +#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT) +#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
+ int on_cpu; int on_cpu;
+#endif +#endif
+ +#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+#ifdef CONFIG_SMP struct __call_single_node wake_entry;
+#ifndef CONFIG_SCHED_ALT
unsigned int wakee_flips; unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts; unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee; @@ -771,6 +776,20 @@ struct task_struct {
@@ -763,6 +769,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
+#endif /* !CONFIG_SCHED_ALT */
#endif
int on_rq;
@@ -771,6 +778,20 @@ struct task_struct {
int normal_prio; int normal_prio;
unsigned int rt_priority; unsigned int rt_priority;
@@ -224,7 +213,7 @@ index 78c351e35fec..c6746f5ec3f5 100644
struct sched_entity se; struct sched_entity se;
struct sched_rt_entity rt; struct sched_rt_entity rt;
struct sched_dl_entity dl; struct sched_dl_entity dl;
@@ -781,6 +802,7 @@ struct task_struct { @@ -781,6 +800,7 @@ struct task_struct {
unsigned long core_cookie; unsigned long core_cookie;
unsigned int core_occupation; unsigned int core_occupation;
#endif #endif
@@ -232,7 +221,7 @@ index 78c351e35fec..c6746f5ec3f5 100644
#ifdef CONFIG_CGROUP_SCHED #ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group; struct task_group *sched_task_group;
@@ -1501,6 +1523,15 @@ struct task_struct { @@ -1501,6 +1521,15 @@ struct task_struct {
*/ */
}; };
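Taken together, the reshuffled guards in the task_struct hunks above follow one rule: a field is compiled in whenever at least one of the two schedulers needs it. A minimal sketch of the resulting layout (field names from the hunk; everything else simplified for illustration):

    /* Sketch only: on_cpu is shared by the stock scheduler and Project C,
     * while wake_entry and the wakee-tracking fields belong to the CFS
     * wakeup path alone and are compiled out under SCHED_ALT. */
    struct task_struct_sketch {
    #ifdef CONFIG_SMP
    	struct __call_single_node wake_entry;
    #endif
    #if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
    	int on_cpu;
    #endif
    #if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
    	unsigned int wakee_flips;
    	unsigned long wakee_flip_decay_ts;
    	struct task_struct *last_wakee;
    #endif
    };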
@@ -487,7 +476,7 @@ index ce77f0265660..3cccf8caa1be 100644
This option permits Core Scheduling, a means of coordinated task This option permits Core Scheduling, a means of coordinated task
selection across SMT siblings. When enabled -- see selection across SMT siblings. When enabled -- see
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index d0e163a02099..b5276a7a5d82 100644 index df62527f5e0b..556e69cdd44f 100644
--- a/kernel/cgroup/cpuset.c --- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c
@@ -682,7 +682,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) @@ -682,7 +682,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
@@ -643,10 +632,10 @@ index c7421f2d05e1..9b32442ff2ca 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644 new file mode 100644
index 000000000000..114bd1fd88eb index 000000000000..83407c4ee806
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_core.c +++ b/kernel/sched/alt_core.c
@@ -0,0 +1,7682 @@ @@ -0,0 +1,7701 @@
+/* +/*
+ * kernel/sched/alt_core.c + * kernel/sched/alt_core.c
+ * + *
@@ -677,7 +666,6 @@ index 000000000000..114bd1fd88eb
+#include <linux/kprobes.h> +#include <linux/kprobes.h>
+#include <linux/mmu_context.h> +#include <linux/mmu_context.h>
+#include <linux/nmi.h> +#include <linux/nmi.h>
+#include <linux/profile.h>
+#include <linux/rcupdate_wait.h> +#include <linux/rcupdate_wait.h>
+#include <linux/security.h> +#include <linux/security.h>
+#include <linux/syscalls.h> +#include <linux/syscalls.h>
@@ -716,7 +704,7 @@ index 000000000000..114bd1fd88eb
+#define sched_feat(x) (0) +#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */ +#endif /* CONFIG_SCHED_DEBUG */
+ +
+#define ALT_SCHED_VERSION "v5.15-r1" +#define ALT_SCHED_VERSION "v5.16-r1"
+ +
+/* rt_prio(prio) defined in include/linux/sched/rt.h */ +/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio) +#define rt_task(p) rt_prio((p)->prio)
@@ -1355,6 +1343,25 @@ index 000000000000..114bd1fd88eb
+ return task_on_rq_queued(p); + return task_on_rq_queued(p);
+} +}
+ +
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip = 0;
+ unsigned int state;
+
+ if (!p || p == current)
+ return 0;
+
+ /* Only get wchan if task is blocked and we can keep it that way. */
+ raw_spin_lock_irq(&p->pi_lock);
+ state = READ_ONCE(p->__state);
+ smp_rmb(); /* see try_to_wake_up() */
+ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+ ip = __get_wchan(p);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return ip;
+}
+
+/* +/*
+ * Add/Remove/Requeue task to/from the runqueue routines + * Add/Remove/Requeue task to/from the runqueue routines
+ * Context: rq->lock + * Context: rq->lock
@@ -1396,25 +1403,6 @@ index 000000000000..114bd1fd88eb
+ sched_update_tick_dependency(rq); + sched_update_tick_dependency(rq);
+} +}
+ +
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip = 0;
+ unsigned int state;
+
+ if (!p || p == current)
+ return 0;
+
+ /* Only get wchan if task is blocked and we can keep it that way. */
+ raw_spin_lock_irq(&p->pi_lock);
+ state = READ_ONCE(p->__state);
+ smp_rmb(); /* see try_to_wake_up() */
+ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+ ip = __get_wchan(p);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return ip;
+}
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) +static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{ +{
+ lockdep_assert_held(&rq->lock); + lockdep_assert_held(&rq->lock);
@@ -1982,6 +1970,7 @@ index 000000000000..114bd1fd88eb
+ * per-task data have been completed by this moment. + * per-task data have been completed by this moment.
+ */ + */
+ smp_wmb(); + smp_wmb();
+
+ WRITE_ONCE(task_thread_info(p)->cpu, cpu); + WRITE_ONCE(task_thread_info(p)->cpu, cpu);
+#endif +#endif
+} +}
@@ -2899,9 +2888,10 @@ index 000000000000..114bd1fd88eb
+ rq = this_rq(); + rq = this_rq();
+ +
+#ifdef CONFIG_SMP +#ifdef CONFIG_SMP
+ if (cpu == rq->cpu) + if (cpu == rq->cpu) {
+ __schedstat_inc(rq->ttwu_local); + __schedstat_inc(rq->ttwu_local);
+ else { + __schedstat_inc(p->stats.nr_wakeups_local);
+ } else {
+ /** Alt schedule FW ToDo: + /** Alt schedule FW ToDo:
+ * How to do ttwu_wake_remote + * How to do ttwu_wake_remote
+ */ + */
@@ -2909,6 +2899,7 @@ index 000000000000..114bd1fd88eb
+#endif /* CONFIG_SMP */ +#endif /* CONFIG_SMP */
+ +
+ __schedstat_inc(rq->ttwu_count); + __schedstat_inc(rq->ttwu_count);
+ __schedstat_inc(p->stats.nr_wakeups);
+} +}
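The extra p->stats.nr_wakeups* bumps lean on the schedstat helpers, which compile to nothing when CONFIG_SCHEDSTATS is off, so ttwu_stat() stays free of #ifdef clutter. A sketch of the assumed macro shape (paraphrased from the stock kernel/sched/stats.h, not from this patch):

    #ifdef CONFIG_SCHEDSTATS
    /* Unconditional counterpart of schedstat_inc(): no static-key check. */
    # define __schedstat_inc(var)	do { (var)++; } while (0)
    #else
    # define __schedstat_inc(var)	do { } while (0)
    #endif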
+ +
+/* +/*
@@ -3099,7 +3090,7 @@ index 000000000000..114bd1fd88eb
+ raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags);
+ if (is_idle_task(rq->curr)) + if (is_idle_task(rq->curr))
+ resched_curr(rq); + resched_curr(rq);
+ /* Else CPU is not idle, do nothing here: */ + /* Else CPU is not idle, do nothing here */
+ raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags);
+ +
+out: +out:
@@ -3519,9 +3510,9 @@ index 000000000000..114bd1fd88eb
+ +
+ /* + /*
+ * At this point the task is pinned; either: + * At this point the task is pinned; either:
+ * - blocked and we're holding off wakeups (pi->lock) + * - blocked and we're holding off wakeups (pi->lock)
+ * - woken, and we're holding off enqueue (rq->lock) + * - woken, and we're holding off enqueue (rq->lock)
+ * - queued, and we're holding off schedule (rq->lock) + * - queued, and we're holding off schedule (rq->lock)
+ * - running, and we're holding off de-schedule (rq->lock) + * - running, and we're holding off de-schedule (rq->lock)
+ * + *
+ * The called function (@func) can use: task_curr(), p->on_rq and + * The called function (@func) can use: task_curr(), p->on_rq and
@@ -3572,6 +3563,11 @@ index 000000000000..114bd1fd88eb
+ p->stime = 0; + p->stime = 0;
+ p->sched_time = 0; + p->sched_time = 0;
+ +
+#ifdef CONFIG_SCHEDSTATS
+ /* Even if schedstat is disabled, there should not be garbage */
+ memset(&p->stats, 0, sizeof(p->stats));
+#endif
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS +#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers); + INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif +#endif
@@ -3637,11 +3633,8 @@ index 000000000000..114bd1fd88eb
+ struct rq *rq; + struct rq *rq;
+ +
+ /* + /*
+ * The child is not yet in the pid-hash so no cgroup attach races, + * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * and the cgroup is pinned to this child due to cgroup_fork() + * required yet, but lockdep gets upset if rules are violated.
+ * is ran before sched_fork().
+ *
+ * Silence PROVE_RCU.
+ */ + */
+ raw_spin_lock_irqsave(&p->pi_lock, flags); + raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /* + /*
@@ -3676,9 +3669,6 @@ index 000000000000..114bd1fd88eb
+ +
+void sched_post_fork(struct task_struct *p) +void sched_post_fork(struct task_struct *p)
+{ +{
+#ifdef CONFIG_UCLAMP_TASK
+ uclamp_post_fork(p);
+#endif
+} +}
+ +
+#ifdef CONFIG_SCHEDSTATS +#ifdef CONFIG_SCHEDSTATS
@@ -6859,9 +6849,7 @@ index 000000000000..114bd1fd88eb
+ +
+ if (spin_needbreak(lock) || resched) { + if (spin_needbreak(lock) || resched) {
+ spin_unlock(lock); + spin_unlock(lock);
+ if (resched) + if (!_cond_resched())
+ preempt_schedule_common();
+ else
+ cpu_relax(); + cpu_relax();
+ ret = 1; + ret = 1;
+ spin_lock(lock); + spin_lock(lock);
@@ -6879,9 +6867,7 @@ index 000000000000..114bd1fd88eb
+ +
+ if (rwlock_needbreak(lock) || resched) { + if (rwlock_needbreak(lock) || resched) {
+ read_unlock(lock); + read_unlock(lock);
+ if (resched) + if (!_cond_resched())
+ preempt_schedule_common();
+ else
+ cpu_relax(); + cpu_relax();
+ ret = 1; + ret = 1;
+ read_lock(lock); + read_lock(lock);
@@ -6899,9 +6885,7 @@ index 000000000000..114bd1fd88eb
+ +
+ if (rwlock_needbreak(lock) || resched) { + if (rwlock_needbreak(lock) || resched) {
+ write_unlock(lock); + write_unlock(lock);
+ if (resched) + if (!_cond_resched())
+ preempt_schedule_common();
+ else
+ cpu_relax(); + cpu_relax();
+ ret = 1; + ret = 1;
+ write_lock(lock); + write_lock(lock);
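The three hunks above converge the spin/read/write variants on one shape: _cond_resched() returns nonzero only if it actually rescheduled, so cpu_relax() becomes the fallback and the old preempt_schedule_common() branch disappears. Condensed into a single sketch (names as used in the diff):

    /* Sketch of the common pattern across __cond_resched_lock() and the
     * rwlock read/write variants after this change. */
    int __cond_resched_lock_sketch(spinlock_t *lock)
    {
    	int resched = should_resched(PREEMPT_LOCK_OFFSET);
    	int ret = 0;

    	if (spin_needbreak(lock) || resched) {
    		spin_unlock(lock);
    		if (!_cond_resched())	/* nothing ran: at least be polite */
    			cpu_relax();
    		ret = 1;
    		spin_lock(lock);
    	}
    	return ret;
    }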
@@ -7917,12 +7901,6 @@ index 000000000000..114bd1fd88eb
+} +}
+ +
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+static inline int preempt_count_equals(int preempt_offset)
+{
+ int nested = preempt_count() + rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+}
+ +
+void __might_sleep(const char *file, int line) +void __might_sleep(const char *file, int line)
+{ +{
@@ -7942,7 +7920,28 @@ index 000000000000..114bd1fd88eb
+} +}
+EXPORT_SYMBOL(__might_sleep); +EXPORT_SYMBOL(__might_sleep);
+ +
+void __might_resched(const char *file, int line, int preempt_offset) +static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ return;
+
+ if (preempt_count() == preempt_offset)
+ return;
+
+ pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, ip);
+}
+
+static inline bool resched_offsets_ok(unsigned int offsets)
+{
+ unsigned int nested = preempt_count();
+
+ nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
+
+ return nested == offsets;
+}
+
+void __might_resched(const char *file, int line, unsigned int offsets)
+{ +{
+ /* Ratelimiting timestamp: */ + /* Ratelimiting timestamp: */
+ static unsigned long prev_jiffy; + static unsigned long prev_jiffy;
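The new offsets argument packs two expectations into one word: the allowed preempt_count in the low bits and the allowed RCU nesting depth above MIGHT_RESCHED_RCU_SHIFT, which is exactly what resched_offsets_ok() checks. A worked example (constants assumed to match the upstream __might_resched() rework this rebase tracks):

    #define MIGHT_RESCHED_RCU_SHIFT		8
    #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

    /* "preempt disabled once, inside one rcu_read_lock() section": */
    unsigned int offsets = 1 | (1 << MIGHT_RESCHED_RCU_SHIFT);

    /* resched_offsets_ok() then compares
     *   preempt_count() + (rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)
     * against offsets, and the pr_err lines below decode the two halves. */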
@@ -7952,7 +7951,7 @@ index 000000000000..114bd1fd88eb
+ /* WARN_ON_ONCE() by default, no rate limit required: */ + /* WARN_ON_ONCE() by default, no rate limit required: */
+ rcu_sleep_check(); + rcu_sleep_check();
+ +
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && + if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
+ !is_idle_task(current) && !current->non_block_count) || + !is_idle_task(current) && !current->non_block_count) ||
+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || + system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+ oops_in_progress) + oops_in_progress)
@@ -7969,6 +7968,13 @@ index 000000000000..114bd1fd88eb
+ pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", + pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), current->non_block_count, + in_atomic(), irqs_disabled(), current->non_block_count,
+ current->pid, current->comm); + current->pid, current->comm);
+ pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
+ offsets & MIGHT_RESCHED_PREEMPT_MASK);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
+ pr_err("RCU nest depth: %d, expected: %u\n",
+ rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
+ }
+ +
+ if (task_stack_end_corrupted(current)) + if (task_stack_end_corrupted(current))
+ pr_emerg("Thread overran stack, or stack corrupted\n"); + pr_emerg("Thread overran stack, or stack corrupted\n");
@@ -7976,12 +7982,10 @@ index 000000000000..114bd1fd88eb
+ debug_show_held_locks(current); + debug_show_held_locks(current);
+ if (irqs_disabled()) + if (irqs_disabled())
+ print_irqtrace_events(current); + print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT +
+ if (!preempt_count_equals(preempt_offset)) { + print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
+ pr_err("Preemption disabled at:"); + preempt_disable_ip);
+ print_ip_sym(KERN_ERR, preempt_disable_ip); +
+ }
+#endif
+ dump_stack(); + dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+} +}
@@ -8068,6 +8072,10 @@ index 000000000000..114bd1fd88eb
+ if (p->flags & PF_KTHREAD) + if (p->flags & PF_KTHREAD)
+ continue; + continue;
+ +
+ schedstat_set(p->stats.wait_start, 0);
+ schedstat_set(p->stats.sleep_start, 0);
+ schedstat_set(p->stats.block_start, 0);
+
+ if (!rt_task(p)) { + if (!rt_task(p)) {
+ /* + /*
+ * Renice negative nice level userspace + * Renice negative nice level userspace
@@ -8139,9 +8147,9 @@ index 000000000000..114bd1fd88eb
+ kmem_cache_free(task_group_cache, tg); + kmem_cache_free(task_group_cache, tg);
+} +}
+ +
+static void sched_free_group_rcu(struct rcu_head *rcu) +static void sched_free_group_rcu(struct rcu_head *rhp)
+{ +{
+ sched_free_group(container_of(rcu, struct task_group, rcu)); + sched_free_group(container_of(rhp, struct task_group, rcu));
+} +}
+ +
+static void sched_unregister_group(struct task_group *tg) +static void sched_unregister_group(struct task_group *tg)
@@ -8172,13 +8180,13 @@ index 000000000000..114bd1fd88eb
+/* rcu callback to free various structures associated with a task group */ +/* rcu callback to free various structures associated with a task group */
+static void sched_unregister_group_rcu(struct rcu_head *rhp) +static void sched_unregister_group_rcu(struct rcu_head *rhp)
+{ +{
+ /* Now it should be safe to free those cfs_rqs */ + /* Now it should be safe to free those cfs_rqs: */
+ sched_unregister_group(container_of(rhp, struct task_group, rcu)); + sched_unregister_group(container_of(rhp, struct task_group, rcu));
+} +}
+ +
+void sched_destroy_group(struct task_group *tg) +void sched_destroy_group(struct task_group *tg)
+{ +{
+ /* Wait for possible concurrent references to cfs_rqs complete */ + /* Wait for possible concurrent references to cfs_rqs complete: */
+ call_rcu(&tg->rcu, sched_unregister_group_rcu); + call_rcu(&tg->rcu, sched_unregister_group_rcu);
+} +}
+ +
@@ -8368,10 +8376,10 @@ index 000000000000..1212a031700e
+{} +{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644 new file mode 100644
index 000000000000..e78324687f6e index 000000000000..f2b9e686d6a6
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_sched.h +++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,661 @@ @@ -0,0 +1,667 @@
+#ifndef ALT_SCHED_H +#ifndef ALT_SCHED_H
+#define ALT_SCHED_H +#define ALT_SCHED_H
+ +
@@ -8405,6 +8413,7 @@ index 000000000000..e78324687f6e
+#include <linux/livepatch.h> +#include <linux/livepatch.h>
+#include <linux/membarrier.h> +#include <linux/membarrier.h>
+#include <linux/proc_fs.h> +#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/psi.h> +#include <linux/psi.h>
+#include <linux/slab.h> +#include <linux/slab.h>
+#include <linux/stop_machine.h> +#include <linux/stop_machine.h>
@@ -8721,6 +8730,11 @@ index 000000000000..e78324687f6e
+} +}
+#endif +#endif
+ +
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+ return READ_ONCE(rq->clock);
+}
+
+static inline u64 rq_clock(struct rq *rq) +static inline u64 rq_clock(struct rq *rq)
+{ +{
+ /* + /*
@@ -9199,7 +9213,7 @@ index e7af18857371..3e38816b736e 100644
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9392aea1804e..c1ead972e498 100644 index b7ec42732b28..a855594a540f 100644
--- a/kernel/sched/cputime.c --- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c
@@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime) @@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
@@ -9495,7 +9509,7 @@ index a554e3bbab2b..3e56f5e6ff5c 100644
* thermal: * thermal:
* *
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index e06071bf3472..adf567df34d4 100644 index c336f5f481bc..5865f14714a9 100644
--- a/kernel/sched/pelt.h --- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h
@@ -1,13 +1,15 @@ @@ -1,13 +1,15 @@
@@ -9515,15 +9529,15 @@ index e06071bf3472..adf567df34d4 100644
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
static inline u64 thermal_load_avg(struct rq *rq) static inline u64 thermal_load_avg(struct rq *rq)
@@ -42,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) @@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return LOAD_AVG_MAX - 1024 + avg->period_contrib; return PELT_MIN_DIVIDER + avg->period_contrib;
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline void cfs_se_util_change(struct sched_avg *avg) static inline void cfs_se_util_change(struct sched_avg *avg)
{ {
unsigned int enqueued; unsigned int enqueued;
@@ -153,9 +156,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) @@ -155,9 +158,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq)); return rq_clock_pelt(rq_of(cfs_rq));
} }
#endif #endif
@@ -9535,7 +9549,7 @@ index e06071bf3472..adf567df34d4 100644
static inline int static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{ {
@@ -173,6 +178,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) @@ -175,6 +180,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{ {
return 0; return 0;
} }
@@ -9571,22 +9585,6 @@ diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 07dde2928c79..6a6edc730dce 100644 index 07dde2928c79..6a6edc730dce 100644
--- a/kernel/sched/stats.c --- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c +++ b/kernel/sched/stats.c
@@ -4,6 +4,7 @@
*/
#include "sched.h"
+#ifndef CONFIG_SCHED_ALT
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
struct sched_statistics *stats)
{
@@ -90,6 +90,7 @@
}
}
+#endif
/*
* Current schedstat API version.
*
@@ -126,8 +126,10 @@ static int show_schedstat(struct seq_file *seq, void *v) @@ -126,8 +126,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
} else { } else {
struct rq *rq; struct rq *rq;
@@ -9615,27 +9613,27 @@ index 07dde2928c79..6a6edc730dce 100644
} }
return 0; return 0;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index cfb0893a83d4..4fb593535447 100644 index 3a3c826dd83a..d80520eca556 100644
--- a/kernel/sched/stats.h --- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h +++ b/kernel/sched/stats.h
@@ -94,6 +94,7 @@ struct sched_entity_stats { @@ -87,6 +87,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
} __no_randomize_layout;
#endif #endif /* CONFIG_SCHEDSTATS */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline struct sched_statistics * #ifdef CONFIG_FAIR_GROUP_SCHED
__schedstats_from_se(struct sched_entity *se) struct sched_entity_stats {
{ struct sched_entity se;
@@ -103,6 +104,7 @@ __schedstats_from_se(struct sched_entity *se) @@ -103,6 +104,7 @@ __schedstats_from_se(struct sched_entity *se)
#endif #endif
return &task_of(se)->stats; return &task_of(se)->stats;
} }
+#endif +#endif /* CONFIG_SCHED_ALT */
#ifdef CONFIG_PSI #ifdef CONFIG_PSI
/* /*
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a7052a29..163cec668095 100644 index d201a7052a29..e5a7a638f3fb 100644
--- a/kernel/sched/topology.c --- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c +++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@ @@ -4,6 +4,7 @@
@@ -9673,7 +9671,7 @@ index d201a7052a29..163cec668095 100644
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu) static const struct cpumask *sd_numa_mask(int cpu)
@@ -2531,3 +2536,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], @@ -2531,3 +2536,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex); mutex_unlock(&sched_domains_mutex);
} }
@@ -9683,6 +9681,8 @@ index d201a7052a29..163cec668095 100644
+{} +{}
+ +
+#ifdef CONFIG_NUMA +#ifdef CONFIG_NUMA
+int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
+
+int sched_numa_find_closest(const struct cpumask *cpus, int cpu) +int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{ +{
+ return best_mask_cpu(cpu, cpus); + return best_mask_cpu(cpu, cpus);
@@ -9690,7 +9690,7 @@ index d201a7052a29..163cec668095 100644
+#endif /* CONFIG_NUMA */ +#endif /* CONFIG_NUMA */
+#endif +#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 083be6af29d7..09fc6281d488 100644 index 0586047f7323..e4bc1eacd184 100644
--- a/kernel/sysctl.c --- a/kernel/sysctl.c
+++ b/kernel/sysctl.c +++ b/kernel/sysctl.c
@@ -122,6 +122,10 @@ static unsigned long long_max = LONG_MAX; @@ -122,6 +122,10 @@ static unsigned long long_max = LONG_MAX;
@@ -9704,7 +9704,7 @@ index 083be6af29d7..09fc6281d488 100644
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
static int ten_thousand = 10000; static int ten_thousand = 10000;
#endif #endif
@@ -1771,6 +1775,24 @@ int proc_do_static_key(struct ctl_table *table, int write, @@ -1778,6 +1782,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
} }
static struct ctl_table kern_table[] = { static struct ctl_table kern_table[] = {
@@ -9729,7 +9729,7 @@ index 083be6af29d7..09fc6281d488 100644
{ {
.procname = "sched_child_runs_first", .procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first, .data = &sysctl_sched_child_runs_first,
@@ -1901,6 +1923,7 @@ static struct ctl_table kern_table[] = { @@ -1908,6 +1930,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE, .extra2 = SYSCTL_ONE,
}, },
#endif #endif
@@ -9737,7 +9737,7 @@ index 083be6af29d7..09fc6281d488 100644
#ifdef CONFIG_PROVE_LOCKING #ifdef CONFIG_PROVE_LOCKING
{ {
.procname = "prove_locking", .procname = "prove_locking",
@@ -2477,6 +2500,17 @@ static struct ctl_table kern_table[] = { @@ -2484,6 +2507,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
#endif #endif
@@ -9831,10 +9831,10 @@ index 96b4e7810426..83457e8bb5d2 100644
return false; return false;
} }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index afd937a46496..7fac2e43d668 100644 index abcadbe933bb..d4c778b0ab0e 100644
--- a/kernel/trace/trace_selftest.c --- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c
@@ -1142,10 +1142,15 @@ static int trace_wakeup_test_thread(void *data) @@ -1140,10 +1140,15 @@ static int trace_wakeup_test_thread(void *data)
{ {
/* Make this a -deadline thread */ /* Make this a -deadline thread */
static const struct sched_attr attr = { static const struct sched_attr attr = {

View File

@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f5a27f067db9..90c934ec13cc 100644 index 7123524a86b8..c9878f85c176 100644
--- a/Documentation/admin-guide/kernel-parameters.txt --- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5085,6 +5085,12 @@ @@ -5085,6 +5085,12 @@
@@ -176,34 +176,23 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
} }
diff --git a/include/linux/sched.h b/include/linux/sched.h diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75ba8aa60248..3de388cb6923 100644 index 75ba8aa60248..6da339d69619 100644
--- a/include/linux/sched.h --- a/include/linux/sched.h
+++ b/include/linux/sched.h +++ b/include/linux/sched.h
@@ -753,8 +753,14 @@ struct task_struct { @@ -753,7 +753,12 @@ struct task_struct {
unsigned int ptrace; unsigned int ptrace;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
- int on_cpu; + struct __call_single_node wake_entry;
struct __call_single_node wake_entry;
+#endif +#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT) +#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
+ int on_cpu; int on_cpu;
+#endif +#endif
+ +#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+#ifdef CONFIG_SMP struct __call_single_node wake_entry;
+#ifndef CONFIG_SCHED_ALT
unsigned int wakee_flips; unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts; unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee; @@ -776,6 +781,20 @@ struct task_struct {
@@ -768,6 +774,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
+#endif /* !CONFIG_SCHED_ALT */
#endif
int on_rq;
@@ -776,6 +783,20 @@ struct task_struct {
int normal_prio; int normal_prio;
unsigned int rt_priority; unsigned int rt_priority;
@@ -224,7 +213,7 @@ index 75ba8aa60248..3de388cb6923 100644
struct sched_entity se; struct sched_entity se;
struct sched_rt_entity rt; struct sched_rt_entity rt;
struct sched_dl_entity dl; struct sched_dl_entity dl;
@@ -786,6 +807,7 @@ struct task_struct { @@ -786,6 +805,7 @@ struct task_struct {
unsigned long core_cookie; unsigned long core_cookie;
unsigned int core_occupation; unsigned int core_occupation;
#endif #endif
@@ -232,7 +221,7 @@ index 75ba8aa60248..3de388cb6923 100644
#ifdef CONFIG_CGROUP_SCHED #ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group; struct task_group *sched_task_group;
@@ -1509,6 +1531,15 @@ struct task_struct { @@ -1509,6 +1529,15 @@ struct task_struct {
*/ */
}; };
@@ -352,20 +341,12 @@ index 8054641c0a7b..284687d47059 100644
#else #else
static inline void rebuild_sched_domains_energy(void) static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig diff --git a/init/Kconfig b/init/Kconfig
index e9119bf54b1f..2213c306065e 100644 index e9119bf54b1f..6be3308a3665 100644
--- a/init/Kconfig --- a/init/Kconfig
+++ b/init/Kconfig +++ b/init/Kconfig
@@ -817,6 +817,7 @@ menu "Scheduler features" @@ -814,9 +814,39 @@ config GENERIC_SCHED_CLOCK
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
+ depends on !SCHED_ALT
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -863,6 +864,35 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value. menu "Scheduler features"
+menuconfig SCHED_ALT +menuconfig SCHED_ALT
+ bool "Alternative CPU Schedulers" + bool "Alternative CPU Schedulers"
@@ -396,9 +377,13 @@ index e9119bf54b1f..2213c306065e 100644
+ +
+endif +endif
+ +
endmenu config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
# depends on CPU_FREQ_GOV_SCHEDUTIL
+ depends on !SCHED_ALT
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -907,6 +937,7 @@ config NUMA_BALANCING @@ -907,6 +937,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
@@ -647,10 +632,10 @@ index c83b37af155b..c88e9aab0cb3 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644 new file mode 100644
index 000000000000..c52650a6e72e index 000000000000..6338a97b429e
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_core.c +++ b/kernel/sched/alt_core.c
@@ -0,0 +1,7680 @@ @@ -0,0 +1,7704 @@
+/* +/*
+ * kernel/sched/alt_core.c + * kernel/sched/alt_core.c
+ * + *
@@ -681,7 +666,6 @@ index 000000000000..c52650a6e72e
+#include <linux/kprobes.h> +#include <linux/kprobes.h>
+#include <linux/mmu_context.h> +#include <linux/mmu_context.h>
+#include <linux/nmi.h> +#include <linux/nmi.h>
+#include <linux/profile.h>
+#include <linux/rcupdate_wait.h> +#include <linux/rcupdate_wait.h>
+#include <linux/security.h> +#include <linux/security.h>
+#include <linux/syscalls.h> +#include <linux/syscalls.h>
@@ -720,7 +704,7 @@ index 000000000000..c52650a6e72e
+#define sched_feat(x) (0) +#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */ +#endif /* CONFIG_SCHED_DEBUG */
+ +
+#define ALT_SCHED_VERSION "v5.15-r1" +#define ALT_SCHED_VERSION "v5.17-r0"
+ +
+/* rt_prio(prio) defined in include/linux/sched/rt.h */ +/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio) +#define rt_task(p) rt_prio((p)->prio)
@@ -1359,6 +1343,25 @@ index 000000000000..c52650a6e72e
+ return task_on_rq_queued(p); + return task_on_rq_queued(p);
+} +}
+ +
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip = 0;
+ unsigned int state;
+
+ if (!p || p == current)
+ return 0;
+
+ /* Only get wchan if task is blocked and we can keep it that way. */
+ raw_spin_lock_irq(&p->pi_lock);
+ state = READ_ONCE(p->__state);
+ smp_rmb(); /* see try_to_wake_up() */
+ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+ ip = __get_wchan(p);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return ip;
+}
+
+/* +/*
+ * Add/Remove/Requeue task to/from the runqueue routines + * Add/Remove/Requeue task to/from the runqueue routines
+ * Context: rq->lock + * Context: rq->lock
@@ -1400,25 +1403,6 @@ index 000000000000..c52650a6e72e
+ sched_update_tick_dependency(rq); + sched_update_tick_dependency(rq);
+} +}
+ +
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip = 0;
+ unsigned int state;
+
+ if (!p || p == current)
+ return 0;
+
+ /* Only get wchan if task is blocked and we can keep it that way. */
+ raw_spin_lock_irq(&p->pi_lock);
+ state = READ_ONCE(p->__state);
+ smp_rmb(); /* see try_to_wake_up() */
+ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+ ip = __get_wchan(p);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return ip;
+}
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) +static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{ +{
+ lockdep_assert_held(&rq->lock); + lockdep_assert_held(&rq->lock);
@@ -1986,6 +1970,7 @@ index 000000000000..c52650a6e72e
+ * per-task data have been completed by this moment. + * per-task data have been completed by this moment.
+ */ + */
+ smp_wmb(); + smp_wmb();
+
+ WRITE_ONCE(task_thread_info(p)->cpu, cpu); + WRITE_ONCE(task_thread_info(p)->cpu, cpu);
+#endif +#endif
+} +}
@@ -2094,6 +2079,9 @@ index 000000000000..c52650a6e72e
+{ +{
+ struct task_struct *p = current; + struct task_struct *p = current;
+ +
+ if (0 == p->migration_disabled)
+ return;
+
+ if (p->migration_disabled > 1) { + if (p->migration_disabled > 1) {
+ p->migration_disabled--; + p->migration_disabled--;
+ return; + return;
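The early return added here makes the migrate-enable path tolerant of unbalanced calls: migration_disabled is a nesting counter, and popping it at depth zero now becomes a no-op rather than an underflow. Sketched out (illustrative, following the hunk):

    static void migrate_enable_sketch(struct task_struct *p)
    {
    	if (p->migration_disabled == 0)
    		return;			/* unbalanced enable: ignore */

    	if (p->migration_disabled > 1) {
    		p->migration_disabled--;	/* still nested */
    		return;
    	}

    	/* depth 1 -> 0: actually re-allow migration here */
    	p->migration_disabled = 0;
    }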
@@ -2903,9 +2891,10 @@ index 000000000000..c52650a6e72e
+ rq = this_rq(); + rq = this_rq();
+ +
+#ifdef CONFIG_SMP +#ifdef CONFIG_SMP
+ if (cpu == rq->cpu) + if (cpu == rq->cpu) {
+ __schedstat_inc(rq->ttwu_local); + __schedstat_inc(rq->ttwu_local);
+ else { + __schedstat_inc(p->stats.nr_wakeups_local);
+ } else {
+ /** Alt schedule FW ToDo: + /** Alt schedule FW ToDo:
+ * How to do ttwu_wake_remote + * How to do ttwu_wake_remote
+ */ + */
@@ -2913,6 +2902,7 @@ index 000000000000..c52650a6e72e
+#endif /* CONFIG_SMP */ +#endif /* CONFIG_SMP */
+ +
+ __schedstat_inc(rq->ttwu_count); + __schedstat_inc(rq->ttwu_count);
+ __schedstat_inc(p->stats.nr_wakeups);
+} +}
+ +
+/* +/*
@@ -3103,7 +3093,7 @@ index 000000000000..c52650a6e72e
+ raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags);
+ if (is_idle_task(rq->curr)) + if (is_idle_task(rq->curr))
+ resched_curr(rq); + resched_curr(rq);
+ /* Else CPU is not idle, do nothing here: */ + /* Else CPU is not idle, do nothing here */
+ raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags);
+ +
+out: +out:
@@ -3523,9 +3513,9 @@ index 000000000000..c52650a6e72e
+ +
+ /* + /*
+ * At this point the task is pinned; either: + * At this point the task is pinned; either:
+ * - blocked and we're holding off wakeups (pi->lock) + * - blocked and we're holding off wakeups (pi->lock)
+ * - woken, and we're holding off enqueue (rq->lock) + * - woken, and we're holding off enqueue (rq->lock)
+ * - queued, and we're holding off schedule (rq->lock) + * - queued, and we're holding off schedule (rq->lock)
+ * - running, and we're holding off de-schedule (rq->lock) + * - running, and we're holding off de-schedule (rq->lock)
+ * + *
+ * The called function (@func) can use: task_curr(), p->on_rq and + * The called function (@func) can use: task_curr(), p->on_rq and
@@ -3576,6 +3566,11 @@ index 000000000000..c52650a6e72e
+ p->stime = 0; + p->stime = 0;
+ p->sched_time = 0; + p->sched_time = 0;
+ +
+#ifdef CONFIG_SCHEDSTATS
+ /* Even if schedstat is disabled, there should not be garbage */
+ memset(&p->stats, 0, sizeof(p->stats));
+#endif
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS +#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers); + INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif +#endif
@@ -3630,9 +3625,6 @@ index 000000000000..c52650a6e72e
+ if (unlikely(sched_info_on())) + if (unlikely(sched_info_on()))
+ memset(&p->sched_info, 0, sizeof(p->sched_info)); + memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif +#endif
+#if defined(CONFIG_SMP)
+ p->on_cpu = 0;
+#endif
+ init_task_preempt_count(p); + init_task_preempt_count(p);
+ +
+ return 0; + return 0;
@@ -3644,11 +3636,8 @@ index 000000000000..c52650a6e72e
+ struct rq *rq; + struct rq *rq;
+ +
+ /* + /*
+ * The child is not yet in the pid-hash so no cgroup attach races, + * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * and the cgroup is pinned to this child due to cgroup_fork() + * required yet, but lockdep gets upset if rules are violated.
+ * is ran before sched_fork().
+ *
+ * Silence PROVE_RCU.
+ */ + */
+ raw_spin_lock_irqsave(&p->pi_lock, flags); + raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /* + /*
@@ -3683,9 +3672,6 @@ index 000000000000..c52650a6e72e
+ +
+void sched_post_fork(struct task_struct *p) +void sched_post_fork(struct task_struct *p)
+{ +{
+#ifdef CONFIG_UCLAMP_TASK
+ uclamp_post_fork(p);
+#endif
+} +}
+ +
+#ifdef CONFIG_SCHEDSTATS +#ifdef CONFIG_SCHEDSTATS
@@ -7266,7 +7252,6 @@ index 000000000000..c52650a6e72e
+ +
+ rq->idle = idle; + rq->idle = idle;
+ rcu_assign_pointer(rq->curr, idle); + rcu_assign_pointer(rq->curr, idle);
+ idle->on_rq = TASK_ON_RQ_QUEUED;
+ idle->on_cpu = 1; + idle->on_cpu = 1;
+ +
+ raw_spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock);
@@ -7919,12 +7904,6 @@ index 000000000000..c52650a6e72e
+} +}
+ +
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+static inline int preempt_count_equals(int preempt_offset)
+{
+ int nested = preempt_count() + rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+}
+ +
+void __might_sleep(const char *file, int line) +void __might_sleep(const char *file, int line)
+{ +{
@@ -7944,7 +7923,28 @@ index 000000000000..c52650a6e72e
+} +}
+EXPORT_SYMBOL(__might_sleep); +EXPORT_SYMBOL(__might_sleep);
+ +
+void __might_resched(const char *file, int line, int preempt_offset) +static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ return;
+
+ if (preempt_count() == preempt_offset)
+ return;
+
+ pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, ip);
+}
+
+static inline bool resched_offsets_ok(unsigned int offsets)
+{
+ unsigned int nested = preempt_count();
+
+ nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
+
+ return nested == offsets;
+}
+
+void __might_resched(const char *file, int line, unsigned int offsets)
+{ +{
+ /* Ratelimiting timestamp: */ + /* Ratelimiting timestamp: */
+ static unsigned long prev_jiffy; + static unsigned long prev_jiffy;
@@ -7954,7 +7954,7 @@ index 000000000000..c52650a6e72e
+ /* WARN_ON_ONCE() by default, no rate limit required: */ + /* WARN_ON_ONCE() by default, no rate limit required: */
+ rcu_sleep_check(); + rcu_sleep_check();
+ +
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && + if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
+ !is_idle_task(current) && !current->non_block_count) || + !is_idle_task(current) && !current->non_block_count) ||
+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || + system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+ oops_in_progress) + oops_in_progress)
@@ -7971,6 +7971,13 @@ index 000000000000..c52650a6e72e
+ pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", + pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), current->non_block_count, + in_atomic(), irqs_disabled(), current->non_block_count,
+ current->pid, current->comm); + current->pid, current->comm);
+ pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
+ offsets & MIGHT_RESCHED_PREEMPT_MASK);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
+ pr_err("RCU nest depth: %d, expected: %u\n",
+ rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
+ }
+ +
+ if (task_stack_end_corrupted(current)) + if (task_stack_end_corrupted(current))
+ pr_emerg("Thread overran stack, or stack corrupted\n"); + pr_emerg("Thread overran stack, or stack corrupted\n");
@@ -7978,12 +7985,10 @@ index 000000000000..c52650a6e72e
+ debug_show_held_locks(current); + debug_show_held_locks(current);
+ if (irqs_disabled()) + if (irqs_disabled())
+ print_irqtrace_events(current); + print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT +
+ if (!preempt_count_equals(preempt_offset)) { + print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
+ pr_err("Preemption disabled at:"); + preempt_disable_ip);
+ print_ip_sym(KERN_ERR, preempt_disable_ip); +
+ }
+#endif
+ dump_stack(); + dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+} +}
@@ -8070,6 +8075,10 @@ index 000000000000..c52650a6e72e
+ if (p->flags & PF_KTHREAD) + if (p->flags & PF_KTHREAD)
+ continue; + continue;
+ +
+ schedstat_set(p->stats.wait_start, 0);
+ schedstat_set(p->stats.sleep_start, 0);
+ schedstat_set(p->stats.block_start, 0);
+
+ if (!rt_task(p)) { + if (!rt_task(p)) {
+ /* + /*
+ * Renice negative nice level userspace + * Renice negative nice level userspace
@@ -8141,9 +8150,9 @@ index 000000000000..c52650a6e72e
+ kmem_cache_free(task_group_cache, tg); + kmem_cache_free(task_group_cache, tg);
+} +}
+ +
+static void sched_free_group_rcu(struct rcu_head *rcu) +static void sched_free_group_rcu(struct rcu_head *rhp)
+{ +{
+ sched_free_group(container_of(rcu, struct task_group, rcu)); + sched_free_group(container_of(rhp, struct task_group, rcu));
+} +}
+ +
+static void sched_unregister_group(struct task_group *tg) +static void sched_unregister_group(struct task_group *tg)
@@ -8174,13 +8183,13 @@ index 000000000000..c52650a6e72e
+/* rcu callback to free various structures associated with a task group */ +/* rcu callback to free various structures associated with a task group */
+static void sched_unregister_group_rcu(struct rcu_head *rhp) +static void sched_unregister_group_rcu(struct rcu_head *rhp)
+{ +{
+ /* Now it should be safe to free those cfs_rqs */ + /* Now it should be safe to free those cfs_rqs: */
+ sched_unregister_group(container_of(rhp, struct task_group, rcu)); + sched_unregister_group(container_of(rhp, struct task_group, rcu));
+} +}
+ +
+void sched_destroy_group(struct task_group *tg) +void sched_destroy_group(struct task_group *tg)
+{ +{
+ /* Wait for possible concurrent references to cfs_rqs complete */ + /* Wait for possible concurrent references to cfs_rqs complete: */
+ call_rcu(&tg->rcu, sched_unregister_group_rcu); + call_rcu(&tg->rcu, sched_unregister_group_rcu);
+} +}
+ +
@@ -8370,10 +8379,10 @@ index 000000000000..1212a031700e
+{} +{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644 new file mode 100644
index 000000000000..6ff979a299ab index 000000000000..f2b9e686d6a6
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_sched.h +++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,662 @@ @@ -0,0 +1,667 @@
+#ifndef ALT_SCHED_H +#ifndef ALT_SCHED_H
+#define ALT_SCHED_H +#define ALT_SCHED_H
+ +
@@ -8724,6 +8733,11 @@ index 000000000000..6ff979a299ab
+} +}
+#endif +#endif
+ +
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+ return READ_ONCE(rq->clock);
+}
+
+static inline u64 rq_clock(struct rq *rq) +static inline u64 rq_clock(struct rq *rq)
+{ +{
+ /* + /*
@@ -9602,27 +9616,27 @@ index 07dde2928c79..6a6edc730dce 100644
} }
return 0; return 0;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 3a3c826dd83a..39df2b235944 100644 index 3a3c826dd83a..d80520eca556 100644
--- a/kernel/sched/stats.h --- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h +++ b/kernel/sched/stats.h
@@ -94,6 +94,7 @@ struct sched_entity_stats { @@ -87,6 +87,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
} __no_randomize_layout;
#endif #endif /* CONFIG_SCHEDSTATS */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline struct sched_statistics * #ifdef CONFIG_FAIR_GROUP_SCHED
__schedstats_from_se(struct sched_entity *se) struct sched_entity_stats {
{ struct sched_entity se;
@@ -103,6 +104,7 @@ __schedstats_from_se(struct sched_entity *se) @@ -103,6 +104,7 @@ __schedstats_from_se(struct sched_entity *se)
#endif #endif
return &task_of(se)->stats; return &task_of(se)->stats;
} }
+#endif +#endif /* CONFIG_SCHED_ALT */
#ifdef CONFIG_PSI #ifdef CONFIG_PSI
/* /*
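Widening this guard is the recurring move throughout both patches: anything keyed on CFS's struct sched_entity cannot build under Project C, which replaces the entity hierarchy outright. For reference, the helper being fenced off looks like this upstream (shape partially visible in the hunk; the container_of() dispatch is reproduced from memory of the stock header, so treat it as a sketch):

    #ifndef CONFIG_SCHED_ALT
    #ifdef CONFIG_FAIR_GROUP_SCHED
    struct sched_entity_stats {
    	struct sched_entity	se;
    	struct sched_statistics	stats;
    } __no_randomize_layout;
    #endif

    static inline struct sched_statistics *
    __schedstats_from_se(struct sched_entity *se)
    {
    #ifdef CONFIG_FAIR_GROUP_SCHED
    	/* Group entities are not tasks; their stats sit beside the se. */
    	if (!entity_is_task(se))
    		return &container_of(se, struct sched_entity_stats, se)->stats;
    #endif
    	return &task_of(se)->stats;
    }
    #endif /* CONFIG_SCHED_ALT */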
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a7052a29..163cec668095 100644 index d201a7052a29..e5a7a638f3fb 100644
--- a/kernel/sched/topology.c --- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c +++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@ @@ -4,6 +4,7 @@
@@ -9660,7 +9674,7 @@ index d201a7052a29..163cec668095 100644
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu) static const struct cpumask *sd_numa_mask(int cpu)
@@ -2531,3 +2536,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], @@ -2531,3 +2536,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex); mutex_unlock(&sched_domains_mutex);
} }
@@ -9670,6 +9684,8 @@ index d201a7052a29..163cec668095 100644
+{} +{}
+ +
+#ifdef CONFIG_NUMA +#ifdef CONFIG_NUMA
+int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
+
+int sched_numa_find_closest(const struct cpumask *cpus, int cpu) +int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{ +{
+ return best_mask_cpu(cpu, cpus); + return best_mask_cpu(cpu, cpus);
@@ -9677,21 +9693,21 @@ index d201a7052a29..163cec668095 100644
+#endif /* CONFIG_NUMA */ +#endif /* CONFIG_NUMA */
+#endif +#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5ae443b2882e..7bb4e033cae6 100644 index 730ab56d9e92..f2fdf9088055 100644
--- a/kernel/sysctl.c --- a/kernel/sysctl.c
+++ b/kernel/sysctl.c +++ b/kernel/sysctl.c
@@ -94,6 +94,10 @@ @@ -96,6 +96,10 @@
#if defined(CONFIG_SYSCTL) /* Constants used for minimum and maximum */
+#ifdef CONFIG_SCHED_ALT +#ifdef CONFIG_SCHED_ALT
+extern int sched_yield_type; +extern int sched_yield_type;
+#endif +#endif
+ +
#ifdef CONFIG_USER_NS #ifdef CONFIG_PERF_EVENTS
extern int unprivileged_userns_clone; static const int six_hundred_forty_kb = 640 * 1024;
#endif #endif
@@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write, @@ -1659,6 +1663,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
} }
static struct ctl_table kern_table[] = { static struct ctl_table kern_table[] = {
@@ -9716,7 +9732,7 @@ index 5ae443b2882e..7bb4e033cae6 100644
{ {
.procname = "sched_child_runs_first", .procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first, .data = &sysctl_sched_child_runs_first,
@@ -1782,6 +1804,7 @@ static struct ctl_table kern_table[] = { @@ -1789,6 +1811,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE, .extra2 = SYSCTL_ONE,
}, },
#endif #endif
@@ -9724,7 +9740,7 @@ index 5ae443b2882e..7bb4e033cae6 100644
#ifdef CONFIG_PROVE_LOCKING #ifdef CONFIG_PROVE_LOCKING
{ {
.procname = "prove_locking", .procname = "prove_locking",
@@ -2167,6 +2190,17 @@ static struct ctl_table kern_table[] = { @@ -2174,6 +2197,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
#endif #endif
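The page truncates the bodies of the added kern_table[] entries, so the following sketch of one Project C knob is hypothetical throughout: the procname, mode, bounds, and handler are assumptions for illustration, not copied from the patch; only sched_yield_type itself is declared in the hunk above.

    static struct ctl_table sched_alt_sysctls_sketch[] = {
    	{
    		.procname	= "yield_type",
    		.data		= &sched_yield_type,
    		.maxlen		= sizeof(int),
    		.mode		= 0644,
    		.proc_handler	= proc_dointvec,
    	},
    	{ }
    };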