From cf70e42101b642f7c66a44f52e7a1fe3c0228be0 Mon Sep 17 00:00:00 2001
From: Tk-Glitch
Date: Tue, 12 Jul 2022 23:36:39 +0200
Subject: [PATCH] linux518: Update Project C to v5.18-r2

https://gitlab.com/alfredchen/linux-prjc/-/commits/linux-5.18.y-prjc
---
 PKGBUILD                                      |   6 +-
 linux-tkg-config/prepare                      |   2 +-
 ...5.18-r1.patch => 0009-prjc_v5.18-r2.patch} | 283 ++++++++----------
 3 files changed, 136 insertions(+), 155 deletions(-)
 rename linux-tkg-patches/5.18/{0009-prjc_v5.18-r1.patch => 0009-prjc_v5.18-r2.patch} (99%)

diff --git a/PKGBUILD b/PKGBUILD
index 67c8a50..95628eb 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -59,7 +59,7 @@ else
 fi
 pkgname=("${pkgbase}" "${pkgbase}-headers")
 pkgver="${_basekernel}"."${_sub}"
-pkgrel=263
+pkgrel=264
 pkgdesc='Linux-tkg'
 arch=('x86_64') # no i686 in here
 url="https://www.kernel.org/"
@@ -761,7 +761,7 @@ case $_basever in
         0008-5.18-bcachefs.patch
         0009-glitched-ondemand-bmq.patch
         0009-glitched-bmq.patch
-        0009-prjc_v5.18-r1.patch
+        0009-prjc_v5.18-r2.patch
         #0012-linux-hardened.patch
         0012-misc-additions.patch
         # MM Dirty Soft for WRITE_WATCH support in Wine
@@ -785,7 +785,7 @@ case $_basever in
            '86aab236478c138b2e88d64b84edf550bafbc4e06ab330ce0ffa7f2a9f5bab85'
            '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
            'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
-           '766658d5ec9cf204635f735a8927854991d0133b2e34bdcd9ca36d7e34817e27'
+           'bb8a2daf56a513701895f67dc0e6cbde153481fcd0557906af45523d24aa9f76'
            '428517fbcb161a640b53782000c16f797c2ad27cf2758e7e56133fc62d2d113b'
            '1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313'
            'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6')

diff --git a/linux-tkg-config/prepare b/linux-tkg-config/prepare
index 8906a12..09e2c38 100644
--- a/linux-tkg-config/prepare
+++ b/linux-tkg-config/prepare
@@ -532,7 +532,7 @@ _tkg_srcprep() {
   elif [ "$_basever" = "516" ]; then
     rev=1
   elif [ "$_basever" = "518" ]; then
-    rev=1
+    rev=2
   else
     rev=0
   fi

diff --git a/linux-tkg-patches/5.18/0009-prjc_v5.18-r1.patch b/linux-tkg-patches/5.18/0009-prjc_v5.18-r2.patch
similarity index 99%
rename from linux-tkg-patches/5.18/0009-prjc_v5.18-r1.patch
rename to linux-tkg-patches/5.18/0009-prjc_v5.18-r2.patch
index 3c6a2a8..6dba709 100644
--- a/linux-tkg-patches/5.18/0009-prjc_v5.18-r1.patch
+++ b/linux-tkg-patches/5.18/0009-prjc_v5.18-r2.patch
@@ -5,7 +5,7 @@ index 3f1cc5e317ed..e6f88a16732b 100644
 @@ -5164,6 +5164,12 @@
 	sa1100ir	[NET]
 			See drivers/net/irda/sa1100_ir.c.
- 
+
 +	sched_timeslice=
 +			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
 +			Format: integer 2, 4
 +			default: 4
 +			See Documentation/scheduler/sched-BMQ.txt
 +
 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
- 
+
 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
 index 1144ea3229a3..2accee67d6fb 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
 @@ -1517,3 +1517,13 @@ is 10 seconds.
- 
+
 The softlockup threshold is (``2 * watchdog_thresh``). Setting this
 tunable to zero will disable lockup detection altogether.
 +
@@ -161,7 +161,7 @@ index c1031843cc6a..f2b0af41a3eb 100644
 +		(unsigned long long)tsk_seruntime(task),
 		(unsigned long long)task->sched_info.run_delay,
 		task->sched_info.pcount);
 
 diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
 index 8874f681b056..59eb72bf7d5f 100644
 --- a/include/asm-generic/resource.h
 +++ b/include/asm-generic/resource.h
@@ -181,7 +181,7 @@ index a8911b1f35aa..7a4bf3a0db5a 100644
 +++ b/include/linux/sched.h
 @@ -753,8 +753,14 @@ struct task_struct {
 	unsigned int			ptrace;
 
 #ifdef CONFIG_SMP
-	int				on_cpu;
 	struct __call_single_node	wake_entry;
@@ -202,11 +202,11 @@ index a8911b1f35aa..7a4bf3a0db5a 100644
 +#endif /* !CONFIG_SCHED_ALT */
 #endif
 	int				on_rq;
 
 @@ -776,6 +783,20 @@ struct task_struct {
 	int				normal_prio;
 	unsigned int			rt_priority;
 
 +#ifdef CONFIG_SCHED_ALT
 +	u64				last_ran;
 +	s64				time_slice;
@@ -229,13 +229,13 @@ index a8911b1f35aa..7a4bf3a0db5a 100644
 	unsigned int			core_occupation;
 #endif
 +#endif /* !CONFIG_SCHED_ALT */
 
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group		*sched_task_group;
 @@ -1516,6 +1538,15 @@ struct task_struct {
 	 */
 };
 
 +#ifdef CONFIG_SCHED_ALT
 +#define tsk_seruntime(t)	((t)->sched_time)
 +/* replace the uncertain rt_timeout with 0UL */
@@ -254,7 +254,7 @@ index 7c83d4d5a971..fa30f98cb2be 100644
 +++ b/include/linux/sched/deadline.h
 @@ -1,5 +1,24 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
 +#ifdef CONFIG_SCHED_ALT
 +
 +static inline int dl_task(struct task_struct *p)
@@ -282,7 +282,7 @@ index 7c83d4d5a971..fa30f98cb2be 100644
 	return dl_prio(p->prio);
 }
 +#endif /* CONFIG_SCHED_ALT */
 
 static inline bool dl_time_before(u64 a, u64 b)
 {
 diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
@@ -292,7 +292,7 @@ index ab83d85e1183..6af9ae681116 100644
 @@ -18,6 +18,32 @@
 #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
 #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
 
 +#ifdef CONFIG_SCHED_ALT
 +
 +/* Undefine MAX_PRIO and DEFAULT_PRIO */
@@ -327,7 +327,7 @@ index e5af028c08b4..0a7565d0d3cf 100644
 --- a/include/linux/sched/rt.h
 +++ b/include/linux/sched/rt.h
 @@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
 
 	if (policy == SCHED_FIFO || policy == SCHED_RR)
 		return true;
 +#ifndef CONFIG_SCHED_ALT
@@ -336,15 +336,15 @@ index e5af028c08b4..0a7565d0d3cf 100644
 +#endif
 	return false;
 }
 
 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
 index 56cffe42abbc..e020fc572b22 100644
 --- a/include/linux/sched/topology.h
 +++ b/include/linux/sched/topology.h
 @@ -233,7 +233,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 
 #endif	/* !CONFIG_SMP */
 
 -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
 +	!defined(CONFIG_SCHED_ALT)
@@ -364,9 +364,9 @@ index ddcbefe535e9..85616423dc94 100644
 	  This feature enables the scheduler to track the clamped utilization
 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
 @@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT
 
 	  If in doubt, use the default value.
- 
+
 +menuconfig SCHED_ALT
 +	bool "Alternative CPU Schedulers"
 +	default y
@@ -397,7 +397,7 @@ index ddcbefe535e9..85616423dc94 100644
 +endif
 +
 endmenu
 
 #
 @@ -911,6 +941,7 @@ config NUMA_BALANCING
 	depends on ARCH_SUPPORTS_NUMA_BALANCING
@@ -410,7 +410,7 @@ index ddcbefe535e9..85616423dc94 100644
 @@ -1003,6 +1034,7 @@ config FAIR_GROUP_SCHED
 	depends on CGROUP_SCHED
 	default CGROUP_SCHED
 
 +if !SCHED_ALT
 config CFS_BANDWIDTH
 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
@@ -418,13 +418,13 @@ index ddcbefe535e9..85616423dc94 100644
 @@ -1025,6 +1057,7 @@ config RT_GROUP_SCHED
 	  realtime bandwidth for them.
 	  See Documentation/scheduler/sched-rt-group.rst for more information.
 
 +endif #!SCHED_ALT
 endif #CGROUP_SCHED
 
 config UCLAMP_TASK_GROUP
 @@ -1268,6 +1301,7 @@ config CHECKPOINT_RESTORE
 
 config SCHED_AUTOGROUP
 	bool "Automatic process group scheduling"
 +	depends on !SCHED_ALT
@@ -482,7 +482,7 @@ index c2f1fd95a821..41654679b1b2 100644
 --- a/kernel/Kconfig.preempt
 +++ b/kernel/Kconfig.preempt
 @@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
 
 config SCHED_CORE
 	bool "Core Scheduling for SMT"
 -	depends on SCHED_SMT
@@ -497,7 +497,7 @@ index 71a418858a5e..7e3016873db1 100644
 @@ -704,7 +704,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	return ret;
 }
 
 -#ifdef CONFIG_SMP
 +#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
 /*
@@ -522,9 +522,9 @@ index c5e8cea9e05f..8e90b2a3667a 100644
 	t2 = tsk->sched_info.run_delay;
 -	t3 = tsk->se.sum_exec_runtime;
 +	t3 = tsk_seruntime(tsk);
 
 	d->cpu_count += t1;
 
 diff --git a/kernel/exit.c b/kernel/exit.c
 index f072959fcab7..da97095a2997 100644
 --- a/kernel/exit.c
@@ -532,11 +532,11 @@ index f072959fcab7..da97095a2997 100644
 @@ -124,7 +124,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->curr_target = next_thread(tsk);
 	}
 
 -	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 +	add_device_randomness((const void*) &tsk_seruntime(tsk),
 			      sizeof(unsigned long long));
 
 	/*
 @@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->inblock += task_io_get_inblock(tsk);
@@ -558,14 +558,14 @@ index 8555c4efe97c..a2b3bd3fd85c 100644
 -	waiter->deadline = task->dl.deadline;
 +	waiter->deadline = __tsk_deadline(task);
 }
 
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
 #define task_to_waiter(p)	\
 -	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
 +	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
 
 static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
 						struct rt_mutex_waiter *right)
 {
@@ -574,7 +574,7 @@ index 8555c4efe97c..a2b3bd3fd85c 100644
 +#else
 	if (left->prio < right->prio)
 		return 1;
 
 +#ifndef CONFIG_SCHED_BMQ
 	/*
 	 * If both waiters have dl_prio(), we check the deadlines of the
@@ -584,11 +584,11 @@ index 8555c4efe97c..a2b3bd3fd85c 100644
 	if (dl_prio(left->prio))
 		return dl_time_before(left->deadline, right->deadline);
 +#endif
 
 	return 0;
 +#endif
 }
 
 static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 						 struct rt_mutex_waiter *right)
 {
@@ -597,7 +597,7 @@ index 8555c4efe97c..a2b3bd3fd85c 100644
 +#else
 	if (left->prio != right->prio)
 		return 0;
 
 +#ifndef CONFIG_SCHED_BMQ
 	/*
 	 * If both waiters have dl_prio(), we check the deadlines of the
@@ -607,11 +607,11 @@ index 8555c4efe97c..a2b3bd3fd85c 100644
 	if (dl_prio(left->prio))
 		return left->deadline == right->deadline;
 +#endif
 
 	return 1;
 +#endif
 }
 
 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
 diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
 index 976092b7bd45..31d587c16ec1 100644
@@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
 obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..a466a05301b8
+index 000000000000..b8e67d568e17
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7768 @@
+@@ -0,0 +1,7750 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -679,11 +679,11 @@ index 000000000000..a466a05301b8
 +
 +#include "sched.h"
 +
++#include "pelt.h"
++
 +#include "../../fs/io-wq.h"
 +#include "../smpboot.h"
 +
-+#include "pelt.h"
-+
 +/*
 + * Export tracepoints that act as a bare tracehook (ie: have no trace event
 + * associated with them) to allow external modules to probe them.
@@ -705,7 +705,7 @@ index 000000000000..a466a05301b8
 +#define sched_feat(x)	(0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v5.18-r1"
++#define ALT_SCHED_VERSION "v5.18-r2"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -785,14 +785,14 @@ index 000000000000..a466a05301b8
 +#ifdef CONFIG_SCHED_SMT
 +static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
 +#endif
-+static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
 +
 +/* sched_queue related functions */
 +static inline void sched_queue_init(struct sched_queue *q)
 +{
 +	int i;
 +
-+	bitmap_zero(q->bitmap, SCHED_BITS);
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
 +	for(i = 0; i < SCHED_BITS; i++)
 +		INIT_LIST_HEAD(&q->heads[i]);
 +}
@@ -824,7 +824,7 @@ index 000000000000..a466a05301b8
 +	cpu = cpu_of(rq);
 +	if (watermark < last_wm) {
 +		for (i = last_wm; i > watermark; i--)
-+			cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++			cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
 +#ifdef CONFIG_SCHED_SMT
 +		if (static_branch_likely(&sched_smt_present) &&
 +		    IDLE_TASK_SCHED_PRIO == last_wm)
@@ -835,7 +835,7 @@ index 000000000000..a466a05301b8
 +	}
 +	/* last_wm < watermark */
 +	for (i = watermark; i > last_wm; i--)
-+		cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++		cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
 +#ifdef CONFIG_SCHED_SMT
 +	if (static_branch_likely(&sched_smt_present) &&
 +	    IDLE_TASK_SCHED_PRIO == watermark) {
@@ -2543,7 +2543,7 @@ index 000000000000..a466a05301b8
 +#endif
 +	    cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
 +	    cpumask_and(&tmp, &chk_mask,
-+			sched_rq_watermark + SCHED_BITS - task_sched_prio(p)))
++			sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
 +		return best_mask_cpu(task_cpu(p), &tmp);
 +
 +	return best_mask_cpu(task_cpu(p), &chk_mask);
@@ -4334,24 +4334,6 @@ index 000000000000..a466a05301b8
 + */
 +void sched_exec(void)
 +{
-+	struct task_struct *p = current;
-+	unsigned long flags;
-+	int dest_cpu;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	dest_cpu = cpumask_any(p->cpus_ptr);
-+	if (dest_cpu == smp_processor_id())
-+		goto unlock;
-+
-+	if (likely(cpu_active(dest_cpu))) {
-+		struct migration_arg arg = { p, dest_cpu };
-+
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-+		return;
-+	}
-+unlock:
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +}
 +
 +#endif
@@ -4519,7 +4501,7 @@ index 000000000000..a466a05301b8
 +}
 +
 +#ifdef CONFIG_SCHED_SMT
-+static inline int active_load_balance_cpu_stop(void *data)
++static inline int sg_balance_cpu_stop(void *data)
 +{
 +	struct rq *rq = this_rq();
 +	struct task_struct *p = data;
@@ -4570,15 +4552,15 @@ index 000000000000..b8e67d568e17
 +	raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
 +	if (res)
-+		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
-+				    curr, &rq->active_balance_work);
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
 +	return res;
 +}
 +
 +/*
-+ * sg_balance_check - sibling group balance check for run queue @rq
++ * sg_balance - sibling group balance check for run queue @rq
 + */
-+static inline void sg_balance_check(struct rq *rq)
++static inline void sg_balance(struct rq *rq)
 +{
 +	cpumask_t chk;
 +	int cpu = cpu_of(rq);
@@ -5243,7 +5225,7 @@ index 000000000000..b8e67d568e17
 +	}
 +
 +#ifdef CONFIG_SCHED_SMT
-+	sg_balance_check(rq);
++	sg_balance(rq);
 +#endif
 +}
 +
@@ -7884,7 +7866,7 @@ index 000000000000..b8e67d568e17
 +	wait_bit_init();
 +
 +#ifdef CONFIG_SMP
-+	for (i = 0; i < SCHED_BITS; i++)
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
 +		cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
 +#endif
 +
@@ -9094,10 +9076,10 @@ index 000000000000..611424bbfa9b
 +#endif /* ALT_SCHED_H */
 diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
-index 000000000000..bf7ac80ec242
+index 000000000000..66b77291b9d0
 --- /dev/null
 +++ b/kernel/sched/bmq.h
-@@ -0,0 +1,111 @@
+@@ -0,0 +1,110 @@
 +#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
 +
 +/*
@@ -9185,8 +9167,7 @@ index 000000000000..bf7ac80ec242
 +
 +static void sched_task_fork(struct task_struct *p, struct rq *rq)
 +{
-+	p->boost_prio = (p->boost_prio < 0) ?
-+		p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
++	p->boost_prio = MAX_PRIORITY_ADJ;
 +}
 +
 +static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
@@ -9214,23 +9195,23 @@ index e0104b45029a..5eb28f1fdd74 100644
 --- a/kernel/sched/build_policy.c
 +++ b/kernel/sched/build_policy.c
 @@ -40,13 +40,19 @@
 
 #include "idle.c"
 
 +#ifndef CONFIG_SCHED_ALT
 #include "rt.c"
 +#endif
 
 #ifdef CONFIG_SMP
 +#ifndef CONFIG_SCHED_ALT
 # include "cpudeadline.c"
 +#endif
 # include "pelt.c"
 #endif
 
 #include "cputime.c"
 -#include "deadline.c"
 
 +#ifndef CONFIG_SCHED_ALT
 +#include "deadline.c"
 +#endif
@@ -9239,7 +9220,7 @@ index eec0849b2aae..880f4f819d77 100644
 --- a/kernel/sched/build_utility.c
 +++ b/kernel/sched/build_utility.c
 @@ -84,7 +84,9 @@
 
 #ifdef CONFIG_SMP
 # include "cpupri.c"
 +#ifndef CONFIG_SCHED_ALT
@@ -9247,14 +9228,14 @@ index eec0849b2aae..880f4f819d77 100644
 +#endif
 # include "topology.c"
 #endif
 
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
 index 3dbf351d12d5..b2590f961139 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -160,9 +160,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
 
 	sg_cpu->max = max;
 +#ifndef CONFIG_SCHED_ALT
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
@@ -9265,7 +9246,7 @@ index 3dbf351d12d5..b2590f961139 100644
 +	sg_cpu->util = rq_load_util(rq, max);
 +#endif /* CONFIG_SCHED_ALT */
 }
 
 /**
 @@ -306,8 +311,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
  */
@@ -9276,11 +9257,11 @@ index 3dbf351d12d5..b2590f961139 100644
 	sg_cpu->sg_policy->limits_changed = true;
 +#endif
 }
 
 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
 @@ -607,6 +614,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 }
 
 	ret = sched_setattr_nocheck(thread, &attr);
 +	if (ret) {
@@ -9295,7 +9276,7 @@ index 3dbf351d12d5..b2590f961139 100644
 +#endif /* CONFIG_SCHED_ALT */
 }
 static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
 
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
 index 78a233d43757..b3bbc87d4352 100644
 --- a/kernel/sched/cputime.c
@@ -9303,15 +9284,15 @@ index 78a233d43757..b3bbc87d4352 100644
 @@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
 	p->utime += cputime;
 	account_group_user_time(p, cputime);
 
 -	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 +	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
 
 	/* Add user time to cpustat. */
 	task_group_account_field(p, index, cputime);
 @@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
 	p->gtime += cputime;
 
 	/* Add guest time to cpustat. */
 -	if (task_nice(p) > 0) {
 +	if (task_running_nice(p)) {
@@ -9329,12 +9310,12 @@ index 78a233d43757..b3bbc87d4352 100644
 static u64 read_sum_exec_runtime(struct task_struct *t)
 @@ -279,7 +279,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 	struct rq *rq;
 
 	rq = task_rq_lock(t, &rf);
 -	ns = t->se.sum_exec_runtime;
 +	ns = tsk_seruntime(t);
 	task_rq_unlock(rq, t, &rf);
 
 	return ns;
 @@ -611,7 +611,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
@@ -9343,7 +9324,7 @@ index 78a233d43757..b3bbc87d4352 100644
 -		.sum_exec_runtime = p->se.sum_exec_runtime,
 +		.sum_exec_runtime = tsk_seruntime(p),
 	};
 
 	if (task_cputime(p, &cputime.utime, &cputime.stime))
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
 index bb3d63bdf4ae..4e1680785704 100644
@@ -9352,39 +9333,39 @@ index bb3d63bdf4ae..4e1680785704 100644
 @@ -7,6 +7,7 @@
  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
  */
 
 +#ifndef CONFIG_SCHED_ALT
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
 @@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
 };
 
 #endif /* SMP */
 +#endif /* !CONFIG_SCHED_ALT */
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 @@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
 
 #endif /* CONFIG_PREEMPT_DYNAMIC */
 
 +#ifndef CONFIG_SCHED_ALT
 __read_mostly bool sched_debug_verbose;
 
 static const struct seq_operations sched_debug_sops;
 @@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
 +#endif /* !CONFIG_SCHED_ALT */
 
 static struct dentry *debugfs_sched;
 
 @@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
 
 	debugfs_sched = debugfs_create_dir("sched", NULL);
 
 +#ifndef CONFIG_SCHED_ALT
 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
@@ -9392,31 +9373,31 @@ index bb3d63bdf4ae..4e1680785704 100644
 #ifdef CONFIG_PREEMPT_DYNAMIC
 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 #endif
 
 +#ifndef CONFIG_SCHED_ALT
 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
 @@ -336,11 +343,13 @@ static __init int sched_init_debug(void)
 #endif
 
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); +#endif /* !CONFIG_SCHED_ALT */ - + return 0; } late_initcall(sched_init_debug); - + +#ifndef CONFIG_SCHED_ALT #ifdef CONFIG_SMP - + static cpumask_var_t sd_sysctl_cpus; @@ -1067,6 +1076,7 @@ void proc_sched_set_task(struct task_struct *p) memset(&p->stats, 0, sizeof(p->stats)); #endif } +#endif /* !CONFIG_SCHED_ALT */ - + void resched_latency_warn(int cpu, u64 latency) { diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c @@ -9426,7 +9407,7 @@ index ecb0d7052877..000c0d87de78 100644 @@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state) do_idle(); } - + +#ifndef CONFIG_SCHED_ALT /* * idle-task scheduling class. @@ -9576,17 +9557,17 @@ index 0f310768260c..bd38bf738fe9 100644 @@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) WRITE_ONCE(sa->util_avg, sa->util_sum / divider); } - + +#ifndef CONFIG_SCHED_ALT /* * sched_entity: * @@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) - + return 0; } +#endif - + -#ifdef CONFIG_SCHED_THERMAL_PRESSURE +#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) /* @@ -9599,7 +9580,7 @@ index c336f5f481bc..5865f14714a9 100644 @@ -1,13 +1,15 @@ #ifdef CONFIG_SMP #include "sched-pelt.h" - + +#ifndef CONFIG_SCHED_ALT int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); @@ -9607,16 +9588,16 @@ index c336f5f481bc..5865f14714a9 100644 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); +#endif - + -#ifdef CONFIG_SCHED_THERMAL_PRESSURE +#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); - + static inline u64 thermal_load_avg(struct rq *rq) @@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) return PELT_MIN_DIVIDER + avg->period_contrib; } - + +#ifndef CONFIG_SCHED_ALT static inline void cfs_se_util_change(struct sched_avg *avg) { @@ -9626,9 +9607,9 @@ index c336f5f481bc..5865f14714a9 100644 } #endif +#endif /* CONFIG_SCHED_ALT */ - + #else - + +#ifndef CONFIG_SCHED_ALT static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) @@ -9638,7 +9619,7 @@ index c336f5f481bc..5865f14714a9 100644 return 0; } +#endif - + static inline int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h @@ -9648,7 +9629,7 @@ index 8dccb34eb190..bb3598e0ba5d 100644 @@ -5,6 +5,10 @@ #ifndef _KERNEL_SCHED_SCHED_H #define _KERNEL_SCHED_SCHED_H - + +#ifdef CONFIG_SCHED_ALT +#include "alt_sched.h" +#else @@ -9659,7 +9640,7 @@ index 8dccb34eb190..bb3598e0ba5d 100644 @@ -3087,4 +3091,9 @@ extern int sched_dynamic_mode(const char *str); extern void sched_dynamic_update(int mode); #endif - + +static inline int task_running_nice(struct task_struct *p) +{ + return (task_nice(p) > 0); @@ -9683,7 +9664,7 @@ index 857f837f52cb..5486c63e4790 100644 rq = cpu_rq(cpu); @@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v) seq_printf(seq, "\n"); - + #ifdef CONFIG_SMP +#ifndef CONFIG_SCHED_ALT /* domain-specific stats */ @@ -9702,9 +9683,9 @@ index baa839c1ba96..15238be0581b 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt - + #endif /* 
 
 #endif /* CONFIG_SCHEDSTATS */
 
 +#ifndef CONFIG_SCHED_ALT
 #ifdef CONFIG_FAIR_GROUP_SCHED
 struct sched_entity_stats {
@@ -9714,7 +9695,7 @@ index baa839c1ba96..15238be0581b 100644
 	return &task_of(se)->stats;
 }
 +#endif /* CONFIG_SCHED_ALT */
 
 #ifdef CONFIG_PSI
 /*
 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
@@ -9724,37 +9705,37 @@ index 810750e62118..f2cdbb696dba 100644
 @@ -3,6 +3,7 @@
  * Scheduler topology setup/handling methods
  */
 
 +#ifndef CONFIG_SCHED_ALT
 DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
 @@ -1392,8 +1393,10 @@ static void asym_cpu_capacity_scan(void)
  */
 
 static int default_relax_domain_level = -1;
 +#endif /* CONFIG_SCHED_ALT */
 int sched_domain_level_max;
 
 +#ifndef CONFIG_SCHED_ALT
 static int __init setup_relax_domain_level(char *str)
 {
 	if (kstrtoint(str, 0, &default_relax_domain_level))
 @@ -1626,6 +1629,7 @@ sd_init(struct sched_domain_topology_level *tl,
 
 	return sd;
 }
 +#endif /* CONFIG_SCHED_ALT */
 
 /*
  * Topology list, bottom-up.
 @@ -1662,6 +1666,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
 	sched_domain_topology_saved = NULL;
 }
 
 +#ifndef CONFIG_SCHED_ALT
 #ifdef CONFIG_NUMA
 
 static const struct cpumask *sd_numa_mask(int cpu)
 @@ -2617,3 +2622,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
@@ -9777,9 +9758,9 @@ index 830aaf8ca08e..7ad676d5ae3b 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
 @@ -96,6 +96,10 @@
 
 /* Constants used for minimum and maximum */
 
 +#ifdef CONFIG_SCHED_ALT
 +extern int sched_yield_type;
 +#endif
@@ -9789,7 +9770,7 @@ index 830aaf8ca08e..7ad676d5ae3b 100644
 #endif
 @@ -1659,6 +1663,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
 }
 
 static struct ctl_table kern_table[] = {
 +#ifdef CONFIG_SCHED_ALT
 +/* In ALT, only supported "sched_schedstats" */
@@ -9845,13 +9826,13 @@ index 0ea8702eb516..a27a0f3a654d 100644
 @@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
 	int ret = 0;
 	u64 slack;
 
 +#ifndef CONFIG_SCHED_ALT
 	slack = current->timer_slack_ns;
 	if (dl_task(current) || rt_task(current))
 +#endif
 		slack = 0;
 
 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
 index 0a97193984db..e32235cdc3b1 100644
 --- a/kernel/time/posix-cpu-timers.c
 +++ b/kernel/time/posix-cpu-timers.c
 @@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
 	u64 stime, utime;
 
 	task_cputime(p, &utime, &stime);
 -	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
 +	store_samples(samples, stime, utime, tsk_seruntime(p));
 }
 
 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
 @@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
 	}
 }
 
 +#ifndef CONFIG_SCHED_ALT
 static inline void check_dl_overrun(struct task_struct *tsk)
 {
@@ -9879,18 +9860,18 @@ index 0a97193984db..e32235cdc3b1 100644
 	}
 }
 +#endif
 
 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
 {
 @@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	u64 samples[CPUCLOCK_MAX];
 	unsigned long soft;
 
 +#ifndef CONFIG_SCHED_ALT
 	if (dl_task(tsk))
 		check_dl_overrun(tsk);
 +#endif
 
 	if (expiry_cache_is_inactive(pct))
 		return;
 @@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -9900,17 +9881,17 @@ index 0a97193984db..e32235cdc3b1 100644
 -		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
 +		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
 
 		/* At the hard limit, send SIGKILL. No further action. */
 @@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
 		return true;
 	}
 
 +#ifndef CONFIG_SCHED_ALT
 	if (dl_task(tsk) && tsk->dl.dl_overrun)
 		return true;
 +#endif
 
 	return false;
 }
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
@@ -9932,4 +9913,4 @@ index abcadbe933bb..d4c778b0ab0e 100644
 +#endif
 	};
 	struct wakeup_test_data *x = data;