Tk-Glitch
2021-11-02 12:25:08 +01:00
parent d553c7eb1c
commit 3196e797c9
2 changed files with 141 additions and 131 deletions


@@ -59,7 +59,7 @@ else
fi
pkgname=("${pkgbase}" "${pkgbase}-headers")
pkgver="${_basekernel}"."${_sub}"
-pkgrel=211
+pkgrel=212
pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here
url="http://www.kernel.org/"
@@ -649,7 +649,7 @@ case $_basever in
'034d12a73b507133da2c69a34d61efd2f6b6618549650aa26d748142d22002e1'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
-'f215a286490071d6f870dda89677236123d84b3eede51bdb2b516e33ad219d86'
+'decd4a55c0d47b1eb808733490cdfea1207a2022d46f06d04a3cc60fdcb3f32c'
'1aa0a172e1e27fb8171053f3047dcf4a61bd2eda5ea18f02b2bb391741a69887'
'1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313'
'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6')
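The PKGBUILD side of this commit is only the pkgrel bump and the refreshed sha256 of the regenerated Project C patch. A minimal sketch of how such a checksum refresh is usually produced (the updpkgsums workflow and the placeholder file name are assumptions, not something this commit states):

    # Refresh every entry in sha256sums=() in place (updpkgsums is part of pacman-contrib):
    updpkgsums
    # Or hash the updated patch by hand and paste the result into the array;
    # the path below is purely illustrative.
    sha256sum path/to/updated-prjc-patch.patch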


@@ -1,10 +1,10 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index bdb22006f713..d755d7df632f 100644
+index 43dc35fe5bc0..0873e92ca5d0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -4947,6 +4947,12 @@
+@@ -4985,6 +4985,12 @@
-	sbni=	[NET] Granch SBNI12 leased line adapter
+	sa1100ir	[NET]
+		See drivers/net/irda/sa1100_ir.c.
+	sched_timeslice=
+		[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
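The hunk above only rebases the sched_timeslice= documentation onto the 5.15 layout of kernel-parameters.txt; the option itself is unchanged. For reference, it is passed like any other kernel command-line parameter; the 2 ms value below is an illustrative assumption, not something this diff specifies:

    # /etc/default/grub (GRUB example), then regenerate the config and reboot:
    #   GRUB_CMDLINE_LINUX_DEFAULT="... sched_timeslice=2"
    grub-mkconfig -o /boot/grub/grub.cfg
    # After reboot, confirm the option reached the kernel:
    cat /proc/cmdline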
@@ -150,10 +150,10 @@ index 000000000000..05c84eec0f31
+priority boost from unblocking while background threads that do most of the
+processing receive the priority penalty for using their entire timeslice.
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index e5b5f7709d48..284b3c4b7d90 100644
+index 533d5836eb9a..5756c51c9b58 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -476,7 +476,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+@@ -477,7 +477,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
seq_puts(m, "0 0 0\n");
else
seq_printf(m, "%llu %llu %lu\n",
@@ -176,10 +176,10 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ec8d07d88641..b12f660404fd 100644
+index c1a927ddec64..a7eb91d15442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -681,12 +681,18 @@ struct task_struct {
+@@ -748,12 +748,18 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_SMP
@@ -199,7 +199,7 @@ index ec8d07d88641..b12f660404fd 100644
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
-@@ -700,6 +706,7 @@ struct task_struct {
+@@ -767,6 +773,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
@@ -207,7 +207,7 @@ index ec8d07d88641..b12f660404fd 100644
#endif
int on_rq;
-@@ -708,6 +715,20 @@ struct task_struct {
+@@ -775,6 +782,20 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
@@ -228,7 +228,7 @@ index ec8d07d88641..b12f660404fd 100644
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
-@@ -718,6 +739,7 @@ struct task_struct {
+@@ -785,6 +806,7 @@ struct task_struct {
unsigned long core_cookie;
unsigned int core_occupation;
#endif
@@ -236,7 +236,7 @@ index ec8d07d88641..b12f660404fd 100644
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
-@@ -1417,6 +1439,15 @@ struct task_struct {
+@@ -1505,6 +1527,15 @@ struct task_struct {
*/
};
@@ -356,10 +356,10 @@ index 8f0f778b7c91..991f2280475b 100644
#else
static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig
-index 55f9f7738ebb..9a9b244d3ca3 100644
+index 11f8a845f259..c8e82fcafb9e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -786,9 +786,39 @@ config GENERIC_SCHED_CLOCK
+@@ -814,9 +814,39 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
@@ -399,7 +399,7 @@ index 55f9f7738ebb..9a9b244d3ca3 100644
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -874,6 +904,7 @@ config NUMA_BALANCING
+@@ -902,6 +932,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
@@ -407,7 +407,7 @@ index 55f9f7738ebb..9a9b244d3ca3 100644
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
-@@ -966,6 +997,7 @@ config FAIR_GROUP_SCHED
+@@ -994,6 +1025,7 @@ config FAIR_GROUP_SCHED
depends on CGROUP_SCHED
default CGROUP_SCHED
@@ -415,7 +415,7 @@ index 55f9f7738ebb..9a9b244d3ca3 100644
config CFS_BANDWIDTH
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
depends on FAIR_GROUP_SCHED
-@@ -988,6 +1020,7 @@ config RT_GROUP_SCHED
+@@ -1016,6 +1048,7 @@ config RT_GROUP_SCHED
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -423,7 +423,7 @@ index 55f9f7738ebb..9a9b244d3ca3 100644
endif #CGROUP_SCHED
config UCLAMP_TASK_GROUP
-@@ -1231,6 +1264,7 @@ config CHECKPOINT_RESTORE
+@@ -1259,6 +1292,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
@@ -432,7 +432,7 @@ index 55f9f7738ebb..9a9b244d3ca3 100644
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
-index 562f2ef8d157..177b63db4ce0 100644
+index 2d024066e27b..49f706df0904 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -75,9 +75,15 @@ struct task_struct init_task
@@ -450,8 +450,8 @@ index 562f2ef8d157..177b63db4ce0 100644
+#endif
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
-.cpus_mask = CPU_MASK_ALL,
+.user_cpus_ptr = NULL,
-@@ -87,6 +93,17 @@ struct task_struct init_task
+@@ -88,6 +94,17 @@ struct task_struct init_task
.restart_block = {
.fn = do_no_restart_syscall,
},
@@ -469,7 +469,7 @@ index 562f2ef8d157..177b63db4ce0 100644
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
-@@ -94,6 +111,7 @@ struct task_struct init_task
+@@ -95,6 +112,7 @@ struct task_struct init_task
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
@@ -491,10 +491,10 @@ index 5876e30c5740..7594d0a31869 100644
This option permits Core Scheduling, a means of coordinated task
selection across SMT siblings. When enabled -- see
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index adb5190c4429..8c02bce63146 100644
+index 2a9695ccb65f..292112c267b8 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
-@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+@@ -664,7 +664,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret;
}
@@ -503,7 +503,7 @@ index adb5190c4429..8c02bce63146 100644
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1032,7 +1032,7 @@ static void rebuild_sched_domains_locked(void)
+@@ -1060,7 +1060,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
@@ -526,7 +526,7 @@ index 51530d5b15a8..e542d71bb94b 100644
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
-index 9a89e7f36acb..7fe34c56bd08 100644
+index 91a43e57a32e..4b157befc10c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -548,7 +548,7 @@ index 9a89e7f36acb..7fe34c56bd08 100644
__unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
-index 3a4beb9395c4..98a709628cb3 100644
+index 291b857a6e20..f3480cdb7497 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -307,7 +307,11 @@ static bool klp_try_switch_task(struct task_struct *task)
@@ -564,10 +564,10 @@ index 3a4beb9395c4..98a709628cb3 100644
"%s: %s:%d is running\n", __func__, task->comm, "%s: %s:%d is running\n", __func__, task->comm,
task->pid); task->pid);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index ad0db322ed3b..350b0e506c17 100644 index 6bb116c559b4..d4c8168a8270 100644
--- a/kernel/locking/rtmutex.c --- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c
@@ -654,7 +654,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) @@ -298,21 +298,25 @@ static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{ {
waiter->prio = __waiter_prio(task); waiter->prio = __waiter_prio(task);
@@ -576,7 +576,6 @@ index ad0db322ed3b..350b0e506c17 100644
}
/*
@@ -227,14 +227,18 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
* Only use with rt_mutex_waiter_{less,equal}()
*/
#define task_to_waiter(p) \
@@ -596,7 +595,7 @@ index ad0db322ed3b..350b0e506c17 100644
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
-@@ -243,16 +247,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -321,16 +325,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline);
@@ -619,7 +618,7 @@ index ad0db322ed3b..350b0e506c17 100644
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
-@@ -261,8 +271,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+@@ -339,8 +349,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return left->deadline == right->deadline;
@@ -629,7 +628,7 @@ index ad0db322ed3b..350b0e506c17 100644
+#endif
}
-#define __node_2_waiter(node) \
+static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 978fcfca5871..0425ee149b4d 100644
--- a/kernel/sched/Makefile
@@ -664,10 +663,10 @@ index 978fcfca5871..0425ee149b4d 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..56aed2b1e42c
+index 000000000000..9576c57f82da
--- /dev/null
+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7615 @@
+@@ -0,0 +1,7626 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -737,7 +736,7 @@ index 000000000000..56aed2b1e42c
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
-+#define ALT_SCHED_VERSION "v5.14-r3"
++#define ALT_SCHED_VERSION "v5.15-r0"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -1273,7 +1272,7 @@ index 000000000000..56aed2b1e42c
+ u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
+ RQ_LOAD_HISTORY_BITS - 1);
+ u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
-+ u64 curr = !!cpu_rq(rq->cpu)->nr_running;
++ u64 curr = !!rq->nr_running;
+
+ if (delta) {
+ rq->load_history = rq->load_history >> delta;
@@ -2542,8 +2541,8 @@ index 000000000000..56aed2b1e42c
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
-+ printk("process %d (%s) no longer affine to cpu%d\n",
++ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+
@@ -2613,17 +2612,50 @@ index 000000000000..56aed2b1e42c
+ }
+}
+
-+/*
-+ * Called with both p->pi_lock and rq->lock held; drops both before returning.
-+ */
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++ raw_spinlock_t *lock, unsigned long irq_flags)
++{
+ /* Can the task run on the task's current CPU? If so, we're done */
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+ if (p->migration_disabled) {
+ if (likely(p->cpus_ptr != &p->cpus_mask))
+ __do_set_cpus_ptr(p, &p->cpus_mask);
+ p->migration_disabled = 0;
+ p->migration_flags |= MDF_FORCE_ENABLED;
+ /* When p is migrate_disabled, rq->lock should be held */
+ rq->nr_pinned--;
+ }
+
+ if (task_running(p) || READ_ONCE(p->__state) == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+
+ /* Need help from migration thread: drop lock and wait. */
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ return 0;
+ }
+ if (task_on_rq_queued(p)) {
+ /*
+ * OK, since we're going to drop the lock immediately
+ * afterwards anyway.
+ */
+ update_rq_clock(rq);
+ rq = move_queued_task(rq, p, dest_cpu);
+ lock = &rq->lock;
+ }
+ }
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ return 0;
+}
+
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+ const struct cpumask *new_mask,
+ u32 flags,
+ unsigned long *irq_flags,
+ struct rq *rq,
-+ raw_spinlock_t *lock)
++ raw_spinlock_t *lock,
-+ __releases(rq->lock)
++ unsigned long irq_flags)
+ __releases(p->pi_lock)
+{
+ const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
@@ -2674,47 +2706,19 @@ index 000000000000..56aed2b1e42c
+ if (flags & SCA_USER)
+ user_mask = clear_user_cpus_ptr(p);
+
-+ /* Can the task run on the task's current CPU? If so, we're done */
++ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
+ if (cpumask_test_cpu(task_cpu(p), new_mask))
+ goto out;
+
+ if (p->migration_disabled) {
+ if (likely(p->cpus_ptr != &p->cpus_mask))
+ __do_set_cpus_ptr(p, &p->cpus_mask);
+ p->migration_disabled = 0;
+ p->migration_flags |= MDF_FORCE_ENABLED;
+ /* When p is migrate_disabled, rq->lock should be held */
+ rq->nr_pinned--;
+ }
+
+ if (task_running(p) || READ_ONCE(p->__state) == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+
+ /* Need help from migration thread: drop lock and wait. */
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, *irq_flags);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ kfree(user_mask);
+ return 0;
+ }
+ if (task_on_rq_queued(p)) {
+ /*
+ * OK, since we're going to drop the lock immediately
+ * afterwards anyway.
+ */
+ update_rq_clock(rq);
+ rq = move_queued_task(rq, p, dest_cpu);
+ lock = &rq->lock;
+ }
+ kfree(user_mask);
+
+ return ret;
+
+out:
+ __task_access_unlock(p, lock);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, *irq_flags);
++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+
+ return ret;
+}
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
@@ -2733,7 +2737,8 @@ index 000000000000..56aed2b1e42c
+
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock);
-+ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, &irq_flags, rq, lock);
++
+ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
@@ -2755,8 +2760,8 @@ index 000000000000..56aed2b1e42c
+{
+ struct cpumask *user_mask = NULL;
+ unsigned long irq_flags;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+ struct rq *rq;
+ int err;
+
+ if (!p->user_cpus_ptr) {
@@ -2782,7 +2787,8 @@ index 000000000000..56aed2b1e42c
+ p->user_cpus_ptr = user_mask;
+ }
+
-+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, &irq_flags, rq, lock);
++ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
+
+err_unlock:
+ __task_access_unlock(p, lock);
@@ -2825,8 +2831,9 @@ index 000000000000..56aed2b1e42c
+
+out_set_mask:
+ if (printk_ratelimit()) {
-+ printk("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+ task_pid_nr(p), p->comm, cpumask_pr_args(override_mask));
++ printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++ task_pid_nr(p), p->comm,
+ cpumask_pr_args(override_mask));
+ }
+
+ WARN_ON(set_cpus_allowed_ptr(p, override_mask));
@@ -2875,8 +2882,7 @@ index 000000000000..56aed2b1e42c
+
+static inline int
+__set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask,
++ const struct cpumask *new_mask, u32 flags)
+ u32 flags)
+{
+ return set_cpus_allowed_ptr(p, new_mask);
+}
@@ -6327,14 +6333,6 @@ index 000000000000..56aed2b1e42c
+ return -E2BIG;
+}
+
+static void get_params(struct task_struct *p, struct sched_attr *attr)
+{
+ if (task_has_rt_policy(p))
+ attr->sched_priority = p->rt_priority;
+ else
+ attr->sched_nice = task_nice(p);
+}
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
@@ -6393,8 +6391,6 @@ index 000000000000..56aed2b1e42c
+ rcu_read_unlock();
+
+ if (likely(p)) {
+ if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
+ get_params(p, &attr);
+ retval = sched_setattr(p, &attr);
+ put_task_struct(p);
+ }
@@ -6545,7 +6541,10 @@ index 000000000000..56aed2b1e42c
+ kattr.sched_policy = p->policy;
+ if (p->sched_reset_on_fork)
+ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+ get_params(p, &kattr);
++ if (task_has_rt_policy(p))
+ kattr.sched_priority = p->rt_priority;
+ else
+ kattr.sched_nice = task_nice(p);
+ kattr.sched_flags &= SCHED_FLAG_ALL;
+
+#ifdef CONFIG_UCLAMP_TASK
@@ -6578,7 +6577,6 @@ index 000000000000..56aed2b1e42c
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, mask, cpus_allowed);
+
+again:
+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+ if (retval)
@@ -6587,8 +6585,9 @@ index 000000000000..56aed2b1e42c
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(new_mask, cpus_allowed)) {
+ /*
-+ * We must have raced with a concurrent cpuset update.
-+ * Just reset the cpumask to the cpuset's cpus_allowed.
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
@@ -6791,6 +6790,17 @@ index 000000000000..56aed2b1e42c
+ preempt_schedule_common();
+ return 1;
+ }
+ /*
+ * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+ * whether the current CPU is in an RCU read-side critical section,
+ * so the tick can report quiescent states even for CPUs looping
+ * in kernel context. In contrast, in non-preemptible kernels,
+ * RCU readers leave no in-memory hints, which means that CPU-bound
+ * processes executing in kernel context might never report an
+ * RCU quiescent state. Therefore, the following code causes
+ * cond_resched() to report a quiescent state, but only when RCU
+ * is in urgent need of one.
+ */
+#ifndef CONFIG_PREEMPT_RCU
+ rcu_all_qs();
+#endif
@@ -9110,7 +9120,7 @@ index 000000000000..be3ee4a553ca
+
+static inline void update_rq_time_edge(struct rq *rq) {}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 57124614363d..f0e9c7543542 100644
+index e7af18857371..3e38816b736e 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -167,9 +167,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
@@ -9139,7 +9149,7 @@ index 57124614363d..f0e9c7543542 100644
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
-@@ -599,6 +606,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+@@ -607,6 +614,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
ret = sched_setattr_nocheck(thread, &attr);
@@ -9147,7 +9157,7 @@ index 57124614363d..f0e9c7543542 100644
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -833,7 +841,9 @@ cpufreq_governor_init(schedutil_gov);
+@@ -839,7 +847,9 @@ cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
@@ -9207,7 +9217,7 @@ index 872e481d5098..f920c8b48ec1 100644
task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 0c5ec2776ddf..e3f4fe3f6e2c 100644
+index 17a653b67006..17ab2fe34d7a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -8,6 +8,7 @@
@@ -9218,7 +9228,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
/*
* This allows printing both to /proc/sched_debug and
* to the console
-@@ -210,6 +211,7 @@ static const struct file_operations sched_scaling_fops = {
+@@ -216,6 +217,7 @@ static const struct file_operations sched_scaling_fops = {
};
#endif /* SMP */
@@ -9226,7 +9236,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
#ifdef CONFIG_PREEMPT_DYNAMIC
-@@ -273,6 +275,7 @@ static const struct file_operations sched_dynamic_fops = {
+@@ -279,6 +281,7 @@ static const struct file_operations sched_dynamic_fops = {
#endif /* CONFIG_PREEMPT_DYNAMIC */
@@ -9234,7 +9244,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
__read_mostly bool sched_debug_verbose;
static const struct seq_operations sched_debug_sops;
-@@ -288,6 +291,7 @@ static const struct file_operations sched_debug_fops = {
+@@ -294,6 +297,7 @@ static const struct file_operations sched_debug_fops = {
.llseek = seq_lseek,
.release = seq_release,
};
@@ -9242,7 +9252,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
static struct dentry *debugfs_sched;
-@@ -297,12 +301,15 @@ static __init int sched_init_debug(void)
+@@ -303,12 +307,15 @@ static __init int sched_init_debug(void)
debugfs_sched = debugfs_create_dir("sched", NULL);
@@ -9258,7 +9268,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
-@@ -330,11 +337,13 @@ static __init int sched_init_debug(void)
+@@ -336,11 +343,13 @@ static __init int sched_init_debug(void)
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -9272,7 +9282,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
#ifdef CONFIG_SMP
static cpumask_var_t sd_sysctl_cpus;
-@@ -1047,6 +1056,7 @@ void proc_sched_set_task(struct task_struct *p)
+@@ -1063,6 +1072,7 @@ void proc_sched_set_task(struct task_struct *p)
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
@@ -9281,7 +9291,7 @@ index 0c5ec2776ddf..e3f4fe3f6e2c 100644
void resched_latency_warn(int cpu, u64 latency)
{
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index 912b47aa99d8..7f6b13883c2a 100644
+index d17b0a5ce6ac..6ff77fc6b73a 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -403,6 +403,7 @@ void cpu_startup_entry(enum cpuhp_state state)
@@ -9503,7 +9513,7 @@ index e06071bf3472..adf567df34d4 100644
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index ddefb0419d7a..658c41b15d3c 100644
+index 3d3e5793e117..c1d976ef623f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,10 @@
@@ -9517,7 +9527,7 @@ index ddefb0419d7a..658c41b15d3c 100644
#include <linux/sched.h>
#include <linux/sched/autogroup.h>
-@@ -3038,3 +3042,8 @@ extern int sched_dynamic_mode(const char *str);
+@@ -3064,3 +3068,8 @@ extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif
@@ -9558,7 +9568,7 @@ index 3f93fc3b5648..528b71e144e9 100644
}
return 0;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index b77ad49dc14f..be9edf086412 100644
+index 4e8698e62f07..36c61551252e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@
@@ -9580,7 +9590,7 @@ index b77ad49dc14f..be9edf086412 100644
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1617,6 +1620,7 @@ sd_init(struct sched_domain_topology_level *tl,
+@@ -1619,6 +1622,7 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}
@@ -9588,7 +9598,7 @@ index b77ad49dc14f..be9edf086412 100644
/*
* Topology list, bottom-up.
-@@ -1646,6 +1650,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+@@ -1648,6 +1652,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
sched_domain_topology = tl;
}
@@ -9596,7 +9606,7 @@ index b77ad49dc14f..be9edf086412 100644
#ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2451,3 +2456,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+@@ -2516,3 +2521,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex);
}
@@ -9615,7 +9625,7 @@ index b77ad49dc14f..be9edf086412 100644
+#endif /* CONFIG_NUMA */
+#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 272f4a272f8c..1c9455c8ecf6 100644
+index 083be6af29d7..09fc6281d488 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -122,6 +122,10 @@ static unsigned long long_max = LONG_MAX;
@@ -9629,7 +9639,7 @@ index 272f4a272f8c..1c9455c8ecf6 100644
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
-@@ -1730,6 +1734,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
+@@ -1771,6 +1775,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
}
static struct ctl_table kern_table[] = {
@@ -9654,7 +9664,7 @@ index 272f4a272f8c..1c9455c8ecf6 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -1860,6 +1882,7 @@ static struct ctl_table kern_table[] = {
+@@ -1901,6 +1923,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
@@ -9662,7 +9672,7 @@ index 272f4a272f8c..1c9455c8ecf6 100644
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
-@@ -2436,6 +2459,17 @@ static struct ctl_table kern_table[] = {
+@@ -2477,6 +2500,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -9681,10 +9691,10 @@ index 272f4a272f8c..1c9455c8ecf6 100644
{
.procname = "spin_retry",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 4a66725b1d4a..cb80ed5c1f5c 100644
+index 0ea8702eb516..a27a0f3a654d 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1940,8 +1940,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
int ret = 0;
u64 slack;
@@ -9696,7 +9706,7 @@ index 4a66725b1d4a..cb80ed5c1f5c 100644
hrtimer_init_sleeper_on_stack(&t, clockid, mode);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index 517be7fd175e..de3afe8e0800 100644
+index 643d412ac623..6bf27565242f 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -216,7 +216,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
@@ -9708,7 +9718,7 @@ index 517be7fd175e..de3afe8e0800 100644
}
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
-@@ -801,6 +801,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+@@ -859,6 +859,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
}
}
@@ -9716,7 +9726,7 @@ index 517be7fd175e..de3afe8e0800 100644
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
-@@ -808,6 +809,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+@@ -866,6 +867,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
@@ -9724,7 +9734,7 @@ index 517be7fd175e..de3afe8e0800 100644
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
-@@ -835,8 +837,10 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -893,8 +895,10 @@ static void check_thread_timers(struct task_struct *tsk,
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
@@ -9735,7 +9745,7 @@ index 517be7fd175e..de3afe8e0800 100644
if (expiry_cache_is_inactive(pct))
return;
-@@ -850,7 +854,7 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -908,7 +912,7 @@ static void check_thread_timers(struct task_struct *tsk,
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
/* Task RT timeout is accounted in jiffies. RTTIME is usec */
@@ -9744,7 +9754,7 @@ index 517be7fd175e..de3afe8e0800 100644
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */
-@@ -1086,8 +1090,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+@@ -1144,8 +1148,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
return true;
}