linux-tkg/linux-tkg-patches/5.15/0005-glitched-pds.patch
From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch <ti3nou@gmail.com>
Date: Wed, 4 Jul 2018 04:30:08 +0200
Subject: glitched - PDS

diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 2a202a846757..1d9c7ed79b11 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -4,7 +4,7 @@
choice
prompt "Timer frequency"
- default HZ_250
+ default HZ_500
help
Allows the configuration of the timer frequency. It is customary
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
@@ -39,6 +39,13 @@ choice
on SMP and NUMA systems and exactly dividing by both PAL and
NTSC frame rates for video and multimedia work.
+ config HZ_500
+ bool "500 HZ"
+ help
+ 500 Hz is a balanced timer frequency. Provides fast interactivity
+ on desktops with great smoothness without increasing CPU power
+ consumption and sacrificing the battery life on laptops.
+
config HZ_1000
bool "1000 HZ"
help
@@ -52,6 +59,7 @@ config HZ
default 100 if HZ_100
default 250 if HZ_250
default 300 if HZ_300
+ default 500 if HZ_500
default 1000 if HZ_1000
config SCHED_HRTICK
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 2a202a846757..1d9c7ed79b11 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -4,7 +4,7 @@
choice
prompt "Timer frequency"
- default HZ_500
+ default HZ_750
help
Allows the configuration of the timer frequency. It is customary
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
@@ -46,6 +46,13 @@ choice
on desktops with great smoothness without increasing CPU power
consumption and sacrificing the battery life on laptops.
+ config HZ_750
+ bool "750 HZ"
+ help
+ 750 Hz is a good timer frequency for desktops. Provides fast
+ interactivity with great smoothness without sacrificing too
+ much throughput.
+
config HZ_1000
bool "1000 HZ"
help
@@ -60,6 +67,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 500 if HZ_500
+ default 750 if HZ_750
default 1000 if HZ_1000
config SCHED_HRTICK
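
Note (illustration only, not part of the patch): both help texts above reduce to the
same arithmetic — the tick period is 1/HZ — so the 500 Hz and 750 Hz choices added
here sit between the stock 250 Hz default and the 1000 Hz desktop setting. A minimal
C sketch of that trade-off:

    #include <stdio.h>

    int main(void)
    {
        /* Timer frequencies offered by the patched Kconfig choice. */
        const int hz_values[] = { 100, 250, 300, 500, 750, 1000 };
        const size_t n = sizeof(hz_values) / sizeof(hz_values[0]);

        for (size_t i = 0; i < n; i++)
            printf("HZ=%4d -> timer tick every %.2f ms\n",
                   hz_values[i], 1000.0 / hz_values[i]);
        return 0;
    }

At 500 Hz the tick fires every 2.00 ms and at 750 Hz every 1.33 ms, versus 4 ms at
the stock 250 Hz and 1 ms at 1000 Hz.
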
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9270a4370d54..30d01e647417 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,7 +169,7 @@
/*
* From 0 .. 200. Higher means more swappy.
*/
-int vm_swappiness = 60;
+int vm_swappiness = 20;
static void set_task_reclaim_state(struct task_struct *task,
struct reclaim_state *rs)
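
Note (illustration only, not part of the patch): the hunk above only lowers the
compiled-in default of vm_swappiness from 60 to 20. On a running kernel the same
knob is exposed through the standard sysctl interface (vm.swappiness, backed by
/proc/sys/vm/swappiness), so the value can also be set without rebuilding. A minimal
C sketch, assuming root and procfs mounted at /proc:

    #include <stdio.h>

    int main(void)
    {
        /* Write the value this patch compiles in as the default. */
        FILE *f = fopen("/proc/sys/vm/swappiness", "w");

        if (!f) {
            perror("/proc/sys/vm/swappiness");
            return 1;
        }
        fprintf(f, "20\n");
        return fclose(f) == 0 ? 0 : 1;
    }

The equivalent one-liner is: sysctl vm.swappiness=20
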
From f0438700352f5adebbdf122c00a3360bb0f5247c Mon Sep 17 00:00:00 2001
From: Torge Matthies <openglfreak@googlemail.com>
Date: Sun, 21 Nov 2021 23:58:50 +0100
Subject: [PATCH 1/2] sched/alt: Optimize loops in update_sched_rq_watermark.

With the old code, gcc misses an optimization opportunity and compiles
the loops to five instructions each:
0x0000000000000ed3 <+83>: lock bts %rdi,(%rax)
0x0000000000000ed8 <+88>: dec %rdx
0x0000000000000edb <+91>: add $0x400,%rax
0x0000000000000ee1 <+97>: cmp %rdx,%rsi
0x0000000000000ee4 <+100>: jne 0xed3 <update_sched_rq_watermark+83>
...
0x0000000000000f13 <+147>: lock btr %rdi,(%rax)
0x0000000000000f18 <+152>: dec %rdx
0x0000000000000f1b <+155>: add $0x400,%rax
0x0000000000000f21 <+161>: cmp %rcx,%rdx
0x0000000000000f24 <+164>: jne 0xf13 <update_sched_rq_watermark+147>
With this change, the loops get optimized to four instructions each:
0x0000000000000ed7 <+87>: lock bts %rsi,(%rdx)
0x0000000000000edc <+92>: add $0x400,%rdx
0x0000000000000ee3 <+99>: dec %rcx
0x0000000000000ee6 <+102>: jne 0xed7 <update_sched_rq_watermark+87>
...
0x0000000000000f1a <+154>: lock btr %rsi,(%rax)
0x0000000000000f1f <+159>: add $0x400,%rax
0x0000000000000f25 <+165>: dec %rdx
0x0000000000000f28 <+168>: jne 0xf1a <update_sched_rq_watermark+154>

Signed-off-by: Torge Matthies <openglfreak@googlemail.com>
---
kernel/sched/alt_core.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
index 8b0ddbdd24e4..7d926e8eab96 100644
--- a/kernel/sched/alt_core.c
+++ b/kernel/sched/alt_core.c
@@ -185,8 +185,8 @@ static inline void update_sched_rq_watermark(struct rq *rq)
rq->watermark = watermark;
cpu = cpu_of(rq);
if (watermark < last_wm) {
- for (i = last_wm; i > watermark; i--)
- cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
+ for (i = last_wm - watermark; i > 0; i--)
+ cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - (i + watermark));
#ifdef CONFIG_SCHED_SMT
if (static_branch_likely(&sched_smt_present) &&
IDLE_TASK_SCHED_PRIO == last_wm)
@@ -196,8 +196,8 @@ static inline void update_sched_rq_watermark(struct rq *rq)
return;
}
/* last_wm < watermark */
- for (i = watermark; i > last_wm; i--)
- cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
+ for (i = watermark - last_wm; i > 0; i--)
+ cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - (i + last_wm));
#ifdef CONFIG_SCHED_SMT
if (static_branch_likely(&sched_smt_present) &&
IDLE_TASK_SCHED_PRIO == watermark) {
--
GitLab
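
Note (illustration only, not part of the patch): the change above swaps an induction
variable that carries the watermark value — and therefore needs a separate cmp against
the loop bound — for a plain remaining-count that runs down to zero, so the dec itself
sets the flags the branch tests, as the listings in the commit message show. Below is a
standalone C sketch of the same transformation; the masks array and the touch_range_*
helpers are stand-ins for the kernel's sched_rq_watermark cpumasks and
cpumask_clear_cpu()/cpumask_set_cpu(), not the actual scheduler code:

    #include <stdio.h>

    #define NR_MASKS 64

    static unsigned long masks[NR_MASKS];

    /* Old shape: i carries the watermark value, so the loop control needs
     * dec + cmp + jne, as in the first listing above. */
    static void touch_range_old(unsigned long last_wm, unsigned long watermark)
    {
        unsigned long i;

        for (i = last_wm; i > watermark; i--)
            masks[NR_MASKS - 1 - i] |= 1UL;
    }

    /* New shape: i is only a remaining count ending at zero, so the dec
     * alone provides the branch condition and the cmp disappears. */
    static void touch_range_new(unsigned long last_wm, unsigned long watermark)
    {
        unsigned long i;

        for (i = last_wm - watermark; i > 0; i--)
            masks[NR_MASKS - 1 - (i + watermark)] |= 1UL;
    }

    int main(void)
    {
        /* Both calls touch exactly the same elements (indices 51..58). */
        touch_range_old(12, 4);
        touch_range_new(12, 4);
        printf("masks[%d] = %lu\n", NR_MASKS - 1 - 12, masks[NR_MASKS - 1 - 12]);
        return 0;
    }

Both helpers visit the same elements; only the loop-control arithmetic changes, which
is why the patch is behaviour-preserving.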