Compare commits

...

4 Commits

Author SHA1 Message Date
Sravan Balaji
3cc28c3531 PDS Kernel Configuration 2023-10-13 09:45:53 -04:00
Piotr Górski
18f10e2989 6.5: Add new EEVDF patches for BORE 3.2.0 (#826)
Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
2023-10-13 15:26:14 +02:00
FintasticMan
d1caaffa2e Add some more kernel modules to diet config (#814)
Partly taken from my database, partly from @ryanmusante's.
Also update the sorting to match what modprobed-db outputs, so that it's
easier to check which modules are missing from the diet config.
2023-10-09 22:12:49 +02:00
Yifan Zhu
097c2ed1b5 Correct comments for modprobed.db option (#823)
Prompting at build time was removed in commit 1a69f04e6e

Co-authored-by: Yifan Zhu <fanzhuyifan@gmail.com>
2023-10-09 22:11:00 +02:00
3 changed files with 589 additions and 271 deletions


@@ -3,7 +3,7 @@
# Linux distribution you are using, options are "Arch", "Ubuntu", "Debian", "Fedora", "Suse", "Gentoo", "Generic".
# It is automatically set to "Arch" when using PKGBUILD.
# If left empty, the script will prompt
_distro=""
_distro="Arch"
# Kernel Version - x.x format without the subversion (will always grab latest available subversion) is recommended
# you can also set a specific kernel version, e.g. "6.0-rc4" or "5.10.51",
@@ -46,7 +46,7 @@ CUSTOM_GCC_PATH=""
CUSTOM_LLVM_PATH=""
# Set to true to bypass makepkg.conf and use all available threads for compilation. False will respect your makepkg.conf options.
_force_all_threads="true"
_force_all_threads="false"
# Set to true to prevent ccache from being used and set CONFIG_GCC_PLUGINS=y (which needs to be disabled for ccache to work properly)
_noccache="false"
@@ -59,17 +59,17 @@ _kernel_on_diet="false"
# Set to true to use modprobed-db to clean the config of unneeded modules. Speeds up compilation considerably. Requires root - https://wiki.archlinux.org/index.php/Modprobed-db
# Using this option can trigger user prompts if the config doesn't go smoothly.
# !!!! Make sure to have a well populated db !!!! - Leave empty to be asked about it at build time
# !!!! Make sure to have a well populated db !!!!
_modprobeddb="false"
# modprobed-db database file location
_modprobeddb_db_path=~/.config/modprobed.db
# Set to "1" to call make menuconfig, "2" to call make nconfig, "3" to call make xconfig, before building the kernel. Set to false to disable and skip the prompt.
_menunconfig=""
_menunconfig="false"
# Set to true to generate a kernel config fragment from your changes in menuconfig/nconfig. Set to false to disable and skip the prompt.
_diffconfig=""
_diffconfig="false"
# Set to the file name where the generated config fragment should be written to. Only used if _diffconfig is active.
_diffconfig_name=""
@@ -104,11 +104,11 @@ _STRIP="true"
# CPU scheduler - Options are "pds", "bmq", "cacule", "tt", "bore", "bore-eevdf", "eevdf" or "cfs" (kernel's default)
# "upds" (TkG's Undead PDS) and "muqss" are also available on legacy kernel revisions
_cpusched=""
_cpusched="pds"
# Compiler to use - Options are "gcc" or "llvm".
# For advanced users.
_compiler=""
_compiler="gcc"
# Force the use of the LLVM Integrated Assembler whether using LLVM, LTO or not.
# Set to "1" to enable.
@@ -138,7 +138,7 @@ _preempt_rt_force=""
# For BMQ: 0: No yield.
# 1: Deboost and requeue task. (Default)
# 2: Set rq skip task.
_sched_yield_type=""
_sched_yield_type="0"
# Round Robin interval is the longest duration two tasks with the same nice level will be delayed for. When CPU time is requested by a task, it receives a time slice equal
# to the rr_interval in addition to a virtual deadline. When using yield_type 2, a low value can help offset the disadvantages of rescheduling a process that has yielded.
@@ -146,7 +146,7 @@ _sched_yield_type=""
# PDS default: 4ms"
# BMQ default: 2ms"
# Set to "1" for 2ms, "2" for 4ms, "3" for 6ms, "4" for 8ms, or "default" to keep the chosen scheduler defaults.
_rr_interval=""
_rr_interval="default"
# Set to "true" to disable FUNCTION_TRACER/GRAPH_TRACER, lowering overhead but limiting debugging and analyzing of kernel functions - Kernel default is "false"
_ftracedisable="false"
@@ -161,10 +161,10 @@ _misc_adds="true"
# Full tickless can give higher performance if you use CPU isolation for specific tasks,
# and it only works with the nohz_full kernel parameter; otherwise it behaves like tickless idle.
# Plain tickless idle performs better on most platforms.
_tickless=""
_tickless="2"
# Set to "true" to use ACS override patch - https://wiki.archlinux.org/index.php/PCI_passthrough_via_OVMF#Bypassing_the_IOMMU_groups_.28ACS_override_patch.29 - Kernel default is "false"
_acs_override=""
_acs_override="false"
# Set to "true" to add Bcache filesystem support. You'll have to install bcachefs-tools-git from AUR for utilities - https://bcachefs.org/ - If in doubt, set to "false"
# This can be buggy and isn't recommended on a production machine, also enabling this option will not allow you to enable MGLRU.
@@ -175,7 +175,7 @@ _bcachefs="false"
_winesync="false"
# Set to "true" to enable Binder modules to use Waydroid Android containers
_waydroid=""
_waydroid="false"
# Various patches and tweaks from Zen/Liquorix, Xanmod and the community - Default is "true"
_glitched_base="true"
@@ -185,7 +185,7 @@ _glitched_base="true"
_zenify="true"
# compiler optimization level - 1. Optimize for performance (-O2); 2. Optimize harder (-O3); 3. Optimize for size (-Os) - Kernel default is "1"
_compileroptlevel="1"
_compileroptlevel="2"
# CPU compiler optimizations - Defaults to prompt at kernel config if left empty
# AMD CPUs : "k8" "k8sse3" "k10" "barcelona" "bobcat" "jaguar" "bulldozer" "piledriver" "steamroller" "excavator" "zen" "zen2" "zen3" "zen4" (zen3 opt support depends on GCC11) (zen4 opt support depends on GCC13)
@@ -199,7 +199,7 @@ _compileroptlevel="1"
# - "generic_v2" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v2
# - "generic_v3" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v3
# - "generic_v4" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v4
_processor_opt=""
_processor_opt="skylake"
# CacULE only - Enable Response Driven Balancer, an experimental load balancer for CacULE
_cacule_rdb="false"
@@ -212,13 +212,13 @@ _cacule_rdb_interval="19"
_tt_high_hz="false"
# MuQSS and PDS only - SMT (Hyperthreading) aware nice priority and policy support (SMT_NICE) - Kernel default is "true" - You can disable this on non-SMT/HT CPUs for lower overhead
_smt_nice=""
_smt_nice="true"
# Trust the CPU manufacturer to initialize Linux's CRNG (RANDOM_TRUST_CPU) - Kernel default is "false"
_random_trust_cpu="true"
# Timer frequency - "100" "250" "300" "500" "750" "1000" ("2000" is available for cacule cpusched only) - When left empty, more options are offered at the kernel config prompt depending on the selected cpusched, with the default marked with a ">" (2000 for cacule, 100 for muqss and 1000 for the other cpu schedulers)
_timer_freq=""
_timer_freq="1000"
# Default CPU governor - "performance", "ondemand", "schedutil" or leave empty for default (schedutil)
_default_cpu_gov="ondemand"
@@ -234,7 +234,7 @@ _aggressive_ondemand="true"
_tcp_cong_alg=""
# You can pass a default set of kernel command line options here - example: "intel_pstate=passive nowatchdog amdgpu.ppfeaturemask=0xfffd7fff mitigations=off"
_custom_commandline="intel_pstate=passive split_lock_detect=off"
_custom_commandline=""
# Selection of Clearlinux patches
_clear_patches="true"


@@ -25,9 +25,9 @@ algif_hash
algif_skcipher
alx
amd64_edac
amdgpu
amd_pmc
amd_pstate
amdgpu
amdxcp
apple_mfi_fastcharge
appletalk
@@ -54,8 +54,8 @@ bluetooth
bnep
bpf
bpf_preload
bridge
br_netfilter
bridge
btbcm
btcoexist
btintel
@@ -91,10 +91,10 @@ cmdlinepart
coretemp
cpufreq_ondemand
crc16
crc32c_generic
crc32c_intel
crc32_generic
crc32_pclmul
crc32c_generic
crc32c_intel
crc64
crc64_rocksoft
crc64_rocksoft_generic
@@ -109,13 +109,13 @@ cuse
dca
des_generic
dm_crypt
dmi_sysfs
dm_log
dm_mirror
dm_mod
dm_multipath
dm_region_hash
dm_round_robin
dmi_sysfs
dns_resolver
drm
drm_buddy
@@ -127,10 +127,10 @@ drm_ttm_helper
dvb_core
ebtable_filter
ebtables
ec_sys
ecb
ecc
ecdh_generic
ec_sys
edac_mce_amd
ee1004
eeepc_wmi
@@ -178,7 +178,6 @@ hid_logitech_hidpp
hid_microsoft
hid_multitouch
hid_nintendo
hidp
hid_playstation
hid_roccat
hid_roccat_common
@@ -187,6 +186,8 @@ hid_roccat_ryos
hid_sony
hid_steam
hid_wiimote
hidp
hp_wmi
hv_balloon
hv_netvsc
hv_storvsc
@@ -207,6 +208,8 @@ i2c_piix4
i2c_smbus
i8042
i915
iTCO_vendor_support
iTCO_wdt
ib_cm
ib_core
idma64
@@ -215,10 +218,12 @@ igc
inet_diag
input_leds
int3400_thermal
int3403_thermal
int340x_thermal_zone
intel_agp
intel_cstate
intel_gtt
intel_hid
intel_lpss
intel_lpss_pci
intel_pch_thermal
@@ -227,37 +232,36 @@ intel_powerclamp
intel_rapl_common
intel_rapl_msr
intel_soc_dts_iosf
intel_tcc_cooling
intel_uncore
intel_vsec
intel_wmi_thunderbolt
iommufd
iommu_v2
iommufd
ip6_tables
ip6_udp_tunnel
ip6t_REJECT
ip6t_rt
ip6table_filter
ip6table_mangle
ip6table_nat
ip6table_raw
ip6_tables
ip6table_security
ip6t_REJECT
ip6t_rt
ip6_udp_tunnel
ip_set
ip_tables
ipmi_devintf
ipmi_msghandler
ip_set
ipt_REJECT
iptable_filter
iptable_mangle
iptable_nat
iptable_raw
ip_tables
iptable_security
ipt_REJECT
ipv6
ir_kbd_i2c
irqbypass
isofs
it87
iTCO_vendor_support
iTCO_wdt
iw_cm
iwlmei
iwlmvm
@@ -301,15 +305,17 @@ mbcache
mc
mc44s803
md4
md_mod
mdio
mdio_devres
md_mod
mei
mei_gsc
mei_hdcp
mei_me
mei_pxp
mii
minix
mmc_core
mousedev
mptcp_diag
mrp
@@ -352,14 +358,14 @@ nf_nat_irc
nf_nat_pptp
nf_nat_sip
nf_nat_tftp
nf_reject_ipv4
nf_reject_ipv6
nf_tables
nfnetlink
nfnetlink_log
nfnetlink_queue
nf_reject_ipv4
nf_reject_ipv6
nfs
nfsv4
nf_tables
nft_chain_nat
nft_compat
nft_ct
@@ -383,6 +389,7 @@ nls_utf8
nouveau
ntfs
ntfs3
nvidia_wmi_ec_backlight
nvme
nvme_common
nvme_core
@@ -438,12 +445,18 @@ rt2800usb
rt2x00lib
rt2x00usb
rtl8192ee
rtl8723_common
rtl8723ae
rtl8723be
rtl8723_common
rtl8821ae
rtl_pci
rtlwifi
rtsx_pci
rtsx_pci_sdmmc
rtw88_8821c
rtw88_8821ce
rtw88_core
rtw88_pci
sch_cake
sch_fq_codel
sch_ingress
@@ -460,8 +473,11 @@ ses
sg
sha512_ssse3
snd
snd_acp3x_pdm_dma
snd_acp3x_rn
snd_acp6x_pdm_dma
snd_acp_config
snd_acp_pci
snd_aloop
snd_compress
snd_ctl_led
@@ -479,10 +495,12 @@ snd_intel_sdw_acpi
snd_pci_acp3x
snd_pci_acp5x
snd_pci_acp6x
snd_pci_ps
snd_pcm
snd_pcm_dmaengine
snd_rawmidi
snd_rn_pci_acp3x
snd_rpl_pci_acp6x
snd_seq
snd_seq_device
snd_seq_dummy
@@ -501,12 +519,14 @@ snd_soc_sst_dsp
snd_soc_sst_ipc
snd_sof
snd_sof_amd_acp
snd_sof_amd_rembrandt
snd_sof_amd_renoir
snd_sof_intel_hda
snd_sof_intel_hda_common
snd_sof_intel_hda_mlink
snd_sof_pci
snd_sof_pci_intel_cnl
snd_sof_pci_intel_tgl
snd_sof_probes
snd_sof_utils
snd_sof_xtensa_dsp
@@ -514,9 +534,9 @@ snd_timer
snd_ua101
snd_ump
snd_usb_audio
snd_usbmidi_lib
snd_usb_us122l
snd_usb_usx2y
snd_usbmidi_lib
snd_virmidi
soundcore
soundwire_bus
@@ -549,6 +569,7 @@ tea5767
tee
tg3
thermal
thunderbolt
tiny_power_button
tls
tpm
@@ -578,14 +599,14 @@ uio
uio_pdrv_genirq
unix_diag
usb_common
usb_storage
usb_wwan
usbcore
usbhid
usbip_core
usbip_host
usbmon
usbnet
usb_storage
usb_wwan
uvc
uvcvideo
uvesafb
@@ -618,9 +639,11 @@ vsock_loopback
wacom
watchdog
wireguard
wireless_hotkey
wmi
wmi_bmof
x86_pkg_temp_thermal
x_tables
xc2028
xc4000
xc5000
@@ -632,21 +655,20 @@ xhci_pci
xhci_pci_renesas
xor
xpad
x_tables
xt_CHECKSUM
xt_LOG
xt_MASQUERADE
xt_NFQUEUE
xt_REDIRECT
xt_addrtype
xt_cgroup
xt_CHECKSUM
xt_comment
xt_conntrack
xt_hl
xt_limit
xt_LOG
xt_mark
xt_MASQUERADE
xt_nat
xt_NFQUEUE
xt_recent
xt_REDIRECT
xt_state
xt_tcpudp
xxhash_generic


@@ -1,7 +1,7 @@
From 6f9fee6b2a2ceb4561a58c152467fd5e6d5c47e8 Mon Sep 17 00:00:00 2001
From d931ed7fc8d6728204d36d31a18d4c8b60593821 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:40 +0200
Subject: [PATCH 01/15] sched/fair: Add cfs_rq::avg_vruntime
Subject: [PATCH 01/16] sched/fair: Add cfs_rq::avg_vruntime
In order to move to an eligibility based scheduling policy, we need
to have a better approximation of the ideal scheduler.
@@ -295,10 +295,10 @@ index e93e006a9..4ccb73d85 100644
2.42.0
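
A toy model of the avg_vruntime bookkeeping the patch above introduces (simplified sketch, not the kernel code): track the load-weighted sum of vruntimes relative to min_vruntime, plus the total load, and recover the weighted average V = \Sum w_i*v_i / \Sum w_i on demand.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_rq {
        int64_t  avg_vruntime;  /* \Sum (v_i - v0) * w_i */
        uint64_t avg_load;      /* \Sum w_i */
        uint64_t min_vruntime;  /* v0, keeps the sums small */
    };

    static void toy_enqueue(struct toy_rq *rq, uint64_t v, uint64_t w)
    {
        rq->avg_vruntime += (int64_t)(v - rq->min_vruntime) * (int64_t)w;
        rq->avg_load     += w;
    }

    static uint64_t toy_avg_vruntime(const struct toy_rq *rq)
    {
        int64_t avg = rq->avg_vruntime;
        if (rq->avg_load)
            avg /= (int64_t)rq->avg_load; /* truncating; patch 14 below adds the floor */
        return rq->min_vruntime + (uint64_t)avg;
    }

    int main(void)
    {
        struct toy_rq rq = { 0, 0, 100 };
        toy_enqueue(&rq, 110, 1024);  /* nice-0 weight */
        toy_enqueue(&rq, 130, 512);   /* lighter entity pulls V less */
        printf("V = %llu\n", (unsigned long long)toy_avg_vruntime(&rq)); /* 116 */
        return 0;
    }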
From 826b8e2df1d3c69e138c6c89f6872df2be4ad1cb Mon Sep 17 00:00:00 2001
From 4e5d4ab816239fc30595a76ffcd41c323bdd4996 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:41 +0200
Subject: [PATCH 02/15] sched/fair: Remove sched_feat(START_DEBIT)
Subject: [PATCH 02/16] sched/fair: Remove sched_feat(START_DEBIT)
With the introduction of avg_vruntime() there is no need to use worse
approximations. Take the 0-lag point as starting point for inserting
@@ -372,10 +372,10 @@ index ee7f23c76..fa828b365 100644
2.42.0
From 5672ddd48026e6f590a9eae4d122bb0eed50e109 Mon Sep 17 00:00:00 2001
From 49ba3e84689bb047d9411e8a3a6ae99020070f37 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:42 +0200
Subject: [PATCH 03/15] sched/fair: Add lag based placement
Subject: [PATCH 03/16] sched/fair: Add lag based placement
With the introduction of avg_vruntime, it is possible to approximate
lag (the entire purpose of introducing it in fact). Use this to do lag
@@ -410,7 +410,7 @@ index 609bde814..52910bfb9 100644
u64 nr_migrations;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c52c2eba7..3bb4df5bb 100644
index e8f73ff12..acb9d9ff3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4501,6 +4501,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -661,10 +661,10 @@ index fa828b365..7958a10fe 100644
2.42.0
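
The lag the patch above places entities by is the difference between the service an entity would have received from an ideal fluid scheduler and what it actually got, approximated as w_i * (V - v_i). A toy illustration reusing the numbers from the sketch after patch 01:

    #include <stdint.h>
    #include <stdio.h>

    /* lag_i ~= w_i * (V - v_i): positive when the entity is owed service */
    static int64_t toy_lag(uint64_t V, uint64_t v_i, uint64_t w_i)
    {
        return (int64_t)(V - v_i) * (int64_t)w_i;
    }

    int main(void)
    {
        uint64_t V = 116; /* weighted average vruntime */
        printf("lag = %lld (owed service, placed left of V)\n",
               (long long)toy_lag(V, 110, 1024));
        printf("lag = %lld (over-served, placed right of V)\n",
               (long long)toy_lag(V, 130, 512));
        return 0;
    }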
From e9818f093795a5d7b1ee08248d8db84ed88411dd Mon Sep 17 00:00:00 2001
From 31462b52019e938357395e7bd0f630fcd550e27c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:43 +0200
Subject: [PATCH 04/15] rbtree: Add rb_add_augmented_cached() helper
Subject: [PATCH 04/16] rbtree: Add rb_add_augmented_cached() helper
While slightly sub-optimal, updating the augmented data while going
down the tree during lookup would be faster -- alas the augment
@@ -719,10 +719,10 @@ index 7ee7ed5de..6dbc5a1bf 100644
2.42.0
From cb798272c085050f0db104befcf8092da0931210 Mon Sep 17 00:00:00 2001
From e8c55c05618756cf090470c355f2864dafe0a618 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:44 +0200
Subject: [PATCH 05/15] sched/fair: Implement an EEVDF-like scheduling policy
Subject: [PATCH 05/16] sched/fair: Implement an EEVDF-like scheduling policy
Where CFS is currently a WFQ based scheduler with only a single knob,
the weight. The addition of a second, latency oriented parameter,
@@ -785,7 +785,7 @@ index 52910bfb9..35331c35f 100644
u64 nr_migrations;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bb4df5bb..d7291206f 100644
index acb9d9ff3..427d694ff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4502,6 +4502,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -1398,10 +1398,10 @@ index 4ccb73d85..1fc81dd7f 100644
2.42.0
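
The second, latency-oriented knob the patch above adds is the request (slice): each entity gets a virtual deadline equal to its eligible vruntime plus its slice scaled by weight, and the pick chooses the eligible entity with the earliest deadline. A hedged sketch of the scaling (toy code in the spirit of calc_delta_fair(), not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD 1024ULL

    /* convert a wall-clock slice to virtual time: heavier entities get a
     * nearer virtual deadline, so they are picked more often */
    static uint64_t toy_vslice(uint64_t slice_ns, uint64_t weight)
    {
        return slice_ns * NICE_0_LOAD / weight;
    }

    int main(void)
    {
        uint64_t slice = 3000000ULL; /* 3 ms request */
        printf("nice 0  vd offset: %llu ns\n",
               (unsigned long long)toy_vslice(slice, 1024));
        printf("nice -5 vd offset: %llu ns\n",
               (unsigned long long)toy_vslice(slice, 3121)); /* 3121 ~ nice -5 */
        return 0;
    }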
From 792befe9ba4d972eeb1ba144cfa3062e48fd98ff Mon Sep 17 00:00:00 2001
From 6aa7145ce28656863846e7f67ad98e3ed89473f3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:45 +0200
Subject: [PATCH 06/15] sched/fair: Commit to lag based placement
Subject: [PATCH 06/16] sched/fair: Commit to lag based placement
Removes the FAIR_SLEEPERS code in favour of the new LAG based
placement.
@@ -1526,10 +1526,10 @@ index 60cce1e6f..2a830eccd 100644
2.42.0
From 26b3a580ff53a5b6e3b01810b7f223b672cab5e9 Mon Sep 17 00:00:00 2001
From 12c67a50f08fe4b97fda8f13302e2574e10351c7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:46 +0200
Subject: [PATCH 07/15] sched/smp: Use lag to simplify cross-runqueue placement
Subject: [PATCH 07/16] sched/smp: Use lag to simplify cross-runqueue placement
Using lag is both more correct and simpler when moving between
runqueues.
@@ -1793,10 +1793,10 @@ index 91f25d6c8..b7daccfb2 100644
2.42.0
From 35645d3d36593126531a3ee2f7402c9acfdb3e6d Mon Sep 17 00:00:00 2001
From 8e2fcd5cb320987439faec8442f7f73ccb234875 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:47 +0200
Subject: [PATCH 08/15] sched/fair: Commit to EEVDF
Subject: [PATCH 08/16] sched/fair: Commit to EEVDF
EEVDF is a better defined scheduling policy, as a result it has less
heuristics/tunables. There is no compelling reason to keep CFS around.
@@ -2569,10 +2569,10 @@ index 1fc81dd7f..83bbcd35c 100644
2.42.0
From a1bff7f7a7608a50d8b1108e68f766daa920b4a3 Mon Sep 17 00:00:00 2001
From 55aa8349238fbe34a1f8198d56210a5e773851f1 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:48 +0200
Subject: [PATCH 09/15] sched/debug: Rename sysctl_sched_min_granularity to
Subject: [PATCH 09/16] sched/debug: Rename sysctl_sched_min_granularity to
sysctl_sched_base_slice
EEVDF uses this tunable as the base request/slice -- make sure the
@@ -2589,7 +2589,7 @@ Link: https://lore.kernel.org/r/20230531124604.205287511@infradead.org
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7291206f..8116ef56d 100644
index 427d694ff..be77d999d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4502,7 +4502,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -2685,10 +2685,10 @@ index 83bbcd35c..e21f6a048 100644
2.42.0
From 5ef098d5a57aa3f3a054935b500d694c4027834a Mon Sep 17 00:00:00 2001
From d059ffad9f9729ec63ad32fc3840a1a308cbd8a7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 31 May 2023 13:58:49 +0200
Subject: [PATCH 10/15] sched/fair: Propagate enqueue flags into place_entity()
Subject: [PATCH 10/16] sched/fair: Propagate enqueue flags into place_entity()
This allows place_entity() to consider ENQUEUE_WAKEUP and
ENQUEUE_MIGRATED.
@@ -2766,10 +2766,10 @@ index e21f6a048..576d371c8 100644
2.42.0
From 30054e00408a19d0a9ba9c2682217544b09c4937 Mon Sep 17 00:00:00 2001
From 80cdbd469974a44e5150be88f5c696ec241f6087 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 16 Aug 2023 15:40:59 +0200
Subject: [PATCH 11/15] sched/eevdf: Curb wakeup-preemption
Subject: [PATCH 11/16] sched/eevdf: Curb wakeup-preemption
Mike and others noticed that EEVDF does like to over-schedule quite a
bit -- which does hurt performance of a number of benchmarks /
@@ -2858,10 +2858,10 @@ index 54334ca5c..546d212ef 100644
2.42.0
From 3e8371461b6d790eb57788495c157d3092ae4ce9 Mon Sep 17 00:00:00 2001
From 7d5bf4ed3cc74835a55db18eead11af61557a795 Mon Sep 17 00:00:00 2001
From: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Date: Thu, 24 Aug 2023 13:33:42 +0530
Subject: [PATCH 12/15] sched/eevdf/doc: Modify the documented knob to
Subject: [PATCH 12/16] sched/eevdf/doc: Modify the documented knob to
base_slice_ns as well
After committing the scheduler to EEVDF, we renamed the 'min_granularity_ns'
@@ -2896,3 +2896,299 @@ index 03db55504..f68919800 100644
--
2.42.0
From bff784de63e9a8567d91b630e8f2bf842aef894b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 15 Sep 2023 00:48:55 +0200
Subject: [PATCH 13/16] sched/eevdf: Also update slice on placement
Tasks that never consume their full slice would not update their slice value.
This means that tasks that are spawned before the sysctl scaling keep their
original (UP) slice length.
Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230915124822.847197830@noisy.programming.kicks-ass.net
---
kernel/sched/fair.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cdc95725..efbcdc69c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4918,10 +4918,12 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- u64 vslice = calc_delta_fair(se->slice, se);
- u64 vruntime = avg_vruntime(cfs_rq);
+ u64 vslice, vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
+ se->slice = sysctl_sched_base_slice;
+ vslice = calc_delta_fair(se->slice, se);
+
/*
* Due to how V is constructed as the weighted average of entities,
* adding tasks with positive lag, or removing tasks with negative lag
--
2.42.0
From 163619e41993d6e481a745466c05cc0dfb3dcda8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 26 Sep 2023 14:29:50 +0200
Subject: [PATCH 14/16] sched/eevdf: Fix avg_vruntime()
The expectation is that placing a task at avg_vruntime() makes it
eligible. Turns out there is a corner case where this is not the case.
Specifically, avg_vruntime() relies on the fact that integer division
is a flooring function (eg. it discards the remainder). By this
property the value returned is slightly left of the true average.
However! when the average is a negative (relative to min_vruntime) the
effect is flipped and it becomes a ceil, with the result that the
returned value is just right of the average and thus not eligible.
Fixes: af4cf40470c2 ("sched/fair: Add cfs_rq::avg_vruntime")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/fair.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efbcdc69c..9dbf3ce61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -664,6 +664,10 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
}
+/*
+ * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
+ */
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
@@ -677,8 +681,12 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
load += weight;
}
- if (load)
+ if (load) {
+ /* sign flips effective floor / ceil */
+ if (avg < 0)
+ avg -= (load - 1);
avg = div_s64(avg, load);
+ }
return cfs_rq->min_vruntime + avg;
}
--
2.42.0
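
The subtlety the fix above relies on: C integer division truncates toward zero, which acts as a floor for positive values but as a ceil for negative ones. Subtracting (divisor - 1) from a negative numerator before dividing restores the floor, giving avg_vruntime() the left bias it needs. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t div_floor(int64_t x, int64_t d)
    {
        if (x < 0)
            x -= d - 1; /* the same trick as the avg -= (load - 1) above */
        return x / d;
    }

    int main(void)
    {
        printf("trunc(-7/2) = %lld\n", (long long)(-7 / 2));         /* -3 */
        printf("floor(-7/2) = %lld\n", (long long)div_floor(-7, 2)); /* -4 */
        return 0;
    }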
From 217895647edb558ce9b28d0e07418f66fdaf85bc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 6 Oct 2023 21:24:45 +0200
Subject: [PATCH 15/16] sched/eevdf: Fix min_deadline heap integrity
Marek and Biju reported instances of:
"EEVDF scheduling fail, picking leftmost"
which Mike correlated with cgroup scheduling and the min_deadline heap
getting corrupted; some trace output confirms:
> And yeah, min_deadline is hosed somehow:
>
> validate_cfs_rq: --- /
> __print_se: ffff88845cf48080 w: 1024 ve: -58857638 lag: 870381 vd: -55861854 vmd: -66302085 E (11372/tr)
> __print_se: ffff88810d165800 w: 25 ve: -80323686 lag: 22336429 vd: -41496434 vmd: -66302085 E (-1//autogroup-31)
> __print_se: ffff888108379000 w: 25 ve: 0 lag: -57987257 vd: 114632828 vmd: 114632828 N (-1//autogroup-33)
> validate_cfs_rq: min_deadline: -55861854 avg_vruntime: -62278313462 / 1074 = -57987256
Turns out that reweight_entity(), which tries really hard to be fast,
does not do the normal dequeue+update+enqueue pattern but *does* scale
the deadline.
However, it then fails to propagate the updated deadline value up the
heap.
Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reported-by: Biju Das <biju.das.jz@bp.renesas.com>
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20231006192445.GE743@noisy.programming.kicks-ass.net
---
kernel/sched/fair.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9dbf3ce61..a0f1d9578 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3612,6 +3612,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
*/
deadline = div_s64(deadline * old_weight, weight);
se->deadline = se->vruntime + deadline;
+ min_deadline_cb_propagate(&se->run_node, NULL);
}
#ifdef CONFIG_SMP
--
2.42.0
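
The one-line fix above re-runs the augmented rbtree's propagate callback, which walks from the modified node toward the root, refreshing each cached subtree minimum until it finds one that is already consistent. A simplified sketch of that shape (toy code; the kernel generates the real min_deadline_cb_propagate via its RB_DECLARE_CALLBACKS machinery):

    #include <stddef.h>
    #include <stdio.h>

    struct toy_node {
        struct toy_node *parent, *left, *right;
        long deadline;      /* the node's own deadline */
        long min_deadline;  /* cached minimum over the subtree */
    };

    static long subtree_min(const struct toy_node *n)
    {
        long m = n->deadline;
        if (n->left && n->left->min_deadline < m)
            m = n->left->min_deadline;
        if (n->right && n->right->min_deadline < m)
            m = n->right->min_deadline;
        return m;
    }

    static void toy_propagate(struct toy_node *n)
    {
        for (; n; n = n->parent) {
            long m = subtree_min(n);
            if (n->min_deadline == m)
                break; /* ancestors already consistent */
            n->min_deadline = m;
        }
    }

    int main(void)
    {
        /* reweight just rescaled the child's deadline from 20 to 5,
         * leaving both cached minimums stale */
        struct toy_node root  = { NULL, NULL, NULL, 10, 10 };
        struct toy_node child = { &root, NULL, NULL, 5, 20 };
        root.left = &child;
        toy_propagate(&child);
        printf("root.min_deadline = %ld\n", root.min_deadline); /* 5 */
        return 0;
    }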
From 71f1c08f8102e48a5235bb145af59edfa597cf72 Mon Sep 17 00:00:00 2001
From: Benjamin Segall <bsegall@google.com>
Date: Fri, 29 Sep 2023 17:09:30 -0700
Subject: [PATCH 16/16] sched/eevdf: Fix pick_eevdf()
The old pick_eevdf() could fail to find the actual earliest eligible
deadline when it descended to the right looking for min_deadline, but
it turned out that that min_deadline wasn't actually eligible. In that
case we need to go back and search through any left branches we
skipped looking for the actual best _eligible_ min_deadline.
This is more expensive, but still O(log n), and at worst should only
involve descending two branches of the rbtree.
I've run this through a userspace stress test (thank you
tools/lib/rbtree.c), so hopefully this implementation doesn't miss any
corner cases.
Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/xm261qego72d.fsf_-_@google.com
---
kernel/sched/fair.c | 72 ++++++++++++++++++++++++++++++++++++---------
1 file changed, 58 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a0f1d9578..caec9b43c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -872,14 +872,16 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
*
* Which allows an EDF like search on (sub)trees.
*/
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
+ struct sched_entity *best_left = NULL;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
curr = NULL;
+ best = curr;
/*
* Once selected, run a task until it either becomes non-eligible or
@@ -900,33 +902,75 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
}
/*
- * If this entity has an earlier deadline than the previous
- * best, take this one. If it also has the earliest deadline
- * of its subtree, we're done.
+ * Now we heap search eligible trees for the best (min_)deadline
*/
- if (!best || deadline_gt(deadline, best, se)) {
+ if (!best || deadline_gt(deadline, best, se))
best = se;
- if (best->deadline == best->min_deadline)
- break;
- }
/*
- * If the earlest deadline in this subtree is in the fully
- * eligible left half of our space, go there.
+ * Every se in a left branch is eligible, keep track of the
+ * branch with the best min_deadline
*/
+ if (node->rb_left) {
+ struct sched_entity *left = __node_2_se(node->rb_left);
+
+ if (!best_left || deadline_gt(min_deadline, best_left, left))
+ best_left = left;
+
+ /*
+ * min_deadline is in the left branch. rb_left and all
+ * descendants are eligible, so immediately switch to the second
+ * loop.
+ */
+ if (left->min_deadline == se->min_deadline)
+ break;
+ }
+
+ /* min_deadline is at this node, no need to look right */
+ if (se->deadline == se->min_deadline)
+ break;
+
+ /* else min_deadline is in the right branch. */
+ node = node->rb_right;
+ }
+
+ /*
+ * We ran into an eligible node which is itself the best.
+ * (Or nr_running == 0 and both are NULL)
+ */
+ if (!best_left || (s64)(best_left->min_deadline - best->deadline) > 0)
+ return best;
+
+ /*
+ * Now best_left and all of its children are eligible, and we are just
+ * looking for deadline == min_deadline
+ */
+ node = &best_left->run_node;
+ while (node) {
+ struct sched_entity *se = __node_2_se(node);
+
+ /* min_deadline is the current node */
+ if (se->deadline == se->min_deadline)
+ return se;
+
+ /* min_deadline is in the left branch */
if (node->rb_left &&
__node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
node = node->rb_left;
continue;
}
+ /* else min_deadline is in the right branch */
node = node->rb_right;
}
+ return NULL;
+}
- if (!best || (curr && deadline_gt(deadline, best, curr)))
- best = curr;
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *se = __pick_eevdf(cfs_rq);
- if (unlikely(!best)) {
+ if (!se) {
struct sched_entity *left = __pick_first_entity(cfs_rq);
if (left) {
pr_err("EEVDF scheduling fail, picking leftmost\n");
@@ -934,7 +978,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
}
}
- return best;
+ return se;
}
#ifdef CONFIG_SCHED_DEBUG
--
2.42.0