Compare commits

...

7 Commits
v6.2 ... v6.2.2

8 changed files with 991 additions and 203 deletions

View File

@@ -3,7 +3,7 @@
# Linux distribution you are using, options are "Arch", "Ubuntu", "Debian", "Fedora", "Suse", "Gentoo", "Generic".
# It is automatically set to "Arch" when using PKGBUILD.
# If left empty, the script will prompt
-_distro=""
+_distro="Arch"
# Kernel Version - Options are "5.4", and from "5.7" to "5.19"
# you can also set a specific kernel version, e.g. "6.0-rc4" or "5.10.51",
@@ -46,7 +46,7 @@ CUSTOM_GCC_PATH=""
CUSTOM_LLVM_PATH=""
# Set to true to bypass makepkg.conf and use all available threads for compilation. False will respect your makepkg.conf options.
-_force_all_threads="true"
+_force_all_threads="false"
# Set to true to prevent ccache from being used and set CONFIG_GCC_PLUGINS=y (which needs to be disabled for ccache to work properly)
_noccache="false"
@@ -60,10 +60,10 @@ _modprobeddb="false"
_modprobeddb_db_path=~/.config/modprobed.db
# Set to "1" to call make menuconfig, "2" to call make nconfig, "3" to call make xconfig, before building the kernel. Set to false to disable and skip the prompt.
-_menunconfig=""
+_menunconfig="false"
# Set to true to generate a kernel config fragment from your changes in menuconfig/nconfig. Set to false to disable and skip the prompt.
-_diffconfig=""
+_diffconfig="false"
# Set to the file name where the generated config fragment should be written to. Only used if _diffconfig is active.
_diffconfig_name=""
@@ -97,11 +97,11 @@ _STRIP="true"
# LEAVE AN EMPTY VALUE TO BE PROMPTED ABOUT FOLLOWING OPTIONS AT BUILD TIME
# CPU scheduler - Options are "upds" (TkG's Undead PDS), "pds", "bmq", "muqss", "cacule", "tt", "bore" or "cfs" (kernel's default)
-_cpusched=""
+_cpusched="pds"
# Compiler to use - Options are "gcc" or "llvm".
# For advanced users.
-_compiler=""
+_compiler="gcc"
# Force the use of the LLVM Integrated Assembler whether using LLVM, LTO or not.
# Set to "1" to enable.
@@ -131,7 +131,7 @@ _preempt_rt_force=""
# For BMQ: 0: No yield.
# 1: Deboost and requeue task. (Default)
# 2: Set rq skip task.
-_sched_yield_type=""
+_sched_yield_type="0"
# Round Robin interval is the longest duration two tasks with the same nice level will be delayed for. When CPU time is requested by a task, it receives a time slice equal
# to the rr_interval in addition to a virtual deadline. When using yield_type 2, a low value can help offset the disadvantages of rescheduling a process that has yielded.
@@ -139,7 +139,7 @@ _sched_yield_type=""
# PDS default: 4ms"
# BMQ default: 2ms"
# Set to "1" for 2ms, "2" for 4ms, "3" for 6ms, "4" for 8ms, or "default" to keep the chosen scheduler defaults.
-_rr_interval=""
+_rr_interval="2"
# Set to "true" to disable FUNCTION_TRACER/GRAPH_TRACER, lowering overhead but limiting debugging and analyzing of kernel functions - Kernel default is "false"
_ftracedisable="false"
@@ -154,10 +154,10 @@ _misc_adds="true"
# Full tickless can give higher performances in case you use isolation of CPUs for tasks
# and it works only when using the nohz_full kernel parameter, otherwise behaves like idle.
# Just tickless idle perform better for most platforms.
-_tickless=""
+_tickless="2"
# Set to "true" to use ACS override patch - https://wiki.archlinux.org/index.php/PCI_passthrough_via_OVMF#Bypassing_the_IOMMU_groups_.28ACS_override_patch.29 - Kernel default is "false"
-_acs_override=""
+_acs_override="false"
# Set to "true" to add Bcache filesystem support. You'll have to install bcachefs-tools-git from AUR for utilities - https://bcachefs.org/ - If in doubt, set to "false"
# This can be buggy and isn't recommended on a production machine, also enabling this option will not allow you to enable MGLRU.
@@ -168,13 +168,13 @@ _bcachefs="false"
_winesync="false"
# Set to "true" to enable Binder and Ashmem, the kernel modules required to use the android emulator Anbox. ! This doesn't apply to 5.4.y !
-_anbox=""
+_anbox="false"
# A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience (ZENIFY) - Default is "true"
_zenify="true"
# compiler optimization level - 1. Optimize for performance (-O2); 2. Optimize harder (-O3); 3. Optimize for size (-Os) - Kernel default is "1"
-_compileroptlevel="1"
+_compileroptlevel="2"
# CPU compiler optimizations - Defaults to prompt at kernel config if left empty
# AMD CPUs : "k8" "k8sse3" "k10" "barcelona" "bobcat" "jaguar" "bulldozer" "piledriver" "steamroller" "excavator" "zen" "zen2" "zen3" "zen4" (zen3 opt support depends on GCC11) (zen4 opt support depends on GCC13)
@@ -188,7 +188,7 @@ _compileroptlevel="1"
# - "generic_v2" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v2
# - "generic_v3" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v3
# - "generic_v4" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v4
-_processor_opt=""
+_processor_opt="skylake"
# CacULE only - Enable Response Driven Balancer, an experimental load balancer for CacULE
_cacule_rdb="false"
@@ -201,13 +201,13 @@ _cacule_rdb_interval="19"
_tt_high_hz="false"
# MuQSS and PDS only - SMT (Hyperthreading) aware nice priority and policy support (SMT_NICE) - Kernel default is "true" - You can disable this on non-SMT/HT CPUs for lower overhead
-_smt_nice=""
+_smt_nice="true"
# Trust the CPU manufacturer to initialize Linux's CRNG (RANDOM_TRUST_CPU) - Kernel default is "false"
_random_trust_cpu="true"
# Timer frequency - "100" "250" "300" "500" "750" "1000" ("2000" is available for cacule cpusched only) - More options available in kernel config prompt when left empty depending on selected cpusched with the default option pointed with a ">" (2000 for cacule, 100 for muqss and 1000 for other cpu schedulers)
-_timer_freq=""
+_timer_freq="500"
# Default CPU governor - "performance", "ondemand", "schedutil" or leave empty for default (schedutil)
_default_cpu_gov="ondemand"
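The comment headers above describe the prompt-on-empty behaviour that these new non-empty defaults skip. As a rough sketch (not the actual linux-tkg code; the variable and prompt text are illustrative only), an empty option typically falls back to an interactive prompt like this:

# Hypothetical illustration of the "if left empty, the script will prompt" logic
if [ -z "$_cpusched" ]; then
    read -rp "Which CPU scheduler? [pds/bmq/bore/cfs] " _cpusched
fi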

View File

@@ -1,15 +1,15 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/x86 6.2.0-rc4 Kernel Configuration # Linux/x86 6.2.0 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0" CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
CONFIG_CC_IS_GCC=y CONFIG_CC_IS_GCC=y
CONFIG_GCC_VERSION=120200 CONFIG_GCC_VERSION=120201
CONFIG_CLANG_VERSION=0 CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y CONFIG_AS_IS_GNU=y
CONFIG_AS_VERSION=23900 CONFIG_AS_VERSION=24000
CONFIG_LD_IS_BFD=y CONFIG_LD_IS_BFD=y
CONFIG_LD_VERSION=23900 CONFIG_LD_VERSION=24000
CONFIG_LLD_VERSION=0 CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y CONFIG_CC_CAN_LINK_STATIC=y
@@ -182,10 +182,9 @@ CONFIG_RCU_NOCB_CPU=y
# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set # CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set
# CONFIG_RCU_NOCB_CPU_CB_BOOST is not set # CONFIG_RCU_NOCB_CPU_CB_BOOST is not set
# CONFIG_TASKS_TRACE_RCU_READ_MB is not set # CONFIG_TASKS_TRACE_RCU_READ_MB is not set
# CONFIG_RCU_LAZY is not set CONFIG_RCU_LAZY=y
# end of RCU Subsystem # end of RCU Subsystem
CONFIG_BUILD_BIN2C=y
CONFIG_IKCONFIG=y CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=m CONFIG_IKHEADERS=m
@@ -488,7 +487,7 @@ CONFIG_X86_INTEL_TSX_MODE_AUTO=y
CONFIG_X86_SGX=y CONFIG_X86_SGX=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_EFI_STUB=y CONFIG_EFI_STUB=y
CONFIG_EFI_HANDOVER_PROTOCOL=y # CONFIG_EFI_HANDOVER_PROTOCOL is not set
CONFIG_EFI_MIXED=y CONFIG_EFI_MIXED=y
# CONFIG_EFI_FAKE_MEMMAP is not set # CONFIG_EFI_FAKE_MEMMAP is not set
CONFIG_EFI_RUNTIME_MAP=y CONFIG_EFI_RUNTIME_MAP=y
@@ -638,7 +637,7 @@ CONFIG_ACPI_ADXL=y
CONFIG_ACPI_CONFIGFS=m CONFIG_ACPI_CONFIGFS=m
CONFIG_ACPI_PFRUT=m CONFIG_ACPI_PFRUT=m
CONFIG_ACPI_PCC=y CONFIG_ACPI_PCC=y
# CONFIG_ACPI_FFH is not set CONFIG_ACPI_FFH=y
CONFIG_PMIC_OPREGION=y CONFIG_PMIC_OPREGION=y
CONFIG_BYTCRC_PMIC_OPREGION=y CONFIG_BYTCRC_PMIC_OPREGION=y
CONFIG_CHTCRC_PMIC_OPREGION=y CONFIG_CHTCRC_PMIC_OPREGION=y
@@ -936,7 +935,7 @@ CONFIG_MODULE_SIG_HASH="sha512"
# CONFIG_MODULE_COMPRESS_GZIP is not set # CONFIG_MODULE_COMPRESS_GZIP is not set
# CONFIG_MODULE_COMPRESS_XZ is not set # CONFIG_MODULE_COMPRESS_XZ is not set
CONFIG_MODULE_COMPRESS_ZSTD=y CONFIG_MODULE_COMPRESS_ZSTD=y
# CONFIG_MODULE_DECOMPRESS is not set CONFIG_MODULE_DECOMPRESS=y
CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y
CONFIG_MODPROBE_PATH="/sbin/modprobe" CONFIG_MODPROBE_PATH="/sbin/modprobe"
CONFIG_MODULES_TREE_LOOKUP=y CONFIG_MODULES_TREE_LOOKUP=y
@@ -1989,7 +1988,7 @@ CONFIG_BT_HCIUART_QCA=y
CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIUART_AG6XX=y
CONFIG_BT_HCIUART_MRVL=y CONFIG_BT_HCIUART_MRVL=y
CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBCM203X=m
# CONFIG_BT_HCIBCM4377 is not set CONFIG_BT_HCIBCM4377=m
CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIDTL1=m CONFIG_BT_HCIDTL1=m
@@ -2327,7 +2326,7 @@ CONFIG_SYSFB=y
CONFIG_FW_CS_DSP=m CONFIG_FW_CS_DSP=m
CONFIG_GOOGLE_FIRMWARE=y CONFIG_GOOGLE_FIRMWARE=y
# CONFIG_GOOGLE_SMI is not set # CONFIG_GOOGLE_SMI is not set
# CONFIG_GOOGLE_CBMEM is not set CONFIG_GOOGLE_CBMEM=m
CONFIG_GOOGLE_COREBOOT_TABLE=m CONFIG_GOOGLE_COREBOOT_TABLE=m
CONFIG_GOOGLE_MEMCONSOLE=m CONFIG_GOOGLE_MEMCONSOLE=m
# CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set # CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set
@@ -2539,7 +2538,7 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y
CONFIG_ZRAM_DEF_COMP="lzo-rle" CONFIG_ZRAM_DEF_COMP="lzo-rle"
CONFIG_ZRAM_WRITEBACK=y CONFIG_ZRAM_WRITEBACK=y
# CONFIG_ZRAM_MEMORY_TRACKING is not set # CONFIG_ZRAM_MEMORY_TRACKING is not set
# CONFIG_ZRAM_MULTI_COMP is not set CONFIG_ZRAM_MULTI_COMP=y
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_DRBD=m
@@ -2548,7 +2547,9 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_CDROM_PKTCDVD is not set CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
# CONFIG_CDROM_PKTCDVD_WCACHE is not set
CONFIG_ATA_OVER_ETH=m CONFIG_ATA_OVER_ETH=m
CONFIG_XEN_BLKDEV_FRONTEND=m CONFIG_XEN_BLKDEV_FRONTEND=m
CONFIG_XEN_BLKDEV_BACKEND=m CONFIG_XEN_BLKDEV_BACKEND=m
@@ -2597,6 +2598,8 @@ CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m CONFIG_TIFM_7XX1=m
CONFIG_ICS932S401=m CONFIG_ICS932S401=m
CONFIG_ENCLOSURE_SERVICES=m CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SMPRO_ERRMON=m
CONFIG_SMPRO_MISC=m
CONFIG_HP_ILO=m CONFIG_HP_ILO=m
CONFIG_APDS9802ALS=m CONFIG_APDS9802ALS=m
CONFIG_ISL29003=m CONFIG_ISL29003=m
@@ -3321,7 +3324,7 @@ CONFIG_ENC28J60=m
# CONFIG_ENC28J60_WRITEVERIFY is not set # CONFIG_ENC28J60_WRITEVERIFY is not set
CONFIG_ENCX24J600=m CONFIG_ENCX24J600=m
CONFIG_LAN743X=m CONFIG_LAN743X=m
# CONFIG_VCAP is not set CONFIG_VCAP=y
CONFIG_NET_VENDOR_MICROSEMI=y CONFIG_NET_VENDOR_MICROSEMI=y
CONFIG_MSCC_OCELOT_SWITCH_LIB=m CONFIG_MSCC_OCELOT_SWITCH_LIB=m
CONFIG_NET_VENDOR_MICROSOFT=y CONFIG_NET_VENDOR_MICROSOFT=y
@@ -3895,7 +3898,7 @@ CONFIG_MT7921_COMMON=m
CONFIG_MT7921E=m CONFIG_MT7921E=m
CONFIG_MT7921S=m CONFIG_MT7921S=m
CONFIG_MT7921U=m CONFIG_MT7921U=m
# CONFIG_MT7996E is not set CONFIG_MT7996E=m
CONFIG_WLAN_VENDOR_MICROCHIP=y CONFIG_WLAN_VENDOR_MICROCHIP=y
CONFIG_WILC1000=m CONFIG_WILC1000=m
CONFIG_WILC1000_SDIO=m CONFIG_WILC1000_SDIO=m
@@ -3959,27 +3962,29 @@ CONFIG_RTL8XXXU_UNTESTED=y
CONFIG_RTW88=m CONFIG_RTW88=m
CONFIG_RTW88_CORE=m CONFIG_RTW88_CORE=m
CONFIG_RTW88_PCI=m CONFIG_RTW88_PCI=m
CONFIG_RTW88_USB=m
CONFIG_RTW88_8822B=m CONFIG_RTW88_8822B=m
CONFIG_RTW88_8822C=m CONFIG_RTW88_8822C=m
CONFIG_RTW88_8723D=m CONFIG_RTW88_8723D=m
CONFIG_RTW88_8821C=m CONFIG_RTW88_8821C=m
CONFIG_RTW88_8822BE=m CONFIG_RTW88_8822BE=m
# CONFIG_RTW88_8822BU is not set CONFIG_RTW88_8822BU=m
CONFIG_RTW88_8822CE=m CONFIG_RTW88_8822CE=m
# CONFIG_RTW88_8822CU is not set CONFIG_RTW88_8822CU=m
CONFIG_RTW88_8723DE=m CONFIG_RTW88_8723DE=m
# CONFIG_RTW88_8723DU is not set CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m CONFIG_RTW88_8821CE=m
# CONFIG_RTW88_8821CU is not set CONFIG_RTW88_8821CU=m
CONFIG_RTW88_DEBUG=y CONFIG_RTW88_DEBUG=y
CONFIG_RTW88_DEBUGFS=y CONFIG_RTW88_DEBUGFS=y
CONFIG_RTW89=m CONFIG_RTW89=m
CONFIG_RTW89_CORE=m CONFIG_RTW89_CORE=m
CONFIG_RTW89_PCI=m CONFIG_RTW89_PCI=m
CONFIG_RTW89_8852A=m CONFIG_RTW89_8852A=m
CONFIG_RTW89_8852B=m
CONFIG_RTW89_8852C=m CONFIG_RTW89_8852C=m
CONFIG_RTW89_8852AE=m CONFIG_RTW89_8852AE=m
# CONFIG_RTW89_8852BE is not set CONFIG_RTW89_8852BE=m
CONFIG_RTW89_8852CE=m CONFIG_RTW89_8852CE=m
CONFIG_RTW89_DEBUG=y CONFIG_RTW89_DEBUG=y
CONFIG_RTW89_DEBUGMSG=y CONFIG_RTW89_DEBUGMSG=y
@@ -4227,7 +4232,7 @@ CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
# CONFIG_TOUCHSCREEN_CYTTSP5 is not set CONFIG_TOUCHSCREEN_CYTTSP5=m
CONFIG_TOUCHSCREEN_DA9034=m CONFIG_TOUCHSCREEN_DA9034=m
CONFIG_TOUCHSCREEN_DA9052=m CONFIG_TOUCHSCREEN_DA9052=m
CONFIG_TOUCHSCREEN_DYNAPRO=m CONFIG_TOUCHSCREEN_DYNAPRO=m
@@ -4239,7 +4244,7 @@ CONFIG_TOUCHSCREEN_FUJITSU=m
CONFIG_TOUCHSCREEN_GOODIX=m CONFIG_TOUCHSCREEN_GOODIX=m
CONFIG_TOUCHSCREEN_HIDEEP=m CONFIG_TOUCHSCREEN_HIDEEP=m
CONFIG_TOUCHSCREEN_HYCON_HY46XX=m CONFIG_TOUCHSCREEN_HYCON_HY46XX=m
# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX=m
CONFIG_TOUCHSCREEN_ILI210X=m CONFIG_TOUCHSCREEN_ILI210X=m
CONFIG_TOUCHSCREEN_ILITEK=m CONFIG_TOUCHSCREEN_ILITEK=m
CONFIG_TOUCHSCREEN_S6SY761=m CONFIG_TOUCHSCREEN_S6SY761=m
@@ -4314,7 +4319,7 @@ CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
CONFIG_TOUCHSCREEN_ROHM_BU21023=m CONFIG_TOUCHSCREEN_ROHM_BU21023=m
CONFIG_TOUCHSCREEN_IQS5XX=m CONFIG_TOUCHSCREEN_IQS5XX=m
CONFIG_TOUCHSCREEN_ZINITIX=m CONFIG_TOUCHSCREEN_ZINITIX=m
# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set CONFIG_TOUCHSCREEN_HIMAX_HX83112B=m
CONFIG_INPUT_MISC=y CONFIG_INPUT_MISC=y
CONFIG_INPUT_88PM860X_ONKEY=m CONFIG_INPUT_88PM860X_ONKEY=m
CONFIG_INPUT_88PM80X_ONKEY=m CONFIG_INPUT_88PM80X_ONKEY=m
@@ -4432,7 +4437,7 @@ CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set # CONFIG_LEGACY_PTYS is not set
CONFIG_LEGACY_TIOCSTI=y # CONFIG_LEGACY_TIOCSTI is not set
CONFIG_LDISC_AUTOLOAD=y CONFIG_LDISC_AUTOLOAD=y
# #
@@ -4525,7 +4530,7 @@ CONFIG_IPMI_SSIF=m
CONFIG_IPMI_IPMB=m CONFIG_IPMI_IPMB=m
CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_WATCHDOG=m
CONFIG_IPMI_POWEROFF=m CONFIG_IPMI_POWEROFF=m
# CONFIG_SSIF_IPMI_BMC is not set CONFIG_SSIF_IPMI_BMC=m
CONFIG_IPMB_DEVICE_INTERFACE=m CONFIG_IPMB_DEVICE_INTERFACE=m
CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_HW_RANDOM_TIMERIOMEM=m
@@ -4721,7 +4726,7 @@ CONFIG_SPI_MICROCHIP_CORE=m
CONFIG_SPI_MICROCHIP_CORE_QSPI=m CONFIG_SPI_MICROCHIP_CORE_QSPI=m
# CONFIG_SPI_LANTIQ_SSC is not set # CONFIG_SPI_LANTIQ_SSC is not set
CONFIG_SPI_OC_TINY=m CONFIG_SPI_OC_TINY=m
# CONFIG_SPI_PCI1XXXX is not set CONFIG_SPI_PCI1XXXX=m
CONFIG_SPI_PXA2XX=m CONFIG_SPI_PXA2XX=m
CONFIG_SPI_PXA2XX_PCI=m CONFIG_SPI_PXA2XX_PCI=m
# CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_ROCKCHIP is not set
@@ -4938,7 +4943,7 @@ CONFIG_GPIO_VIPERBOARD=m
# Virtual GPIO drivers # Virtual GPIO drivers
# #
CONFIG_GPIO_AGGREGATOR=m CONFIG_GPIO_AGGREGATOR=m
# CONFIG_GPIO_LATCH is not set CONFIG_GPIO_LATCH=m
CONFIG_GPIO_MOCKUP=m CONFIG_GPIO_MOCKUP=m
CONFIG_GPIO_VIRTIO=m CONFIG_GPIO_VIRTIO=m
CONFIG_GPIO_SIM=m CONFIG_GPIO_SIM=m
@@ -5076,6 +5081,7 @@ CONFIG_HWMON_VID=m
# #
CONFIG_SENSORS_ABITUGURU=m CONFIG_SENSORS_ABITUGURU=m
CONFIG_SENSORS_ABITUGURU3=m CONFIG_SENSORS_ABITUGURU3=m
CONFIG_SENSORS_SMPRO=m
CONFIG_SENSORS_AD7314=m CONFIG_SENSORS_AD7314=m
CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7414=m
CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_AD7418=m
@@ -5195,8 +5201,9 @@ CONFIG_SENSORS_NCT7904=m
CONFIG_SENSORS_NPCM7XX=m CONFIG_SENSORS_NPCM7XX=m
CONFIG_SENSORS_NZXT_KRAKEN2=m CONFIG_SENSORS_NZXT_KRAKEN2=m
CONFIG_SENSORS_NZXT_SMART2=m CONFIG_SENSORS_NZXT_SMART2=m
# CONFIG_SENSORS_OCC_P8_I2C is not set CONFIG_SENSORS_OCC_P8_I2C=m
# CONFIG_SENSORS_OXP is not set CONFIG_SENSORS_OCC=m
CONFIG_SENSORS_OXP=m
CONFIG_SENSORS_PCF8591=m CONFIG_SENSORS_PCF8591=m
CONFIG_PMBUS=m CONFIG_PMBUS=m
CONFIG_SENSORS_PMBUS=m CONFIG_SENSORS_PMBUS=m
@@ -5401,7 +5408,7 @@ CONFIG_MAX63XX_WATCHDOG=m
CONFIG_RETU_WATCHDOG=m CONFIG_RETU_WATCHDOG=m
CONFIG_ACQUIRE_WDT=m CONFIG_ACQUIRE_WDT=m
CONFIG_ADVANTECH_WDT=m CONFIG_ADVANTECH_WDT=m
# CONFIG_ADVANTECH_EC_WDT is not set CONFIG_ADVANTECH_EC_WDT=m
CONFIG_ALIM1535_WDT=m CONFIG_ALIM1535_WDT=m
CONFIG_ALIM7101_WDT=m CONFIG_ALIM7101_WDT=m
CONFIG_EBC_C384_WDT=m CONFIG_EBC_C384_WDT=m
@@ -5483,7 +5490,7 @@ CONFIG_BCMA_DRIVER_GPIO=y
# #
CONFIG_MFD_CORE=y CONFIG_MFD_CORE=y
CONFIG_MFD_AS3711=y CONFIG_MFD_AS3711=y
# CONFIG_MFD_SMPRO is not set CONFIG_MFD_SMPRO=m
CONFIG_PMIC_ADP5520=y CONFIG_PMIC_ADP5520=y
CONFIG_MFD_AAT2870_CORE=y CONFIG_MFD_AAT2870_CORE=y
CONFIG_MFD_BCM590XX=m CONFIG_MFD_BCM590XX=m
@@ -5664,7 +5671,7 @@ CONFIG_REGULATOR_MT6311=m
CONFIG_REGULATOR_MT6323=m CONFIG_REGULATOR_MT6323=m
CONFIG_REGULATOR_MT6331=m CONFIG_REGULATOR_MT6331=m
CONFIG_REGULATOR_MT6332=m CONFIG_REGULATOR_MT6332=m
# CONFIG_REGULATOR_MT6357 is not set CONFIG_REGULATOR_MT6357=m
CONFIG_REGULATOR_MT6358=m CONFIG_REGULATOR_MT6358=m
CONFIG_REGULATOR_MT6359=m CONFIG_REGULATOR_MT6359=m
CONFIG_REGULATOR_MT6360=m CONFIG_REGULATOR_MT6360=m
@@ -5686,7 +5693,7 @@ CONFIG_REGULATOR_RT5120=m
CONFIG_REGULATOR_RT5190A=m CONFIG_REGULATOR_RT5190A=m
CONFIG_REGULATOR_RT5759=m CONFIG_REGULATOR_RT5759=m
CONFIG_REGULATOR_RT6160=m CONFIG_REGULATOR_RT6160=m
# CONFIG_REGULATOR_RT6190 is not set CONFIG_REGULATOR_RT6190=m
CONFIG_REGULATOR_RT6245=m CONFIG_REGULATOR_RT6245=m
CONFIG_REGULATOR_RTQ2134=m CONFIG_REGULATOR_RTQ2134=m
CONFIG_REGULATOR_RTMV20=m CONFIG_REGULATOR_RTMV20=m
@@ -6180,7 +6187,8 @@ CONFIG_VIDEO_VIMC=m
CONFIG_VIDEO_VIVID=m CONFIG_VIDEO_VIVID=m
CONFIG_VIDEO_VIVID_CEC=y CONFIG_VIDEO_VIVID_CEC=y
CONFIG_VIDEO_VIVID_MAX_DEVS=64 CONFIG_VIDEO_VIVID_MAX_DEVS=64
# CONFIG_VIDEO_VISL is not set CONFIG_VIDEO_VISL=m
# CONFIG_VISL_DEBUGFS is not set
CONFIG_DVB_TEST_DRIVERS=y CONFIG_DVB_TEST_DRIVERS=y
CONFIG_DVB_VIDTV=m CONFIG_DVB_VIDTV=m
@@ -6255,7 +6263,7 @@ CONFIG_VIDEO_NOON010PC30=m
CONFIG_VIDEO_OG01A1B=m CONFIG_VIDEO_OG01A1B=m
CONFIG_VIDEO_OV02A10=m CONFIG_VIDEO_OV02A10=m
CONFIG_VIDEO_OV08D10=m CONFIG_VIDEO_OV08D10=m
# CONFIG_VIDEO_OV08X40 is not set CONFIG_VIDEO_OV08X40=m
CONFIG_VIDEO_OV13858=m CONFIG_VIDEO_OV13858=m
CONFIG_VIDEO_OV13B10=m CONFIG_VIDEO_OV13B10=m
CONFIG_VIDEO_OV2640=m CONFIG_VIDEO_OV2640=m
@@ -6263,7 +6271,7 @@ CONFIG_VIDEO_OV2659=m
CONFIG_VIDEO_OV2680=m CONFIG_VIDEO_OV2680=m
CONFIG_VIDEO_OV2685=m CONFIG_VIDEO_OV2685=m
CONFIG_VIDEO_OV2740=m CONFIG_VIDEO_OV2740=m
# CONFIG_VIDEO_OV4689 is not set CONFIG_VIDEO_OV4689=m
CONFIG_VIDEO_OV5647=m CONFIG_VIDEO_OV5647=m
CONFIG_VIDEO_OV5648=m CONFIG_VIDEO_OV5648=m
CONFIG_VIDEO_OV5670=m CONFIG_VIDEO_OV5670=m
@@ -6573,7 +6581,6 @@ CONFIG_DRM=y
CONFIG_DRM_MIPI_DBI=m CONFIG_DRM_MIPI_DBI=m
CONFIG_DRM_MIPI_DSI=y CONFIG_DRM_MIPI_DSI=y
# CONFIG_DRM_DEBUG_MM is not set # CONFIG_DRM_DEBUG_MM is not set
CONFIG_DRM_USE_DYNAMIC_DEBUG=y
CONFIG_DRM_KMS_HELPER=y CONFIG_DRM_KMS_HELPER=y
CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_FBDEV_OVERALLOC=100 CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6851,7 +6858,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
# CONFIG_LOGO is not set # CONFIG_LOGO is not set
# end of Graphics support # end of Graphics support
# CONFIG_DRM_ACCEL is not set CONFIG_DRM_ACCEL=y
CONFIG_SOUND=m CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y CONFIG_SOUND_OSS_CORE=y
# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
@@ -7163,11 +7170,11 @@ CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219=m
CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m
CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m
CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m
# CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927 is not set CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927=m
CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m
CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m
CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825=m CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825=m
# CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE is not set CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE=m
CONFIG_SND_SOC_INTEL_AVS_MACH_RT274=m CONFIG_SND_SOC_INTEL_AVS_MACH_RT274=m
CONFIG_SND_SOC_INTEL_AVS_MACH_RT286=m CONFIG_SND_SOC_INTEL_AVS_MACH_RT286=m
CONFIG_SND_SOC_INTEL_AVS_MACH_RT298=m CONFIG_SND_SOC_INTEL_AVS_MACH_RT298=m
@@ -7520,7 +7527,7 @@ CONFIG_SND_SOC_WM8903=m
CONFIG_SND_SOC_WM8904=m CONFIG_SND_SOC_WM8904=m
CONFIG_SND_SOC_WM8940=m CONFIG_SND_SOC_WM8940=m
CONFIG_SND_SOC_WM8960=m CONFIG_SND_SOC_WM8960=m
# CONFIG_SND_SOC_WM8961 is not set CONFIG_SND_SOC_WM8961=m
CONFIG_SND_SOC_WM8962=m CONFIG_SND_SOC_WM8962=m
CONFIG_SND_SOC_WM8974=m CONFIG_SND_SOC_WM8974=m
CONFIG_SND_SOC_WM8978=m CONFIG_SND_SOC_WM8978=m
@@ -8350,7 +8357,7 @@ CONFIG_INFINIBAND_HFI1=m
# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set # CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
# CONFIG_SDMA_VERBOSITY is not set # CONFIG_SDMA_VERBOSITY is not set
CONFIG_INFINIBAND_IRDMA=m CONFIG_INFINIBAND_IRDMA=m
# CONFIG_MANA_INFINIBAND is not set CONFIG_MANA_INFINIBAND=m
CONFIG_MLX4_INFINIBAND=m CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m
CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_MTHCA=m
@@ -8626,7 +8633,7 @@ CONFIG_NITRO_ENCLAVES=m
CONFIG_ACRN_HSM=m CONFIG_ACRN_HSM=m
CONFIG_EFI_SECRET=m CONFIG_EFI_SECRET=m
CONFIG_SEV_GUEST=m CONFIG_SEV_GUEST=m
# CONFIG_TDX_GUEST_DRIVER is not set CONFIG_TDX_GUEST_DRIVER=m
CONFIG_VIRTIO_ANCHOR=y CONFIG_VIRTIO_ANCHOR=y
CONFIG_VIRTIO=y CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI_LIB=m CONFIG_VIRTIO_PCI_LIB=m
@@ -8824,7 +8831,7 @@ CONFIG_CROS_EC_DEBUGFS=m
CONFIG_CROS_EC_SENSORHUB=m CONFIG_CROS_EC_SENSORHUB=m
CONFIG_CROS_EC_SYSFS=m CONFIG_CROS_EC_SYSFS=m
CONFIG_CROS_EC_TYPEC=m CONFIG_CROS_EC_TYPEC=m
# CONFIG_CROS_HPS_I2C is not set CONFIG_CROS_HPS_I2C=m
CONFIG_CROS_USBPD_LOGGER=m CONFIG_CROS_USBPD_LOGGER=m
CONFIG_CROS_USBPD_NOTIFY=m CONFIG_CROS_USBPD_NOTIFY=m
CONFIG_CHROMEOS_PRIVACY_SCREEN=m CONFIG_CHROMEOS_PRIVACY_SCREEN=m
@@ -8901,7 +8908,9 @@ CONFIG_AMILO_RFKILL=m
CONFIG_FUJITSU_LAPTOP=m CONFIG_FUJITSU_LAPTOP=m
CONFIG_FUJITSU_TABLET=m CONFIG_FUJITSU_TABLET=m
CONFIG_GPD_POCKET_FAN=m CONFIG_GPD_POCKET_FAN=m
# CONFIG_X86_PLATFORM_DRIVERS_HP is not set CONFIG_X86_PLATFORM_DRIVERS_HP=y
CONFIG_HP_ACCEL=m
CONFIG_HP_WMI=m
CONFIG_WIRELESS_HOTKEY=m CONFIG_WIRELESS_HOTKEY=m
CONFIG_IBM_RTL=m CONFIG_IBM_RTL=m
CONFIG_IDEAPAD_LAPTOP=m CONFIG_IDEAPAD_LAPTOP=m
@@ -8916,7 +8925,7 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
CONFIG_THINKPAD_LMI=m CONFIG_THINKPAD_LMI=m
CONFIG_INTEL_ATOMISP2_PDX86=y CONFIG_INTEL_ATOMISP2_PDX86=y
CONFIG_INTEL_ATOMISP2_LED=m CONFIG_INTEL_ATOMISP2_LED=m
# CONFIG_INTEL_IFS is not set CONFIG_INTEL_IFS=m
CONFIG_INTEL_SAR_INT1092=m CONFIG_INTEL_SAR_INT1092=m
CONFIG_INTEL_SKL_INT3472=m CONFIG_INTEL_SKL_INT3472=m
CONFIG_INTEL_PMC_CORE=y CONFIG_INTEL_PMC_CORE=y
@@ -9042,7 +9051,7 @@ CONFIG_INTEL_IOMMU_SVM=y
# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_INTEL_IOMMU_FLOPPY_WA=y
CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
# CONFIG_IOMMUFD is not set CONFIG_IOMMUFD=m
CONFIG_IRQ_REMAP=y CONFIG_IRQ_REMAP=y
CONFIG_HYPERV_IOMMU=y CONFIG_HYPERV_IOMMU=y
CONFIG_VIRTIO_IOMMU=m CONFIG_VIRTIO_IOMMU=m
@@ -9219,8 +9228,9 @@ CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
CONFIG_IIO_ST_ACCEL_3AXIS=m CONFIG_IIO_ST_ACCEL_3AXIS=m
CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
# CONFIG_IIO_KX022A_SPI is not set CONFIG_IIO_KX022A=m
# CONFIG_IIO_KX022A_I2C is not set CONFIG_IIO_KX022A_SPI=m
CONFIG_IIO_KX022A_I2C=m
CONFIG_KXSD9=m CONFIG_KXSD9=m
CONFIG_KXSD9_SPI=m CONFIG_KXSD9_SPI=m
CONFIG_KXSD9_I2C=m CONFIG_KXSD9_I2C=m
@@ -9247,7 +9257,7 @@ CONFIG_STK8BA50=m
# Analog to digital converters # Analog to digital converters
# #
CONFIG_AD_SIGMA_DELTA=m CONFIG_AD_SIGMA_DELTA=m
# CONFIG_AD4130 is not set CONFIG_AD4130=m
CONFIG_AD7091R5=m CONFIG_AD7091R5=m
CONFIG_AD7124=m CONFIG_AD7124=m
CONFIG_AD7192=m CONFIG_AD7192=m
@@ -9288,7 +9298,7 @@ CONFIG_MAX1027=m
CONFIG_MAX11100=m CONFIG_MAX11100=m
CONFIG_MAX1118=m CONFIG_MAX1118=m
CONFIG_MAX11205=m CONFIG_MAX11205=m
# CONFIG_MAX11410 is not set CONFIG_MAX11410=m
CONFIG_MAX1241=m CONFIG_MAX1241=m
CONFIG_MAX1363=m CONFIG_MAX1363=m
CONFIG_MAX9611=m CONFIG_MAX9611=m
@@ -9296,7 +9306,7 @@ CONFIG_MCP320X=m
CONFIG_MCP3422=m CONFIG_MCP3422=m
CONFIG_MCP3911=m CONFIG_MCP3911=m
CONFIG_MEDIATEK_MT6360_ADC=m CONFIG_MEDIATEK_MT6360_ADC=m
# CONFIG_MEDIATEK_MT6370_ADC is not set CONFIG_MEDIATEK_MT6370_ADC=m
CONFIG_MEN_Z188_ADC=m CONFIG_MEN_Z188_ADC=m
CONFIG_MP2629_ADC=m CONFIG_MP2629_ADC=m
CONFIG_NAU7802=m CONFIG_NAU7802=m
@@ -9329,7 +9339,7 @@ CONFIG_XILINX_XADC=m
# #
# Analog to digital and digital to analog converters # Analog to digital and digital to analog converters
# #
# CONFIG_AD74115 is not set CONFIG_AD74115=m
CONFIG_AD74413R=m CONFIG_AD74413R=m
# end of Analog to digital and digital to analog converters # end of Analog to digital and digital to analog converters
@@ -9479,7 +9489,7 @@ CONFIG_AD9523=m
# #
CONFIG_ADF4350=m CONFIG_ADF4350=m
CONFIG_ADF4371=m CONFIG_ADF4371=m
# CONFIG_ADF4377 is not set CONFIG_ADF4377=m
CONFIG_ADMV1013=m CONFIG_ADMV1013=m
CONFIG_ADMV1014=m CONFIG_ADMV1014=m
CONFIG_ADMV4420=m CONFIG_ADMV4420=m
@@ -9788,7 +9798,7 @@ CONFIG_TMP007=m
CONFIG_TMP117=m CONFIG_TMP117=m
CONFIG_TSYS01=m CONFIG_TSYS01=m
CONFIG_TSYS02D=m CONFIG_TSYS02D=m
# CONFIG_MAX30208 is not set CONFIG_MAX30208=m
CONFIG_MAX31856=m CONFIG_MAX31856=m
CONFIG_MAX31865=m CONFIG_MAX31865=m
# end of Temperature sensors # end of Temperature sensors
@@ -9942,7 +9952,8 @@ CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
CONFIG_FPGA_DFL_PCI=m CONFIG_FPGA_DFL_PCI=m
CONFIG_FPGA_M10_BMC_SEC_UPDATE=m CONFIG_FPGA_M10_BMC_SEC_UPDATE=m
CONFIG_FPGA_MGR_MICROCHIP_SPI=m CONFIG_FPGA_MGR_MICROCHIP_SPI=m
# CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI is not set CONFIG_FPGA_MGR_LATTICE_SYSCONFIG=m
CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI=m
CONFIG_TEE=m CONFIG_TEE=m
CONFIG_AMDTEE=m CONFIG_AMDTEE=m
CONFIG_MULTIPLEXER=m CONFIG_MULTIPLEXER=m
@@ -10187,10 +10198,10 @@ CONFIG_SQUASHFS=m
# CONFIG_SQUASHFS_FILE_CACHE is not set # CONFIG_SQUASHFS_FILE_CACHE is not set
CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_FILE_DIRECT=y
CONFIG_SQUASHFS_DECOMP_SINGLE=y CONFIG_SQUASHFS_DECOMP_SINGLE=y
# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set CONFIG_SQUASHFS_DECOMP_MULTI=y
CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS=y
CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_ZLIB=y
CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZ4=y
@@ -10869,6 +10880,7 @@ CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m CONFIG_TEXTSEARCH_FSM=m
CONFIG_BTREE=y CONFIG_BTREE=y
CONFIG_INTERVAL_TREE=y CONFIG_INTERVAL_TREE=y
CONFIG_INTERVAL_TREE_SPAN_ITER=y
CONFIG_XARRAY_MULTI=y CONFIG_XARRAY_MULTI=y
CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_ASSOCIATIVE_ARRAY=y
CONFIG_HAS_IOMEM=y CONFIG_HAS_IOMEM=y
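For a .config change of this size, the kernel's own scripts/diffconfig tool produces the same kind of old/new summary shown above; a possible invocation, with the two file names being assumptions:

# scripts/diffconfig ships with the kernel source tree
./scripts/diffconfig config.x86_64.v6.2 config.x86_64.v6.2.2 | less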

View File

@@ -866,6 +866,11 @@ _tkg_srcprep() {
_disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF" _disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF"
_module "BLK_DEV_LOOP" _module "BLK_DEV_LOOP"
# buggy project C/PSI interaction workaround
if [ "${_cpusched}" = "pds" ] || [ "${_cpusched}" = "bmq" ]; then
_enable "PSI_DEFAULT_DISABLED"
fi
if [ -n "$_custom_commandline" ]; then if [ -n "$_custom_commandline" ]; then
_enable "CMDLINE_BOOL" _enable "CMDLINE_BOOL"
_disable "CMDLINE_OVERRIDE" _disable "CMDLINE_OVERRIDE"

View File

@@ -358,3 +358,263 @@ index c0cd1b9..af1e2fb 100644
--
https://clearlinux.org
From 676c2dc63592f52b716515573a3a825582a371e9 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven <arjan@linux.intel.com>
Date: Sat, 8 Dec 2018 18:21:32 +0000
Subject: [PATCH 1/9] x86/vdso: Use lfence instead of rep and nop
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
arch/x86/include/asm/vdso/processor.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
index 57b1a7034c64..e2c45674f989 100644
--- a/arch/x86/include/asm/vdso/processor.h
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -10,7 +10,7 @@
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
- asm volatile("rep; nop" ::: "memory");
+ asm volatile("lfence" ::: "memory");
}
static __always_inline void cpu_relax(void)
--
2.39.1
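For context, cpu_relax() is the hint that the patched rep_nop() emits, typically from a busy-wait loop like the minimal sketch below ("done" is a made-up flag variable, not code from the patch):

/* Illustrative only: with this patch the x86 hint becomes LFENCE instead of PAUSE ("rep; nop"). */
while (!READ_ONCE(done))
        cpu_relax();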
From 48dc9669f8db68adc480ffc2698ed8204440e45b Mon Sep 17 00:00:00 2001
From: Arjan van de Ven <arjan@linux.intel.com>
Date: Thu, 13 Dec 2018 01:00:49 +0000
Subject: [PATCH 2/9] sched/wait: Do accept() in LIFO order for cache
efficiency
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
include/linux/wait.h | 2 ++
kernel/sched/wait.c | 24 ++++++++++++++++++++++++
net/ipv4/inet_connection_sock.c | 2 +-
3 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a0307b516b09..edc21128f387 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -165,6 +165,7 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue_exclusive_lifo(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
@@ -1192,6 +1193,7 @@ do { \
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+void prepare_to_wait_exclusive_lifo(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 133b74730738..1647fb8662eb 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -47,6 +47,17 @@ void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);
+void add_wait_queue_exclusive_lifo(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
+{
+ unsigned long flags;
+
+ wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&wq_head->lock, flags);
+ __add_wait_queue(wq_head, wq_entry);
+ spin_unlock_irqrestore(&wq_head->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue_exclusive_lifo);
+
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
@@ -293,6 +304,19 @@ prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_ent
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
+void prepare_to_wait_exclusive_lifo(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
+{
+ unsigned long flags;
+
+ wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&wq_head->lock, flags);
+ if (list_empty(&wq_entry->entry))
+ __add_wait_queue(wq_head, wq_entry);
+ set_current_state(state);
+ spin_unlock_irqrestore(&wq_head->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait_exclusive_lifo);
+
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
wq_entry->flags = flags;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f2c43f67187d..9885bfb429a2 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -606,7 +606,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+ prepare_to_wait_exclusive_lifo(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
--
2.39.1
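The new *_lifo helpers differ from the existing exclusive variants only in where the waiter is queued; a side-by-side sketch using the standard wait-queue primitives (surrounding locking omitted):

/* FIFO (existing prepare_to_wait_exclusive): append, so the longest-waiting task wakes first. */
__add_wait_queue_entry_tail(wq_head, wq_entry);

/* LIFO (new prepare_to_wait_exclusive_lifo): insert at the head, so the most recently
 * queued, and likely cache-hot, task wakes first. */
__add_wait_queue(wq_head, wq_entry);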
From afa213811c5490906caf394b20bb4b616fc6f12a Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Thu, 25 Aug 2022 15:55:26 -0700
Subject: [PATCH 3/9] sched/fair: Simplify asym_packing logic for SMT sched
groups
When the destination CPU is an SMT sibling and idle, it can only help the
busiest group if all of its other SMT siblings are also idle. Otherwise,
there is no increase in throughput.
It does not matter whether the busiest group has SMT siblings. Simply
check if there are any tasks running on the local group before proceeding.
Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim C. Chen <tim.c.chen@intel.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Len Brown <len.brown@intel.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
kernel/sched/fair.c | 29 +++++++++--------------------
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 345cc5e9fa6e..60f9690a5626 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8921,12 +8921,10 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
struct sched_group *sg)
{
#ifdef CONFIG_SCHED_SMT
- bool local_is_smt, sg_is_smt;
+ bool local_is_smt;
int sg_busy_cpus;
local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY;
- sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY;
-
sg_busy_cpus = sgs->group_weight - sgs->idle_cpus;
if (!local_is_smt) {
@@ -8947,25 +8945,16 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
}
- /* @dst_cpu has SMT siblings. */
-
- if (sg_is_smt) {
- int local_busy_cpus = sds->local->group_weight -
- sds->local_stat.idle_cpus;
- int busy_cpus_delta = sg_busy_cpus - local_busy_cpus;
-
- if (busy_cpus_delta == 1)
- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
-
- return false;
- }
-
/*
- * @sg does not have SMT siblings. Ensure that @sds::local does not end
- * up with more than one busy SMT sibling and only pull tasks if there
- * are not busy CPUs (i.e., no CPU has running tasks).
+ * @dst_cpu has SMT siblings. When both @dst_cpu and the busiest core
+ * have one or more busy siblings, moving tasks between them results
+ * in the same throughput. Only if all the siblings of @dst_cpu are
+ * idle throughput can increase.
+ *
+ * If the difference in the number of busy CPUs is two or more, let
+ * find_busiest_group() take care of it.
*/
- if (!sds->local_stat.sum_nr_running)
+ if (sg_busy_cpus == 1 && !sds->local_stat.sum_nr_running)
return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
return false;
--
2.39.1
From a1f627fd10ced4f5eeae678bc4ba96ea7fa01d7e Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Thu, 25 Aug 2022 15:55:28 -0700
Subject: [PATCH 4/9] sched/fair: Let lower-priority CPUs do active balancing
When more than one SMT sibling of a physical core is busy, an idle CPU
of lower priority can help.
Indicate that the low priority CPU can do active balancing from the high-
priority CPU only if they belong to separate cores.
Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim C. Chen <tim.c.chen@intel.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Len Brown <len.brown@intel.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
kernel/sched/fair.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 60f9690a5626..67b0eacad0e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10176,9 +10176,14 @@ asym_active_balance(struct lb_env *env)
* ASYM_PACKING needs to force migrate tasks from busy but
* lower priority CPUs in order to pack all tasks in the
* highest priority CPUs.
+ *
+ * If the busy CPU has higher priority but is an SMT sibling
+ * in which other SMT siblings are also busy, a lower-priority
+ * CPU in a separate core can help.
*/
return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
- sched_asym_prefer(env->dst_cpu, env->src_cpu);
+ (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
+ !(env->sd->flags & SD_SHARE_CPUCAPACITY));
}
static inline bool
--
2.39.1

View File

@@ -820,3 +820,31 @@ index a0b0397e29ee4c..87a983a356530c 100644
spin_unlock(&zone->lock);
return allocated;
}
From 6329525a0fa10cd13f39b76948b1296150f75c95 Mon Sep 17 00:00:00 2001
From: Alexandre Frade <kernel@xanmod.org>
Date: Mon, 29 Aug 2022 16:47:26 +0000
Subject: [PATCH 14/16] XANMOD: Makefile: Disable GCC vectorization on trees
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
Makefile | 3 +++
1 file changed, 3 insertions(+)
diff --git a/Makefile b/Makefile
index 3f6628780eb2..35a5ae1ede42 100644
--- a/Makefile
+++ b/Makefile
@@ -1069,6 +1069,9 @@ endif
KBUILD_CFLAGS-$(call gcc-min-version, 90100) += -Wno-alloc-size-larger-than
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+# disable GCC vectorization on trees
+KBUILD_CFLAGS += $(call cc-option, -fno-tree-vectorize)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
--
2.39.1
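A quick way to confirm the flag actually lands in KBUILD_CFLAGS after this change, assuming an already configured tree (the object file picked here is arbitrary):

make V=1 kernel/fork.o 2>/dev/null | grep -o -- '-fno-tree-vectorize' | sort -u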

View File

@@ -20,3 +20,265 @@ index 6b3b59cc51d6..2a0072192c3d 100644
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
From 5d5b708e3731e135ea7ae168571ad78d883e63e8 Mon Sep 17 00:00:00 2001
From: Alexandre Frade <kernel@xanmod.org>
Date: Wed, 1 Feb 2023 10:17:47 +0000
Subject: [PATCH 02/16] XANMOD: fair: Remove all energy efficiency functions
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
kernel/sched/fair.c | 224 +-------------------------------------------
1 file changed, 3 insertions(+), 221 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f8736991427..345cc5e9fa6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Remove energy efficiency functions by Alexandre Frade
+ * (C) 2021 Alexandre Frade <kernel@xanmod.org>
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
@@ -7136,219 +7139,6 @@ eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
return min(max_util, eenv->cpu_cap);
}
-/*
- * compute_energy(): Use the Energy Model to estimate the energy that @pd would
- * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
- * contribution is ignored.
- */
-static inline unsigned long
-compute_energy(struct energy_env *eenv, struct perf_domain *pd,
- struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
-{
- unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
- unsigned long busy_time = eenv->pd_busy_time;
-
- if (dst_cpu >= 0)
- busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
-
- return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
-}
-
-/*
- * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
- * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
- * spare capacity in each performance domain and uses it as a potential
- * candidate to execute the task. Then, it uses the Energy Model to figure
- * out which of the CPU candidates is the most energy-efficient.
- *
- * The rationale for this heuristic is as follows. In a performance domain,
- * all the most energy efficient CPU candidates (according to the Energy
- * Model) are those for which we'll request a low frequency. When there are
- * several CPUs for which the frequency request will be the same, we don't
- * have enough data to break the tie between them, because the Energy Model
- * only includes active power costs. With this model, if we assume that
- * frequency requests follow utilization (e.g. using schedutil), the CPU with
- * the maximum spare capacity in a performance domain is guaranteed to be among
- * the best candidates of the performance domain.
- *
- * In practice, it could be preferable from an energy standpoint to pack
- * small tasks on a CPU in order to let other CPUs go in deeper idle states,
- * but that could also hurt our chances to go cluster idle, and we have no
- * ways to tell with the current Energy Model if this is actually a good
- * idea or not. So, find_energy_efficient_cpu() basically favors
- * cluster-packing, and spreading inside a cluster. That should at least be
- * a good thing for latency, and this is consistent with the idea that most
- * of the energy savings of EAS come from the asymmetry of the system, and
- * not so much from breaking the tie between identical CPUs. That's also the
- * reason why EAS is enabled in the topology code only for systems where
- * SD_ASYM_CPUCAPACITY is set.
- *
- * NOTE: Forkees are not accepted in the energy-aware wake-up path because
- * they don't have any useful utilization data yet and it's not possible to
- * forecast their impact on energy consumption. Consequently, they will be
- * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
- * to be energy-inefficient in some use-cases. The alternative would be to
- * bias new tasks towards specific types of CPUs first, or to try to infer
- * their util_avg from the parent task, but those heuristics could hurt
- * other use-cases too. So, until someone finds a better way to solve this,
- * let's keep things simple by re-using the existing slow path.
- */
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-{
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
- unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
- unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
- struct root_domain *rd = this_rq()->rd;
- int cpu, best_energy_cpu, target = -1;
- struct sched_domain *sd;
- struct perf_domain *pd;
- struct energy_env eenv;
-
- rcu_read_lock();
- pd = rcu_dereference(rd->pd);
- if (!pd || READ_ONCE(rd->overutilized))
- goto unlock;
-
- /*
- * Energy-aware wake-up happens on the lowest sched_domain starting
- * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
- */
- sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
- while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- sd = sd->parent;
- if (!sd)
- goto unlock;
-
- target = prev_cpu;
-
- sync_entity_load_avg(&p->se);
- if (!uclamp_task_util(p, p_util_min, p_util_max))
- goto unlock;
-
- eenv_task_busy_time(&eenv, p, prev_cpu);
-
- for (; pd; pd = pd->next) {
- unsigned long util_min = p_util_min, util_max = p_util_max;
- unsigned long cpu_cap, cpu_thermal_cap, util;
- unsigned long cur_delta, max_spare_cap = 0;
- unsigned long rq_util_min, rq_util_max;
- unsigned long prev_spare_cap = 0;
- int max_spare_cap_cpu = -1;
- unsigned long base_energy;
-
- cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
-
- if (cpumask_empty(cpus))
- continue;
-
- /* Account thermal pressure for the energy estimation */
- cpu = cpumask_first(cpus);
- cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
- cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
-
- eenv.cpu_cap = cpu_thermal_cap;
- eenv.pd_cap = 0;
-
- for_each_cpu(cpu, cpus) {
- struct rq *rq = cpu_rq(cpu);
-
- eenv.pd_cap += cpu_thermal_cap;
-
- if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
- continue;
-
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
- continue;
-
- util = cpu_util_next(cpu, p, cpu);
- cpu_cap = capacity_of(cpu);
-
- /*
- * Skip CPUs that cannot satisfy the capacity request.
- * IOW, placing the task there would make the CPU
- * overutilized. Take uclamp into account to see how
- * much capacity we can get out of the CPU; this is
- * aligned with sched_cpu_util().
- */
- if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
- /*
- * Open code uclamp_rq_util_with() except for
- * the clamp() part. Ie: apply max aggregation
- * only. util_fits_cpu() logic requires to
- * operate on non clamped util but must use the
- * max-aggregated uclamp_{min, max}.
- */
- rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
- rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
-
- util_min = max(rq_util_min, p_util_min);
- util_max = max(rq_util_max, p_util_max);
- }
- if (!util_fits_cpu(util, util_min, util_max, cpu))
- continue;
-
- lsub_positive(&cpu_cap, util);
-
- if (cpu == prev_cpu) {
- /* Always use prev_cpu as a candidate. */
- prev_spare_cap = cpu_cap;
- } else if (cpu_cap > max_spare_cap) {
- /*
- * Find the CPU with the maximum spare capacity
- * among the remaining CPUs in the performance
- * domain.
- */
- max_spare_cap = cpu_cap;
- max_spare_cap_cpu = cpu;
- }
- }
-
- if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
- continue;
-
- eenv_pd_busy_time(&eenv, cpus, p);
- /* Compute the 'base' energy of the pd, without @p */
- base_energy = compute_energy(&eenv, pd, cpus, p, -1);
-
- /* Evaluate the energy impact of using prev_cpu. */
- if (prev_spare_cap > 0) {
- prev_delta = compute_energy(&eenv, pd, cpus, p,
- prev_cpu);
- /* CPU utilization has changed */
- if (prev_delta < base_energy)
- goto unlock;
- prev_delta -= base_energy;
- best_delta = min(best_delta, prev_delta);
- }
-
- /* Evaluate the energy impact of using max_spare_cap_cpu. */
- if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
- cur_delta = compute_energy(&eenv, pd, cpus, p,
- max_spare_cap_cpu);
- /* CPU utilization has changed */
- if (cur_delta < base_energy)
- goto unlock;
- cur_delta -= base_energy;
- if (cur_delta < best_delta) {
- best_delta = cur_delta;
- best_energy_cpu = max_spare_cap_cpu;
- }
- }
- }
- rcu_read_unlock();
-
- if (best_delta < prev_delta)
- target = best_energy_cpu;
-
- return target;
-
-unlock:
- rcu_read_unlock();
-
- return target;
-}
-
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
@@ -7376,14 +7166,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
lockdep_assert_held(&p->pi_lock);
if (wake_flags & WF_TTWU) {
record_wakee(p);
-
- if (sched_energy_enabled()) {
- new_cpu = find_energy_efficient_cpu(p, prev_cpu);
- if (new_cpu >= 0)
- return new_cpu;
- new_cpu = prev_cpu;
- }
-
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
--
2.39.1

View File

@@ -88,3 +88,31 @@ index 6b423eebfd5d..61e3271675d6 100644
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
From cba31b19f8c38696b13ba48e0e8b6dbe747d6bae Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Mon, 29 Jan 2018 17:31:25 +0000
Subject: [PATCH 10/16] XANMOD: mm/vmscan: vm_swappiness = 30 decreases the
amount of swapping
Signed-off-by: Alexandre Frade <admfrade@gmail.com>
Signed-off-by: Alexandre Frade <kernel@xanmod.org>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5b7b8d4f5297..549684b29418 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -190,7 +190,7 @@ struct scan_control {
/*
* From 0 .. 200. Higher means more swappy.
*/
-int vm_swappiness = 60;
+int vm_swappiness = 30;
static void set_task_reclaim_state(struct task_struct *task,
struct reclaim_state *rs)
--
2.39.1
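The same default can be reproduced on an unpatched kernel at runtime through sysctl, for example (the drop-in file name is an assumption):

echo "vm.swappiness = 30" | sudo tee /etc/sysctl.d/99-swappiness.conf
sudo sysctl --system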

View File

@@ -1,8 +1,62 @@
From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Wed, 22 Feb 2023 19:24:36 +0100
Subject: [PATCH] prjc
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
.../admin-guide/kernel-parameters.txt | 6 +
Documentation/admin-guide/sysctl/kernel.rst | 10 +
Documentation/scheduler/sched-BMQ.txt | 110 +
fs/proc/base.c | 2 +-
include/asm-generic/resource.h | 2 +-
include/linux/sched.h | 33 +-
include/linux/sched/deadline.h | 20 +
include/linux/sched/prio.h | 26 +
include/linux/sched/rt.h | 2 +
include/linux/sched/topology.h | 3 +-
init/Kconfig | 34 +
init/init_task.c | 18 +
kernel/Kconfig.preempt | 2 +-
kernel/cgroup/cpuset.c | 4 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 4 +-
kernel/locking/rtmutex.c | 16 +-
kernel/sched/Makefile | 5 +
kernel/sched/alt_core.c | 8111 +++++++++++++++++
kernel/sched/alt_debug.c | 31 +
kernel/sched/alt_sched.h | 671 ++
kernel/sched/bmq.h | 110 +
kernel/sched/build_policy.c | 8 +-
kernel/sched/build_utility.c | 2 +
kernel/sched/cpufreq_schedutil.c | 10 +
kernel/sched/cputime.c | 10 +-
kernel/sched/debug.c | 10 +
kernel/sched/idle.c | 2 +
kernel/sched/pds.h | 127 +
kernel/sched/pelt.c | 4 +-
kernel/sched/pelt.h | 8 +-
kernel/sched/sched.h | 9 +
kernel/sched/stats.c | 4 +
kernel/sched/stats.h | 2 +
kernel/sched/topology.c | 17 +
kernel/sysctl.c | 15 +
kernel/time/hrtimer.c | 2 +
kernel/time/posix-cpu-timers.c | 10 +-
kernel/trace/trace_selftest.c | 5 +
39 files changed, 9445 insertions(+), 22 deletions(-)
create mode 100644 Documentation/scheduler/sched-BMQ.txt
create mode 100644 kernel/sched/alt_core.c
create mode 100644 kernel/sched/alt_debug.c
create mode 100644 kernel/sched/alt_sched.h
create mode 100644 kernel/sched/bmq.h
create mode 100644 kernel/sched/pds.h
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 42af9ca0127e..31747ec54f9d 100644
+index 6cfa6e3996cf..1b6a407213da 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5406,6 +5406,12 @@
+@@ -5437,6 +5437,12 @@
sa1100ir [NET]
See drivers/net/irda/sa1100_ir.c.
@@ -16,10 +70,10 @@ index 42af9ca0127e..31747ec54f9d 100644
schedstats= [KNL,X86] Enable or disable scheduled statistics.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 98d1b198b2b4..d7c78a107f93 100644
+index 46e3d62c0eea..fb4568c919d0 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1552,3 +1552,13 @@ is 10 seconds.
+@@ -1597,3 +1597,13 @@ is 10 seconds.
The softlockup threshold is (``2 * watchdog_thresh``). Setting this
tunable to zero will disable lockup detection altogether.
@@ -176,7 +230,7 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffb6eb55cd13..2e730a59caa2 100644
+index 853d08f7562b..ad7e050d7455 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,8 +762,14 @@ struct task_struct {
@@ -232,7 +286,7 @@ index ffb6eb55cd13..2e730a59caa2 100644
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
-@@ -1545,6 +1567,15 @@ struct task_struct {
+@@ -1539,6 +1561,15 @@ struct task_struct {
*/
};
@@ -352,10 +406,10 @@ index 816df6cc444e..c8da08e18c91 100644
#else
static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig
-index 94125d3b6893..c87ba766d354 100644
+index 44e90b28a30f..af24591984ab 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -819,6 +819,7 @@ menu "Scheduler features"
+@@ -821,6 +821,7 @@ menu "Scheduler features"
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -363,7 +417,7 @@ index 94125d3b6893..c87ba766d354 100644
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value.
@@ -399,7 +453,7 @@ index 94125d3b6893..c87ba766d354 100644
endmenu
#
-@@ -918,6 +948,7 @@ config NUMA_BALANCING
+@@ -924,6 +954,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION && !PREEMPT_RT depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -407,7 +461,7 @@ index 94125d3b6893..c87ba766d354 100644
help help
This option adds support for automatic NUMA aware memory/task placement. This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when The mechanism is quite primitive and is based on migrating memory when
@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED @@ -1021,6 +1052,7 @@ config FAIR_GROUP_SCHED
depends on CGROUP_SCHED depends on CGROUP_SCHED
default CGROUP_SCHED default CGROUP_SCHED
@@ -415,7 +469,7 @@ index 94125d3b6893..c87ba766d354 100644
config CFS_BANDWIDTH config CFS_BANDWIDTH
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
depends on FAIR_GROUP_SCHED depends on FAIR_GROUP_SCHED
@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED @@ -1043,6 +1075,7 @@ config RT_GROUP_SCHED
realtime bandwidth for them. realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information. See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -423,7 +477,7 @@ index 94125d3b6893..c87ba766d354 100644
endif #CGROUP_SCHED endif #CGROUP_SCHED
config UCLAMP_TASK_GROUP config UCLAMP_TASK_GROUP
@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE @@ -1287,6 +1320,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP config SCHED_AUTOGROUP
bool "Automatic process group scheduling" bool "Automatic process group scheduling"
@@ -491,10 +545,10 @@ index c2f1fd95a821..41654679b1b2 100644
This option permits Core Scheduling, a means of coordinated task This option permits Core Scheduling, a means of coordinated task
selection across SMT siblings. When enabled -- see selection across SMT siblings. When enabled -- see
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index b474289c15b8..a23224b45b03 100644 index ca826bd1eba3..60e194f1d6d8 100644
--- a/kernel/cgroup/cpuset.c --- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c
@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) @@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret; return ret;
} }
@@ -503,7 +557,7 @@ index b474289c15b8..a23224b45b03 100644
/* /*
* Helper routine for generate_sched_domains(). * Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks? * Do cpusets a, b have overlapping effective cpus_allowed masks?
@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void) @@ -1187,7 +1187,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */ /* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr); partition_and_rebuild_sched_domains(ndoms, doms, attr);
} }
@@ -526,10 +580,10 @@ index e39cb696cfbd..463423572e09 100644
d->cpu_count += t1; d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c diff --git a/kernel/exit.c b/kernel/exit.c
index 35e0a31a0315..64e368441cf4 100644 index 15dc2ec80c46..1e583e0f89a7 100644
--- a/kernel/exit.c --- a/kernel/exit.c
+++ b/kernel/exit.c +++ b/kernel/exit.c
@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk) @@ -172,7 +172,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk); sig->curr_target = next_thread(tsk);
} }
@@ -538,7 +592,7 @@ index 35e0a31a0315..64e368441cf4 100644
sizeof(unsigned long long)); sizeof(unsigned long long));
/* /*
@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk) @@ -193,7 +193,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk); sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk); sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac); task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -548,10 +602,10 @@ index 35e0a31a0315..64e368441cf4 100644
__unhash_process(tsk, group_dead); __unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock); write_sequnlock(&sig->stats_lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7779ee8abc2a..5b9893cdfb1b 100644 index 728f434de2bb..0e1082a4e878 100644
--- a/kernel/locking/rtmutex.c --- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c
@@ -300,21 +300,25 @@ static __always_inline void @@ -337,21 +337,25 @@ static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{ {
waiter->prio = __waiter_prio(task); waiter->prio = __waiter_prio(task);
@@ -579,7 +633,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
/* /*
* If both waiters have dl_prio(), we check the deadlines of the * If both waiters have dl_prio(), we check the deadlines of the
* associated tasks. * associated tasks.
@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, @@ -360,16 +364,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
*/ */
if (dl_prio(left->prio)) if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline); return dl_time_before(left->deadline, right->deadline);
@@ -602,7 +656,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
/* /*
* If both waiters have dl_prio(), we check the deadlines of the * If both waiters have dl_prio(), we check the deadlines of the
* associated tasks. * associated tasks.
@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, @@ -378,8 +388,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
*/ */
if (dl_prio(left->prio)) if (dl_prio(left->prio))
return left->deadline == right->deadline; return left->deadline == right->deadline;
@@ -632,10 +686,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644 new file mode 100644
index 000000000000..acb8657e811d index 000000000000..f5e9c01f9382
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_core.c +++ b/kernel/sched/alt_core.c
@@ -0,0 +1,7978 @@ @@ -0,0 +1,8111 @@
+/* +/*
+ * kernel/sched/alt_core.c + * kernel/sched/alt_core.c
+ * + *
@@ -705,7 +759,7 @@ index 000000000000..acb8657e811d
+#define sched_feat(x) (0) +#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */ +#endif /* CONFIG_SCHED_DEBUG */
+ +
+#define ALT_SCHED_VERSION "v6.1-r3" +#define ALT_SCHED_VERSION "v6.2-r0"
+ +
+/* rt_prio(prio) defined in include/linux/sched/rt.h */ +/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio) +#define rt_task(p) rt_prio((p)->prio)
@@ -726,6 +780,12 @@ index 000000000000..acb8657e811d
+#include "pds.h" +#include "pds.h"
+#endif +#endif
+ +
+struct affinity_context {
+ const struct cpumask *new_mask;
+ struct cpumask *user_mask;
+ unsigned int flags;
+};
+
+static int __init sched_timeslice(char *str) +static int __init sched_timeslice(char *str)
+{ +{
+ int timeslice_ms; + int timeslice_ms;
@@ -788,6 +848,14 @@ index 000000000000..acb8657e811d
+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp; +static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0]; +static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
+ +
+/* task function */
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+ if (!p->user_cpus_ptr)
+ return cpu_possible_mask; /* &init_task.cpus_mask */
+ return p->user_cpus_ptr;
+}
+
+/* sched_queue related functions */ +/* sched_queue related functions */
+static inline void sched_queue_init(struct sched_queue *q) +static inline void sched_queue_init(struct sched_queue *q)
+{ +{
@@ -1400,7 +1468,7 @@ index 000000000000..acb8657e811d
+ +
+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \ +#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
+ sched_info_enqueue(rq, p); \ + sched_info_enqueue(rq, p); \
+ psi_enqueue(p, flags); \ + psi_enqueue(p, flags & ENQUEUE_WAKEUP); \
+ \ + \
+ p->sq_idx = task_sched_prio_idx(p, rq); \ + p->sq_idx = task_sched_prio_idx(p, rq); \
+ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \ + list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
@@ -2268,35 +2336,101 @@ index 000000000000..acb8657e811d
+} +}
+ +
+static inline void +static inline void
+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) +set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
+{ +{
+ cpumask_copy(&p->cpus_mask, new_mask); + cpumask_copy(&p->cpus_mask, ctx->new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask); + p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+
+ /*
+ * Swap in a new user_cpus_ptr if SCA_USER flag set
+ */
+ if (ctx->flags & SCA_USER)
+ swap(p->user_cpus_ptr, ctx->user_mask);
+} +}
+ +
+static void +static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
+{ +{
+ lockdep_assert_held(&p->pi_lock); + lockdep_assert_held(&p->pi_lock);
+ set_cpus_allowed_common(p, new_mask); + set_cpus_allowed_common(p, ctx);
+} +}
+ +
+/*
+ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
+ * affinity (if any) should be destroyed too.
+ */
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ __do_set_cpus_allowed(p, new_mask); + struct affinity_context ac = {
+ .new_mask = new_mask,
+ .user_mask = NULL,
+ .flags = SCA_USER, /* clear the user requested mask */
+ };
+ union cpumask_rcuhead {
+ cpumask_t cpumask;
+ struct rcu_head rcu;
+ };
+
+ __do_set_cpus_allowed(p, &ac);
+
+ /*
+ * Because this is called with p->pi_lock held, it is not possible
+ * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
+ * kfree_rcu().
+ */
+ kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
+}
+
+static cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ /*
+ * See do_set_cpus_allowed() above for the rcu_head usage.
+ */
+ int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
+
+ return kmalloc_node(size, GFP_KERNEL, node);
+} +}
+ +
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, +int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+ int node) + int node)
+{ +{
+ if (!src->user_cpus_ptr) + cpumask_t *user_mask;
+ unsigned long flags;
+
+ /*
+ * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
+ * may differ by now due to racing.
+ */
+ dst->user_cpus_ptr = NULL;
+
+ /*
+ * This check is racy and losing the race is a valid situation.
+ * It is not worth the extra overhead of taking the pi_lock on
+ * every fork/clone.
+ */
+ if (data_race(!src->user_cpus_ptr))
+ return 0; + return 0;
+ +
+ dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node); + user_mask = alloc_user_cpus_ptr(node);
+ if (!dst->user_cpus_ptr) + if (!user_mask)
+ return -ENOMEM; + return -ENOMEM;
+ +
+ cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); + /*
+ * Use pi_lock to protect content of user_cpus_ptr
+ *
+ * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
+ * do_set_cpus_allowed().
+ */
+ raw_spin_lock_irqsave(&src->pi_lock, flags);
+ if (src->user_cpus_ptr) {
+ swap(dst->user_cpus_ptr, user_mask);
+ cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+ }
+ raw_spin_unlock_irqrestore(&src->pi_lock, flags);
+
+ if (unlikely(user_mask))
+ kfree(user_mask);
+
+ return 0; + return 0;
+} +}
+ +
@@ -2641,6 +2775,8 @@ index 000000000000..acb8657e811d
+ +
+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu, +static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
+ raw_spinlock_t *lock, unsigned long irq_flags) + raw_spinlock_t *lock, unsigned long irq_flags)
+ __releases(rq->lock)
+ __releases(p->pi_lock)
+{ +{
+ /* Can the task run on the task's current CPU? If so, we're done */ + /* Can the task run on the task's current CPU? If so, we're done */
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { + if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -2678,8 +2814,7 @@ index 000000000000..acb8657e811d
+} +}
+ +
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p, +static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+ const struct cpumask *new_mask, + struct affinity_context *ctx,
+ u32 flags,
+ struct rq *rq, + struct rq *rq,
+ raw_spinlock_t *lock, + raw_spinlock_t *lock,
+ unsigned long irq_flags) + unsigned long irq_flags)
@@ -2687,7 +2822,6 @@ index 000000000000..acb8657e811d
+ const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); + const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
+ const struct cpumask *cpu_valid_mask = cpu_active_mask; + const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ bool kthread = p->flags & PF_KTHREAD; + bool kthread = p->flags & PF_KTHREAD;
+ struct cpumask *user_mask = NULL;
+ int dest_cpu; + int dest_cpu;
+ int ret = 0; + int ret = 0;
+ +
@@ -2705,7 +2839,7 @@ index 000000000000..acb8657e811d
+ cpu_valid_mask = cpu_online_mask; + cpu_valid_mask = cpu_online_mask;
+ } + }
+ +
+ if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) { + if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
@@ -2714,30 +2848,23 @@ index 000000000000..acb8657e811d
+ * Must re-check here, to close a race against __kthread_bind(), + * Must re-check here, to close a race against __kthread_bind(),
+ * sched_setaffinity() is not guaranteed to observe the flag. + * sched_setaffinity() is not guaranteed to observe the flag.
+ */ + */
+ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { + if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
+ +
+ if (cpumask_equal(&p->cpus_mask, new_mask)) + if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
+ goto out; + goto out;
+ +
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); + dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
+ if (dest_cpu >= nr_cpu_ids) { + if (dest_cpu >= nr_cpu_ids) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
+ +
+ __do_set_cpus_allowed(p, new_mask); + __do_set_cpus_allowed(p, ctx);
+ +
+ if (flags & SCA_USER) + return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
+ user_mask = clear_user_cpus_ptr(p);
+
+ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
+
+ kfree(user_mask);
+
+ return ret;
+ +
+out: +out:
+ __task_access_unlock(p, lock); + __task_access_unlock(p, lock);
@@ -2748,7 +2875,6 @@ index 000000000000..acb8657e811d
+ +
+/* +/*
+ * Change a given task's CPU affinity. Migrate the thread to a + * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask. + * is removed from the allowed bitmask.
+ * + *
+ * NOTE: the caller must have a valid reference to the task, the + * NOTE: the caller must have a valid reference to the task, the
@@ -2756,7 +2882,7 @@ index 000000000000..acb8657e811d
+ * call is not atomic; no spinlocks may be held. + * call is not atomic; no spinlocks may be held.
+ */ + */
+static int __set_cpus_allowed_ptr(struct task_struct *p, +static int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, u32 flags) + struct affinity_context *ctx)
+{ +{
+ unsigned long irq_flags; + unsigned long irq_flags;
+ struct rq *rq; + struct rq *rq;
@@ -2764,20 +2890,36 @@ index 000000000000..acb8657e811d
+ +
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock); + rq = __task_access_lock(p, &lock);
+ /*
+ * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
+ * flags are set.
+ */
+ if (p->user_cpus_ptr &&
+ !(ctx->flags & SCA_USER) &&
+ cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
+ ctx->new_mask = rq->scratch_mask;
+ +
+ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags); +
+ return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
+} +}
+ +
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ return __set_cpus_allowed_ptr(p, new_mask, 0); + struct affinity_context ac = {
+ .new_mask = new_mask,
+ .flags = 0,
+ };
+
+ return __set_cpus_allowed_ptr(p, &ac);
+} +}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+ +
+/* +/*
+ * Change a given task's CPU affinity to the intersection of its current + * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask + * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
+ * and pointing @p->user_cpus_ptr to a copy of the old mask. + * If user_cpus_ptr is defined, use it as the basis for restricting CPU
+ * affinity or use cpu_online_mask instead.
+ *
+ * If the resulting mask is empty, leave the affinity unchanged and return + * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL. + * -EINVAL.
+ */ + */
@@ -2785,48 +2927,34 @@ index 000000000000..acb8657e811d
+ struct cpumask *new_mask, + struct cpumask *new_mask,
+ const struct cpumask *subset_mask) + const struct cpumask *subset_mask)
+{ +{
+ struct cpumask *user_mask = NULL; + struct affinity_context ac = {
+ .new_mask = new_mask,
+ .flags = 0,
+ };
+ unsigned long irq_flags; + unsigned long irq_flags;
+ raw_spinlock_t *lock; + raw_spinlock_t *lock;
+ struct rq *rq; + struct rq *rq;
+ int err; + int err;
+ +
+ if (!p->user_cpus_ptr) {
+ user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!user_mask)
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock); + rq = __task_access_lock(p, &lock);
+ +
+ if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) { + if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
+ err = -EINVAL; + err = -EINVAL;
+ goto err_unlock; + goto err_unlock;
+ } + }
+ +
+ /* + return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
+ * We're about to butcher the task affinity, so keep track of what
+ * the user asked for in case we're able to restore it later on.
+ */
+ if (user_mask) {
+ cpumask_copy(user_mask, p->cpus_ptr);
+ p->user_cpus_ptr = user_mask;
+ }
+
+ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
+ +
+err_unlock: +err_unlock:
+ __task_access_unlock(p, lock); + __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); + raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ kfree(user_mask);
+ return err; + return err;
+} +}
+ +
+/* +/*
+ * Restrict the CPU affinity of task @p so that it is a subset of + * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the + * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk + * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask. + * up the cpuset hierarchy until we find a suitable mask.
+ */ + */
@@ -2870,34 +2998,29 @@ index 000000000000..acb8657e811d
+} +}
+ +
+static int +static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask); +__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
+ +
+/* +/*
+ * Restore the affinity of a task @p which was previously restricted by a + * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free) + * call to force_compatible_cpus_allowed_ptr().
+ * @p->user_cpus_ptr.
+ * + *
+ * It is the caller's responsibility to serialise this with any calls to + * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p). + * force_compatible_cpus_allowed_ptr(@p).
+ */ + */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p) +void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{ +{
+ struct cpumask *user_mask = p->user_cpus_ptr; + struct affinity_context ac = {
+ unsigned long flags; + .new_mask = task_user_cpus(p),
+ .flags = 0,
+ };
+ int ret;
+ +
+ /* + /*
+ * Try to restore the old affinity mask. If this fails, then + * Try to restore the old affinity mask with __sched_setaffinity().
+ * we free the mask explicitly to avoid it being inherited across + * Cpuset masking will be done there too.
+ * a subsequent fork().
+ */ + */
+ if (!user_mask || !__sched_setaffinity(p, user_mask)) + ret = __sched_setaffinity(p, &ac);
+ return; + WARN_ON_ONCE(ret);
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ user_mask = clear_user_cpus_ptr(p);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ kfree(user_mask);
+} +}
+ +
+#else /* CONFIG_SMP */ +#else /* CONFIG_SMP */
@@ -2909,9 +3032,9 @@ index 000000000000..acb8657e811d
+ +
+static inline int +static inline int
+__set_cpus_allowed_ptr(struct task_struct *p, +__set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, u32 flags) + struct affinity_context *ctx)
+{ +{
+ return set_cpus_allowed_ptr(p, new_mask); + return set_cpus_allowed_ptr(p, ctx->new_mask);
+} +}
+ +
+static inline bool rq_has_pinned_tasks(struct rq *rq) +static inline bool rq_has_pinned_tasks(struct rq *rq)
@@ -2919,6 +3042,11 @@ index 000000000000..acb8657e811d
+ return false; + return false;
+} +}
+ +
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_SMP */ +#endif /* !CONFIG_SMP */
+ +
+static void +static void
@@ -3030,13 +3158,6 @@ index 000000000000..acb8657e811d
+ if (!llist) + if (!llist)
+ return; + return;
+ +
+ /*
+ * rq::ttwu_pending racy indication of out-standing wakeups.
+ * Races such that false-negatives are possible, since they
+ * are shorter lived that false-positives would be.
+ */
+ WRITE_ONCE(rq->ttwu_pending, 0);
+
+ rq_lock_irqsave(rq, &rf); + rq_lock_irqsave(rq, &rf);
+ update_rq_clock(rq); + update_rq_clock(rq);
+ +
@@ -3050,6 +3171,17 @@ index 000000000000..acb8657e811d
+ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0); + ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
+ } + }
+ +
+ /*
+ * Must be after enqueueing at least one task such that
+ * idle_cpu() does not observe a false-negative -- if it does,
+ * it is possible for select_idle_siblings() to stack a number
+ * of tasks on this CPU during that window.
+ *
+ * It is ok to clear ttwu_pending when another task is pending;
+ * we will receive an IPI once local irqs are enabled and then enqueue it.
+ * Since now nr_running > 0, idle_cpu() will always get correct result.
+ */
+ WRITE_ONCE(rq->ttwu_pending, 0);
+ rq_unlock_irqrestore(rq, &rf); + rq_unlock_irqrestore(rq, &rf);
+} +}
+ +
@@ -4635,7 +4767,9 @@ index 000000000000..acb8657e811d
+ struct rq *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu);
+ u64 resched_latency; + u64 resched_latency;
+ +
+ arch_scale_freq_tick(); + if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ arch_scale_freq_tick();
+
+ sched_clock_tick(); + sched_clock_tick();
+ +
+ raw_spin_lock(&rq->lock); + raw_spin_lock(&rq->lock);
@@ -4734,7 +4868,7 @@ index 000000000000..acb8657e811d
+ int i; + int i;
+ +
+ for_each_cpu_wrap(i, &chk, cpu) { + for_each_cpu_wrap(i, &chk, cpu) {
+ if (cpumask_subset(cpu_smt_mask(i), &chk) && + if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\
+ sg_balance_trigger(i)) + sg_balance_trigger(i))
+ return; + return;
+ } + }
@@ -4857,6 +4991,7 @@ index 000000000000..acb8657e811d
+static void sched_tick_stop(int cpu) +static void sched_tick_stop(int cpu)
+{ +{
+ struct tick_work *twork; + struct tick_work *twork;
+ int os;
+ +
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK)) + if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ return; + return;
@@ -4864,7 +4999,10 @@ index 000000000000..acb8657e811d
+ WARN_ON_ONCE(!tick_work_cpu); + WARN_ON_ONCE(!tick_work_cpu);
+ +
+ twork = per_cpu_ptr(tick_work_cpu, cpu); + twork = per_cpu_ptr(tick_work_cpu, cpu);
+ cancel_delayed_work_sync(&twork->work); + /* There cannot be competing actions, but don't rely on stop-machine. */
+ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+ WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+ /* Don't cancel, as this would mess up the state machine. */
+} +}
+#endif /* CONFIG_HOTPLUG_CPU */ +#endif /* CONFIG_HOTPLUG_CPU */
+ +
@@ -4988,8 +5126,7 @@ index 000000000000..acb8657e811d
+ pr_err("Preemption disabled at:"); + pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, preempt_disable_ip); + print_ip_sym(KERN_ERR, preempt_disable_ip);
+ } + }
+ if (panic_on_warn) + check_panic_on_warn("scheduling while atomic");
+ panic("scheduling while atomic\n");
+ +
+ dump_stack(); + dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -5305,7 +5442,7 @@ index 000000000000..acb8657e811d
+ prev->sched_contributes_to_load = + prev->sched_contributes_to_load =
+ (prev_state & TASK_UNINTERRUPTIBLE) && + (prev_state & TASK_UNINTERRUPTIBLE) &&
+ !(prev_state & TASK_NOLOAD) && + !(prev_state & TASK_NOLOAD) &&
+ !(prev->flags & TASK_FROZEN); + !(prev_state & TASK_FROZEN);
+ +
+ if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load)
+ rq->nr_uninterruptible++; + rq->nr_uninterruptible++;
@@ -6653,7 +6790,7 @@ index 000000000000..acb8657e811d
+#endif +#endif
+ +
+static int +static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask) +__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
+{ +{
+ int retval; + int retval;
+ cpumask_var_t cpus_allowed, new_mask; + cpumask_var_t cpus_allowed, new_mask;
@@ -6667,9 +6804,12 @@ index 000000000000..acb8657e811d
+ } + }
+ +
+ cpuset_cpus_allowed(p, cpus_allowed); + cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, mask, cpus_allowed); + cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
+again: +
+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER); + ctx->new_mask = new_mask;
+ ctx->flags |= SCA_CHECK;
+
+ retval = __set_cpus_allowed_ptr(p, ctx);
+ if (retval) + if (retval)
+ goto out_free_new_mask; + goto out_free_new_mask;
+ +
@@ -6681,7 +6821,24 @@ index 000000000000..acb8657e811d
+ * cpuset's cpus_allowed + * cpuset's cpus_allowed
+ */ + */
+ cpumask_copy(new_mask, cpus_allowed); + cpumask_copy(new_mask, cpus_allowed);
+ goto again; +
+ /*
+ * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+ * will restore the previous user_cpus_ptr value.
+ *
+ * In the unlikely event a previous user_cpus_ptr exists,
+ * we need to further restrict the mask to what is allowed
+ * by that old user_cpus_ptr.
+ */
+ if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+ bool empty = !cpumask_and(new_mask, new_mask,
+ ctx->user_mask);
+
+ if (WARN_ON_ONCE(empty))
+ cpumask_copy(new_mask, cpus_allowed);
+ }
+ __set_cpus_allowed_ptr(p, ctx);
+ retval = -EINVAL;
+ } + }
+ +
+out_free_new_mask: +out_free_new_mask:
@@ -6693,6 +6850,8 @@ index 000000000000..acb8657e811d
+ +
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{ +{
+ struct affinity_context ac;
+ struct cpumask *user_mask;
+ struct task_struct *p; + struct task_struct *p;
+ int retval; + int retval;
+ +
@@ -6727,7 +6886,27 @@ index 000000000000..acb8657e811d
+ if (retval) + if (retval)
+ goto out_put_task; + goto out_put_task;
+ +
+ retval = __sched_setaffinity(p, in_mask); + /*
+ * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+ * alloc_user_cpus_ptr() returns NULL.
+ */
+ user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
+ if (user_mask) {
+ cpumask_copy(user_mask, in_mask);
+ } else if (IS_ENABLED(CONFIG_SMP)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+
+ ac = (struct affinity_context){
+ .new_mask = in_mask,
+ .user_mask = user_mask,
+ .flags = SCA_USER,
+ };
+
+ retval = __sched_setaffinity(p, &ac);
+ kfree(ac.user_mask);
+
+out_put_task: +out_put_task:
+ put_task_struct(p); + put_task_struct(p);
+ return retval; + return retval;
@@ -7483,6 +7662,12 @@ index 000000000000..acb8657e811d
+ */ + */
+void __init init_idle(struct task_struct *idle, int cpu) +void __init init_idle(struct task_struct *idle, int cpu)
+{ +{
+#ifdef CONFIG_SMP
+ struct affinity_context ac = (struct affinity_context) {
+ .new_mask = cpumask_of(cpu),
+ .flags = 0,
+ };
+#endif
+ struct rq *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu);
+ unsigned long flags; + unsigned long flags;
+ +
@@ -7509,7 +7694,7 @@ index 000000000000..acb8657e811d
+ * + *
+ * And since this is boot we can forgo the serialisation. + * And since this is boot we can forgo the serialisation.
+ */ + */
+ set_cpus_allowed_common(idle, cpumask_of(cpu)); + set_cpus_allowed_common(idle, &ac);
+#endif +#endif
+ +
+ /* Silence PROVE_RCU */ + /* Silence PROVE_RCU */
@@ -8137,6 +8322,8 @@ index 000000000000..acb8657e811d
+ +
+ hrtick_rq_init(rq); + hrtick_rq_init(rq);
+ atomic_set(&rq->nr_iowait, 0); + atomic_set(&rq->nr_iowait, 0);
+
+ zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
+ } + }
+#ifdef CONFIG_SMP +#ifdef CONFIG_SMP
+ /* Set rq->online for cpu 0 */ + /* Set rq->online for cpu 0 */
@@ -8653,10 +8840,10 @@ index 000000000000..1212a031700e
+{} +{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644 new file mode 100644
index 000000000000..c32403ed82b6 index 000000000000..0b563999d4c1
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_sched.h +++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,668 @@ @@ -0,0 +1,671 @@
+#ifndef ALT_SCHED_H +#ifndef ALT_SCHED_H
+#define ALT_SCHED_H +#define ALT_SCHED_H
+ +
@@ -8903,6 +9090,9 @@ index 000000000000..c32403ed82b6
+#endif +#endif
+ atomic_t nohz_flags; + atomic_t nohz_flags;
+#endif /* CONFIG_NO_HZ_COMMON */ +#endif /* CONFIG_NO_HZ_COMMON */
+
+ /* Scratch cpumask to be temporarily used under rq_lock */
+ cpumask_var_t scratch_mask;
+}; +};
+ +
+extern unsigned long rq_load_util(struct rq *rq, unsigned long max); +extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
@@ -9874,7 +10064,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
static inline int static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a4a20046e586..c363693cd869 100644 index 771f8ddb7053..787a5069d69a 100644
--- a/kernel/sched/sched.h --- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h +++ b/kernel/sched/sched.h
@@ -5,6 +5,10 @@ @@ -5,6 +5,10 @@
@@ -9888,7 +10078,7 @@ index a4a20046e586..c363693cd869 100644
#include <linux/sched/affinity.h> #include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h> #include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h> #include <linux/sched/cpufreq.h>
@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr, @@ -3261,4 +3265,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
cgroup_account_cputime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec);
} }
@@ -9930,7 +10120,7 @@ index 857f837f52cb..5486c63e4790 100644
} }
return 0; return 0;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 84a188913cc9..53934e7ef5db 100644 index 38f3698f5e5b..b9d597394316 100644
--- a/kernel/sched/stats.h --- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h +++ b/kernel/sched/stats.h
@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
@@ -10005,7 +10195,7 @@ index 8739c2a5a54e..d8dd6c15eb47 100644
+#endif /* CONFIG_NUMA */ +#endif /* CONFIG_NUMA */
+#endif +#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c6d9dec11b74..2bc42ce8b48e 100644 index 137d4abe3eda..6bada3a6d571 100644
--- a/kernel/sysctl.c --- a/kernel/sysctl.c
+++ b/kernel/sysctl.c +++ b/kernel/sysctl.c
@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); @@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
@@ -10019,7 +10209,7 @@ index c6d9dec11b74..2bc42ce8b48e 100644
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
static const int six_hundred_forty_kb = 640 * 1024; static const int six_hundred_forty_kb = 640 * 1024;
#endif #endif
@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = { @@ -1934,6 +1938,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
#endif #endif
@@ -10113,10 +10303,10 @@ index cb925e8ef9a8..67d823510f5c 100644
return false; return false;
} }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index a2d301f58ced..2ccdede8585c 100644 index ff0536cea968..ce266990006d 100644
--- a/kernel/trace/trace_selftest.c --- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c
@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data) @@ -1150,10 +1150,15 @@ static int trace_wakeup_test_thread(void *data)
{ {
/* Make this a -deadline thread */ /* Make this a -deadline thread */
static const struct sched_attr attr = { static const struct sched_attr attr = {
@@ -10132,3 +10322,6 @@ index a2d301f58ced..2ccdede8585c 100644
}; };
struct wakeup_test_data *x = data; struct wakeup_test_data *x = data;
--
2.39.2
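Most of the reworked hunks above funnel sched_setaffinity(2) through the new struct affinity_context, with SCA_USER recording the request in p->user_cpus_ptr. A minimal, hedged userspace sketch (not from the patch) of the call path those hunks serve:

/* Hypothetical caller of sched_setaffinity(2): pins the current task to
 * CPU 0. In the patched prjc kernel this request reaches
 * __sched_setaffinity() with SCA_USER set, as shown in the hunks above. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);

        if (sched_setaffinity(0, sizeof(set), &set) == -1) {
                perror("sched_setaffinity");
                return 1;
        }
        printf("pinned to CPU 0\n");
        return 0;
}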