Compare commits

1 commit

Sravan Balaji, 13e3a3ac21: "PDS Kernel Configuration" (2023-01-23 11:31:26 -05:00)

6 changed files with 2842 additions and 559 deletions

View File

@@ -163,6 +163,24 @@ _acs_override="false"
 # This can be buggy and isn't recommended on a production machine, also enabling this option will not allow you to enable MGLRU.
 _bcachefs="false"
+# Set to "true" to add multi-generational LRU framework support on kernel 5.18+ - Improves memory pressure handling - https://lore.kernel.org/lkml/20220706220022.968789-1-yuzhao@google.com/
+# Older kernel versions might have a patch available in the community-patches repo
+# ! This option will be disabled when bcachefs is enabled for now !
+_mglru="true"
+# Set to "true" to enable support for fsync, an experimental replacement for esync found in Valve Proton 4.11+ - https://steamcommunity.com/games/221410/announcements/detail/2957094910196249305
+# Can be enabled alongside _futex_waitv on 5.13+ to use it as a fallback for older Proton builds
+_fsync="true"
+# Set to "true" to enable support for futex2, an experimental interface that can be used by proton-tkg and proton 5.13 experimental through Fsync - Can be enabled alongside fsync to use it as a fallback
+# https://gitlab.collabora.com/tonyk/linux/-/tree/futex2-dev
+_futex2="true"
+# Set to "true" to enable backported patches to add support for the futex_waitv() syscall, a new interface for fsync. It will appear in mainline at Linux 5.16 release and requires a wine/proton with builtin support for it. It's expected to be available in Valve Proton 6.3 stable soon - https://github.com/ValveSoftware/wine/pull/128
+# !! Disables futex2 interfaces support !!
+# https://github.com/andrealmeid/futex_waitv_patches
+_futex_waitv="false"
 # Set to "true" to enable support for winesync, an experimental replacement for esync - requires patched wine - https://repo.or.cz/linux/zf.git/shortlog/refs/heads/winesync4
 # ! Can't be used on multiple kernels installed side-by-side, which will require https://aur.archlinux.org/packages/winesync-dkms/ instead of this option !
 _winesync="false"
@@ -190,6 +208,9 @@ _compileroptlevel="2"
 # - "generic_v4" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v4
 _processor_opt="skylake"
+# MuQSS only - Make IRQ threading compulsory (FORCE_IRQ_THREADING) - Default is "false"
+_irq_threading="false"
 # CacULE only - Enable Response Driven Balancer, an experimental load balancer for CacULE
 _cacule_rdb="false"
@@ -206,6 +227,10 @@ _smt_nice="true"
 # Trust the CPU manufacturer to initialize Linux's CRNG (RANDOM_TRUST_CPU) - Kernel default is "false"
 _random_trust_cpu="true"
+# MuQSS only - CPU scheduler runqueue sharing - No sharing (RQ_NONE), SMT (hyperthread) siblings (RQ_SMT), Multicore siblings (RQ_MC), Symmetric Multi-Processing (RQ_SMP), NUMA (RQ_ALL)
+# Valid values are "none", "smt", "mc", "mc-llc"(for zen), "smp", "all" - Kernel default is "smt"
+_runqueue_sharing=""
 # Timer frequency - "100" "250" "300" "500" "750" "1000" ("2000" is available for cacule cpusched only) - More options available in kernel config prompt when left empty depending on selected cpusched with the default option pointed with a ">" (2000 for cacule, 100 for muqss and 1000 for other cpu schedulers)
 _timer_freq="500"
@@ -239,41 +264,14 @@ _custom_pkgbase=""
 # If left empty, it will use "-tkg-${_cpusched}${_compiler}" where "${_cpusched}" will be replaced by the user chosen scheduler, ${_compiler} will be replaced by "-llvm" if clang is used (nothing for GCC).
 _kernel_localversion=""
-# Set to "true" to add back missing symbol for AES-NI/AVX support on ZFS - This is a legacy option that can be ignored on 5.10+ kernels - https://github.com/NixOS/nixpkgs/blob/master/pkgs/os-specific/linux/kernel/export_kernel_fpu_functions.patch
-_zfsfix="true"
 # Set to your maximum number of CPUs (physical + logical cores) - Lower means less overhead - You can set it to "$(nproc)" to use the current host's CPU(s) core count, or leave empty to use default
 # If you set this to a lower value than you have cores, some cores will be disabled
 # Default Arch kernel value is 320
 _NR_CPUS_value=""
-#### LEGACY OPTIONS ####
-# Set to "true" to enable support for fsync, an experimental replacement for esync found in Valve Proton 4.11+ - https://steamcommunity.com/games/221410/announcements/detail/2957094910196249305
-# Can be enabled alongside _futex_waitv on 5.13+ to use it as a fallback for older Proton builds
-_fsync="true"
-# Set to "true" to enable backported patches to add support for the futex_waitv() syscall, a new interface for fsync. Upstream as of 5.16 and requires a wine/proton with builtin support for it - https://github.com/ValveSoftware/wine/pull/128
-# !! Disables futex2 interfaces support !!
-# https://github.com/andrealmeid/futex_waitv_patches
-_futex_waitv="true"
-# Set to "true" to enable support for futex2, an experimental interface that can be used by proton-tkg and proton 5.13 experimental through Fsync - Can be enabled alongside fsync to use it as a fallback
-# https://gitlab.collabora.com/tonyk/linux/-/tree/futex2-dev
-_futex2="true"
-# Set to "true" to add back missing symbol for AES-NI/AVX support on ZFS - This is a legacy option that can be ignored on 5.10+ kernels - https://github.com/NixOS/nixpkgs/blob/master/pkgs/os-specific/linux/kernel/export_kernel_fpu_functions.patch
-_zfsfix="true"
-# MuQSS only - CPU scheduler runqueue sharing - No sharing (RQ_NONE), SMT (hyperthread) siblings (RQ_SMT), Multicore siblings (RQ_MC), Symmetric Multi-Processing (RQ_SMP), NUMA (RQ_ALL)
-# Valid values are "none", "smt", "mc", "mc-llc"(for zen), "smp", "all" - Kernel default is "smt"
-_runqueue_sharing=""
-# MuQSS only - Make IRQ threading compulsory (FORCE_IRQ_THREADING) - Default is "false"
-_irq_threading="false"
-# Set to "true" to add multi-generational LRU framework support on kernel 5.18+ - Improves memory pressure handling - https://lore.kernel.org/lkml/20220706220022.968789-1-yuzhao@google.com/
-# Older kernel versions might have a patch available in the community-patches repo
-# Upstream as of 6.1
-# ! This option will be disabled when bcachefs is enabled !
-_mglru="true"
 #### USER PATCHES ####
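
Taken together, the customization.cfg hunks above are mostly a relocation: the MGLRU, fsync, futex2 and futex_waitv knobs move out of the removed "#### LEGACY OPTIONS ####" block (along with _zfsfix, _runqueue_sharing and _irq_threading) into the main option list, and _futex_waitv flips from "true" to "false" on the way. A minimal sketch of the resulting fragment, condensed from the added lines above (comments abridged):

    # Relocated from the former "#### LEGACY OPTIONS ####" block (sketch).
    _mglru="true"         # multi-generational LRU on 5.18+; forced off while bcachefs is enabled
    _fsync="true"         # esync replacement used by Valve Proton 4.11+
    _futex2="true"        # experimental futex2 interfaces, usable as an fsync fallback
    _futex_waitv="false"  # futex_waitv() backport; upstream as of 5.16, disables futex2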

View File

@@ -1,15 +1,15 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.2.0 Kernel Configuration
+# Linux/x86 6.2.0-rc4 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120201
+CONFIG_GCC_VERSION=120200
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24000
+CONFIG_AS_VERSION=23900
 CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24000
+CONFIG_LD_VERSION=23900
 CONFIG_LLD_VERSION=0
 CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
@@ -182,9 +182,10 @@ CONFIG_RCU_NOCB_CPU=y
 # CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set
 # CONFIG_RCU_NOCB_CPU_CB_BOOST is not set
 # CONFIG_TASKS_TRACE_RCU_READ_MB is not set
-CONFIG_RCU_LAZY=y
+# CONFIG_RCU_LAZY is not set
 # end of RCU Subsystem
+CONFIG_BUILD_BIN2C=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_IKHEADERS=m
@@ -487,7 +488,7 @@ CONFIG_X86_INTEL_TSX_MODE_AUTO=y
 CONFIG_X86_SGX=y
 CONFIG_EFI=y
 CONFIG_EFI_STUB=y
-# CONFIG_EFI_HANDOVER_PROTOCOL is not set
+CONFIG_EFI_HANDOVER_PROTOCOL=y
 CONFIG_EFI_MIXED=y
 # CONFIG_EFI_FAKE_MEMMAP is not set
 CONFIG_EFI_RUNTIME_MAP=y
@@ -637,7 +638,7 @@ CONFIG_ACPI_ADXL=y
 CONFIG_ACPI_CONFIGFS=m
 CONFIG_ACPI_PFRUT=m
 CONFIG_ACPI_PCC=y
-CONFIG_ACPI_FFH=y
+# CONFIG_ACPI_FFH is not set
 CONFIG_PMIC_OPREGION=y
 CONFIG_BYTCRC_PMIC_OPREGION=y
 CONFIG_CHTCRC_PMIC_OPREGION=y
@@ -935,7 +936,7 @@ CONFIG_MODULE_SIG_HASH="sha512"
 # CONFIG_MODULE_COMPRESS_GZIP is not set
 # CONFIG_MODULE_COMPRESS_XZ is not set
 CONFIG_MODULE_COMPRESS_ZSTD=y
-CONFIG_MODULE_DECOMPRESS=y
+# CONFIG_MODULE_DECOMPRESS is not set
 CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y
 CONFIG_MODPROBE_PATH="/sbin/modprobe"
 CONFIG_MODULES_TREE_LOOKUP=y
@@ -1988,7 +1989,7 @@ CONFIG_BT_HCIUART_QCA=y
 CONFIG_BT_HCIUART_AG6XX=y
 CONFIG_BT_HCIUART_MRVL=y
 CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBCM4377=m
+# CONFIG_BT_HCIBCM4377 is not set
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIDTL1=m
@@ -2326,7 +2327,7 @@ CONFIG_SYSFB=y
 CONFIG_FW_CS_DSP=m
 CONFIG_GOOGLE_FIRMWARE=y
 # CONFIG_GOOGLE_SMI is not set
-CONFIG_GOOGLE_CBMEM=m
+# CONFIG_GOOGLE_CBMEM is not set
 CONFIG_GOOGLE_COREBOOT_TABLE=m
 CONFIG_GOOGLE_MEMCONSOLE=m
 # CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set
@@ -2538,7 +2539,7 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y
 CONFIG_ZRAM_DEF_COMP="lzo-rle"
 CONFIG_ZRAM_WRITEBACK=y
 # CONFIG_ZRAM_MEMORY_TRACKING is not set
-CONFIG_ZRAM_MULTI_COMP=y
+# CONFIG_ZRAM_MULTI_COMP is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
 CONFIG_BLK_DEV_DRBD=m
@@ -2547,9 +2548,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_CDROM_PKTCDVD is not set
 CONFIG_ATA_OVER_ETH=m
 CONFIG_XEN_BLKDEV_FRONTEND=m
 CONFIG_XEN_BLKDEV_BACKEND=m
@@ -2598,8 +2597,6 @@ CONFIG_TIFM_CORE=m
 CONFIG_TIFM_7XX1=m
 CONFIG_ICS932S401=m
 CONFIG_ENCLOSURE_SERVICES=m
-CONFIG_SMPRO_ERRMON=m
-CONFIG_SMPRO_MISC=m
 CONFIG_HP_ILO=m
 CONFIG_APDS9802ALS=m
 CONFIG_ISL29003=m
@@ -3324,7 +3321,7 @@ CONFIG_ENC28J60=m
 # CONFIG_ENC28J60_WRITEVERIFY is not set
 CONFIG_ENCX24J600=m
 CONFIG_LAN743X=m
-CONFIG_VCAP=y
+# CONFIG_VCAP is not set
 CONFIG_NET_VENDOR_MICROSEMI=y
 CONFIG_MSCC_OCELOT_SWITCH_LIB=m
 CONFIG_NET_VENDOR_MICROSOFT=y
@@ -3898,7 +3895,7 @@ CONFIG_MT7921_COMMON=m
 CONFIG_MT7921E=m
 CONFIG_MT7921S=m
 CONFIG_MT7921U=m
-CONFIG_MT7996E=m
+# CONFIG_MT7996E is not set
 CONFIG_WLAN_VENDOR_MICROCHIP=y
 CONFIG_WILC1000=m
 CONFIG_WILC1000_SDIO=m
@@ -3962,29 +3959,27 @@ CONFIG_RTL8XXXU_UNTESTED=y
 CONFIG_RTW88=m
 CONFIG_RTW88_CORE=m
 CONFIG_RTW88_PCI=m
-CONFIG_RTW88_USB=m
 CONFIG_RTW88_8822B=m
 CONFIG_RTW88_8822C=m
 CONFIG_RTW88_8723D=m
 CONFIG_RTW88_8821C=m
 CONFIG_RTW88_8822BE=m
-CONFIG_RTW88_8822BU=m
+# CONFIG_RTW88_8822BU is not set
 CONFIG_RTW88_8822CE=m
-CONFIG_RTW88_8822CU=m
+# CONFIG_RTW88_8822CU is not set
 CONFIG_RTW88_8723DE=m
-CONFIG_RTW88_8723DU=m
+# CONFIG_RTW88_8723DU is not set
 CONFIG_RTW88_8821CE=m
-CONFIG_RTW88_8821CU=m
+# CONFIG_RTW88_8821CU is not set
 CONFIG_RTW88_DEBUG=y
 CONFIG_RTW88_DEBUGFS=y
 CONFIG_RTW89=m
 CONFIG_RTW89_CORE=m
 CONFIG_RTW89_PCI=m
 CONFIG_RTW89_8852A=m
-CONFIG_RTW89_8852B=m
 CONFIG_RTW89_8852C=m
 CONFIG_RTW89_8852AE=m
-CONFIG_RTW89_8852BE=m
+# CONFIG_RTW89_8852BE is not set
 CONFIG_RTW89_8852CE=m
 CONFIG_RTW89_DEBUG=y
 CONFIG_RTW89_DEBUGMSG=y
@@ -4232,7 +4227,7 @@ CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
 CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
 CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
 CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
-CONFIG_TOUCHSCREEN_CYTTSP5=m
+# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
 CONFIG_TOUCHSCREEN_DA9034=m
 CONFIG_TOUCHSCREEN_DA9052=m
 CONFIG_TOUCHSCREEN_DYNAPRO=m
@@ -4244,7 +4239,7 @@ CONFIG_TOUCHSCREEN_FUJITSU=m
 CONFIG_TOUCHSCREEN_GOODIX=m
 CONFIG_TOUCHSCREEN_HIDEEP=m
 CONFIG_TOUCHSCREEN_HYCON_HY46XX=m
-CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX=m
+# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
 CONFIG_TOUCHSCREEN_ILI210X=m
 CONFIG_TOUCHSCREEN_ILITEK=m
 CONFIG_TOUCHSCREEN_S6SY761=m
@@ -4319,7 +4314,7 @@ CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
 CONFIG_TOUCHSCREEN_ROHM_BU21023=m
 CONFIG_TOUCHSCREEN_IQS5XX=m
 CONFIG_TOUCHSCREEN_ZINITIX=m
-CONFIG_TOUCHSCREEN_HIMAX_HX83112B=m
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_88PM860X_ONKEY=m
 CONFIG_INPUT_88PM80X_ONKEY=m
@@ -4437,7 +4432,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_LEGACY_TIOCSTI is not set
+CONFIG_LEGACY_TIOCSTI=y
 CONFIG_LDISC_AUTOLOAD=y
 #
@@ -4530,7 +4525,7 @@ CONFIG_IPMI_SSIF=m
 CONFIG_IPMI_IPMB=m
 CONFIG_IPMI_WATCHDOG=m
 CONFIG_IPMI_POWEROFF=m
-CONFIG_SSIF_IPMI_BMC=m
+# CONFIG_SSIF_IPMI_BMC is not set
 CONFIG_IPMB_DEVICE_INTERFACE=m
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_TIMERIOMEM=m
@@ -4726,7 +4721,7 @@ CONFIG_SPI_MICROCHIP_CORE=m
 CONFIG_SPI_MICROCHIP_CORE_QSPI=m
 # CONFIG_SPI_LANTIQ_SSC is not set
 CONFIG_SPI_OC_TINY=m
-CONFIG_SPI_PCI1XXXX=m
+# CONFIG_SPI_PCI1XXXX is not set
 CONFIG_SPI_PXA2XX=m
 CONFIG_SPI_PXA2XX_PCI=m
 # CONFIG_SPI_ROCKCHIP is not set
@@ -4943,7 +4938,7 @@ CONFIG_GPIO_VIPERBOARD=m
 # Virtual GPIO drivers
 #
 CONFIG_GPIO_AGGREGATOR=m
-CONFIG_GPIO_LATCH=m
+# CONFIG_GPIO_LATCH is not set
 CONFIG_GPIO_MOCKUP=m
 CONFIG_GPIO_VIRTIO=m
 CONFIG_GPIO_SIM=m
@@ -5081,7 +5076,6 @@ CONFIG_HWMON_VID=m
 #
 CONFIG_SENSORS_ABITUGURU=m
 CONFIG_SENSORS_ABITUGURU3=m
-CONFIG_SENSORS_SMPRO=m
 CONFIG_SENSORS_AD7314=m
 CONFIG_SENSORS_AD7414=m
 CONFIG_SENSORS_AD7418=m
@@ -5201,9 +5195,8 @@ CONFIG_SENSORS_NCT7904=m
 CONFIG_SENSORS_NPCM7XX=m
 CONFIG_SENSORS_NZXT_KRAKEN2=m
 CONFIG_SENSORS_NZXT_SMART2=m
-CONFIG_SENSORS_OCC_P8_I2C=m
-CONFIG_SENSORS_OCC=m
-CONFIG_SENSORS_OXP=m
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_OXP is not set
 CONFIG_SENSORS_PCF8591=m
 CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
@@ -5408,7 +5401,7 @@ CONFIG_MAX63XX_WATCHDOG=m
 CONFIG_RETU_WATCHDOG=m
 CONFIG_ACQUIRE_WDT=m
 CONFIG_ADVANTECH_WDT=m
-CONFIG_ADVANTECH_EC_WDT=m
+# CONFIG_ADVANTECH_EC_WDT is not set
 CONFIG_ALIM1535_WDT=m
 CONFIG_ALIM7101_WDT=m
 CONFIG_EBC_C384_WDT=m
@@ -5490,7 +5483,7 @@ CONFIG_BCMA_DRIVER_GPIO=y
 #
 CONFIG_MFD_CORE=y
 CONFIG_MFD_AS3711=y
-CONFIG_MFD_SMPRO=m
+# CONFIG_MFD_SMPRO is not set
 CONFIG_PMIC_ADP5520=y
 CONFIG_MFD_AAT2870_CORE=y
 CONFIG_MFD_BCM590XX=m
@@ -5671,7 +5664,7 @@ CONFIG_REGULATOR_MT6311=m
 CONFIG_REGULATOR_MT6323=m
 CONFIG_REGULATOR_MT6331=m
 CONFIG_REGULATOR_MT6332=m
-CONFIG_REGULATOR_MT6357=m
+# CONFIG_REGULATOR_MT6357 is not set
 CONFIG_REGULATOR_MT6358=m
 CONFIG_REGULATOR_MT6359=m
 CONFIG_REGULATOR_MT6360=m
@@ -5693,7 +5686,7 @@ CONFIG_REGULATOR_RT5120=m
 CONFIG_REGULATOR_RT5190A=m
 CONFIG_REGULATOR_RT5759=m
 CONFIG_REGULATOR_RT6160=m
-CONFIG_REGULATOR_RT6190=m
+# CONFIG_REGULATOR_RT6190 is not set
 CONFIG_REGULATOR_RT6245=m
 CONFIG_REGULATOR_RTQ2134=m
 CONFIG_REGULATOR_RTMV20=m
@@ -6187,8 +6180,7 @@ CONFIG_VIDEO_VIMC=m
 CONFIG_VIDEO_VIVID=m
 CONFIG_VIDEO_VIVID_CEC=y
 CONFIG_VIDEO_VIVID_MAX_DEVS=64
-CONFIG_VIDEO_VISL=m
-# CONFIG_VISL_DEBUGFS is not set
+# CONFIG_VIDEO_VISL is not set
 CONFIG_DVB_TEST_DRIVERS=y
 CONFIG_DVB_VIDTV=m
@@ -6263,7 +6255,7 @@ CONFIG_VIDEO_NOON010PC30=m
 CONFIG_VIDEO_OG01A1B=m
 CONFIG_VIDEO_OV02A10=m
 CONFIG_VIDEO_OV08D10=m
-CONFIG_VIDEO_OV08X40=m
+# CONFIG_VIDEO_OV08X40 is not set
 CONFIG_VIDEO_OV13858=m
 CONFIG_VIDEO_OV13B10=m
 CONFIG_VIDEO_OV2640=m
@@ -6271,7 +6263,7 @@ CONFIG_VIDEO_OV2659=m
 CONFIG_VIDEO_OV2680=m
 CONFIG_VIDEO_OV2685=m
 CONFIG_VIDEO_OV2740=m
-CONFIG_VIDEO_OV4689=m
+# CONFIG_VIDEO_OV4689 is not set
 CONFIG_VIDEO_OV5647=m
 CONFIG_VIDEO_OV5648=m
 CONFIG_VIDEO_OV5670=m
@@ -6581,6 +6573,7 @@ CONFIG_DRM=y
 CONFIG_DRM_MIPI_DBI=m
 CONFIG_DRM_MIPI_DSI=y
 # CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_USE_DYNAMIC_DEBUG=y
 CONFIG_DRM_KMS_HELPER=y
 CONFIG_DRM_FBDEV_EMULATION=y
 CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6858,7 +6851,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
 # CONFIG_LOGO is not set
 # end of Graphics support
-CONFIG_DRM_ACCEL=y
+# CONFIG_DRM_ACCEL is not set
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
 # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
@@ -7170,11 +7163,11 @@ CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m
-CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927=m
+# CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927 is not set
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825=m
-CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE=m
+# CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE is not set
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT274=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT286=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT298=m
@@ -7527,7 +7520,7 @@ CONFIG_SND_SOC_WM8903=m
 CONFIG_SND_SOC_WM8904=m
 CONFIG_SND_SOC_WM8940=m
 CONFIG_SND_SOC_WM8960=m
-CONFIG_SND_SOC_WM8961=m
+# CONFIG_SND_SOC_WM8961 is not set
 CONFIG_SND_SOC_WM8962=m
 CONFIG_SND_SOC_WM8974=m
 CONFIG_SND_SOC_WM8978=m
@@ -8357,7 +8350,7 @@ CONFIG_INFINIBAND_HFI1=m
 # CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
 # CONFIG_SDMA_VERBOSITY is not set
 CONFIG_INFINIBAND_IRDMA=m
-CONFIG_MANA_INFINIBAND=m
+# CONFIG_MANA_INFINIBAND is not set
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
@@ -8633,7 +8626,7 @@ CONFIG_NITRO_ENCLAVES=m
 CONFIG_ACRN_HSM=m
 CONFIG_EFI_SECRET=m
 CONFIG_SEV_GUEST=m
-CONFIG_TDX_GUEST_DRIVER=m
+# CONFIG_TDX_GUEST_DRIVER is not set
 CONFIG_VIRTIO_ANCHOR=y
 CONFIG_VIRTIO=y
 CONFIG_VIRTIO_PCI_LIB=m
@@ -8831,7 +8824,7 @@ CONFIG_CROS_EC_DEBUGFS=m
 CONFIG_CROS_EC_SENSORHUB=m
 CONFIG_CROS_EC_SYSFS=m
 CONFIG_CROS_EC_TYPEC=m
-CONFIG_CROS_HPS_I2C=m
+# CONFIG_CROS_HPS_I2C is not set
 CONFIG_CROS_USBPD_LOGGER=m
 CONFIG_CROS_USBPD_NOTIFY=m
 CONFIG_CHROMEOS_PRIVACY_SCREEN=m
@@ -8908,9 +8901,7 @@ CONFIG_AMILO_RFKILL=m
 CONFIG_FUJITSU_LAPTOP=m
 CONFIG_FUJITSU_TABLET=m
 CONFIG_GPD_POCKET_FAN=m
-CONFIG_X86_PLATFORM_DRIVERS_HP=y
-CONFIG_HP_ACCEL=m
-CONFIG_HP_WMI=m
+# CONFIG_X86_PLATFORM_DRIVERS_HP is not set
 CONFIG_WIRELESS_HOTKEY=m
 CONFIG_IBM_RTL=m
 CONFIG_IDEAPAD_LAPTOP=m
@@ -8925,7 +8916,7 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
 CONFIG_THINKPAD_LMI=m
 CONFIG_INTEL_ATOMISP2_PDX86=y
 CONFIG_INTEL_ATOMISP2_LED=m
-CONFIG_INTEL_IFS=m
+# CONFIG_INTEL_IFS is not set
 CONFIG_INTEL_SAR_INT1092=m
 CONFIG_INTEL_SKL_INT3472=m
 CONFIG_INTEL_PMC_CORE=y
@@ -9051,7 +9042,7 @@ CONFIG_INTEL_IOMMU_SVM=y
 # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
 CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
-CONFIG_IOMMUFD=m
+# CONFIG_IOMMUFD is not set
 CONFIG_IRQ_REMAP=y
 CONFIG_HYPERV_IOMMU=y
 CONFIG_VIRTIO_IOMMU=m
@@ -9228,9 +9219,8 @@ CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
 CONFIG_IIO_ST_ACCEL_3AXIS=m
 CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
 CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
-CONFIG_IIO_KX022A=m
-CONFIG_IIO_KX022A_SPI=m
-CONFIG_IIO_KX022A_I2C=m
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
 CONFIG_KXSD9=m
 CONFIG_KXSD9_SPI=m
 CONFIG_KXSD9_I2C=m
@@ -9257,7 +9247,7 @@ CONFIG_STK8BA50=m
 # Analog to digital converters
 #
 CONFIG_AD_SIGMA_DELTA=m
-CONFIG_AD4130=m
+# CONFIG_AD4130 is not set
 CONFIG_AD7091R5=m
 CONFIG_AD7124=m
 CONFIG_AD7192=m
@@ -9298,7 +9288,7 @@ CONFIG_MAX1027=m
 CONFIG_MAX11100=m
 CONFIG_MAX1118=m
 CONFIG_MAX11205=m
-CONFIG_MAX11410=m
+# CONFIG_MAX11410 is not set
 CONFIG_MAX1241=m
 CONFIG_MAX1363=m
 CONFIG_MAX9611=m
@@ -9306,7 +9296,7 @@ CONFIG_MCP320X=m
 CONFIG_MCP3422=m
 CONFIG_MCP3911=m
 CONFIG_MEDIATEK_MT6360_ADC=m
-CONFIG_MEDIATEK_MT6370_ADC=m
+# CONFIG_MEDIATEK_MT6370_ADC is not set
 CONFIG_MEN_Z188_ADC=m
 CONFIG_MP2629_ADC=m
 CONFIG_NAU7802=m
@@ -9339,7 +9329,7 @@ CONFIG_XILINX_XADC=m
 #
 # Analog to digital and digital to analog converters
 #
-CONFIG_AD74115=m
+# CONFIG_AD74115 is not set
 CONFIG_AD74413R=m
 # end of Analog to digital and digital to analog converters
@@ -9489,7 +9479,7 @@ CONFIG_AD9523=m
 #
 CONFIG_ADF4350=m
 CONFIG_ADF4371=m
-CONFIG_ADF4377=m
+# CONFIG_ADF4377 is not set
 CONFIG_ADMV1013=m
 CONFIG_ADMV1014=m
 CONFIG_ADMV4420=m
@@ -9798,7 +9788,7 @@ CONFIG_TMP007=m
 CONFIG_TMP117=m
 CONFIG_TSYS01=m
 CONFIG_TSYS02D=m
-CONFIG_MAX30208=m
+# CONFIG_MAX30208 is not set
 CONFIG_MAX31856=m
 CONFIG_MAX31865=m
 # end of Temperature sensors
@@ -9952,8 +9942,7 @@ CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
 CONFIG_FPGA_DFL_PCI=m
 CONFIG_FPGA_M10_BMC_SEC_UPDATE=m
 CONFIG_FPGA_MGR_MICROCHIP_SPI=m
-CONFIG_FPGA_MGR_LATTICE_SYSCONFIG=m
-CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI=m
+# CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI is not set
 CONFIG_TEE=m
 CONFIG_AMDTEE=m
 CONFIG_MULTIPLEXER=m
@@ -10198,10 +10187,10 @@ CONFIG_SQUASHFS=m
 # CONFIG_SQUASHFS_FILE_CACHE is not set
 CONFIG_SQUASHFS_FILE_DIRECT=y
 CONFIG_SQUASHFS_DECOMP_SINGLE=y
-CONFIG_SQUASHFS_DECOMP_MULTI=y
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
-CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_ZLIB=y
 CONFIG_SQUASHFS_LZ4=y
@@ -10880,7 +10869,6 @@ CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
 CONFIG_BTREE=y
 CONFIG_INTERVAL_TREE=y
-CONFIG_INTERVAL_TREE_SPAN_ITER=y
 CONFIG_XARRAY_MULTI=y
 CONFIG_ASSOCIATIVE_ARRAY=y
 CONFIG_HAS_IOMEM=y
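
Because the replacement config keeps CONFIG_IKCONFIG=y and CONFIG_IKCONFIG_PROC=y, the resulting kernel exposes its configuration at /proc/config.gz, so the toggles changed above can be checked on a booted build. A small sketch (the option list is illustrative, picked from the hunks above):

    #!/bin/bash
    # Print the state of a few options this commit flips (illustrative list).
    # /proc/config.gz exists because the config sets CONFIG_IKCONFIG_PROC=y.
    for opt in CONFIG_RCU_LAZY CONFIG_EFI_HANDOVER_PROTOCOL \
               CONFIG_LEGACY_TIOCSTI CONFIG_ZRAM_MULTI_COMP CONFIG_DRM_ACCEL; do
        zgrep -E "^(# )?${opt}[= ]" /proc/config.gz || echo "${opt}: absent"
    done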

View File

@@ -723,7 +723,7 @@ _tkg_srcprep() {
elif [ "$_kver" = "518" ]; then elif [ "$_kver" = "518" ]; then
rev=2 rev=2
elif [ "$_kver" = "601" ]; then elif [ "$_kver" = "601" ]; then
rev=1 rev=3
else else
rev=0 rev=0
fi fi
@@ -866,11 +866,6 @@ _tkg_srcprep() {
_disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF" _disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF"
_module "BLK_DEV_LOOP" _module "BLK_DEV_LOOP"
# buggy project C/PSI interaction workaround
if [ "${_cpusched}" = "pds" ] || [ "${_cpusched}" = "bmq" ]; then
_enable "PSI_DEFAULT_DISABLED"
fi
if [ -n "$_custom_commandline" ]; then if [ -n "$_custom_commandline" ]; then
_enable "CMDLINE_BOOL" _enable "CMDLINE_BOOL"
_disable "CMDLINE_OVERRIDE" _disable "CMDLINE_OVERRIDE"

View File

@@ -5,7 +5,7 @@ index 42af9ca0127e..31747ec54f9d 100644
 @@ -5406,6 +5406,12 @@
 sa1100ir [NET]
 See drivers/net/irda/sa1100_ir.c.
 + sched_timeslice=
 + [KNL] Time slice in ms for Project C BMQ/PDS scheduler.
 + Format: integer 2, 4
@@ -13,14 +13,14 @@ index 42af9ca0127e..31747ec54f9d 100644
 + See Documentation/scheduler/sched-BMQ.txt
 +
 sched_verbose [KNL] Enables verbose scheduler debug messages.
 schedstats= [KNL,X86] Enable or disable scheduled statistics.
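
The kernel-parameters.txt context above (unchanged by this commit) documents sched_timeslice=, the boot parameter Project C uses to pick the BMQ/PDS time slice (2 ms or 4 ms). As a usage illustration only, not part of the diff, it can be appended to the kernel command line and verified after reboot; the GRUB edit below is one common way to do that on distributions that use /etc/default/grub:

    # Illustrative only: request the 4 ms BMQ/PDS time slice on the next boot.
    sudo sed -i 's/^GRUB_CMDLINE_LINUX_DEFAULT="/&sched_timeslice=4 /' /etc/default/grub
    sudo grub-mkconfig -o /boot/grub/grub.cfg
    # After rebooting, confirm the parameter took effect:
    tr ' ' '\n' < /proc/cmdline | grep sched_timeslice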
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
 index 98d1b198b2b4..d7c78a107f93 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
 @@ -1552,3 +1552,13 @@ is 10 seconds.
 The softlockup threshold is (``2 * watchdog_thresh``). Setting this
 tunable to zero will disable lockup detection altogether.
 +
@@ -161,7 +161,7 @@ index 9e479d7d202b..2a8530021b23 100644
 + (unsigned long long)tsk_seruntime(task),
 (unsigned long long)task->sched_info.run_delay,
 task->sched_info.pcount);
 diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
 index 8874f681b056..59eb72bf7d5f 100644
 --- a/include/asm-generic/resource.h
@@ -181,7 +181,7 @@ index ffb6eb55cd13..2e730a59caa2 100644
 +++ b/include/linux/sched.h
 @@ -762,8 +762,14 @@ struct task_struct {
 unsigned int ptrace;
 #ifdef CONFIG_SMP
 - int on_cpu;
 struct __call_single_node wake_entry;
@@ -202,11 +202,11 @@ index ffb6eb55cd13..2e730a59caa2 100644
 +#endif /* !CONFIG_SCHED_ALT */
 #endif
 int on_rq;
 @@ -785,6 +792,20 @@ struct task_struct {
 int normal_prio;
 unsigned int rt_priority;
 +#ifdef CONFIG_SCHED_ALT
 + u64 last_ran;
 + s64 time_slice;
@@ -229,13 +229,13 @@ index ffb6eb55cd13..2e730a59caa2 100644
 unsigned int core_occupation;
 #endif
 +#endif /* !CONFIG_SCHED_ALT */
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group *sched_task_group;
 @@ -1545,6 +1567,15 @@ struct task_struct {
 */
 };
 +#ifdef CONFIG_SCHED_ALT
 +#define tsk_seruntime(t) ((t)->sched_time)
 +/* replace the uncertian rt_timeout with 0UL */
@@ -254,7 +254,7 @@ index 7c83d4d5a971..fa30f98cb2be 100644
 +++ b/include/linux/sched/deadline.h
 @@ -1,5 +1,24 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 +#ifdef CONFIG_SCHED_ALT
 +
 +static inline int dl_task(struct task_struct *p)
@@ -282,7 +282,7 @@ index 7c83d4d5a971..fa30f98cb2be 100644
 return dl_prio(p->prio);
 }
 +#endif /* CONFIG_SCHED_ALT */
 static inline bool dl_time_before(u64 a, u64 b)
 {
 diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
@@ -292,7 +292,7 @@ index ab83d85e1183..6af9ae681116 100644
 @@ -18,6 +18,32 @@
 #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
 #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
 +#ifdef CONFIG_SCHED_ALT
 +
 +/* Undefine MAX_PRIO and DEFAULT_PRIO */
@@ -327,7 +327,7 @@ index 994c25640e15..8c050a59ece1 100644
 --- a/include/linux/sched/rt.h
 +++ b/include/linux/sched/rt.h
 @@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
 if (policy == SCHED_FIFO || policy == SCHED_RR)
 return true;
 +#ifndef CONFIG_SCHED_ALT
@@ -336,15 +336,15 @@ index 994c25640e15..8c050a59ece1 100644
 +#endif
 return false;
 }
 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
 index 816df6cc444e..c8da08e18c91 100644
 --- a/include/linux/sched/topology.h
 +++ b/include/linux/sched/topology.h
 @@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 #endif /* !CONFIG_SMP */
 -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
 + !defined(CONFIG_SCHED_ALT)
@@ -364,9 +364,9 @@ index 94125d3b6893..c87ba766d354 100644
 This feature enables the scheduler to track the clamped utilization
 of each CPU based on RUNNABLE tasks scheduled on that CPU.
 @@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
 If in doubt, use the default value.
 +menuconfig SCHED_ALT
 + bool "Alternative CPU Schedulers"
 + default y
@@ -397,7 +397,7 @@ index 94125d3b6893..c87ba766d354 100644
 +endif
 +
 endmenu
 #
 @@ -918,6 +948,7 @@ config NUMA_BALANCING
 depends on ARCH_SUPPORTS_NUMA_BALANCING
@@ -410,7 +410,7 @@ index 94125d3b6893..c87ba766d354 100644
 @@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
 depends on CGROUP_SCHED
 default CGROUP_SCHED
 +if !SCHED_ALT
 config CFS_BANDWIDTH
 bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
@@ -418,13 +418,13 @@ index 94125d3b6893..c87ba766d354 100644
 @@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
 realtime bandwidth for them.
 See Documentation/scheduler/sched-rt-group.rst for more information.
 +endif #!SCHED_ALT
 endif #CGROUP_SCHED
 config UCLAMP_TASK_GROUP
 @@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
 config SCHED_AUTOGROUP
 bool "Automatic process group scheduling"
 + depends on !SCHED_ALT
@@ -482,7 +482,7 @@ index c2f1fd95a821..41654679b1b2 100644
 --- a/kernel/Kconfig.preempt
 +++ b/kernel/Kconfig.preempt
 @@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
 config SCHED_CORE
 bool "Core Scheduling for SMT"
 - depends on SCHED_SMT
@@ -497,7 +497,7 @@ index b474289c15b8..a23224b45b03 100644
 @@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 return ret;
 }
 -#ifdef CONFIG_SMP
 +#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
 /*
@@ -522,9 +522,9 @@ index e39cb696cfbd..463423572e09 100644
 t2 = tsk->sched_info.run_delay;
 - t3 = tsk->se.sum_exec_runtime;
 + t3 = tsk_seruntime(tsk);
 d->cpu_count += t1;
 diff --git a/kernel/exit.c b/kernel/exit.c
 index 35e0a31a0315..64e368441cf4 100644
 --- a/kernel/exit.c
@@ -532,11 +532,11 @@ index 35e0a31a0315..64e368441cf4 100644
 @@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
 sig->curr_target = next_thread(tsk);
 }
 - add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 + add_device_randomness((const void*) &tsk_seruntime(tsk),
 sizeof(unsigned long long));
 /*
 @@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
 sig->inblock += task_io_get_inblock(tsk);
@@ -558,14 +558,14 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 - waiter->deadline = task->dl.deadline;
 + waiter->deadline = __tsk_deadline(task);
 }
 /*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
 #define task_to_waiter(p) \
 - &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
 + &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
 static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
 struct rt_mutex_waiter *right)
 {
@@ -574,7 +574,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 +#else
 if (left->prio < right->prio)
 return 1;
 +#ifndef CONFIG_SCHED_BMQ
 /*
 * If both waiters have dl_prio(), we check the deadlines of the
@@ -584,11 +584,11 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 if (dl_prio(left->prio))
 return dl_time_before(left->deadline, right->deadline);
 +#endif
 return 0;
 +#endif
 }
 static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 struct rt_mutex_waiter *right)
 {
@@ -597,7 +597,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 +#else
 if (left->prio != right->prio)
 return 0;
 +#ifndef CONFIG_SCHED_BMQ
 /*
 * If both waiters have dl_prio(), we check the deadlines of the
@@ -607,11 +607,11 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 if (dl_prio(left->prio))
 return left->deadline == right->deadline;
 +#endif
 return 1;
 +#endif
 }
 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
 diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
 index 976092b7bd45..31d587c16ec1 100644
@@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
 obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..572eab74418f
+index 000000000000..acb8657e811d
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7961 @@
+@@ -0,0 +1,7978 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -665,7 +665,6 @@ index 000000000000..572eab74418f
 +#include <linux/init_task.h>
 +#include <linux/kcov.h>
 +#include <linux/kprobes.h>
-+#include <linux/profile.h>
 +#include <linux/nmi.h>
 +#include <linux/scs.h>
 +
@@ -706,7 +705,7 @@ index 000000000000..572eab74418f
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.1-r1"
++#define ALT_SCHED_VERSION "v6.1-r3"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p) rt_prio((p)->prio)
@@ -815,14 +814,14 @@ index 000000000000..572eab74418f
 +clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
 +{
 + if (low < pr && pr <= high)
-+ cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
++ cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
 +}
 +
 +static inline void
 +set_recorded_preempt_mask(int pr, int low, int high, int cpu)
 +{
 + if (low < pr && pr <= high)
-+ cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
++ cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
 +}
 +
 +static atomic_t sched_prio_record = ATOMIC_INIT(0);
@@ -1392,8 +1391,8 @@ index 000000000000..572eab74418f
 + * Context: rq->lock
 + */
 +#define __SCHED_DEQUEUE_TASK(p, rq, flags) \
-+ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
 + sched_info_dequeue(rq, p); \
++ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
 + \
 + list_del(&p->sq_node); \
 + if (list_empty(&rq->queue.heads[p->sq_idx])) \
@@ -2030,11 +2029,13 @@ index 000000000000..572eab74418f
 +
 + WARN_ON_ONCE(is_migration_disabled(p));
 +#endif
-+ if (task_cpu(p) == new_cpu)
-+ return;
 + trace_sched_migrate_task(p, new_cpu);
-+ rseq_migrate(p);
-+ perf_event_task_migrate(p);
++
++ if (task_cpu(p) != new_cpu)
++ {
++ rseq_migrate(p);
++ perf_event_task_migrate(p);
++ }
 +
 + __set_task_cpu(p, new_cpu);
 +}
@@ -5110,15 +5111,15 @@ index 000000000000..572eab74418f
 + if (src_rq->nr_running < 2)
 + cpumask_clear_cpu(i, &sched_rq_pending_mask);
 +
-+ spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+ do_raw_spin_unlock(&src_rq->lock);
-+
 + rq->nr_running += nr_migrated;
 + if (rq->nr_running > 1)
 + cpumask_set_cpu(cpu, &sched_rq_pending_mask);
 +
 + cpufreq_update_util(rq, 0);
 +
++ spin_release(&src_rq->lock.dep_map, _RET_IP_);
++ do_raw_spin_unlock(&src_rq->lock);
++
 + return 1;
 + }
 +
@@ -5147,7 +5148,7 @@ index 000000000000..572eab74418f
 +}
 +
 +static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++choose_next_task(struct rq *rq, int cpu)
 +{
 + struct task_struct *next;
 +
@@ -5334,7 +5335,7 @@ index 000000000000..572eab74418f
 +
 + check_curr(prev, rq);
 +
-+ next = choose_next_task(rq, cpu, prev);
++ next = choose_next_task(rq, cpu);
 + clear_tsk_need_resched(prev);
 + clear_preempt_need_resched();
 +#ifdef CONFIG_SCHED_DEBUG
@@ -5764,6 +5765,7 @@ index 000000000000..572eab74418f
 + return;
 +
 + rq = __task_access_lock(p, &lock);
++ update_rq_clock(rq);
 + /*
 + * Set under pi_lock && rq->lock, such that the value can be used under
 + * either lock.
@@ -6643,6 +6645,13 @@ index 000000000000..572eab74418f
 + return retval;
 +}
 +
++#ifdef CONFIG_SMP
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++ return 0;
++}
++#endif
++
 +static int
 +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 +{
@@ -8027,6 +8036,14 @@ index 000000000000..572eab74418f
 +
 + sched_smp_initialized = true;
 +}
++
++static int __init migration_init(void)
++{
++ sched_cpu_starting(smp_processor_id());
++ return 0;
++}
++early_initcall(migration_init);
++
 +#else
 +void __init sched_init_smp(void)
 +{
@@ -8636,14 +8653,15 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..e3b6320a397a
+index 000000000000..c32403ed82b6
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,667 @@
+@@ -0,0 +1,668 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
 +#include <linux/context_tracking.h>
++#include <linux/profile.h>
 +#include <linux/psi.h>
 +#include <linux/stop_machine.h>
 +#include <linux/syscalls.h>
@@ -9428,23 +9446,23 @@ index d9dc9ab3773f..71a25540d65e 100644
 --- a/kernel/sched/build_policy.c
 +++ b/kernel/sched/build_policy.c
 @@ -42,13 +42,19 @@
 #include "idle.c"
 +#ifndef CONFIG_SCHED_ALT
 #include "rt.c"
 +#endif
 #ifdef CONFIG_SMP
 +#ifndef CONFIG_SCHED_ALT
 # include "cpudeadline.c"
 +#endif
 # include "pelt.c"
 #endif
 #include "cputime.c"
 -#include "deadline.c"
 +#ifndef CONFIG_SCHED_ALT
 +#include "deadline.c"
 +#endif
@@ -9453,7 +9471,7 @@ index 99bdd96f454f..23f80a86d2d7 100644
 --- a/kernel/sched/build_utility.c
 +++ b/kernel/sched/build_utility.c
 @@ -85,7 +85,9 @@
 #ifdef CONFIG_SMP
 # include "cpupri.c"
 +#ifndef CONFIG_SCHED_ALT
@@ -9461,14 +9479,14 @@ index 99bdd96f454f..23f80a86d2d7 100644
 +#endif
 # include "topology.c"
 #endif
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
 index 1207c78f85c1..68812e0756cb 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 struct rq *rq = cpu_rq(sg_cpu->cpu);
 sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
 +#ifndef CONFIG_SCHED_ALT
 sg_cpu->bw_dl = cpu_bw_dl(rq);
@@ -9479,7 +9497,7 @@ index 1207c78f85c1..68812e0756cb 100644
 + sg_cpu->util = rq_load_util(rq, sg_cpu->max);
 +#endif /* CONFIG_SCHED_ALT */
 }
 /**
 @@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 */
@@ -9490,11 +9508,11 @@ index 1207c78f85c1..68812e0756cb 100644
 sg_cpu->sg_policy->limits_changed = true;
 +#endif
 }
 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
 @@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 }
 ret = sched_setattr_nocheck(thread, &attr);
 +
 if (ret) {
@@ -9509,7 +9527,7 @@ index 1207c78f85c1..68812e0756cb 100644
 +#endif /* CONFIG_SCHED_ALT */
 }
 static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
 index 95fc77853743..b48b3f9ed47f 100644
 --- a/kernel/sched/cputime.c
@@ -9517,15 +9535,15 @@ index 95fc77853743..b48b3f9ed47f 100644
 @@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
 p->utime += cputime;
 account_group_user_time(p, cputime);
 - index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 + index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
 /* Add user time to cpustat. */
 task_group_account_field(p, index, cputime);
 @@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
 p->gtime += cputime;
 /* Add guest time to cpustat. */
 - if (task_nice(p) > 0) {
 + if (task_running_nice(p)) {
@@ -9543,12 +9561,12 @@ index 95fc77853743..b48b3f9ed47f 100644
 static u64 read_sum_exec_runtime(struct task_struct *t)
 @@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 struct rq *rq;
 rq = task_rq_lock(t, &rf);
 - ns = t->se.sum_exec_runtime;
 + ns = tsk_seruntime(t);
 task_rq_unlock(rq, t, &rf);
 return ns;
 @@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
@@ -9557,7 +9575,7 @@ index 95fc77853743..b48b3f9ed47f 100644
 - .sum_exec_runtime = p->se.sum_exec_runtime,
 + .sum_exec_runtime = tsk_seruntime(p),
 };
 if (task_cputime(p, &cputime.utime, &cputime.stime))
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
 index 1637b65ba07a..033c6deeb515 100644
@@ -9566,39 +9584,39 @@ index 1637b65ba07a..033c6deeb515 100644
@@ -7,6 +7,7 @@ @@ -7,6 +7,7 @@
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*/ */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
/* /*
* This allows printing both to /proc/sched_debug and * This allows printing both to /proc/sched_debug and
* to the console * to the console
@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = { @@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
}; };
#endif /* SMP */ #endif /* SMP */
+#endif /* !CONFIG_SCHED_ALT */ +#endif /* !CONFIG_SCHED_ALT */
#ifdef CONFIG_PREEMPT_DYNAMIC #ifdef CONFIG_PREEMPT_DYNAMIC
@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = { @@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
#endif /* CONFIG_PREEMPT_DYNAMIC */ #endif /* CONFIG_PREEMPT_DYNAMIC */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
__read_mostly bool sched_debug_verbose; __read_mostly bool sched_debug_verbose;
static const struct seq_operations sched_debug_sops; static const struct seq_operations sched_debug_sops;
@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = { @@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
.llseek = seq_lseek, .llseek = seq_lseek,
.release = seq_release, .release = seq_release,
}; };
+#endif /* !CONFIG_SCHED_ALT */ +#endif /* !CONFIG_SCHED_ALT */
static struct dentry *debugfs_sched; static struct dentry *debugfs_sched;
@@ -302,12 +306,15 @@ static __init int sched_init_debug(void) @@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
debugfs_sched = debugfs_create_dir("sched", NULL); debugfs_sched = debugfs_create_dir("sched", NULL);
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops); debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose); debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
@@ -9606,31 +9624,31 @@ index 1637b65ba07a..033c6deeb515 100644
#ifdef CONFIG_PREEMPT_DYNAMIC #ifdef CONFIG_PREEMPT_DYNAMIC
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops); debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif #endif
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency); debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity); debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity); debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
@@ -337,11 +344,13 @@ static __init int sched_init_debug(void) @@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
#endif #endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+#endif /* !CONFIG_SCHED_ALT */ +#endif /* !CONFIG_SCHED_ALT */
return 0; return 0;
} }
late_initcall(sched_init_debug); late_initcall(sched_init_debug);
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static cpumask_var_t sd_sysctl_cpus; static cpumask_var_t sd_sysctl_cpus;
@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p) @@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
memset(&p->stats, 0, sizeof(p->stats)); memset(&p->stats, 0, sizeof(p->stats));
#endif #endif
} }
+#endif /* !CONFIG_SCHED_ALT */ +#endif /* !CONFIG_SCHED_ALT */
void resched_latency_warn(int cpu, u64 latency) void resched_latency_warn(int cpu, u64 latency)
{ {
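Taken together, the debug.c hunks wrap everything except the debugfs directory itself and the PREEMPT_DYNAMIC "preempt" file in #ifndef CONFIG_SCHED_ALT. Assuming the elided middle of the function sits inside the same guards, sched_init_debug() collapses to roughly this when the alternative scheduler is enabled:

static __init int sched_init_debug(void)
{
	/* Only the bare directory survives under CONFIG_SCHED_ALT... */
	debugfs_sched = debugfs_create_dir("sched", NULL);

#ifdef CONFIG_PREEMPT_DYNAMIC
	/* ...plus the preempt-mode switch, which is scheduler-agnostic. */
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
	return 0;
}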
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
@@ -9640,7 +9658,7 @@ index f26ab2675f7d..480d4ad16d45 100644
@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state) @@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
do_idle(); do_idle();
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
/* /*
* idle-task scheduling class. * idle-task scheduling class.
@@ -9790,17 +9808,17 @@ index 0f310768260c..bd38bf738fe9 100644
@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) @@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
WRITE_ONCE(sa->util_avg, sa->util_sum / divider); WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
/* /*
* sched_entity: * sched_entity:
* *
@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) @@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0; return 0;
} }
+#endif +#endif
-#ifdef CONFIG_SCHED_THERMAL_PRESSURE -#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) +#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
/* /*
@@ -9813,7 +9831,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
@@ -1,13 +1,15 @@ @@ -1,13 +1,15 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#include "sched-pelt.h" #include "sched-pelt.h"
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
@@ -9821,16 +9839,16 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+#endif +#endif
-#ifdef CONFIG_SCHED_THERMAL_PRESSURE -#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) +#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
static inline u64 thermal_load_avg(struct rq *rq) static inline u64 thermal_load_avg(struct rq *rq)
@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) @@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return PELT_MIN_DIVIDER + avg->period_contrib; return PELT_MIN_DIVIDER + avg->period_contrib;
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline void cfs_se_util_change(struct sched_avg *avg) static inline void cfs_se_util_change(struct sched_avg *avg)
{ {
@@ -9840,9 +9858,9 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
} }
#endif #endif
+#endif /* CONFIG_SCHED_ALT */ +#endif /* CONFIG_SCHED_ALT */
#else #else
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline int static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -9852,7 +9870,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
return 0; return 0;
} }
+#endif +#endif
static inline int static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ -9862,7 +9880,7 @@ index a4a20046e586..c363693cd869 100644
@@ -5,6 +5,10 @@ @@ -5,6 +5,10 @@
#ifndef _KERNEL_SCHED_SCHED_H #ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H #define _KERNEL_SCHED_SCHED_H
+#ifdef CONFIG_SCHED_ALT +#ifdef CONFIG_SCHED_ALT
+#include "alt_sched.h" +#include "alt_sched.h"
+#else +#else
@@ -9873,7 +9891,7 @@ index a4a20046e586..c363693cd869 100644
@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr, @@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
cgroup_account_cputime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec);
} }
+static inline int task_running_nice(struct task_struct *p) +static inline int task_running_nice(struct task_struct *p)
+{ +{
+ return (task_nice(p) > 0); + return (task_nice(p) > 0);
@@ -9897,7 +9915,7 @@ index 857f837f52cb..5486c63e4790 100644
rq = cpu_rq(cpu); rq = cpu_rq(cpu);
@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v) @@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
seq_printf(seq, "\n"); seq_printf(seq, "\n");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
/* domain-specific stats */ /* domain-specific stats */
@@ -9916,9 +9934,9 @@ index 84a188913cc9..53934e7ef5db 100644
--- a/kernel/sched/stats.h --- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h +++ b/kernel/sched/stats.h
@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
#endif /* CONFIG_SCHEDSTATS */ #endif /* CONFIG_SCHEDSTATS */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats { struct sched_entity_stats {
@@ -9928,7 +9946,7 @@ index 84a188913cc9..53934e7ef5db 100644
return &task_of(se)->stats; return &task_of(se)->stats;
} }
+#endif /* CONFIG_SCHED_ALT */ +#endif /* CONFIG_SCHED_ALT */
#ifdef CONFIG_PSI #ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set); void psi_task_change(struct task_struct *task, int clear, int set);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
@@ -9938,37 +9956,37 @@ index 8739c2a5a54e..d8dd6c15eb47 100644
@@ -3,6 +3,7 @@ @@ -3,6 +3,7 @@
* Scheduler topology setup/handling methods * Scheduler topology setup/handling methods
*/ */
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
DEFINE_MUTEX(sched_domains_mutex); DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */ /* Protected by sched_domains_mutex: */
@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void) @@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
*/ */
static int default_relax_domain_level = -1; static int default_relax_domain_level = -1;
+#endif /* CONFIG_SCHED_ALT */ +#endif /* CONFIG_SCHED_ALT */
int sched_domain_level_max; int sched_domain_level_max;
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static int __init setup_relax_domain_level(char *str) static int __init setup_relax_domain_level(char *str)
{ {
if (kstrtoint(str, 0, &default_relax_domain_level)) if (kstrtoint(str, 0, &default_relax_domain_level))
@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl, @@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
return sd; return sd;
} }
+#endif /* CONFIG_SCHED_ALT */ +#endif /* CONFIG_SCHED_ALT */
/* /*
* Topology list, bottom-up. * Topology list, bottom-up.
@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl) @@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
sched_domain_topology_saved = NULL; sched_domain_topology_saved = NULL;
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu) static const struct cpumask *sd_numa_mask(int cpu)
@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], @@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
@@ -9991,9 +10009,9 @@ index c6d9dec11b74..2bc42ce8b48e 100644
--- a/kernel/sysctl.c --- a/kernel/sysctl.c
+++ b/kernel/sysctl.c +++ b/kernel/sysctl.c
@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); @@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
/* Constants used for minimum and maximum */ /* Constants used for minimum and maximum */
+#ifdef CONFIG_SCHED_ALT +#ifdef CONFIG_SCHED_ALT
+extern int sched_yield_type; +extern int sched_yield_type;
+#endif +#endif
@@ -10003,7 +10021,7 @@ index c6d9dec11b74..2bc42ce8b48e 100644
#endif #endif
@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write, @@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
} }
static struct ctl_table kern_table[] = { static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_NUMA_BALANCING #ifdef CONFIG_NUMA_BALANCING
@@ -10042,13 +10060,13 @@ index 3ae661ab6260..35f0176dcdb0 100644
@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, @@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
int ret = 0; int ret = 0;
u64 slack; u64 slack;
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
slack = current->timer_slack_ns; slack = current->timer_slack_ns;
if (dl_task(current) || rt_task(current)) if (dl_task(current) || rt_task(current))
+#endif +#endif
slack = 0; slack = 0;
hrtimer_init_sleeper_on_stack(&t, clockid, mode); hrtimer_init_sleeper_on_stack(&t, clockid, mode);
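Because prjc has no SCHED_DEADLINE class, the guard above compiles out both the slack lookup and the dl_task()/rt_task() test, leaving the assignment unconditional. The preprocessed result under CONFIG_SCHED_ALT is simply:

	/* effective hrtimer_nanosleep() body under CONFIG_SCHED_ALT (sketch) */
	u64 slack = 0;	/* timer_slack_ns is ignored; every task gets exact wakeups */

so all sleepers, not just realtime ones, skip the slack-based timer coalescing.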
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index cb925e8ef9a8..67d823510f5c 100644 index cb925e8ef9a8..67d823510f5c 100644
@@ -10056,17 +10074,17 @@ index cb925e8ef9a8..67d823510f5c 100644
+++ b/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c
@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples) @@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
u64 stime, utime; u64 stime, utime;
task_cputime(p, &utime, &stime); task_cputime(p, &utime, &stime);
- store_samples(samples, stime, utime, p->se.sum_exec_runtime); - store_samples(samples, stime, utime, p->se.sum_exec_runtime);
+ store_samples(samples, stime, utime, tsk_seruntime(p)); + store_samples(samples, stime, utime, tsk_seruntime(p));
} }
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at, static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, @@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
} }
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
static inline void check_dl_overrun(struct task_struct *tsk) static inline void check_dl_overrun(struct task_struct *tsk)
{ {
@@ -10076,18 +10094,18 @@ index cb925e8ef9a8..67d823510f5c 100644
} }
} }
+#endif +#endif
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard) static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{ {
@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk, @@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
u64 samples[CPUCLOCK_MAX]; u64 samples[CPUCLOCK_MAX];
unsigned long soft; unsigned long soft;
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
if (dl_task(tsk)) if (dl_task(tsk))
check_dl_overrun(tsk); check_dl_overrun(tsk);
+#endif +#endif
if (expiry_cache_is_inactive(pct)) if (expiry_cache_is_inactive(pct))
return; return;
@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk, @@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -10097,17 +10115,17 @@ index cb925e8ef9a8..67d823510f5c 100644
- unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); - unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
+ unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ); + unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */ /* At the hard limit, send SIGKILL. No further action. */
@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) @@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
return true; return true;
} }
+#ifndef CONFIG_SCHED_ALT +#ifndef CONFIG_SCHED_ALT
if (dl_task(tsk) && tsk->dl.dl_overrun) if (dl_task(tsk) && tsk->dl.dl_overrun)
return true; return true;
+#endif +#endif
return false; return false;
} }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
@@ -10129,4 +10147,4 @@ index a2d301f58ced..2ccdede8585c 100644
+#endif +#endif
}; };
struct wakeup_test_data *x = data; struct wakeup_test_data *x = data;
File diff suppressed because it is too large
@@ -1,62 +1,8 @@
From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Wed, 22 Feb 2023 19:24:36 +0100
Subject: [PATCH] prjc
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
.../admin-guide/kernel-parameters.txt | 6 +
Documentation/admin-guide/sysctl/kernel.rst | 10 +
Documentation/scheduler/sched-BMQ.txt | 110 +
fs/proc/base.c | 2 +-
include/asm-generic/resource.h | 2 +-
include/linux/sched.h | 33 +-
include/linux/sched/deadline.h | 20 +
include/linux/sched/prio.h | 26 +
include/linux/sched/rt.h | 2 +
include/linux/sched/topology.h | 3 +-
init/Kconfig | 34 +
init/init_task.c | 18 +
kernel/Kconfig.preempt | 2 +-
kernel/cgroup/cpuset.c | 4 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 4 +-
kernel/locking/rtmutex.c | 16 +-
kernel/sched/Makefile | 5 +
kernel/sched/alt_core.c | 8111 +++++++++++++++++
kernel/sched/alt_debug.c | 31 +
kernel/sched/alt_sched.h | 671 ++
kernel/sched/bmq.h | 110 +
kernel/sched/build_policy.c | 8 +-
kernel/sched/build_utility.c | 2 +
kernel/sched/cpufreq_schedutil.c | 10 +
kernel/sched/cputime.c | 10 +-
kernel/sched/debug.c | 10 +
kernel/sched/idle.c | 2 +
kernel/sched/pds.h | 127 +
kernel/sched/pelt.c | 4 +-
kernel/sched/pelt.h | 8 +-
kernel/sched/sched.h | 9 +
kernel/sched/stats.c | 4 +
kernel/sched/stats.h | 2 +
kernel/sched/topology.c | 17 +
kernel/sysctl.c | 15 +
kernel/time/hrtimer.c | 2 +
kernel/time/posix-cpu-timers.c | 10 +-
kernel/trace/trace_selftest.c | 5 +
39 files changed, 9445 insertions(+), 22 deletions(-)
create mode 100644 Documentation/scheduler/sched-BMQ.txt
create mode 100644 kernel/sched/alt_core.c
create mode 100644 kernel/sched/alt_debug.c
create mode 100644 kernel/sched/alt_sched.h
create mode 100644 kernel/sched/bmq.h
create mode 100644 kernel/sched/pds.h
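The diffstat shows the shape of the port: a single large alt_core.c plus the shared alt_sched.h, with the BMQ and PDS policies layered on top as headers, while the stock files mostly just gain CONFIG_SCHED_ALT guards. Pieced together from the sched.h and alt_core.c hunks, the header plumbing looks like this (the CONFIG_SCHED_BMQ/CONFIG_SCHED_PDS symbol names are assumed from the bmq.h/pds.h file names):

/* kernel/sched/sched.h */
#ifdef CONFIG_SCHED_ALT
#include "alt_sched.h"	/* alt-scheduler core replaces the stock header */
#else
/* ... stock sched.h body ... */
#endif

/* kernel/sched/alt_core.c (sketch) */
#ifdef CONFIG_SCHED_BMQ
#include "bmq.h"	/* bitmap-queue policy */
#endif
#ifdef CONFIG_SCHED_PDS
#include "pds.h"	/* priority/deadline-sorted policy */
#endif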
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3996cf..1b6a407213da 100644 index 42af9ca0127e..31747ec54f9d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt --- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5437,6 +5437,12 @@ @@ -5406,6 +5406,12 @@
sa1100ir [NET] sa1100ir [NET]
See drivers/net/irda/sa1100_ir.c. See drivers/net/irda/sa1100_ir.c.
@@ -70,10 +16,10 @@ index 6cfa6e3996cf..1b6a407213da 100644
schedstats= [KNL,X86] Enable or disable scheduled statistics. schedstats= [KNL,X86] Enable or disable scheduled statistics.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 46e3d62c0eea..fb4568c919d0 100644 index 98d1b198b2b4..d7c78a107f93 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst --- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -1597,3 +1597,13 @@ is 10 seconds. @@ -1552,3 +1552,13 @@ is 10 seconds.
The softlockup threshold is (``2 * watchdog_thresh``). Setting this The softlockup threshold is (``2 * watchdog_thresh``). Setting this
tunable to zero will disable lockup detection altogether. tunable to zero will disable lockup detection altogether.
@@ -230,7 +176,7 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
} }
diff --git a/include/linux/sched.h b/include/linux/sched.h diff --git a/include/linux/sched.h b/include/linux/sched.h
index 853d08f7562b..ad7e050d7455 100644 index ffb6eb55cd13..2e730a59caa2 100644
--- a/include/linux/sched.h --- a/include/linux/sched.h
+++ b/include/linux/sched.h +++ b/include/linux/sched.h
@@ -762,8 +762,14 @@ struct task_struct { @@ -762,8 +762,14 @@ struct task_struct {
@@ -286,7 +232,7 @@ index 853d08f7562b..ad7e050d7455 100644
#ifdef CONFIG_CGROUP_SCHED #ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group; struct task_group *sched_task_group;
@@ -1539,6 +1561,15 @@ struct task_struct { @@ -1545,6 +1567,15 @@ struct task_struct {
*/ */
}; };
@@ -406,10 +352,10 @@ index 816df6cc444e..c8da08e18c91 100644
#else #else
static inline void rebuild_sched_domains_energy(void) static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig diff --git a/init/Kconfig b/init/Kconfig
index 44e90b28a30f..af24591984ab 100644 index 94125d3b6893..c87ba766d354 100644
--- a/init/Kconfig --- a/init/Kconfig
+++ b/init/Kconfig +++ b/init/Kconfig
@@ -821,6 +821,7 @@ menu "Scheduler features" @@ -819,6 +819,7 @@ menu "Scheduler features"
config UCLAMP_TASK config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks" bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -417,7 +363,7 @@ index 44e90b28a30f..af24591984ab 100644
help help
This feature enables the scheduler to track the clamped utilization This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU. of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT @@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value. If in doubt, use the default value.
@@ -453,7 +399,7 @@ index 44e90b28a30f..af24591984ab 100644
endmenu endmenu
# #
@@ -924,6 +954,7 @@ config NUMA_BALANCING @@ -918,6 +948,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION && !PREEMPT_RT depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -461,7 +407,7 @@ index 44e90b28a30f..af24591984ab 100644
help help
This option adds support for automatic NUMA aware memory/task placement. This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when The mechanism is quite primitive and is based on migrating memory when
@@ -1021,6 +1052,7 @@ config FAIR_GROUP_SCHED @@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
depends on CGROUP_SCHED depends on CGROUP_SCHED
default CGROUP_SCHED default CGROUP_SCHED
@@ -469,7 +415,7 @@ index 44e90b28a30f..af24591984ab 100644
config CFS_BANDWIDTH config CFS_BANDWIDTH
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
depends on FAIR_GROUP_SCHED depends on FAIR_GROUP_SCHED
@@ -1043,6 +1075,7 @@ config RT_GROUP_SCHED @@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
realtime bandwidth for them. realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information. See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -477,7 +423,7 @@ index 44e90b28a30f..af24591984ab 100644
endif #CGROUP_SCHED endif #CGROUP_SCHED
config UCLAMP_TASK_GROUP config UCLAMP_TASK_GROUP
@@ -1287,6 +1320,7 @@ config CHECKPOINT_RESTORE @@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP config SCHED_AUTOGROUP
bool "Automatic process group scheduling" bool "Automatic process group scheduling"
@@ -545,10 +491,10 @@ index c2f1fd95a821..41654679b1b2 100644
This option permits Core Scheduling, a means of coordinated task This option permits Core Scheduling, a means of coordinated task
selection across SMT siblings. When enabled -- see selection across SMT siblings. When enabled -- see
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ca826bd1eba3..60e194f1d6d8 100644 index b474289c15b8..a23224b45b03 100644
--- a/kernel/cgroup/cpuset.c --- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c
@@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) @@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret; return ret;
} }
@@ -557,7 +503,7 @@ index ca826bd1eba3..60e194f1d6d8 100644
/* /*
* Helper routine for generate_sched_domains(). * Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks? * Do cpusets a, b have overlapping effective cpus_allowed masks?
@@ -1187,7 +1187,7 @@ static void rebuild_sched_domains_locked(void) @@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */ /* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr); partition_and_rebuild_sched_domains(ndoms, doms, attr);
} }
@@ -580,10 +526,10 @@ index e39cb696cfbd..463423572e09 100644
d->cpu_count += t1; d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c diff --git a/kernel/exit.c b/kernel/exit.c
index 15dc2ec80c46..1e583e0f89a7 100644 index 35e0a31a0315..64e368441cf4 100644
--- a/kernel/exit.c --- a/kernel/exit.c
+++ b/kernel/exit.c +++ b/kernel/exit.c
@@ -172,7 +172,7 @@ static void __exit_signal(struct task_struct *tsk) @@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk); sig->curr_target = next_thread(tsk);
} }
@@ -592,7 +538,7 @@ index 15dc2ec80c46..1e583e0f89a7 100644
sizeof(unsigned long long)); sizeof(unsigned long long));
/* /*
@@ -193,7 +193,7 @@ static void __exit_signal(struct task_struct *tsk) @@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk); sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk); sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac); task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -602,10 +548,10 @@ index 15dc2ec80c46..1e583e0f89a7 100644
__unhash_process(tsk, group_dead); __unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock); write_sequnlock(&sig->stats_lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 728f434de2bb..0e1082a4e878 100644 index 7779ee8abc2a..5b9893cdfb1b 100644
--- a/kernel/locking/rtmutex.c --- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c
@@ -337,21 +337,25 @@ static __always_inline void @@ -300,21 +300,25 @@ static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{ {
waiter->prio = __waiter_prio(task); waiter->prio = __waiter_prio(task);
@@ -633,7 +579,7 @@ index 728f434de2bb..0e1082a4e878 100644
/* /*
* If both waiters have dl_prio(), we check the deadlines of the * If both waiters have dl_prio(), we check the deadlines of the
* associated tasks. * associated tasks.
@@ -360,16 +364,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, @@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
*/ */
if (dl_prio(left->prio)) if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline); return dl_time_before(left->deadline, right->deadline);
@@ -656,7 +602,7 @@ index 728f434de2bb..0e1082a4e878 100644
/* /*
* If both waiters have dl_prio(), we check the deadlines of the * If both waiters have dl_prio(), we check the deadlines of the
* associated tasks. * associated tasks.
@@ -378,8 +388,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, @@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
*/ */
if (dl_prio(left->prio)) if (dl_prio(left->prio))
return left->deadline == right->deadline; return left->deadline == right->deadline;
@@ -686,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644 new file mode 100644
index 000000000000..f5e9c01f9382 index 000000000000..acb8657e811d
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_core.c +++ b/kernel/sched/alt_core.c
@@ -0,0 +1,8111 @@ @@ -0,0 +1,7978 @@
+/* +/*
+ * kernel/sched/alt_core.c + * kernel/sched/alt_core.c
+ * + *
@@ -759,7 +705,7 @@ index 000000000000..f5e9c01f9382
+#define sched_feat(x) (0) +#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */ +#endif /* CONFIG_SCHED_DEBUG */
+ +
+#define ALT_SCHED_VERSION "v6.2-r0" +#define ALT_SCHED_VERSION "v6.1-r3"
+ +
+/* rt_prio(prio) defined in include/linux/sched/rt.h */ +/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio) +#define rt_task(p) rt_prio((p)->prio)
@@ -780,12 +726,6 @@ index 000000000000..f5e9c01f9382
+#include "pds.h" +#include "pds.h"
+#endif +#endif
+ +
+struct affinity_context {
+ const struct cpumask *new_mask;
+ struct cpumask *user_mask;
+ unsigned int flags;
+};
+
+static int __init sched_timeslice(char *str) +static int __init sched_timeslice(char *str)
+{ +{
+ int timeslice_ms; + int timeslice_ms;
@@ -848,14 +788,6 @@ index 000000000000..f5e9c01f9382
+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp; +static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0]; +static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
+ +
+/* task function */
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+ if (!p->user_cpus_ptr)
+ return cpu_possible_mask; /* &init_task.cpus_mask */
+ return p->user_cpus_ptr;
+}
+
+/* sched_queue related functions */ +/* sched_queue related functions */
+static inline void sched_queue_init(struct sched_queue *q) +static inline void sched_queue_init(struct sched_queue *q)
+{ +{
@@ -1468,7 +1400,7 @@ index 000000000000..f5e9c01f9382
+ +
+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \ +#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
+ sched_info_enqueue(rq, p); \ + sched_info_enqueue(rq, p); \
+ psi_enqueue(p, flags & ENQUEUE_WAKEUP); \ + psi_enqueue(p, flags); \
+ \ + \
+ p->sq_idx = task_sched_prio_idx(p, rq); \ + p->sq_idx = task_sched_prio_idx(p, rq); \
+ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \ + list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
@@ -2336,101 +2268,35 @@ index 000000000000..f5e9c01f9382
+} +}
+ +
+static inline void +static inline void
+set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) +set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ cpumask_copy(&p->cpus_mask, ctx->new_mask); + cpumask_copy(&p->cpus_mask, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask);
+
+ /*
+ * Swap in a new user_cpus_ptr if SCA_USER flag set
+ */
+ if (ctx->flags & SCA_USER)
+ swap(p->user_cpus_ptr, ctx->user_mask);
+} +}
+ +
+static void +static void
+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) +__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ lockdep_assert_held(&p->pi_lock); + lockdep_assert_held(&p->pi_lock);
+ set_cpus_allowed_common(p, ctx); + set_cpus_allowed_common(p, new_mask);
+} +}
+ +
+/*
+ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
+ * affinity (if any) should be destroyed too.
+ */
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ struct affinity_context ac = { + __do_set_cpus_allowed(p, new_mask);
+ .new_mask = new_mask,
+ .user_mask = NULL,
+ .flags = SCA_USER, /* clear the user requested mask */
+ };
+ union cpumask_rcuhead {
+ cpumask_t cpumask;
+ struct rcu_head rcu;
+ };
+
+ __do_set_cpus_allowed(p, &ac);
+
+ /*
+ * Because this is called with p->pi_lock held, it is not possible
+ * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
+ * kfree_rcu().
+ */
+ kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
+}
+
+static cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ /*
+ * See do_set_cpus_allowed() above for the rcu_head usage.
+ */
+ int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
+
+ return kmalloc_node(size, GFP_KERNEL, node);
+} +}
+ +
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, +int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+ int node) + int node)
+{ +{
+ cpumask_t *user_mask; + if (!src->user_cpus_ptr)
+ unsigned long flags;
+
+ /*
+ * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
+ * may differ by now due to racing.
+ */
+ dst->user_cpus_ptr = NULL;
+
+ /*
+ * This check is racy and losing the race is a valid situation.
+ * It is not worth the extra overhead of taking the pi_lock on
+ * every fork/clone.
+ */
+ if (data_race(!src->user_cpus_ptr))
+ return 0; + return 0;
+ +
+ user_mask = alloc_user_cpus_ptr(node); + dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+ if (!user_mask) + if (!dst->user_cpus_ptr)
+ return -ENOMEM; + return -ENOMEM;
+ +
+ /* + cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+ * Use pi_lock to protect content of user_cpus_ptr
+ *
+ * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
+ * do_set_cpus_allowed().
+ */
+ raw_spin_lock_irqsave(&src->pi_lock, flags);
+ if (src->user_cpus_ptr) {
+ swap(dst->user_cpus_ptr, user_mask);
+ cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+ }
+ raw_spin_unlock_irqrestore(&src->pi_lock, flags);
+
+ if (unlikely(user_mask))
+ kfree(user_mask);
+
+ return 0; + return 0;
+} +}
+ +
@@ -2775,8 +2641,6 @@ index 000000000000..f5e9c01f9382
+ +
+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu, +static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
+ raw_spinlock_t *lock, unsigned long irq_flags) + raw_spinlock_t *lock, unsigned long irq_flags)
+ __releases(rq->lock)
+ __releases(p->pi_lock)
+{ +{
+ /* Can the task run on the task's current CPU? If so, we're done */ + /* Can the task run on the task's current CPU? If so, we're done */
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { + if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -2814,7 +2678,8 @@ index 000000000000..f5e9c01f9382
+} +}
+ +
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p, +static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+ struct affinity_context *ctx, + const struct cpumask *new_mask,
+ u32 flags,
+ struct rq *rq, + struct rq *rq,
+ raw_spinlock_t *lock, + raw_spinlock_t *lock,
+ unsigned long irq_flags) + unsigned long irq_flags)
@@ -2822,6 +2687,7 @@ index 000000000000..f5e9c01f9382
+ const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); + const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
+ const struct cpumask *cpu_valid_mask = cpu_active_mask; + const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ bool kthread = p->flags & PF_KTHREAD; + bool kthread = p->flags & PF_KTHREAD;
+ struct cpumask *user_mask = NULL;
+ int dest_cpu; + int dest_cpu;
+ int ret = 0; + int ret = 0;
+ +
@@ -2839,7 +2705,7 @@ index 000000000000..f5e9c01f9382
+ cpu_valid_mask = cpu_online_mask; + cpu_valid_mask = cpu_online_mask;
+ } + }
+ +
+ if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { + if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
@@ -2848,23 +2714,30 @@ index 000000000000..f5e9c01f9382
+ * Must re-check here, to close a race against __kthread_bind(), + * Must re-check here, to close a race against __kthread_bind(),
+ * sched_setaffinity() is not guaranteed to observe the flag. + * sched_setaffinity() is not guaranteed to observe the flag.
+ */ + */
+ if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { + if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
+ +
+ if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) + if (cpumask_equal(&p->cpus_mask, new_mask))
+ goto out; + goto out;
+ +
+ dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask); + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) { + if (dest_cpu >= nr_cpu_ids) {
+ ret = -EINVAL; + ret = -EINVAL;
+ goto out; + goto out;
+ } + }
+ +
+ __do_set_cpus_allowed(p, ctx); + __do_set_cpus_allowed(p, new_mask);
+ +
+ return affine_move_task(rq, p, dest_cpu, lock, irq_flags); + if (flags & SCA_USER)
+ user_mask = clear_user_cpus_ptr(p);
+
+ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
+
+ kfree(user_mask);
+
+ return ret;
+ +
+out: +out:
+ __task_access_unlock(p, lock); + __task_access_unlock(p, lock);
@@ -2875,6 +2748,7 @@ index 000000000000..f5e9c01f9382
+ +
+/* +/*
+ * Change a given task's CPU affinity. Migrate the thread to a + * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask. + * is removed from the allowed bitmask.
+ * + *
+ * NOTE: the caller must have a valid reference to the task, the + * NOTE: the caller must have a valid reference to the task, the
@@ -2882,7 +2756,7 @@ index 000000000000..f5e9c01f9382
+ * call is not atomic; no spinlocks may be held. + * call is not atomic; no spinlocks may be held.
+ */ + */
+static int __set_cpus_allowed_ptr(struct task_struct *p, +static int __set_cpus_allowed_ptr(struct task_struct *p,
+ struct affinity_context *ctx) + const struct cpumask *new_mask, u32 flags)
+{ +{
+ unsigned long irq_flags; + unsigned long irq_flags;
+ struct rq *rq; + struct rq *rq;
@@ -2890,36 +2764,20 @@ index 000000000000..f5e9c01f9382
+ +
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock); + rq = __task_access_lock(p, &lock);
+ /*
+ * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
+ * flags are set.
+ */
+ if (p->user_cpus_ptr &&
+ !(ctx->flags & SCA_USER) &&
+ cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
+ ctx->new_mask = rq->scratch_mask;
+ +
+ + return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
+ return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
+} +}
+ +
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{ +{
+ struct affinity_context ac = { + return __set_cpus_allowed_ptr(p, new_mask, 0);
+ .new_mask = new_mask,
+ .flags = 0,
+ };
+
+ return __set_cpus_allowed_ptr(p, &ac);
+} +}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+ +
+/* +/*
+ * Change a given task's CPU affinity to the intersection of its current + * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask. + * affinity mask and @subset_mask, writing the resulting mask to @new_mask
+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU + * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * affinity or use cpu_online_mask instead.
+ *
+ * If the resulting mask is empty, leave the affinity unchanged and return + * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL. + * -EINVAL.
+ */ + */
@@ -2927,34 +2785,48 @@ index 000000000000..f5e9c01f9382
+ struct cpumask *new_mask, + struct cpumask *new_mask,
+ const struct cpumask *subset_mask) + const struct cpumask *subset_mask)
+{ +{
+ struct affinity_context ac = { + struct cpumask *user_mask = NULL;
+ .new_mask = new_mask,
+ .flags = 0,
+ };
+ unsigned long irq_flags; + unsigned long irq_flags;
+ raw_spinlock_t *lock; + raw_spinlock_t *lock;
+ struct rq *rq; + struct rq *rq;
+ int err; + int err;
+ +
+ if (!p->user_cpus_ptr) {
+ user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!user_mask)
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock); + rq = __task_access_lock(p, &lock);
+ +
+ if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { + if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+ err = -EINVAL; + err = -EINVAL;
+ goto err_unlock; + goto err_unlock;
+ } + }
+ +
+ return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags); + /*
+ * We're about to butcher the task affinity, so keep track of what
+ * the user asked for in case we're able to restore it later on.
+ */
+ if (user_mask) {
+ cpumask_copy(user_mask, p->cpus_ptr);
+ p->user_cpus_ptr = user_mask;
+ }
+
+ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
+ +
+err_unlock: +err_unlock:
+ __task_access_unlock(p, lock); + __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); + raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ kfree(user_mask);
+ return err; + return err;
+} +}
+ +
+/* +/*
+ * Restrict the CPU affinity of task @p so that it is a subset of + * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the + * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk + * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask. + * up the cpuset hierarchy until we find a suitable mask.
+ */ + */
@@ -2998,29 +2870,34 @@ index 000000000000..f5e9c01f9382
+} +}
+ +
+static int +static int
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+ +
+/* +/*
+ * Restore the affinity of a task @p which was previously restricted by a + * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). + * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
+ * @p->user_cpus_ptr.
+ * + *
+ * It is the caller's responsibility to serialise this with any calls to + * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p). + * force_compatible_cpus_allowed_ptr(@p).
+ */ + */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p) +void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{ +{
+ struct affinity_context ac = { + struct cpumask *user_mask = p->user_cpus_ptr;
+ .new_mask = task_user_cpus(p), + unsigned long flags;
+ .flags = 0,
+ };
+ int ret;
+ +
+ /* + /*
+ * Try to restore the old affinity mask with __sched_setaffinity(). + * Try to restore the old affinity mask. If this fails, then
+ * Cpuset masking will be done there too. + * we free the mask explicitly to avoid it being inherited across
+ * a subsequent fork().
+ */ + */
+ ret = __sched_setaffinity(p, &ac); + if (!user_mask || !__sched_setaffinity(p, user_mask))
+ WARN_ON_ONCE(ret); + return;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ user_mask = clear_user_cpus_ptr(p);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ kfree(user_mask);
+} +}
+ +
+#else /* CONFIG_SMP */ +#else /* CONFIG_SMP */
@@ -3032,9 +2909,9 @@ index 000000000000..f5e9c01f9382
+ +
+static inline int +static inline int
+__set_cpus_allowed_ptr(struct task_struct *p, +__set_cpus_allowed_ptr(struct task_struct *p,
+ struct affinity_context *ctx) + const struct cpumask *new_mask, u32 flags)
+{ +{
+ return set_cpus_allowed_ptr(p, ctx->new_mask); + return set_cpus_allowed_ptr(p, new_mask);
+} +}
+ +
+static inline bool rq_has_pinned_tasks(struct rq *rq) +static inline bool rq_has_pinned_tasks(struct rq *rq)
@@ -3042,11 +2919,6 @@ index 000000000000..f5e9c01f9382
+ return false; + return false;
+} +}
+ +
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_SMP */ +#endif /* !CONFIG_SMP */
+ +
+static void +static void
@@ -3158,6 +3030,13 @@ index 000000000000..f5e9c01f9382
+ if (!llist) + if (!llist)
+ return; + return;
+ +
+ /*
+ * rq::ttwu_pending racy indication of out-standing wakeups.
+ * Races such that false-negatives are possible, since they
+ * are shorter lived that false-positives would be.
+ */
+ WRITE_ONCE(rq->ttwu_pending, 0);
+
+ rq_lock_irqsave(rq, &rf); + rq_lock_irqsave(rq, &rf);
+ update_rq_clock(rq); + update_rq_clock(rq);
+ +
@@ -3171,17 +3050,6 @@ index 000000000000..f5e9c01f9382
+ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0); + ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
+ } + }
+ +
+ /*
+ * Must be after enqueueing at least once task such that
+ * idle_cpu() does not observe a false-negative -- if it does,
+ * it is possible for select_idle_siblings() to stack a number
+ * of tasks on this CPU during that window.
+ *
+ * It is ok to clear ttwu_pending when another task pending.
+ * We will receive IPI after local irq enabled and then enqueue it.
+ * Since now nr_running > 0, idle_cpu() will always get correct result.
+ */
+ WRITE_ONCE(rq->ttwu_pending, 0);
+ rq_unlock_irqrestore(rq, &rf); + rq_unlock_irqrestore(rq, &rf);
+} +}
+ +
@@ -4767,9 +4635,7 @@ index 000000000000..f5e9c01f9382
+ struct rq *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu);
+ u64 resched_latency; + u64 resched_latency;
+ +
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK)) + arch_scale_freq_tick();
+ arch_scale_freq_tick();
+
+ sched_clock_tick(); + sched_clock_tick();
+ +
+ raw_spin_lock(&rq->lock); + raw_spin_lock(&rq->lock);
@@ -4868,7 +4734,7 @@ index 000000000000..f5e9c01f9382
+ int i; + int i;
+ +
+ for_each_cpu_wrap(i, &chk, cpu) { + for_each_cpu_wrap(i, &chk, cpu) {
+ if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\ + if (cpumask_subset(cpu_smt_mask(i), &chk) &&
+ sg_balance_trigger(i)) + sg_balance_trigger(i))
+ return; + return;
+ } + }
@@ -4991,7 +4857,6 @@ index 000000000000..f5e9c01f9382
+static void sched_tick_stop(int cpu) +static void sched_tick_stop(int cpu)
+{ +{
+ struct tick_work *twork; + struct tick_work *twork;
+ int os;
+ +
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK)) + if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ return; + return;
@@ -4999,10 +4864,7 @@ index 000000000000..f5e9c01f9382
+ WARN_ON_ONCE(!tick_work_cpu); + WARN_ON_ONCE(!tick_work_cpu);
+ +
+ twork = per_cpu_ptr(tick_work_cpu, cpu); + twork = per_cpu_ptr(tick_work_cpu, cpu);
+ /* There cannot be competing actions, but don't rely on stop-machine. */ + cancel_delayed_work_sync(&twork->work);
+ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+ WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+ /* Don't cancel, as this would mess up the state machine. */
+} +}
+#endif /* CONFIG_HOTPLUG_CPU */ +#endif /* CONFIG_HOTPLUG_CPU */
+ +
@@ -5126,7 +4988,8 @@ index 000000000000..f5e9c01f9382
+ pr_err("Preemption disabled at:"); + pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, preempt_disable_ip); + print_ip_sym(KERN_ERR, preempt_disable_ip);
+ } + }
+ check_panic_on_warn("scheduling while atomic"); + if (panic_on_warn)
+ panic("scheduling while atomic\n");
+ +
+ dump_stack(); + dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -5442,7 +5305,7 @@ index 000000000000..f5e9c01f9382
+ prev->sched_contributes_to_load = + prev->sched_contributes_to_load =
+ (prev_state & TASK_UNINTERRUPTIBLE) && + (prev_state & TASK_UNINTERRUPTIBLE) &&
+ !(prev_state & TASK_NOLOAD) && + !(prev_state & TASK_NOLOAD) &&
+ !(prev_state & TASK_FROZEN); + !(prev->flags & TASK_FROZEN);
+ +
+ if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load)
+ rq->nr_uninterruptible++; + rq->nr_uninterruptible++;
@@ -6790,7 +6653,7 @@ index 000000000000..f5e9c01f9382
+#endif +#endif
+ +
+static int +static int
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
+{ +{
+ int retval; + int retval;
+ cpumask_var_t cpus_allowed, new_mask; + cpumask_var_t cpus_allowed, new_mask;
@@ -6804,12 +6667,9 @@ index 000000000000..f5e9c01f9382
+ } + }
+ +
+ cpuset_cpus_allowed(p, cpus_allowed); + cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, ctx->new_mask, cpus_allowed); + cpumask_and(new_mask, mask, cpus_allowed);
+ +again:
+ ctx->new_mask = new_mask; + retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+ ctx->flags |= SCA_CHECK;
+
+ retval = __set_cpus_allowed_ptr(p, ctx);
+ if (retval) + if (retval)
+ goto out_free_new_mask; + goto out_free_new_mask;
+ +
@@ -6821,24 +6681,7 @@ index 000000000000..f5e9c01f9382
+ * cpuset's cpus_allowed + * cpuset's cpus_allowed
+ */ + */
+ cpumask_copy(new_mask, cpus_allowed); + cpumask_copy(new_mask, cpus_allowed);
+ + goto again;
+ /*
+ * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+ * will restore the previous user_cpus_ptr value.
+ *
+ * In the unlikely event a previous user_cpus_ptr exists,
+ * we need to further restrict the mask to what is allowed
+ * by that old user_cpus_ptr.
+ */
+ if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+ bool empty = !cpumask_and(new_mask, new_mask,
+ ctx->user_mask);
+
+ if (WARN_ON_ONCE(empty))
+ cpumask_copy(new_mask, cpus_allowed);
+ }
+ __set_cpus_allowed_ptr(p, ctx);
+ retval = -EINVAL;
+ } + }
+ +
+out_free_new_mask: +out_free_new_mask:
@@ -6850,8 +6693,6 @@ index 000000000000..f5e9c01f9382
+ +
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{ +{
+ struct affinity_context ac;
+ struct cpumask *user_mask;
+ struct task_struct *p; + struct task_struct *p;
+ int retval; + int retval;
+ +
@@ -6886,27 +6727,7 @@ index 000000000000..f5e9c01f9382
+ if (retval) + if (retval)
+ goto out_put_task; + goto out_put_task;
+ +
+ /* + retval = __sched_setaffinity(p, in_mask);
+ * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+ * alloc_user_cpus_ptr() returns NULL.
+ */
+ user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
+ if (user_mask) {
+ cpumask_copy(user_mask, in_mask);
+ } else if (IS_ENABLED(CONFIG_SMP)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+
+ ac = (struct affinity_context){
+ .new_mask = in_mask,
+ .user_mask = user_mask,
+ .flags = SCA_USER,
+ };
+
+ retval = __sched_setaffinity(p, &ac);
+ kfree(ac.user_mask);
+
+out_put_task: +out_put_task:
+ put_task_struct(p); + put_task_struct(p);
+ return retval; + return retval;
@@ -7662,12 +7483,6 @@ index 000000000000..f5e9c01f9382
+ */ + */
+void __init init_idle(struct task_struct *idle, int cpu) +void __init init_idle(struct task_struct *idle, int cpu)
+{ +{
+#ifdef CONFIG_SMP
+ struct affinity_context ac = (struct affinity_context) {
+ .new_mask = cpumask_of(cpu),
+ .flags = 0,
+ };
+#endif
+ struct rq *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu);
+ unsigned long flags; + unsigned long flags;
+ +
@@ -7694,7 +7509,7 @@ index 000000000000..f5e9c01f9382
+ * + *
+ * And since this is boot we can forgo the serialisation. + * And since this is boot we can forgo the serialisation.
+ */ + */
+ set_cpus_allowed_common(idle, &ac); + set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif +#endif
+ +
+ /* Silence PROVE_RCU */ + /* Silence PROVE_RCU */
@@ -8322,8 +8137,6 @@ index 000000000000..f5e9c01f9382
+ +
+ hrtick_rq_init(rq); + hrtick_rq_init(rq);
+ atomic_set(&rq->nr_iowait, 0); + atomic_set(&rq->nr_iowait, 0);
+
+ zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
+ } + }
+#ifdef CONFIG_SMP +#ifdef CONFIG_SMP
+ /* Set rq->online for cpu 0 */ + /* Set rq->online for cpu 0 */
@@ -8840,10 +8653,10 @@ index 000000000000..1212a031700e
+{} +{}
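Across the alt_core.c hunks above, the two columns track two kernel APIs: one side threads a struct affinity_context through the cpumask plumbing, the other keeps the older per-call (new_mask, flags) signatures with an explicitly managed user_cpus_ptr. Both shapes, copied from the hunks, side by side (the guard macro is hypothetical, for illustration only):

struct affinity_context {	/* newer API */
	const struct cpumask	*new_mask;
	struct cpumask		*user_mask;
	unsigned int		flags;
};

#ifdef PRJC_AFFINITY_CONTEXT	/* hypothetical guard */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  struct affinity_context *ctx);
#else				/* older API */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, u32 flags);
#endif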
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644 new file mode 100644
index 000000000000..0b563999d4c1 index 000000000000..c32403ed82b6
--- /dev/null --- /dev/null
+++ b/kernel/sched/alt_sched.h +++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,671 @@ @@ -0,0 +1,668 @@
+#ifndef ALT_SCHED_H +#ifndef ALT_SCHED_H
+#define ALT_SCHED_H +#define ALT_SCHED_H
+ +
@@ -9090,9 +8903,6 @@ index 000000000000..0b563999d4c1
+#endif +#endif
+ atomic_t nohz_flags; + atomic_t nohz_flags;
+#endif /* CONFIG_NO_HZ_COMMON */ +#endif /* CONFIG_NO_HZ_COMMON */
+
+ /* Scratch cpumask to be temporarily used under rq_lock */
+ cpumask_var_t scratch_mask;
+}; +};
+ +
+extern unsigned long rq_load_util(struct rq *rq, unsigned long max); +extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
@@ -10064,7 +9874,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
static inline int static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 771f8ddb7053..787a5069d69a 100644 index a4a20046e586..c363693cd869 100644
--- a/kernel/sched/sched.h --- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h +++ b/kernel/sched/sched.h
@@ -5,6 +5,10 @@ @@ -5,6 +5,10 @@
@@ -10078,7 +9888,7 @@ index 771f8ddb7053..787a5069d69a 100644
#include <linux/sched/affinity.h> #include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h> #include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h> #include <linux/sched/cpufreq.h>
@@ -3261,4 +3265,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr, @@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
cgroup_account_cputime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec);
} }
@@ -10120,7 +9930,7 @@ index 857f837f52cb..5486c63e4790 100644
} }
return 0; return 0;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 38f3698f5e5b..b9d597394316 100644 index 84a188913cc9..53934e7ef5db 100644
--- a/kernel/sched/stats.h --- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h +++ b/kernel/sched/stats.h
@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
@@ -10195,7 +10005,7 @@ index 8739c2a5a54e..d8dd6c15eb47 100644
+#endif /* CONFIG_NUMA */ +#endif /* CONFIG_NUMA */
+#endif +#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 137d4abe3eda..6bada3a6d571 100644 index c6d9dec11b74..2bc42ce8b48e 100644
--- a/kernel/sysctl.c --- a/kernel/sysctl.c
+++ b/kernel/sysctl.c +++ b/kernel/sysctl.c
@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); @@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
@@ -10209,7 +10019,7 @@ index 137d4abe3eda..6bada3a6d571 100644
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
static const int six_hundred_forty_kb = 640 * 1024; static const int six_hundred_forty_kb = 640 * 1024;
#endif #endif
@@ -1934,6 +1938,17 @@ static struct ctl_table kern_table[] = { @@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
#endif #endif
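Both sides export sched_yield_type and add eleven lines to kern_table, elided in this view. In prjc those lines register the yield_type sysctl; a sketch of the typical entry (field values assumed, not shown in this diff):

#ifdef CONFIG_SCHED_ALT
	{
		.procname	= "yield_type",
		.data		= &sched_yield_type,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
#endif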
@@ -10303,10 +10113,10 @@ index cb925e8ef9a8..67d823510f5c 100644
return false; return false;
} }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index ff0536cea968..ce266990006d 100644 index a2d301f58ced..2ccdede8585c 100644
--- a/kernel/trace/trace_selftest.c --- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c
@@ -1150,10 +1150,15 @@ static int trace_wakeup_test_thread(void *data) @@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
{ {
/* Make this a -deadline thread */ /* Make this a -deadline thread */
static const struct sched_attr attr = { static const struct sched_attr attr = {
@@ -10322,6 +10132,3 @@ index ff0536cea968..ce266990006d 100644
}; };
struct wakeup_test_data *x = data; struct wakeup_test_data *x = data;
--
2.39.2