Compare commits


4 Commits
v6.2 ... v6.2.1

Author         SHA1        Message                                                                                       Date
Sravan Balaji  96dbd05c1f  PDS Kernel Configuration                                                                      2023-02-25 12:00:16 -05:00
Tk-Glitch      e1372ce04b  Set CONFIG_PSI_DEFAULT_DISABLED to enabled by default when using Project C CPU schedulers.   2023-02-23 13:13:29 +01:00
ptr1337        ccbf7e5a82  6.2: Update prjc patchset (#710) (Signed-off-by: Peter Jung <admin@ptr1337.dev>)             2023-02-23 13:01:29 +01:00
ptr1337        0213d7503f  6.2: Sync config (#709) (Signed-off-by: Peter Jung <admin@ptr1337.dev>)                      2023-02-23 13:00:11 +01:00
3 changed files with 398 additions and 188 deletions
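The comparison can be reproduced from a local clone of the repository — a sketch, assuming the tags are named v6.2 and v6.2.1 exactly as shown above:

    # List the four commits between the two tags (newest first)
    git log --oneline v6.2..v6.2.1

    # Regenerate the "3 changed files" summary and the full diff
    git diff --stat v6.2 v6.2.1
    git diff v6.2 v6.2.1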


@@ -1,15 +1,15 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.2.0-rc4 Kernel Configuration
+# Linux/x86 6.2.0 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120200
+CONFIG_GCC_VERSION=120201
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=23900
+CONFIG_AS_VERSION=24000
 CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=23900
+CONFIG_LD_VERSION=24000
 CONFIG_LLD_VERSION=0
 CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
@@ -182,10 +182,9 @@ CONFIG_RCU_NOCB_CPU=y
 # CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set
 # CONFIG_RCU_NOCB_CPU_CB_BOOST is not set
 # CONFIG_TASKS_TRACE_RCU_READ_MB is not set
-# CONFIG_RCU_LAZY is not set
+CONFIG_RCU_LAZY=y
 # end of RCU Subsystem
-CONFIG_BUILD_BIN2C=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_IKHEADERS=m
@@ -488,7 +487,7 @@ CONFIG_X86_INTEL_TSX_MODE_AUTO=y
 CONFIG_X86_SGX=y
 CONFIG_EFI=y
 CONFIG_EFI_STUB=y
-CONFIG_EFI_HANDOVER_PROTOCOL=y
+# CONFIG_EFI_HANDOVER_PROTOCOL is not set
 CONFIG_EFI_MIXED=y
 # CONFIG_EFI_FAKE_MEMMAP is not set
 CONFIG_EFI_RUNTIME_MAP=y
@@ -638,7 +637,7 @@ CONFIG_ACPI_ADXL=y
 CONFIG_ACPI_CONFIGFS=m
 CONFIG_ACPI_PFRUT=m
 CONFIG_ACPI_PCC=y
-# CONFIG_ACPI_FFH is not set
+CONFIG_ACPI_FFH=y
 CONFIG_PMIC_OPREGION=y
 CONFIG_BYTCRC_PMIC_OPREGION=y
 CONFIG_CHTCRC_PMIC_OPREGION=y
@@ -936,7 +935,7 @@ CONFIG_MODULE_SIG_HASH="sha512"
 # CONFIG_MODULE_COMPRESS_GZIP is not set
 # CONFIG_MODULE_COMPRESS_XZ is not set
 CONFIG_MODULE_COMPRESS_ZSTD=y
-# CONFIG_MODULE_DECOMPRESS is not set
+CONFIG_MODULE_DECOMPRESS=y
 CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y
 CONFIG_MODPROBE_PATH="/sbin/modprobe"
 CONFIG_MODULES_TREE_LOOKUP=y
@@ -1989,7 +1988,7 @@ CONFIG_BT_HCIUART_QCA=y
 CONFIG_BT_HCIUART_AG6XX=y
 CONFIG_BT_HCIUART_MRVL=y
 CONFIG_BT_HCIBCM203X=m
-# CONFIG_BT_HCIBCM4377 is not set
+CONFIG_BT_HCIBCM4377=m
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIDTL1=m
@@ -2327,7 +2326,7 @@ CONFIG_SYSFB=y
 CONFIG_FW_CS_DSP=m
 CONFIG_GOOGLE_FIRMWARE=y
 # CONFIG_GOOGLE_SMI is not set
-# CONFIG_GOOGLE_CBMEM is not set
+CONFIG_GOOGLE_CBMEM=m
 CONFIG_GOOGLE_COREBOOT_TABLE=m
 CONFIG_GOOGLE_MEMCONSOLE=m
 # CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set
@@ -2539,7 +2538,7 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y
 CONFIG_ZRAM_DEF_COMP="lzo-rle"
 CONFIG_ZRAM_WRITEBACK=y
 # CONFIG_ZRAM_MEMORY_TRACKING is not set
-# CONFIG_ZRAM_MULTI_COMP is not set
+CONFIG_ZRAM_MULTI_COMP=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
 CONFIG_BLK_DEV_DRBD=m
@@ -2548,7 +2547,9 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
 CONFIG_XEN_BLKDEV_FRONTEND=m
 CONFIG_XEN_BLKDEV_BACKEND=m
@@ -2597,6 +2598,8 @@ CONFIG_TIFM_CORE=m
 CONFIG_TIFM_7XX1=m
 CONFIG_ICS932S401=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_SMPRO_ERRMON=m
+CONFIG_SMPRO_MISC=m
 CONFIG_HP_ILO=m
 CONFIG_APDS9802ALS=m
 CONFIG_ISL29003=m
@@ -3321,7 +3324,7 @@ CONFIG_ENC28J60=m
 # CONFIG_ENC28J60_WRITEVERIFY is not set
 CONFIG_ENCX24J600=m
 CONFIG_LAN743X=m
-# CONFIG_VCAP is not set
+CONFIG_VCAP=y
 CONFIG_NET_VENDOR_MICROSEMI=y
 CONFIG_MSCC_OCELOT_SWITCH_LIB=m
 CONFIG_NET_VENDOR_MICROSOFT=y
@@ -3895,7 +3898,7 @@ CONFIG_MT7921_COMMON=m
 CONFIG_MT7921E=m
 CONFIG_MT7921S=m
 CONFIG_MT7921U=m
-# CONFIG_MT7996E is not set
+CONFIG_MT7996E=m
 CONFIG_WLAN_VENDOR_MICROCHIP=y
 CONFIG_WILC1000=m
 CONFIG_WILC1000_SDIO=m
@@ -3959,27 +3962,29 @@ CONFIG_RTL8XXXU_UNTESTED=y
 CONFIG_RTW88=m
 CONFIG_RTW88_CORE=m
 CONFIG_RTW88_PCI=m
+CONFIG_RTW88_USB=m
 CONFIG_RTW88_8822B=m
 CONFIG_RTW88_8822C=m
 CONFIG_RTW88_8723D=m
 CONFIG_RTW88_8821C=m
 CONFIG_RTW88_8822BE=m
-# CONFIG_RTW88_8822BU is not set
+CONFIG_RTW88_8822BU=m
 CONFIG_RTW88_8822CE=m
-# CONFIG_RTW88_8822CU is not set
+CONFIG_RTW88_8822CU=m
 CONFIG_RTW88_8723DE=m
-# CONFIG_RTW88_8723DU is not set
+CONFIG_RTW88_8723DU=m
 CONFIG_RTW88_8821CE=m
-# CONFIG_RTW88_8821CU is not set
+CONFIG_RTW88_8821CU=m
 CONFIG_RTW88_DEBUG=y
 CONFIG_RTW88_DEBUGFS=y
 CONFIG_RTW89=m
 CONFIG_RTW89_CORE=m
 CONFIG_RTW89_PCI=m
 CONFIG_RTW89_8852A=m
+CONFIG_RTW89_8852B=m
 CONFIG_RTW89_8852C=m
 CONFIG_RTW89_8852AE=m
-# CONFIG_RTW89_8852BE is not set
+CONFIG_RTW89_8852BE=m
 CONFIG_RTW89_8852CE=m
 CONFIG_RTW89_DEBUG=y
 CONFIG_RTW89_DEBUGMSG=y
@@ -4227,7 +4232,7 @@ CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
 CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
 CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
 CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
-# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
+CONFIG_TOUCHSCREEN_CYTTSP5=m
 CONFIG_TOUCHSCREEN_DA9034=m
 CONFIG_TOUCHSCREEN_DA9052=m
 CONFIG_TOUCHSCREEN_DYNAPRO=m
@@ -4239,7 +4244,7 @@ CONFIG_TOUCHSCREEN_FUJITSU=m
 CONFIG_TOUCHSCREEN_GOODIX=m
 CONFIG_TOUCHSCREEN_HIDEEP=m
 CONFIG_TOUCHSCREEN_HYCON_HY46XX=m
-# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
+CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX=m
 CONFIG_TOUCHSCREEN_ILI210X=m
 CONFIG_TOUCHSCREEN_ILITEK=m
 CONFIG_TOUCHSCREEN_S6SY761=m
@@ -4314,7 +4319,7 @@ CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
 CONFIG_TOUCHSCREEN_ROHM_BU21023=m
 CONFIG_TOUCHSCREEN_IQS5XX=m
 CONFIG_TOUCHSCREEN_ZINITIX=m
-# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
+CONFIG_TOUCHSCREEN_HIMAX_HX83112B=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_88PM860X_ONKEY=m
 CONFIG_INPUT_88PM80X_ONKEY=m
@@ -4432,7 +4437,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
-CONFIG_LEGACY_TIOCSTI=y
+# CONFIG_LEGACY_TIOCSTI is not set
 CONFIG_LDISC_AUTOLOAD=y
 #
@@ -4525,7 +4530,7 @@ CONFIG_IPMI_SSIF=m
 CONFIG_IPMI_IPMB=m
 CONFIG_IPMI_WATCHDOG=m
 CONFIG_IPMI_POWEROFF=m
-# CONFIG_SSIF_IPMI_BMC is not set
+CONFIG_SSIF_IPMI_BMC=m
 CONFIG_IPMB_DEVICE_INTERFACE=m
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_TIMERIOMEM=m
@@ -4721,7 +4726,7 @@ CONFIG_SPI_MICROCHIP_CORE=m
 CONFIG_SPI_MICROCHIP_CORE_QSPI=m
 # CONFIG_SPI_LANTIQ_SSC is not set
 CONFIG_SPI_OC_TINY=m
-# CONFIG_SPI_PCI1XXXX is not set
+CONFIG_SPI_PCI1XXXX=m
 CONFIG_SPI_PXA2XX=m
 CONFIG_SPI_PXA2XX_PCI=m
 # CONFIG_SPI_ROCKCHIP is not set
@@ -4938,7 +4943,7 @@ CONFIG_GPIO_VIPERBOARD=m
 # Virtual GPIO drivers
 #
 CONFIG_GPIO_AGGREGATOR=m
-# CONFIG_GPIO_LATCH is not set
+CONFIG_GPIO_LATCH=m
 CONFIG_GPIO_MOCKUP=m
 CONFIG_GPIO_VIRTIO=m
 CONFIG_GPIO_SIM=m
@@ -5076,6 +5081,7 @@ CONFIG_HWMON_VID=m
 #
 CONFIG_SENSORS_ABITUGURU=m
 CONFIG_SENSORS_ABITUGURU3=m
+CONFIG_SENSORS_SMPRO=m
 CONFIG_SENSORS_AD7314=m
 CONFIG_SENSORS_AD7414=m
 CONFIG_SENSORS_AD7418=m
@@ -5195,8 +5201,9 @@ CONFIG_SENSORS_NCT7904=m
 CONFIG_SENSORS_NPCM7XX=m
 CONFIG_SENSORS_NZXT_KRAKEN2=m
 CONFIG_SENSORS_NZXT_SMART2=m
-# CONFIG_SENSORS_OCC_P8_I2C is not set
-# CONFIG_SENSORS_OXP is not set
+CONFIG_SENSORS_OCC_P8_I2C=m
+CONFIG_SENSORS_OCC=m
+CONFIG_SENSORS_OXP=m
 CONFIG_SENSORS_PCF8591=m
 CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
@@ -5401,7 +5408,7 @@ CONFIG_MAX63XX_WATCHDOG=m
 CONFIG_RETU_WATCHDOG=m
 CONFIG_ACQUIRE_WDT=m
 CONFIG_ADVANTECH_WDT=m
-# CONFIG_ADVANTECH_EC_WDT is not set
+CONFIG_ADVANTECH_EC_WDT=m
 CONFIG_ALIM1535_WDT=m
 CONFIG_ALIM7101_WDT=m
 CONFIG_EBC_C384_WDT=m
@@ -5483,7 +5490,7 @@ CONFIG_BCMA_DRIVER_GPIO=y
 #
 CONFIG_MFD_CORE=y
 CONFIG_MFD_AS3711=y
-# CONFIG_MFD_SMPRO is not set
+CONFIG_MFD_SMPRO=m
 CONFIG_PMIC_ADP5520=y
 CONFIG_MFD_AAT2870_CORE=y
 CONFIG_MFD_BCM590XX=m
@@ -5664,7 +5671,7 @@ CONFIG_REGULATOR_MT6311=m
 CONFIG_REGULATOR_MT6323=m
 CONFIG_REGULATOR_MT6331=m
 CONFIG_REGULATOR_MT6332=m
-# CONFIG_REGULATOR_MT6357 is not set
+CONFIG_REGULATOR_MT6357=m
 CONFIG_REGULATOR_MT6358=m
 CONFIG_REGULATOR_MT6359=m
 CONFIG_REGULATOR_MT6360=m
@@ -5686,7 +5693,7 @@ CONFIG_REGULATOR_RT5120=m
 CONFIG_REGULATOR_RT5190A=m
 CONFIG_REGULATOR_RT5759=m
 CONFIG_REGULATOR_RT6160=m
-# CONFIG_REGULATOR_RT6190 is not set
+CONFIG_REGULATOR_RT6190=m
 CONFIG_REGULATOR_RT6245=m
 CONFIG_REGULATOR_RTQ2134=m
 CONFIG_REGULATOR_RTMV20=m
@@ -6180,7 +6187,8 @@ CONFIG_VIDEO_VIMC=m
 CONFIG_VIDEO_VIVID=m
 CONFIG_VIDEO_VIVID_CEC=y
 CONFIG_VIDEO_VIVID_MAX_DEVS=64
-# CONFIG_VIDEO_VISL is not set
+CONFIG_VIDEO_VISL=m
+# CONFIG_VISL_DEBUGFS is not set
 CONFIG_DVB_TEST_DRIVERS=y
 CONFIG_DVB_VIDTV=m
@@ -6255,7 +6263,7 @@ CONFIG_VIDEO_NOON010PC30=m
 CONFIG_VIDEO_OG01A1B=m
 CONFIG_VIDEO_OV02A10=m
 CONFIG_VIDEO_OV08D10=m
-# CONFIG_VIDEO_OV08X40 is not set
+CONFIG_VIDEO_OV08X40=m
 CONFIG_VIDEO_OV13858=m
 CONFIG_VIDEO_OV13B10=m
 CONFIG_VIDEO_OV2640=m
@@ -6263,7 +6271,7 @@ CONFIG_VIDEO_OV2659=m
 CONFIG_VIDEO_OV2680=m
 CONFIG_VIDEO_OV2685=m
 CONFIG_VIDEO_OV2740=m
-# CONFIG_VIDEO_OV4689 is not set
+CONFIG_VIDEO_OV4689=m
 CONFIG_VIDEO_OV5647=m
 CONFIG_VIDEO_OV5648=m
 CONFIG_VIDEO_OV5670=m
@@ -6573,7 +6581,6 @@ CONFIG_DRM=y
 CONFIG_DRM_MIPI_DBI=m
 CONFIG_DRM_MIPI_DSI=y
 # CONFIG_DRM_DEBUG_MM is not set
-CONFIG_DRM_USE_DYNAMIC_DEBUG=y
 CONFIG_DRM_KMS_HELPER=y
 CONFIG_DRM_FBDEV_EMULATION=y
 CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6851,7 +6858,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
 # CONFIG_LOGO is not set
 # end of Graphics support
-# CONFIG_DRM_ACCEL is not set
+CONFIG_DRM_ACCEL=y
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
 # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
@@ -7163,11 +7170,11 @@ CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m
-# CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927 is not set
+CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825=m
-# CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE is not set
+CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT274=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT286=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT298=m
@@ -7520,7 +7527,7 @@ CONFIG_SND_SOC_WM8903=m
 CONFIG_SND_SOC_WM8904=m
 CONFIG_SND_SOC_WM8940=m
 CONFIG_SND_SOC_WM8960=m
-# CONFIG_SND_SOC_WM8961 is not set
+CONFIG_SND_SOC_WM8961=m
 CONFIG_SND_SOC_WM8962=m
 CONFIG_SND_SOC_WM8974=m
 CONFIG_SND_SOC_WM8978=m
@@ -8350,7 +8357,7 @@ CONFIG_INFINIBAND_HFI1=m
 # CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
 # CONFIG_SDMA_VERBOSITY is not set
 CONFIG_INFINIBAND_IRDMA=m
-# CONFIG_MANA_INFINIBAND is not set
+CONFIG_MANA_INFINIBAND=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
@@ -8626,7 +8633,7 @@ CONFIG_NITRO_ENCLAVES=m
 CONFIG_ACRN_HSM=m
 CONFIG_EFI_SECRET=m
 CONFIG_SEV_GUEST=m
-# CONFIG_TDX_GUEST_DRIVER is not set
+CONFIG_TDX_GUEST_DRIVER=m
 CONFIG_VIRTIO_ANCHOR=y
 CONFIG_VIRTIO=y
 CONFIG_VIRTIO_PCI_LIB=m
@@ -8824,7 +8831,7 @@ CONFIG_CROS_EC_DEBUGFS=m
 CONFIG_CROS_EC_SENSORHUB=m
 CONFIG_CROS_EC_SYSFS=m
 CONFIG_CROS_EC_TYPEC=m
-# CONFIG_CROS_HPS_I2C is not set
+CONFIG_CROS_HPS_I2C=m
 CONFIG_CROS_USBPD_LOGGER=m
 CONFIG_CROS_USBPD_NOTIFY=m
 CONFIG_CHROMEOS_PRIVACY_SCREEN=m
@@ -8901,7 +8908,9 @@ CONFIG_AMILO_RFKILL=m
 CONFIG_FUJITSU_LAPTOP=m
 CONFIG_FUJITSU_TABLET=m
 CONFIG_GPD_POCKET_FAN=m
-# CONFIG_X86_PLATFORM_DRIVERS_HP is not set
+CONFIG_X86_PLATFORM_DRIVERS_HP=y
+CONFIG_HP_ACCEL=m
+CONFIG_HP_WMI=m
 CONFIG_WIRELESS_HOTKEY=m
 CONFIG_IBM_RTL=m
 CONFIG_IDEAPAD_LAPTOP=m
@@ -8916,7 +8925,7 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
 CONFIG_THINKPAD_LMI=m
 CONFIG_INTEL_ATOMISP2_PDX86=y
 CONFIG_INTEL_ATOMISP2_LED=m
-# CONFIG_INTEL_IFS is not set
+CONFIG_INTEL_IFS=m
 CONFIG_INTEL_SAR_INT1092=m
 CONFIG_INTEL_SKL_INT3472=m
 CONFIG_INTEL_PMC_CORE=y
@@ -9042,7 +9051,7 @@ CONFIG_INTEL_IOMMU_SVM=y
 # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
 CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
-# CONFIG_IOMMUFD is not set
+CONFIG_IOMMUFD=m
 CONFIG_IRQ_REMAP=y
 CONFIG_HYPERV_IOMMU=y
 CONFIG_VIRTIO_IOMMU=m
@@ -9219,8 +9228,9 @@ CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
 CONFIG_IIO_ST_ACCEL_3AXIS=m
 CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
 CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
-# CONFIG_IIO_KX022A_SPI is not set
-# CONFIG_IIO_KX022A_I2C is not set
+CONFIG_IIO_KX022A=m
+CONFIG_IIO_KX022A_SPI=m
+CONFIG_IIO_KX022A_I2C=m
 CONFIG_KXSD9=m
 CONFIG_KXSD9_SPI=m
 CONFIG_KXSD9_I2C=m
@@ -9247,7 +9257,7 @@ CONFIG_STK8BA50=m
 # Analog to digital converters
 #
 CONFIG_AD_SIGMA_DELTA=m
-# CONFIG_AD4130 is not set
+CONFIG_AD4130=m
 CONFIG_AD7091R5=m
 CONFIG_AD7124=m
 CONFIG_AD7192=m
@@ -9288,7 +9298,7 @@ CONFIG_MAX1027=m
 CONFIG_MAX11100=m
 CONFIG_MAX1118=m
 CONFIG_MAX11205=m
-# CONFIG_MAX11410 is not set
+CONFIG_MAX11410=m
 CONFIG_MAX1241=m
 CONFIG_MAX1363=m
 CONFIG_MAX9611=m
@@ -9296,7 +9306,7 @@ CONFIG_MCP320X=m
 CONFIG_MCP3422=m
 CONFIG_MCP3911=m
 CONFIG_MEDIATEK_MT6360_ADC=m
-# CONFIG_MEDIATEK_MT6370_ADC is not set
+CONFIG_MEDIATEK_MT6370_ADC=m
 CONFIG_MEN_Z188_ADC=m
 CONFIG_MP2629_ADC=m
 CONFIG_NAU7802=m
@@ -9329,7 +9339,7 @@ CONFIG_XILINX_XADC=m
 #
 # Analog to digital and digital to analog converters
 #
-# CONFIG_AD74115 is not set
+CONFIG_AD74115=m
 CONFIG_AD74413R=m
 # end of Analog to digital and digital to analog converters
@@ -9479,7 +9489,7 @@ CONFIG_AD9523=m
 #
 CONFIG_ADF4350=m
 CONFIG_ADF4371=m
-# CONFIG_ADF4377 is not set
+CONFIG_ADF4377=m
 CONFIG_ADMV1013=m
 CONFIG_ADMV1014=m
 CONFIG_ADMV4420=m
@@ -9788,7 +9798,7 @@ CONFIG_TMP007=m
 CONFIG_TMP117=m
 CONFIG_TSYS01=m
 CONFIG_TSYS02D=m
-# CONFIG_MAX30208 is not set
+CONFIG_MAX30208=m
 CONFIG_MAX31856=m
 CONFIG_MAX31865=m
 # end of Temperature sensors
@@ -9942,7 +9952,8 @@ CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
 CONFIG_FPGA_DFL_PCI=m
 CONFIG_FPGA_M10_BMC_SEC_UPDATE=m
 CONFIG_FPGA_MGR_MICROCHIP_SPI=m
-# CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI is not set
+CONFIG_FPGA_MGR_LATTICE_SYSCONFIG=m
+CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI=m
 CONFIG_TEE=m
 CONFIG_AMDTEE=m
 CONFIG_MULTIPLEXER=m
@@ -10187,10 +10198,10 @@ CONFIG_SQUASHFS=m
 # CONFIG_SQUASHFS_FILE_CACHE is not set
 CONFIG_SQUASHFS_FILE_DIRECT=y
 CONFIG_SQUASHFS_DECOMP_SINGLE=y
-# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
-CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
-# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
-# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_DECOMP_MULTI=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
+CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS=y
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_ZLIB=y
 CONFIG_SQUASHFS_LZ4=y
@@ -10869,6 +10880,7 @@ CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
 CONFIG_BTREE=y
 CONFIG_INTERVAL_TREE=y
+CONFIG_INTERVAL_TREE_SPAN_ITER=y
 CONFIG_XARRAY_MULTI=y
 CONFIG_ASSOCIATIVE_ARRAY=y
 CONFIG_HAS_IOMEM=y
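Because this config keeps CONFIG_IKCONFIG=y and CONFIG_IKCONFIG_PROC=y, the options toggled above can be verified on a booted kernel without the source tree — a minimal spot-check, using option names taken straight from the diff:

    # /proc/config.gz is only available when IKCONFIG_PROC is enabled
    zcat /proc/config.gz | grep -E '^CONFIG_(RCU_LAZY|MODULE_DECOMPRESS|ZRAM_MULTI_COMP|DRM_ACCEL)='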


@@ -866,6 +866,11 @@ _tkg_srcprep() {
_disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF" _disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF"
_module "BLK_DEV_LOOP" _module "BLK_DEV_LOOP"
# buggy project C/PSI interaction workaround
if [ "${_cpusched}" = "pds" ] || [ "${_cpusched}" = "bmq" ]; then
_enable "PSI_DEFAULT_DISABLED"
fi
if [ -n "$_custom_commandline" ]; then if [ -n "$_custom_commandline" ]; then
_enable "CMDLINE_BOOL" _enable "CMDLINE_BOOL"
_disable "CMDLINE_OVERRIDE" _disable "CMDLINE_OVERRIDE"


@@ -1,8 +1,62 @@
+From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
+From: Peter Jung <admin@ptr1337.dev>
+Date: Wed, 22 Feb 2023 19:24:36 +0100
+Subject: [PATCH] prjc
+Signed-off-by: Peter Jung <admin@ptr1337.dev>
+---
+ .../admin-guide/kernel-parameters.txt | 6 +
+ Documentation/admin-guide/sysctl/kernel.rst | 10 +
+ Documentation/scheduler/sched-BMQ.txt | 110 +
+ fs/proc/base.c | 2 +-
+ include/asm-generic/resource.h | 2 +-
+ include/linux/sched.h | 33 +-
+ include/linux/sched/deadline.h | 20 +
+ include/linux/sched/prio.h | 26 +
+ include/linux/sched/rt.h | 2 +
+ include/linux/sched/topology.h | 3 +-
+ init/Kconfig | 34 +
+ init/init_task.c | 18 +
+ kernel/Kconfig.preempt | 2 +-
+ kernel/cgroup/cpuset.c | 4 +-
+ kernel/delayacct.c | 2 +-
+ kernel/exit.c | 4 +-
+ kernel/locking/rtmutex.c | 16 +-
+ kernel/sched/Makefile | 5 +
+ kernel/sched/alt_core.c | 8111 +++++++++++++++++
+ kernel/sched/alt_debug.c | 31 +
+ kernel/sched/alt_sched.h | 671 ++
+ kernel/sched/bmq.h | 110 +
+ kernel/sched/build_policy.c | 8 +-
+ kernel/sched/build_utility.c | 2 +
+ kernel/sched/cpufreq_schedutil.c | 10 +
+ kernel/sched/cputime.c | 10 +-
+ kernel/sched/debug.c | 10 +
+ kernel/sched/idle.c | 2 +
+ kernel/sched/pds.h | 127 +
+ kernel/sched/pelt.c | 4 +-
+ kernel/sched/pelt.h | 8 +-
+ kernel/sched/sched.h | 9 +
+ kernel/sched/stats.c | 4 +
+ kernel/sched/stats.h | 2 +
+ kernel/sched/topology.c | 17 +
+ kernel/sysctl.c | 15 +
+ kernel/time/hrtimer.c | 2 +
+ kernel/time/posix-cpu-timers.c | 10 +-
+ kernel/trace/trace_selftest.c | 5 +
+ 39 files changed, 9445 insertions(+), 22 deletions(-)
+ create mode 100644 Documentation/scheduler/sched-BMQ.txt
+ create mode 100644 kernel/sched/alt_core.c
+ create mode 100644 kernel/sched/alt_debug.c
+ create mode 100644 kernel/sched/alt_sched.h
+ create mode 100644 kernel/sched/bmq.h
+ create mode 100644 kernel/sched/pds.h
 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 42af9ca0127e..31747ec54f9d 100644
+index 6cfa6e3996cf..1b6a407213da 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5406,6 +5406,12 @@
+@@ -5437,6 +5437,12 @@
 sa1100ir [NET]
 See drivers/net/irda/sa1100_ir.c.
@@ -16,10 +70,10 @@ index 42af9ca0127e..31747ec54f9d 100644
 schedstats= [KNL,X86] Enable or disable scheduled statistics.
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 98d1b198b2b4..d7c78a107f93 100644
+index 46e3d62c0eea..fb4568c919d0 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1552,3 +1552,13 @@ is 10 seconds.
+@@ -1597,3 +1597,13 @@ is 10 seconds.
 The softlockup threshold is (``2 * watchdog_thresh``). Setting this
 tunable to zero will disable lockup detection altogether.
@@ -176,7 +230,7 @@ index 8874f681b056..59eb72bf7d5f 100644
 [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
 }
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffb6eb55cd13..2e730a59caa2 100644
+index 853d08f7562b..ad7e050d7455 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -762,8 +762,14 @@ struct task_struct {
@@ -232,7 +286,7 @@ index ffb6eb55cd13..2e730a59caa2 100644
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group *sched_task_group;
-@@ -1545,6 +1567,15 @@ struct task_struct {
+@@ -1539,6 +1561,15 @@ struct task_struct {
 */
 };
@@ -352,10 +406,10 @@ index 816df6cc444e..c8da08e18c91 100644
 #else
 static inline void rebuild_sched_domains_energy(void)
 diff --git a/init/Kconfig b/init/Kconfig
-index 94125d3b6893..c87ba766d354 100644
+index 44e90b28a30f..af24591984ab 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -819,6 +819,7 @@ menu "Scheduler features"
+@@ -821,6 +821,7 @@ menu "Scheduler features"
 config UCLAMP_TASK
 bool "Enable utilization clamping for RT/FAIR tasks"
 depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -363,7 +417,7 @@ index 94125d3b6893..c87ba766d354 100644
 help
 This feature enables the scheduler to track the clamped utilization
 of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT
 If in doubt, use the default value.
@@ -399,7 +453,7 @@ index 94125d3b6893..c87ba766d354 100644
 endmenu
 #
-@@ -918,6 +948,7 @@ config NUMA_BALANCING
+@@ -924,6 +954,7 @@ config NUMA_BALANCING
 depends on ARCH_SUPPORTS_NUMA_BALANCING
 depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
 depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -407,7 +461,7 @@ index 94125d3b6893..c87ba766d354 100644
 help
 This option adds support for automatic NUMA aware memory/task placement.
 The mechanism is quite primitive and is based on migrating memory when
-@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
+@@ -1021,6 +1052,7 @@ config FAIR_GROUP_SCHED
 depends on CGROUP_SCHED
 default CGROUP_SCHED
@@ -415,7 +469,7 @@ index 94125d3b6893..c87ba766d354 100644
 config CFS_BANDWIDTH
 bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
 depends on FAIR_GROUP_SCHED
-@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
+@@ -1043,6 +1075,7 @@ config RT_GROUP_SCHED
 realtime bandwidth for them.
 See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -423,7 +477,7 @@ index 94125d3b6893..c87ba766d354 100644
 endif #CGROUP_SCHED
 config UCLAMP_TASK_GROUP
-@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
+@@ -1287,6 +1320,7 @@ config CHECKPOINT_RESTORE
 config SCHED_AUTOGROUP
 bool "Automatic process group scheduling"
@@ -491,10 +545,10 @@ index c2f1fd95a821..41654679b1b2 100644
 This option permits Core Scheduling, a means of coordinated task
 selection across SMT siblings. When enabled -- see
 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index b474289c15b8..a23224b45b03 100644
+index ca826bd1eba3..60e194f1d6d8 100644
 --- a/kernel/cgroup/cpuset.c
 +++ b/kernel/cgroup/cpuset.c
-@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+@@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 return ret;
 }
@@ -503,7 +557,7 @@ index b474289c15b8..a23224b45b03 100644
 /*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
+@@ -1187,7 +1187,7 @@ static void rebuild_sched_domains_locked(void)
 /* Have scheduler rebuild the domains */
 partition_and_rebuild_sched_domains(ndoms, doms, attr);
 }
@@ -526,10 +580,10 @@ index e39cb696cfbd..463423572e09 100644
 d->cpu_count += t1;
 diff --git a/kernel/exit.c b/kernel/exit.c
-index 35e0a31a0315..64e368441cf4 100644
+index 15dc2ec80c46..1e583e0f89a7 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
-@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
+@@ -172,7 +172,7 @@ static void __exit_signal(struct task_struct *tsk)
 sig->curr_target = next_thread(tsk);
 }
@@ -538,7 +592,7 @@ index 35e0a31a0315..64e368441cf4 100644
 sizeof(unsigned long long));
 /*
-@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
+@@ -193,7 +193,7 @@ static void __exit_signal(struct task_struct *tsk)
 sig->inblock += task_io_get_inblock(tsk);
 sig->oublock += task_io_get_oublock(tsk);
 task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -548,10 +602,10 @@ index 35e0a31a0315..64e368441cf4 100644
 __unhash_process(tsk, group_dead);
 write_sequnlock(&sig->stats_lock);
 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 7779ee8abc2a..5b9893cdfb1b 100644
+index 728f434de2bb..0e1082a4e878 100644
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -300,21 +300,25 @@ static __always_inline void
+@@ -337,21 +337,25 @@ static __always_inline void
 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
 {
 waiter->prio = __waiter_prio(task);
@@ -579,7 +633,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 /*
 * If both waiters have dl_prio(), we check the deadlines of the
 * associated tasks.
-@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -360,16 +364,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
 */
 if (dl_prio(left->prio))
 return dl_time_before(left->deadline, right->deadline);
@@ -602,7 +656,7 @@ index 7779ee8abc2a..5b9893cdfb1b 100644
 /*
 * If both waiters have dl_prio(), we check the deadlines of the
 * associated tasks.
-@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+@@ -378,8 +388,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 */
 if (dl_prio(left->prio))
 return left->deadline == right->deadline;
@@ -632,10 +686,10 @@ index 976092b7bd45..31d587c16ec1 100644
 obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..acb8657e811d
+index 000000000000..f5e9c01f9382
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7978 @@
+@@ -0,0 +1,8111 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -705,7 +759,7 @@ index 000000000000..acb8657e811d
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.1-r3"
++#define ALT_SCHED_VERSION "v6.2-r0"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p) rt_prio((p)->prio)
@@ -726,6 +780,12 @@ index 000000000000..acb8657e811d
 +#include "pds.h"
 +#endif
 +
++struct affinity_context {
++ const struct cpumask *new_mask;
++ struct cpumask *user_mask;
++ unsigned int flags;
++};
++
 +static int __init sched_timeslice(char *str)
 +{
 + int timeslice_ms;
@@ -788,6 +848,14 @@ index 000000000000..acb8657e811d
 +static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
 +static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
 +
++/* task function */
++static inline const struct cpumask *task_user_cpus(struct task_struct *p)
++{
++ if (!p->user_cpus_ptr)
++ return cpu_possible_mask; /* &init_task.cpus_mask */
++ return p->user_cpus_ptr;
++}
++
 +/* sched_queue related functions */
 +static inline void sched_queue_init(struct sched_queue *q)
 +{
@@ -1400,7 +1468,7 @@ index 000000000000..acb8657e811d
 +
 +#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
 + sched_info_enqueue(rq, p); \
-+ psi_enqueue(p, flags); \
++ psi_enqueue(p, flags & ENQUEUE_WAKEUP); \
 + \
 + p->sq_idx = task_sched_prio_idx(p, rq); \
 + list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
@@ -2268,35 +2336,101 @@ index 000000000000..acb8657e811d
 +}
 +
 +static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
 +{
-+ cpumask_copy(&p->cpus_mask, new_mask);
-+ p->nr_cpus_allowed = cpumask_weight(new_mask);
++ cpumask_copy(&p->cpus_mask, ctx->new_mask);
++ p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
++
++ /*
++ * Swap in a new user_cpus_ptr if SCA_USER flag set
++ */
++ if (ctx->flags & SCA_USER)
++ swap(p->user_cpus_ptr, ctx->user_mask);
 +}
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 +{
 + lockdep_assert_held(&p->pi_lock);
-+ set_cpus_allowed_common(p, new_mask);
++ set_cpus_allowed_common(p, ctx);
 +}
 +
++/*
++ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
++ * affinity (if any) should be destroyed too.
++ */
 +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+ __do_set_cpus_allowed(p, new_mask);
++ struct affinity_context ac = {
++ .new_mask = new_mask,
++ .user_mask = NULL,
++ .flags = SCA_USER, /* clear the user requested mask */
++ };
++ union cpumask_rcuhead {
++ cpumask_t cpumask;
++ struct rcu_head rcu;
++ };
++
++ __do_set_cpus_allowed(p, &ac);
++
++ /*
++ * Because this is called with p->pi_lock held, it is not possible
++ * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
++ * kfree_rcu().
++ */
++ kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
 +}
 +
++static cpumask_t *alloc_user_cpus_ptr(int node)
++{
++ /*
++ * See do_set_cpus_allowed() above for the rcu_head usage.
++ */
++ int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
++
++ return kmalloc_node(size, GFP_KERNEL, node);
++}
 +
 +int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 + int node)
 +{
-+ if (!src->user_cpus_ptr)
++ cpumask_t *user_mask;
++ unsigned long flags;
++
++ /*
++ * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
++ * may differ by now due to racing.
++ */
++ dst->user_cpus_ptr = NULL;
++
++ /*
++ * This check is racy and losing the race is a valid situation.
++ * It is not worth the extra overhead of taking the pi_lock on
++ * every fork/clone.
++ */
++ if (data_race(!src->user_cpus_ptr))
 + return 0;
 +
-+ dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-+ if (!dst->user_cpus_ptr)
++ user_mask = alloc_user_cpus_ptr(node);
++ if (!user_mask)
 + return -ENOMEM;
 +
++ /*
++ * Use pi_lock to protect content of user_cpus_ptr
++ *
++ * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++ * do_set_cpus_allowed().
++ */
++ raw_spin_lock_irqsave(&src->pi_lock, flags);
++ if (src->user_cpus_ptr) {
++ swap(dst->user_cpus_ptr, user_mask);
 + cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++ }
++ raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
++ if (unlikely(user_mask))
++ kfree(user_mask);
++
 + return 0;
 +}
 +
@@ -2641,6 +2775,8 @@ index 000000000000..acb8657e811d
 +
 +static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
 + raw_spinlock_t *lock, unsigned long irq_flags)
++ __releases(rq->lock)
++ __releases(p->pi_lock)
 +{
 + /* Can the task run on the task's current CPU? If so, we're done */
 + if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -2678,8 +2814,7 @@ index 000000000000..acb8657e811d
 +}
 +
 +static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+ const struct cpumask *new_mask,
-+ u32 flags,
++ struct affinity_context *ctx,
 + struct rq *rq,
 + raw_spinlock_t *lock,
 + unsigned long irq_flags)
@@ -2687,7 +2822,6 @@ index 000000000000..acb8657e811d
 + const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 + const struct cpumask *cpu_valid_mask = cpu_active_mask;
 + bool kthread = p->flags & PF_KTHREAD;
-+ struct cpumask *user_mask = NULL;
 + int dest_cpu;
 + int ret = 0;
 +
@@ -2705,7 +2839,7 @@ index 000000000000..acb8657e811d
 + cpu_valid_mask = cpu_online_mask;
 + }
 +
-+ if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
++ if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
 + ret = -EINVAL;
 + goto out;
 + }
@@ -2714,30 +2848,23 @@ index 000000000000..acb8657e811d
 + * Must re-check here, to close a race against __kthread_bind(),
 + * sched_setaffinity() is not guaranteed to observe the flag.
 + */
-+ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++ if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 + ret = -EINVAL;
 + goto out;
 + }
 +
-+ if (cpumask_equal(&p->cpus_mask, new_mask))
++ if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
 + goto out;
 +
-+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++ dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
 + if (dest_cpu >= nr_cpu_ids) {
 + ret = -EINVAL;
 + goto out;
 + }
 +
-+ __do_set_cpus_allowed(p, new_mask);
++ __do_set_cpus_allowed(p, ctx);
 +
-+ if (flags & SCA_USER)
-+ user_mask = clear_user_cpus_ptr(p);
-+
-+ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
-+
-+ kfree(user_mask);
-+
-+ return ret;
++ return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
 +
 +out:
 + __task_access_unlock(p, lock);
@@ -2748,7 +2875,6 @@ index 000000000000..acb8657e811d
 +
 +/*
 + * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
 + * is removed from the allowed bitmask.
 + *
 + * NOTE: the caller must have a valid reference to the task, the
@@ -2756,7 +2882,7 @@ index 000000000000..acb8657e811d
 + * call is not atomic; no spinlocks may be held.
 + */
 +static int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, u32 flags)
++ struct affinity_context *ctx)
 +{
 + unsigned long irq_flags;
 + struct rq *rq;
@@ -2764,20 +2890,36 @@ index 000000000000..acb8657e811d
 +
 + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
 + rq = __task_access_lock(p, &lock);
++ /*
++ * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
++ * flags are set.
++ */
++ if (p->user_cpus_ptr &&
++ !(ctx->flags & SCA_USER) &&
++ cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
++ ctx->new_mask = rq->scratch_mask;
 +
-+ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
++
++ return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
 +}
 +
 +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+ return __set_cpus_allowed_ptr(p, new_mask, 0);
++ struct affinity_context ac = {
++ .new_mask = new_mask,
++ .flags = 0,
++ };
++
++ return __set_cpus_allowed_ptr(p, &ac);
 +}
 +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 +
 +/*
 + * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
-+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
++ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
++ * affinity or use cpu_online_mask instead.
++ *
 + * If the resulting mask is empty, leave the affinity unchanged and return
 + * -EINVAL.
 + */
@@ -2785,48 +2927,34 @@ index 000000000000..acb8657e811d
 + struct cpumask *new_mask,
 + const struct cpumask *subset_mask)
 +{
-+ struct cpumask *user_mask = NULL;
++ struct affinity_context ac = {
++ .new_mask = new_mask,
++ .flags = 0,
++ };
 + unsigned long irq_flags;
 + raw_spinlock_t *lock;
 + struct rq *rq;
 + int err;
 +
-+ if (!p->user_cpus_ptr) {
-+ user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-+ if (!user_mask)
-+ return -ENOMEM;
-+ }
-+
 + raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
 + rq = __task_access_lock(p, &lock);
 +
-+ if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
++ if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
 + err = -EINVAL;
 + goto err_unlock;
 + }
 +
-+ /*
-+ * We're about to butcher the task affinity, so keep track of what
-+ * the user asked for in case we're able to restore it later on.
-+ */
-+ if (user_mask) {
-+ cpumask_copy(user_mask, p->cpus_ptr);
-+ p->user_cpus_ptr = user_mask;
-+ }
-+
-+ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
-+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
++ return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
 +
 +err_unlock:
 + __task_access_unlock(p, lock);
 + raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+ kfree(user_mask);
 + return err;
 +}
 +
 +/*
 + * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
 + * old affinity mask. If the resulting mask is empty, we warn and walk
 + * up the cpuset hierarchy until we find a suitable mask.
 + */
@@ -2870,34 +2998,29 @@ index 000000000000..acb8657e811d
 +}
 +
 +static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 +
 +/*
 + * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
-+ * @p->user_cpus_ptr.
++ * call to force_compatible_cpus_allowed_ptr().
 + *
 + * It is the caller's responsibility to serialise this with any calls to
 + * force_compatible_cpus_allowed_ptr(@p).
 + */
 +void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 +{
-+ struct cpumask *user_mask = p->user_cpus_ptr;
-+ unsigned long flags;
++ struct affinity_context ac = {
++ .new_mask = task_user_cpus(p),
++ .flags = 0,
++ };
++ int ret;
 +
 + /*
-+ * Try to restore the old affinity mask. If this fails, then
-+ * we free the mask explicitly to avoid it being inherited across
-+ * a subsequent fork().
++ * Try to restore the old affinity mask with __sched_setaffinity().
++ * Cpuset masking will be done there too.
 + */
-+ if (!user_mask || !__sched_setaffinity(p, user_mask))
-+ return;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ user_mask = clear_user_cpus_ptr(p);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+ kfree(user_mask);
++ ret = __sched_setaffinity(p, &ac);
++ WARN_ON_ONCE(ret);
 +}
 +
 +#else /* CONFIG_SMP */
@@ -2909,9 +3032,9 @@ index 000000000000..acb8657e811d
 +
 +static inline int
 +__set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, u32 flags)
++ struct affinity_context *ctx)
 +{
-+ return set_cpus_allowed_ptr(p, new_mask);
++ return set_cpus_allowed_ptr(p, ctx->new_mask);
 +}
 +
 +static inline bool rq_has_pinned_tasks(struct rq *rq)
@@ -2919,6 +3042,11 @@ index 000000000000..acb8657e811d
 + return false;
 +}
 +
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++ return NULL;
++}
++
 +#endif /* !CONFIG_SMP */
 +
 +static void
@@ -3030,13 +3158,6 @@ index 000000000000..acb8657e811d
 + if (!llist)
 + return;
 +
-+ /*
-+ * rq::ttwu_pending racy indication of out-standing wakeups.
-+ * Races such that false-negatives are possible, since they
-+ * are shorter lived that false-positives would be.
-+ */
-+ WRITE_ONCE(rq->ttwu_pending, 0);
-+
 + rq_lock_irqsave(rq, &rf);
 + update_rq_clock(rq);
 +
@@ -3050,6 +3171,17 @@ index 000000000000..acb8657e811d
 + ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
 + }
 +
++ /*
++ * Must be after enqueueing at least once task such that
++ * idle_cpu() does not observe a false-negative -- if it does,
++ * it is possible for select_idle_siblings() to stack a number
++ * of tasks on this CPU during that window.
++ *
++ * It is ok to clear ttwu_pending when another task pending.
++ * We will receive IPI after local irq enabled and then enqueue it.
++ * Since now nr_running > 0, idle_cpu() will always get correct result.
++ */
++ WRITE_ONCE(rq->ttwu_pending, 0);
 + rq_unlock_irqrestore(rq, &rf);
 +}
 +
@@ -4635,7 +4767,9 @@ index 000000000000..acb8657e811d
 + struct rq *rq = cpu_rq(cpu);
 + u64 resched_latency;
 +
++ if (housekeeping_cpu(cpu, HK_TYPE_TICK))
 + arch_scale_freq_tick();
++
 + sched_clock_tick();
 +
 + raw_spin_lock(&rq->lock);
@@ -4734,7 +4868,7 @@ index 000000000000..acb8657e811d
 + int i;
 +
 + for_each_cpu_wrap(i, &chk, cpu) {
-+ if (cpumask_subset(cpu_smt_mask(i), &chk) &&
++ if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\
 + sg_balance_trigger(i))
 + return;
 + }
@@ -4857,6 +4991,7 @@ index 000000000000..acb8657e811d
 +static void sched_tick_stop(int cpu)
 +{
 + struct tick_work *twork;
++ int os;
 +
 + if (housekeeping_cpu(cpu, HK_TYPE_TICK))
 + return;
@@ -4864,7 +4999,10 @@ index 000000000000..acb8657e811d
 + WARN_ON_ONCE(!tick_work_cpu);
 +
 + twork = per_cpu_ptr(tick_work_cpu, cpu);
-+ cancel_delayed_work_sync(&twork->work);
++ /* There cannot be competing actions, but don't rely on stop-machine. */
++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++ WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++ /* Don't cancel, as this would mess up the state machine. */
 +}
 +#endif /* CONFIG_HOTPLUG_CPU */
 +
@@ -4988,8 +5126,7 @@ index 000000000000..acb8657e811d
 + pr_err("Preemption disabled at:");
 + print_ip_sym(KERN_ERR, preempt_disable_ip);
 + }
-+ if (panic_on_warn)
-+ panic("scheduling while atomic\n");
++ check_panic_on_warn("scheduling while atomic");
 +
 + dump_stack();
 + add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -5305,7 +5442,7 @@ index 000000000000..acb8657e811d
 + prev->sched_contributes_to_load =
 + (prev_state & TASK_UNINTERRUPTIBLE) &&
 + !(prev_state & TASK_NOLOAD) &&
-+ !(prev->flags & TASK_FROZEN);
++ !(prev_state & TASK_FROZEN);
 +
 + if (prev->sched_contributes_to_load)
 + rq->nr_uninterruptible++;
@@ -6653,7 +6790,7 @@ index 000000000000..acb8657e811d
 +#endif
 +
 +static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 +{
 + int retval;
 + cpumask_var_t cpus_allowed, new_mask;
@@ -6667,9 +6804,12 @@ index 000000000000..acb8657e811d
 + }
 +
 + cpuset_cpus_allowed(p, cpus_allowed);
-+ cpumask_and(new_mask, mask, cpus_allowed);
-+again:
-+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
++ cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
++
++ ctx->new_mask = new_mask;
++ ctx->flags |= SCA_CHECK;
++
++ retval = __set_cpus_allowed_ptr(p, ctx);
 + if (retval)
 + goto out_free_new_mask;
 +
@@ -6681,7 +6821,24 @@ index 000000000000..acb8657e811d
 + * cpuset's cpus_allowed
 + */
 + cpumask_copy(new_mask, cpus_allowed);
-+ goto again;
++
++ /*
++ * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
++ * will restore the previous user_cpus_ptr value.
++ *
++ * In the unlikely event a previous user_cpus_ptr exists,
++ * we need to further restrict the mask to what is allowed
++ * by that old user_cpus_ptr.
++ */
++ if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
++ bool empty = !cpumask_and(new_mask, new_mask,
++ ctx->user_mask);
++
++ if (WARN_ON_ONCE(empty))
++ cpumask_copy(new_mask, cpus_allowed);
++ }
++ __set_cpus_allowed_ptr(p, ctx);
++ retval = -EINVAL;
 + }
 +
 +out_free_new_mask:
@@ -6693,6 +6850,8 @@ index 000000000000..acb8657e811d
 +
 +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 +{
++ struct affinity_context ac;
++ struct cpumask *user_mask;
 + struct task_struct *p;
 + int retval;
 +
@@ -6727,7 +6886,27 @@ index 000000000000..acb8657e811d
 + if (retval)
 + goto out_put_task;
 +
-+ retval = __sched_setaffinity(p, in_mask);
++ /*
++ * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
++ * alloc_user_cpus_ptr() returns NULL.
++ */
++ user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
++ if (user_mask) {
++ cpumask_copy(user_mask, in_mask);
++ } else if (IS_ENABLED(CONFIG_SMP)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++
++ ac = (struct affinity_context){
++ .new_mask = in_mask,
++ .user_mask = user_mask,
++ .flags = SCA_USER,
++ };
++
++ retval = __sched_setaffinity(p, &ac);
++ kfree(ac.user_mask);
++
 +out_put_task:
 + put_task_struct(p);
 + return retval;
@@ -7483,6 +7662,12 @@ index 000000000000..acb8657e811d
 + */
 +void __init init_idle(struct task_struct *idle, int cpu)
 +{
++#ifdef CONFIG_SMP
++ struct affinity_context ac = (struct affinity_context) {
++ .new_mask = cpumask_of(cpu),
++ .flags = 0,
++ };
++#endif
 + struct rq *rq = cpu_rq(cpu);
 + unsigned long flags;
 +
@@ -7509,7 +7694,7 @@ index 000000000000..acb8657e811d
 + *
 + * And since this is boot we can forgo the serialisation.
 + */
-+ set_cpus_allowed_common(idle, cpumask_of(cpu));
++ set_cpus_allowed_common(idle, &ac);
 +#endif
 +
 + /* Silence PROVE_RCU */
@@ -8137,6 +8322,8 @@ index 000000000000..acb8657e811d
 +
 + hrtick_rq_init(rq);
 + atomic_set(&rq->nr_iowait, 0);
++
++ zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
 + }
 +#ifdef CONFIG_SMP
 + /* Set rq->online for cpu 0 */
@@ -8653,10 +8840,10 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..c32403ed82b6
+index 000000000000..0b563999d4c1
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,668 @@
+@@ -0,0 +1,671 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -8903,6 +9090,9 @@ index 000000000000..c32403ed82b6
 +#endif
 + atomic_t nohz_flags;
 +#endif /* CONFIG_NO_HZ_COMMON */
++
++ /* Scratch cpumask to be temporarily used under rq_lock */
++ cpumask_var_t scratch_mask;
 +};
 +
 +extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
@@ -9874,7 +10064,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
 static inline int
 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index a4a20046e586..c363693cd869 100644
+index 771f8ddb7053..787a5069d69a 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -5,6 +5,10 @@
@@ -9888,7 +10078,7 @@ index a4a20046e586..c363693cd869 100644
 #include <linux/sched/affinity.h>
 #include <linux/sched/autogroup.h>
 #include <linux/sched/cpufreq.h>
-@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+@@ -3261,4 +3265,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
 cgroup_account_cputime(curr, delta_exec);
 }
@@ -9930,7 +10120,7 @@ index 857f837f52cb..5486c63e4790 100644
 }
 return 0;
 diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 84a188913cc9..53934e7ef5db 100644
+index 38f3698f5e5b..b9d597394316 100644
 --- a/kernel/sched/stats.h
 +++ b/kernel/sched/stats.h
 @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
@@ -10005,7 +10195,7 @@ index 8739c2a5a54e..d8dd6c15eb47 100644
 +#endif /* CONFIG_NUMA */
 +#endif
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c6d9dec11b74..2bc42ce8b48e 100644
+index 137d4abe3eda..6bada3a6d571 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
 @@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
@@ -10019,7 +10209,7 @@ index c6d9dec11b74..2bc42ce8b48e 100644
 #ifdef CONFIG_PERF_EVENTS
 static const int six_hundred_forty_kb = 640 * 1024;
 #endif
-@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
+@@ -1934,6 +1938,17 @@ static struct ctl_table kern_table[] = {
 .proc_handler = proc_dointvec,
 },
 #endif
@@ -10113,10 +10303,10 @@ index cb925e8ef9a8..67d823510f5c 100644
 return false;
 }
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index a2d301f58ced..2ccdede8585c 100644
+index ff0536cea968..ce266990006d 100644
 --- a/kernel/trace/trace_selftest.c
 +++ b/kernel/trace/trace_selftest.c
-@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
+@@ -1150,10 +1150,15 @@ static int trace_wakeup_test_thread(void *data)
 {
 /* Make this a -deadline thread */
 static const struct sched_attr attr = {
@@ -10132,3 +10322,6 @@ index a2d301f58ced..2ccdede8585c 100644
 };
 struct wakeup_test_data *x = data;
+--
+2.39.2
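To try the updated prjc patchset outside of the linux-tkg tooling, it can be applied to a plain kernel tree — a sketch, assuming a clean v6.2 source tree and the patch saved locally as prjc.patch (the repository's actual file name for it is not shown in this view):

    cd linux-6.2
    # Dry-run first; per the diffstat above the patch touches 39 files
    git apply --check prjc.patch && git apply prjc.patch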