Compare commits


1 commit
v6.2.1 ... v6.2

Author SHA1 Message Date
Sravan Balaji  72f591f5ef  PDS Kernel Configuration  2023-02-02 07:15:35 -05:00
3 changed files with 188 additions and 398 deletions
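To reproduce this comparison locally, the following is a minimal sketch. It assumes a clone of this repository with both tags fetched; the in-repo file name of the kernel config ("config.x86_64") and the kernel source path in KSRC are placeholders, not names confirmed by this page (scripts/diffconfig ships with the kernel source, not with this repository):

    # List the single commit in this range and diff the two tags.
    git log --oneline v6.2.1...v6.2
    git diff v6.2.1 v6.2

    # For the kernel config, the kernel tree's scripts/diffconfig gives a
    # terser option-by-option summary than a raw diff.
    git show v6.2.1:config.x86_64 > /tmp/old.config   # hypothetical path
    git show v6.2:config.x86_64   > /tmp/new.config   # hypothetical path
    "$KSRC"/scripts/diffconfig /tmp/old.config /tmp/new.config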

File 1 of 3: kernel build configuration (.config)

@@ -1,15 +1,15 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.2.0 Kernel Configuration
+# Linux/x86 6.2.0-rc4 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120201
+CONFIG_GCC_VERSION=120200
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24000
+CONFIG_AS_VERSION=23900
 CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24000
+CONFIG_LD_VERSION=23900
 CONFIG_LLD_VERSION=0
 CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
@@ -182,9 +182,10 @@ CONFIG_RCU_NOCB_CPU=y
 # CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set
 # CONFIG_RCU_NOCB_CPU_CB_BOOST is not set
 # CONFIG_TASKS_TRACE_RCU_READ_MB is not set
-CONFIG_RCU_LAZY=y
+# CONFIG_RCU_LAZY is not set
 # end of RCU Subsystem

+CONFIG_BUILD_BIN2C=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_IKHEADERS=m
@@ -487,7 +488,7 @@ CONFIG_X86_INTEL_TSX_MODE_AUTO=y
 CONFIG_X86_SGX=y
 CONFIG_EFI=y
 CONFIG_EFI_STUB=y
-# CONFIG_EFI_HANDOVER_PROTOCOL is not set
+CONFIG_EFI_HANDOVER_PROTOCOL=y
 CONFIG_EFI_MIXED=y
 # CONFIG_EFI_FAKE_MEMMAP is not set
 CONFIG_EFI_RUNTIME_MAP=y
@@ -637,7 +638,7 @@ CONFIG_ACPI_ADXL=y
 CONFIG_ACPI_CONFIGFS=m
 CONFIG_ACPI_PFRUT=m
 CONFIG_ACPI_PCC=y
-CONFIG_ACPI_FFH=y
+# CONFIG_ACPI_FFH is not set
 CONFIG_PMIC_OPREGION=y
 CONFIG_BYTCRC_PMIC_OPREGION=y
 CONFIG_CHTCRC_PMIC_OPREGION=y
@@ -935,7 +936,7 @@ CONFIG_MODULE_SIG_HASH="sha512"
 # CONFIG_MODULE_COMPRESS_GZIP is not set
 # CONFIG_MODULE_COMPRESS_XZ is not set
 CONFIG_MODULE_COMPRESS_ZSTD=y
-CONFIG_MODULE_DECOMPRESS=y
+# CONFIG_MODULE_DECOMPRESS is not set
 CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y
 CONFIG_MODPROBE_PATH="/sbin/modprobe"
 CONFIG_MODULES_TREE_LOOKUP=y
@@ -1988,7 +1989,7 @@ CONFIG_BT_HCIUART_QCA=y
 CONFIG_BT_HCIUART_AG6XX=y
 CONFIG_BT_HCIUART_MRVL=y
 CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBCM4377=m
+# CONFIG_BT_HCIBCM4377 is not set
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIDTL1=m
@@ -2326,7 +2327,7 @@ CONFIG_SYSFB=y
 CONFIG_FW_CS_DSP=m
 CONFIG_GOOGLE_FIRMWARE=y
 # CONFIG_GOOGLE_SMI is not set
-CONFIG_GOOGLE_CBMEM=m
+# CONFIG_GOOGLE_CBMEM is not set
 CONFIG_GOOGLE_COREBOOT_TABLE=m
 CONFIG_GOOGLE_MEMCONSOLE=m
 # CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set
@@ -2538,7 +2539,7 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y
 CONFIG_ZRAM_DEF_COMP="lzo-rle"
 CONFIG_ZRAM_WRITEBACK=y
 # CONFIG_ZRAM_MEMORY_TRACKING is not set
-CONFIG_ZRAM_MULTI_COMP=y
+# CONFIG_ZRAM_MULTI_COMP is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
 CONFIG_BLK_DEV_DRBD=m
@@ -2547,9 +2548,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_CDROM_PKTCDVD is not set
 CONFIG_ATA_OVER_ETH=m
 CONFIG_XEN_BLKDEV_FRONTEND=m
 CONFIG_XEN_BLKDEV_BACKEND=m
@@ -2598,8 +2597,6 @@ CONFIG_TIFM_CORE=m
 CONFIG_TIFM_7XX1=m
 CONFIG_ICS932S401=m
 CONFIG_ENCLOSURE_SERVICES=m
-CONFIG_SMPRO_ERRMON=m
-CONFIG_SMPRO_MISC=m
 CONFIG_HP_ILO=m
 CONFIG_APDS9802ALS=m
 CONFIG_ISL29003=m
@@ -3324,7 +3321,7 @@ CONFIG_ENC28J60=m
 # CONFIG_ENC28J60_WRITEVERIFY is not set
 CONFIG_ENCX24J600=m
 CONFIG_LAN743X=m
-CONFIG_VCAP=y
+# CONFIG_VCAP is not set
 CONFIG_NET_VENDOR_MICROSEMI=y
 CONFIG_MSCC_OCELOT_SWITCH_LIB=m
 CONFIG_NET_VENDOR_MICROSOFT=y
@@ -3898,7 +3895,7 @@ CONFIG_MT7921_COMMON=m
 CONFIG_MT7921E=m
 CONFIG_MT7921S=m
 CONFIG_MT7921U=m
-CONFIG_MT7996E=m
+# CONFIG_MT7996E is not set
 CONFIG_WLAN_VENDOR_MICROCHIP=y
 CONFIG_WILC1000=m
 CONFIG_WILC1000_SDIO=m
@@ -3962,29 +3959,27 @@ CONFIG_RTL8XXXU_UNTESTED=y
 CONFIG_RTW88=m
 CONFIG_RTW88_CORE=m
 CONFIG_RTW88_PCI=m
-CONFIG_RTW88_USB=m
 CONFIG_RTW88_8822B=m
 CONFIG_RTW88_8822C=m
 CONFIG_RTW88_8723D=m
 CONFIG_RTW88_8821C=m
 CONFIG_RTW88_8822BE=m
-CONFIG_RTW88_8822BU=m
+# CONFIG_RTW88_8822BU is not set
 CONFIG_RTW88_8822CE=m
-CONFIG_RTW88_8822CU=m
+# CONFIG_RTW88_8822CU is not set
 CONFIG_RTW88_8723DE=m
-CONFIG_RTW88_8723DU=m
+# CONFIG_RTW88_8723DU is not set
 CONFIG_RTW88_8821CE=m
-CONFIG_RTW88_8821CU=m
+# CONFIG_RTW88_8821CU is not set
 CONFIG_RTW88_DEBUG=y
 CONFIG_RTW88_DEBUGFS=y
 CONFIG_RTW89=m
 CONFIG_RTW89_CORE=m
 CONFIG_RTW89_PCI=m
 CONFIG_RTW89_8852A=m
-CONFIG_RTW89_8852B=m
 CONFIG_RTW89_8852C=m
 CONFIG_RTW89_8852AE=m
-CONFIG_RTW89_8852BE=m
+# CONFIG_RTW89_8852BE is not set
 CONFIG_RTW89_8852CE=m
 CONFIG_RTW89_DEBUG=y
 CONFIG_RTW89_DEBUGMSG=y
@@ -4232,7 +4227,7 @@ CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
 CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
 CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
 CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
-CONFIG_TOUCHSCREEN_CYTTSP5=m
+# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
 CONFIG_TOUCHSCREEN_DA9034=m
 CONFIG_TOUCHSCREEN_DA9052=m
 CONFIG_TOUCHSCREEN_DYNAPRO=m
@@ -4244,7 +4239,7 @@ CONFIG_TOUCHSCREEN_FUJITSU=m
 CONFIG_TOUCHSCREEN_GOODIX=m
 CONFIG_TOUCHSCREEN_HIDEEP=m
 CONFIG_TOUCHSCREEN_HYCON_HY46XX=m
-CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX=m
+# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
 CONFIG_TOUCHSCREEN_ILI210X=m
 CONFIG_TOUCHSCREEN_ILITEK=m
 CONFIG_TOUCHSCREEN_S6SY761=m
@@ -4319,7 +4314,7 @@ CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
 CONFIG_TOUCHSCREEN_ROHM_BU21023=m
 CONFIG_TOUCHSCREEN_IQS5XX=m
 CONFIG_TOUCHSCREEN_ZINITIX=m
-CONFIG_TOUCHSCREEN_HIMAX_HX83112B=m
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_88PM860X_ONKEY=m
 CONFIG_INPUT_88PM80X_ONKEY=m
@@ -4437,7 +4432,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_LEGACY_TIOCSTI is not set
+CONFIG_LEGACY_TIOCSTI=y
 CONFIG_LDISC_AUTOLOAD=y

 #
@@ -4530,7 +4525,7 @@ CONFIG_IPMI_SSIF=m
 CONFIG_IPMI_IPMB=m
 CONFIG_IPMI_WATCHDOG=m
 CONFIG_IPMI_POWEROFF=m
-CONFIG_SSIF_IPMI_BMC=m
+# CONFIG_SSIF_IPMI_BMC is not set
 CONFIG_IPMB_DEVICE_INTERFACE=m
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_TIMERIOMEM=m
@@ -4726,7 +4721,7 @@ CONFIG_SPI_MICROCHIP_CORE=m
 CONFIG_SPI_MICROCHIP_CORE_QSPI=m
 # CONFIG_SPI_LANTIQ_SSC is not set
 CONFIG_SPI_OC_TINY=m
-CONFIG_SPI_PCI1XXXX=m
+# CONFIG_SPI_PCI1XXXX is not set
 CONFIG_SPI_PXA2XX=m
 CONFIG_SPI_PXA2XX_PCI=m
 # CONFIG_SPI_ROCKCHIP is not set
@@ -4943,7 +4938,7 @@ CONFIG_GPIO_VIPERBOARD=m
 # Virtual GPIO drivers
 #
 CONFIG_GPIO_AGGREGATOR=m
-CONFIG_GPIO_LATCH=m
+# CONFIG_GPIO_LATCH is not set
 CONFIG_GPIO_MOCKUP=m
 CONFIG_GPIO_VIRTIO=m
 CONFIG_GPIO_SIM=m
@@ -5081,7 +5076,6 @@ CONFIG_HWMON_VID=m
 #
 CONFIG_SENSORS_ABITUGURU=m
 CONFIG_SENSORS_ABITUGURU3=m
-CONFIG_SENSORS_SMPRO=m
 CONFIG_SENSORS_AD7314=m
 CONFIG_SENSORS_AD7414=m
 CONFIG_SENSORS_AD7418=m
@@ -5201,9 +5195,8 @@ CONFIG_SENSORS_NCT7904=m
 CONFIG_SENSORS_NPCM7XX=m
 CONFIG_SENSORS_NZXT_KRAKEN2=m
 CONFIG_SENSORS_NZXT_SMART2=m
-CONFIG_SENSORS_OCC_P8_I2C=m
-CONFIG_SENSORS_OCC=m
-CONFIG_SENSORS_OXP=m
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_OXP is not set
 CONFIG_SENSORS_PCF8591=m
 CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
@@ -5408,7 +5401,7 @@ CONFIG_MAX63XX_WATCHDOG=m
 CONFIG_RETU_WATCHDOG=m
 CONFIG_ACQUIRE_WDT=m
 CONFIG_ADVANTECH_WDT=m
-CONFIG_ADVANTECH_EC_WDT=m
+# CONFIG_ADVANTECH_EC_WDT is not set
 CONFIG_ALIM1535_WDT=m
 CONFIG_ALIM7101_WDT=m
 CONFIG_EBC_C384_WDT=m
@@ -5490,7 +5483,7 @@ CONFIG_BCMA_DRIVER_GPIO=y
 #
 CONFIG_MFD_CORE=y
 CONFIG_MFD_AS3711=y
-CONFIG_MFD_SMPRO=m
+# CONFIG_MFD_SMPRO is not set
 CONFIG_PMIC_ADP5520=y
 CONFIG_MFD_AAT2870_CORE=y
 CONFIG_MFD_BCM590XX=m
@@ -5671,7 +5664,7 @@ CONFIG_REGULATOR_MT6311=m
 CONFIG_REGULATOR_MT6323=m
 CONFIG_REGULATOR_MT6331=m
 CONFIG_REGULATOR_MT6332=m
-CONFIG_REGULATOR_MT6357=m
+# CONFIG_REGULATOR_MT6357 is not set
 CONFIG_REGULATOR_MT6358=m
 CONFIG_REGULATOR_MT6359=m
 CONFIG_REGULATOR_MT6360=m
@@ -5693,7 +5686,7 @@ CONFIG_REGULATOR_RT5120=m
 CONFIG_REGULATOR_RT5190A=m
 CONFIG_REGULATOR_RT5759=m
 CONFIG_REGULATOR_RT6160=m
-CONFIG_REGULATOR_RT6190=m
+# CONFIG_REGULATOR_RT6190 is not set
 CONFIG_REGULATOR_RT6245=m
 CONFIG_REGULATOR_RTQ2134=m
 CONFIG_REGULATOR_RTMV20=m
@@ -6187,8 +6180,7 @@ CONFIG_VIDEO_VIMC=m
 CONFIG_VIDEO_VIVID=m
 CONFIG_VIDEO_VIVID_CEC=y
 CONFIG_VIDEO_VIVID_MAX_DEVS=64
-CONFIG_VIDEO_VISL=m
-# CONFIG_VISL_DEBUGFS is not set
+# CONFIG_VIDEO_VISL is not set

 CONFIG_DVB_TEST_DRIVERS=y
 CONFIG_DVB_VIDTV=m
@@ -6263,7 +6255,7 @@ CONFIG_VIDEO_NOON010PC30=m
 CONFIG_VIDEO_OG01A1B=m
 CONFIG_VIDEO_OV02A10=m
 CONFIG_VIDEO_OV08D10=m
-CONFIG_VIDEO_OV08X40=m
+# CONFIG_VIDEO_OV08X40 is not set
 CONFIG_VIDEO_OV13858=m
 CONFIG_VIDEO_OV13B10=m
 CONFIG_VIDEO_OV2640=m
@@ -6271,7 +6263,7 @@ CONFIG_VIDEO_OV2659=m
 CONFIG_VIDEO_OV2680=m
 CONFIG_VIDEO_OV2685=m
 CONFIG_VIDEO_OV2740=m
-CONFIG_VIDEO_OV4689=m
+# CONFIG_VIDEO_OV4689 is not set
 CONFIG_VIDEO_OV5647=m
 CONFIG_VIDEO_OV5648=m
 CONFIG_VIDEO_OV5670=m
@@ -6581,6 +6573,7 @@ CONFIG_DRM=y
 CONFIG_DRM_MIPI_DBI=m
 CONFIG_DRM_MIPI_DSI=y
 # CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_USE_DYNAMIC_DEBUG=y
 CONFIG_DRM_KMS_HELPER=y
 CONFIG_DRM_FBDEV_EMULATION=y
 CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6858,7 +6851,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
 # CONFIG_LOGO is not set
 # end of Graphics support

-CONFIG_DRM_ACCEL=y
+# CONFIG_DRM_ACCEL is not set
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
 # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
@@ -7170,11 +7163,11 @@ CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m
-CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927=m
+# CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927 is not set
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825=m
-CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE=m
+# CONFIG_SND_SOC_INTEL_AVS_MACH_PROBE is not set
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT274=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT286=m
 CONFIG_SND_SOC_INTEL_AVS_MACH_RT298=m
@@ -7527,7 +7520,7 @@ CONFIG_SND_SOC_WM8903=m
 CONFIG_SND_SOC_WM8904=m
 CONFIG_SND_SOC_WM8940=m
 CONFIG_SND_SOC_WM8960=m
-CONFIG_SND_SOC_WM8961=m
+# CONFIG_SND_SOC_WM8961 is not set
 CONFIG_SND_SOC_WM8962=m
 CONFIG_SND_SOC_WM8974=m
 CONFIG_SND_SOC_WM8978=m
@@ -8357,7 +8350,7 @@ CONFIG_INFINIBAND_HFI1=m
 # CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
 # CONFIG_SDMA_VERBOSITY is not set
 CONFIG_INFINIBAND_IRDMA=m
-CONFIG_MANA_INFINIBAND=m
+# CONFIG_MANA_INFINIBAND is not set
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
@@ -8633,7 +8626,7 @@ CONFIG_NITRO_ENCLAVES=m
 CONFIG_ACRN_HSM=m
 CONFIG_EFI_SECRET=m
 CONFIG_SEV_GUEST=m
-CONFIG_TDX_GUEST_DRIVER=m
+# CONFIG_TDX_GUEST_DRIVER is not set
 CONFIG_VIRTIO_ANCHOR=y
 CONFIG_VIRTIO=y
 CONFIG_VIRTIO_PCI_LIB=m
@@ -8831,7 +8824,7 @@ CONFIG_CROS_EC_DEBUGFS=m
 CONFIG_CROS_EC_SENSORHUB=m
 CONFIG_CROS_EC_SYSFS=m
 CONFIG_CROS_EC_TYPEC=m
-CONFIG_CROS_HPS_I2C=m
+# CONFIG_CROS_HPS_I2C is not set
 CONFIG_CROS_USBPD_LOGGER=m
 CONFIG_CROS_USBPD_NOTIFY=m
 CONFIG_CHROMEOS_PRIVACY_SCREEN=m
@@ -8908,9 +8901,7 @@ CONFIG_AMILO_RFKILL=m
 CONFIG_FUJITSU_LAPTOP=m
 CONFIG_FUJITSU_TABLET=m
 CONFIG_GPD_POCKET_FAN=m
-CONFIG_X86_PLATFORM_DRIVERS_HP=y
-CONFIG_HP_ACCEL=m
-CONFIG_HP_WMI=m
+# CONFIG_X86_PLATFORM_DRIVERS_HP is not set
 CONFIG_WIRELESS_HOTKEY=m
 CONFIG_IBM_RTL=m
 CONFIG_IDEAPAD_LAPTOP=m
@@ -8925,7 +8916,7 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
 CONFIG_THINKPAD_LMI=m
 CONFIG_INTEL_ATOMISP2_PDX86=y
 CONFIG_INTEL_ATOMISP2_LED=m
-CONFIG_INTEL_IFS=m
+# CONFIG_INTEL_IFS is not set
 CONFIG_INTEL_SAR_INT1092=m
 CONFIG_INTEL_SKL_INT3472=m
 CONFIG_INTEL_PMC_CORE=y
@@ -9051,7 +9042,7 @@ CONFIG_INTEL_IOMMU_SVM=y
 # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
 CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
-CONFIG_IOMMUFD=m
+# CONFIG_IOMMUFD is not set
 CONFIG_IRQ_REMAP=y
 CONFIG_HYPERV_IOMMU=y
 CONFIG_VIRTIO_IOMMU=m
@@ -9228,9 +9219,8 @@ CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
 CONFIG_IIO_ST_ACCEL_3AXIS=m
 CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
 CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
-CONFIG_IIO_KX022A=m
-CONFIG_IIO_KX022A_SPI=m
-CONFIG_IIO_KX022A_I2C=m
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
 CONFIG_KXSD9=m
 CONFIG_KXSD9_SPI=m
 CONFIG_KXSD9_I2C=m
@@ -9257,7 +9247,7 @@ CONFIG_STK8BA50=m
 # Analog to digital converters
 #
 CONFIG_AD_SIGMA_DELTA=m
-CONFIG_AD4130=m
+# CONFIG_AD4130 is not set
 CONFIG_AD7091R5=m
 CONFIG_AD7124=m
 CONFIG_AD7192=m
@@ -9298,7 +9288,7 @@ CONFIG_MAX1027=m
 CONFIG_MAX11100=m
 CONFIG_MAX1118=m
 CONFIG_MAX11205=m
-CONFIG_MAX11410=m
+# CONFIG_MAX11410 is not set
 CONFIG_MAX1241=m
 CONFIG_MAX1363=m
 CONFIG_MAX9611=m
@@ -9306,7 +9296,7 @@ CONFIG_MCP320X=m
 CONFIG_MCP3422=m
 CONFIG_MCP3911=m
 CONFIG_MEDIATEK_MT6360_ADC=m
-CONFIG_MEDIATEK_MT6370_ADC=m
+# CONFIG_MEDIATEK_MT6370_ADC is not set
 CONFIG_MEN_Z188_ADC=m
 CONFIG_MP2629_ADC=m
 CONFIG_NAU7802=m
@@ -9339,7 +9329,7 @@ CONFIG_XILINX_XADC=m

 #
 # Analog to digital and digital to analog converters
 #
-CONFIG_AD74115=m
+# CONFIG_AD74115 is not set
 CONFIG_AD74413R=m
 # end of Analog to digital and digital to analog converters
@@ -9489,7 +9479,7 @@ CONFIG_AD9523=m
 #
 CONFIG_ADF4350=m
 CONFIG_ADF4371=m
-CONFIG_ADF4377=m
+# CONFIG_ADF4377 is not set
 CONFIG_ADMV1013=m
 CONFIG_ADMV1014=m
 CONFIG_ADMV4420=m
@@ -9798,7 +9788,7 @@ CONFIG_TMP007=m
 CONFIG_TMP117=m
 CONFIG_TSYS01=m
 CONFIG_TSYS02D=m
-CONFIG_MAX30208=m
+# CONFIG_MAX30208 is not set
 CONFIG_MAX31856=m
 CONFIG_MAX31865=m
 # end of Temperature sensors
@@ -9952,8 +9942,7 @@ CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
 CONFIG_FPGA_DFL_PCI=m
 CONFIG_FPGA_M10_BMC_SEC_UPDATE=m
 CONFIG_FPGA_MGR_MICROCHIP_SPI=m
-CONFIG_FPGA_MGR_LATTICE_SYSCONFIG=m
-CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI=m
+# CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI is not set
 CONFIG_TEE=m
 CONFIG_AMDTEE=m
 CONFIG_MULTIPLEXER=m
@@ -10198,10 +10187,10 @@ CONFIG_SQUASHFS=m
 # CONFIG_SQUASHFS_FILE_CACHE is not set
 CONFIG_SQUASHFS_FILE_DIRECT=y
 CONFIG_SQUASHFS_DECOMP_SINGLE=y
-CONFIG_SQUASHFS_DECOMP_MULTI=y
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
-CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_ZLIB=y
 CONFIG_SQUASHFS_LZ4=y
@@ -10880,7 +10869,6 @@ CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
 CONFIG_BTREE=y
 CONFIG_INTERVAL_TREE=y
-CONFIG_INTERVAL_TREE_SPAN_ITER=y
 CONFIG_XARRAY_MULTI=y
 CONFIG_ASSOCIATIVE_ARRAY=y
 CONFIG_HAS_IOMEM=y

File 2 of 3: build preparation script (_tkg_srcprep)

@@ -866,11 +866,6 @@ _tkg_srcprep() {
   _disable "CPU_FREQ_DEFAULT_GOV_ONDEMAND" "CPU_FREQ_DEFAULT_GOV_CONSERVATIVE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE" "CPU_FREQ_DEFAULT_GOV_PERFORMANCE_NODEF"
   _module "BLK_DEV_LOOP"

-  # buggy project C/PSI interaction workaround
-  if [ "${_cpusched}" = "pds" ] || [ "${_cpusched}" = "bmq" ]; then
-    _enable "PSI_DEFAULT_DISABLED"
-  fi
-
   if [ -n "$_custom_commandline" ]; then
     _enable "CMDLINE_BOOL"
     _disable "CMDLINE_OVERRIDE"

File 3 of 3: Project C (prjc) scheduler patch

@@ -1,62 +1,8 @@
-From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
-From: Peter Jung <admin@ptr1337.dev>
-Date: Wed, 22 Feb 2023 19:24:36 +0100
-Subject: [PATCH] prjc
-
-Signed-off-by: Peter Jung <admin@ptr1337.dev>
----
- .../admin-guide/kernel-parameters.txt | 6 +
- Documentation/admin-guide/sysctl/kernel.rst | 10 +
- Documentation/scheduler/sched-BMQ.txt | 110 +
- fs/proc/base.c | 2 +-
- include/asm-generic/resource.h | 2 +-
- include/linux/sched.h | 33 +-
- include/linux/sched/deadline.h | 20 +
- include/linux/sched/prio.h | 26 +
- include/linux/sched/rt.h | 2 +
- include/linux/sched/topology.h | 3 +-
- init/Kconfig | 34 +
- init/init_task.c | 18 +
- kernel/Kconfig.preempt | 2 +-
- kernel/cgroup/cpuset.c | 4 +-
- kernel/delayacct.c | 2 +-
- kernel/exit.c | 4 +-
- kernel/locking/rtmutex.c | 16 +-
- kernel/sched/Makefile | 5 +
- kernel/sched/alt_core.c | 8111 +++++++++++++++++
- kernel/sched/alt_debug.c | 31 +
- kernel/sched/alt_sched.h | 671 ++
- kernel/sched/bmq.h | 110 +
- kernel/sched/build_policy.c | 8 +-
- kernel/sched/build_utility.c | 2 +
- kernel/sched/cpufreq_schedutil.c | 10 +
- kernel/sched/cputime.c | 10 +-
- kernel/sched/debug.c | 10 +
- kernel/sched/idle.c | 2 +
- kernel/sched/pds.h | 127 +
- kernel/sched/pelt.c | 4 +-
- kernel/sched/pelt.h | 8 +-
- kernel/sched/sched.h | 9 +
- kernel/sched/stats.c | 4 +
- kernel/sched/stats.h | 2 +
- kernel/sched/topology.c | 17 +
- kernel/sysctl.c | 15 +
- kernel/time/hrtimer.c | 2 +
- kernel/time/posix-cpu-timers.c | 10 +-
- kernel/trace/trace_selftest.c | 5 +
- 39 files changed, 9445 insertions(+), 22 deletions(-)
- create mode 100644 Documentation/scheduler/sched-BMQ.txt
- create mode 100644 kernel/sched/alt_core.c
- create mode 100644 kernel/sched/alt_debug.c
- create mode 100644 kernel/sched/alt_sched.h
- create mode 100644 kernel/sched/bmq.h
- create mode 100644 kernel/sched/pds.h
-
 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 6cfa6e3996cf..1b6a407213da 100644
+index 42af9ca0127e..31747ec54f9d 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5437,6 +5437,12 @@
+@@ -5406,6 +5406,12 @@
  	sa1100ir	[NET]
  			See drivers/net/irda/sa1100_ir.c.
@@ -70,10 +16,10 @@ index 6cfa6e3996cf..1b6a407213da 100644
  	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 46e3d62c0eea..fb4568c919d0 100644
+index 98d1b198b2b4..d7c78a107f93 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1597,3 +1597,13 @@ is 10 seconds.
+@@ -1552,3 +1552,13 @@ is 10 seconds.
  The softlockup threshold is (``2 * watchdog_thresh``). Setting this
  tunable to zero will disable lockup detection altogether.
@@ -230,7 +176,7 @@ index 8874f681b056..59eb72bf7d5f 100644
  	[RLIMIT_RTTIME]	= { RLIM_INFINITY, RLIM_INFINITY },	\
  }
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 853d08f7562b..ad7e050d7455 100644
+index ffb6eb55cd13..2e730a59caa2 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -762,8 +762,14 @@ struct task_struct {
@@ -286,7 +232,7 @@ index 853d08f7562b..ad7e050d7455 100644
  #ifdef CONFIG_CGROUP_SCHED
  	struct task_group		*sched_task_group;
-@@ -1539,6 +1561,15 @@ struct task_struct {
+@@ -1545,6 +1567,15 @@ struct task_struct {
  	 */
  };
@@ -406,10 +352,10 @@ index 816df6cc444e..c8da08e18c91 100644
  #else
  static inline void rebuild_sched_domains_energy(void)
 diff --git a/init/Kconfig b/init/Kconfig
-index 44e90b28a30f..af24591984ab 100644
+index 94125d3b6893..c87ba766d354 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -821,6 +821,7 @@ menu "Scheduler features"
+@@ -819,6 +819,7 @@ menu "Scheduler features"
  config UCLAMP_TASK
  	bool "Enable utilization clamping for RT/FAIR tasks"
  	depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -417,7 +363,7 @@ index 44e90b28a30f..af24591984ab 100644
  	help
  	  This feature enables the scheduler to track the clamped utilization
  	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
  	  If in doubt, use the default value.
@@ -453,7 +399,7 @@ index 44e90b28a30f..af24591984ab 100644
  endmenu
  #
-@@ -924,6 +954,7 @@ config NUMA_BALANCING
+@@ -918,6 +948,7 @@ config NUMA_BALANCING
  	depends on ARCH_SUPPORTS_NUMA_BALANCING
  	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
  	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -461,7 +407,7 @@ index 44e90b28a30f..af24591984ab 100644
  	help
  	  This option adds support for automatic NUMA aware memory/task placement.
  	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1021,6 +1052,7 @@ config FAIR_GROUP_SCHED
+@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
  	depends on CGROUP_SCHED
  	default CGROUP_SCHED
@@ -469,7 +415,7 @@ index 44e90b28a30f..af24591984ab 100644
  config CFS_BANDWIDTH
  	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
  	depends on FAIR_GROUP_SCHED
-@@ -1043,6 +1075,7 @@ config RT_GROUP_SCHED
+@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
  	  realtime bandwidth for them.
  	  See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -477,7 +423,7 @@ index 44e90b28a30f..af24591984ab 100644
  endif #CGROUP_SCHED
  config UCLAMP_TASK_GROUP
-@@ -1287,6 +1320,7 @@ config CHECKPOINT_RESTORE
+@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
  config SCHED_AUTOGROUP
  	bool "Automatic process group scheduling"
@@ -545,10 +491,10 @@ index c2f1fd95a821..41654679b1b2 100644
  	  This option permits Core Scheduling, a means of coordinated task
  	  selection across SMT siblings. When enabled -- see
 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index ca826bd1eba3..60e194f1d6d8 100644
+index b474289c15b8..a23224b45b03 100644
 --- a/kernel/cgroup/cpuset.c
 +++ b/kernel/cgroup/cpuset.c
-@@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
  	return ret;
  }
@@ -557,7 +503,7 @@ index ca826bd1eba3..60e194f1d6d8 100644
  /*
   * Helper routine for generate_sched_domains().
   * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1187,7 +1187,7 @@ static void rebuild_sched_domains_locked(void)
+@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
  	/* Have scheduler rebuild the domains */
  	partition_and_rebuild_sched_domains(ndoms, doms, attr);
  }
@@ -580,10 +526,10 @@ index e39cb696cfbd..463423572e09 100644
  		d->cpu_count += t1;
 diff --git a/kernel/exit.c b/kernel/exit.c
-index 15dc2ec80c46..1e583e0f89a7 100644
+index 35e0a31a0315..64e368441cf4 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
-@@ -172,7 +172,7 @@ static void __exit_signal(struct task_struct *tsk)
+@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
  			sig->curr_target = next_thread(tsk);
  	}
@@ -592,7 +538,7 @@ index 15dc2ec80c46..1e583e0f89a7 100644
  			      sizeof(unsigned long long));
  	/*
-@@ -193,7 +193,7 @@ static void __exit_signal(struct task_struct *tsk)
+@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
  		sig->inblock += task_io_get_inblock(tsk);
  		sig->oublock += task_io_get_oublock(tsk);
  		task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -602,10 +548,10 @@ index 15dc2ec80c46..1e583e0f89a7 100644
  	__unhash_process(tsk, group_dead);
  	write_sequnlock(&sig->stats_lock);
 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 728f434de2bb..0e1082a4e878 100644
+index 7779ee8abc2a..5b9893cdfb1b 100644
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
-@@ -337,21 +337,25 @@ static __always_inline void
+@@ -300,21 +300,25 @@ static __always_inline void
  waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
  {
  	waiter->prio = __waiter_prio(task);
@@ -633,7 +579,7 @@ index 728f434de2bb..0e1082a4e878 100644
  	/*
  	 * If both waiters have dl_prio(), we check the deadlines of the
  	 * associated tasks.
-@@ -360,16 +364,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
  	 */
  	if (dl_prio(left->prio))
  		return dl_time_before(left->deadline, right->deadline);
@@ -656,7 +602,7 @@ index 728f434de2bb..0e1082a4e878 100644
  	/*
  	 * If both waiters have dl_prio(), we check the deadlines of the
  	 * associated tasks.
-@@ -378,8 +388,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
  	 */
  	if (dl_prio(left->prio))
  		return left->deadline == right->deadline;
@@ -686,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..f5e9c01f9382
+index 000000000000..acb8657e811d
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8111 @@
+@@ -0,0 +1,7978 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -759,7 +705,7 @@ index 000000000000..f5e9c01f9382
 +#define sched_feat(x)	(0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.2-r0"
++#define ALT_SCHED_VERSION "v6.1-r3"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -780,12 +726,6 @@ index 000000000000..f5e9c01f9382
 +#include "pds.h"
 +#endif
 +
-+struct affinity_context {
-+	const struct cpumask *new_mask;
-+	struct cpumask *user_mask;
-+	unsigned int flags;
-+};
-+
 +static int __init sched_timeslice(char *str)
 +{
 +	int timeslice_ms;
@@ -848,14 +788,6 @@ index 000000000000..f5e9c01f9382
 +static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
 +static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
 +
-+/* task function */
-+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
-+{
-+	if (!p->user_cpus_ptr)
-+		return cpu_possible_mask; /* &init_task.cpus_mask */
-+	return p->user_cpus_ptr;
-+}
-+
 +/* sched_queue related functions */
 +static inline void sched_queue_init(struct sched_queue *q)
 +{
@@ -1468,7 +1400,7 @@ index 000000000000..f5e9c01f9382
 +
 +#define __SCHED_ENQUEUE_TASK(p, rq, flags)	\
 +	sched_info_enqueue(rq, p);	\
-+	psi_enqueue(p, flags & ENQUEUE_WAKEUP);	\
++	psi_enqueue(p, flags);	\
 +	\
 +	p->sq_idx = task_sched_prio_idx(p, rq);	\
 +	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
@@ -2336,101 +2268,35 @@ index 000000000000..f5e9c01f9382
 +}
 +
 +static inline void
-+set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	cpumask_copy(&p->cpus_mask, ctx->new_mask);
-+	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
-+
-+	/*
-+	 * Swap in a new user_cpus_ptr if SCA_USER flag set
-+	 */
-+	if (ctx->flags & SCA_USER)
-+		swap(p->user_cpus_ptr, ctx->user_mask);
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
 +}
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
 +	lockdep_assert_held(&p->pi_lock);
-+	set_cpus_allowed_common(p, ctx);
++	set_cpus_allowed_common(p, new_mask);
 +}
 +
-+/*
-+ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
-+ * affinity (if any) should be destroyed too.
-+ */
 +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	struct affinity_context ac = {
-+		.new_mask = new_mask,
-+		.user_mask = NULL,
-+		.flags = SCA_USER,	/* clear the user requested mask */
-+	};
-+	union cpumask_rcuhead {
-+		cpumask_t cpumask;
-+		struct rcu_head rcu;
-+	};
-+
-+	__do_set_cpus_allowed(p, &ac);
-+
-+	/*
-+	 * Because this is called with p->pi_lock held, it is not possible
-+	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
-+	 * kfree_rcu().
-+	 */
-+	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
-+}
-+
-+static cpumask_t *alloc_user_cpus_ptr(int node)
-+{
-+	/*
-+	 * See do_set_cpus_allowed() above for the rcu_head usage.
-+	 */
-+	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
-+
-+	return kmalloc_node(size, GFP_KERNEL, node);
++	__do_set_cpus_allowed(p, new_mask);
 +}
 +
 +int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 +		      int node)
 +{
-+	cpumask_t *user_mask;
-+	unsigned long flags;
-+
-+	/*
-+	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
-+	 * may differ by now due to racing.
-+	 */
-+	dst->user_cpus_ptr = NULL;
-+
-+	/*
-+	 * This check is racy and losing the race is a valid situation.
-+	 * It is not worth the extra overhead of taking the pi_lock on
-+	 * every fork/clone.
-+	 */
-+	if (data_race(!src->user_cpus_ptr))
++	if (!src->user_cpus_ptr)
 +		return 0;
 +
-+	user_mask = alloc_user_cpus_ptr(node);
-+	if (!user_mask)
++	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
++	if (!dst->user_cpus_ptr)
 +		return -ENOMEM;
 +
-+	/*
-+	 * Use pi_lock to protect content of user_cpus_ptr
-+	 *
-+	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
-+	 * do_set_cpus_allowed().
-+	 */
-+	raw_spin_lock_irqsave(&src->pi_lock, flags);
-+	if (src->user_cpus_ptr) {
-+		swap(dst->user_cpus_ptr, user_mask);
-+		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
-+	}
-+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
-+
-+	if (unlikely(user_mask))
-+		kfree(user_mask);
-+
++	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
 +	return 0;
 +}
 +
@@ -2775,8 +2641,6 @@ index 000000000000..f5e9c01f9382
 +
 +static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
 +			    raw_spinlock_t *lock, unsigned long irq_flags)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
 +{
 +	/* Can the task run on the task's current CPU? If so, we're done */
 +	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -2814,7 +2678,8 @@ index 000000000000..f5e9c01f9382
 +}
 +
 +static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+					 struct affinity_context *ctx,
++					 const struct cpumask *new_mask,
++					 u32 flags,
 +					 struct rq *rq,
 +					 raw_spinlock_t *lock,
 +					 unsigned long irq_flags)
@@ -2822,6 +2687,7 @@ index 000000000000..f5e9c01f9382
 +	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 +	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 +	bool kthread = p->flags & PF_KTHREAD;
++	struct cpumask *user_mask = NULL;
 +	int dest_cpu;
 +	int ret = 0;
 +
@@ -2839,7 +2705,7 @@ index 000000000000..f5e9c01f9382
 +		cpu_valid_mask = cpu_online_mask;
 +	}
 +
-+	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
++	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
 +		ret = -EINVAL;
 +		goto out;
 +	}
@@ -2848,23 +2714,30 @@ index 000000000000..f5e9c01f9382
 +	 * Must re-check here, to close a race against __kthread_bind(),
 +	 * sched_setaffinity() is not guaranteed to observe the flag.
 +	 */
-+	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 +		ret = -EINVAL;
 +		goto out;
 +	}
 +
-+	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
++	if (cpumask_equal(&p->cpus_mask, new_mask))
 +		goto out;
 +
-+	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
 +	if (dest_cpu >= nr_cpu_ids) {
 +		ret = -EINVAL;
 +		goto out;
 +	}
 +
-+	__do_set_cpus_allowed(p, ctx);
++	__do_set_cpus_allowed(p, new_mask);
 +
-+	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++	if (flags & SCA_USER)
++		user_mask = clear_user_cpus_ptr(p);
++
++	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++	kfree(user_mask);
++
++	return ret;
 +
 +out:
 +	__task_access_unlock(p, lock);
@@ -2875,6 +2748,7 @@ index 000000000000..f5e9c01f9382
 +
 +/*
 + * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
 + * is removed from the allowed bitmask.
 + *
 + * NOTE: the caller must have a valid reference to the task, the
@@ -2882,7 +2756,7 @@ index 000000000000..f5e9c01f9382
 + * call is not atomic; no spinlocks may be held.
 + */
 +static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  struct affinity_context *ctx)
++				  const struct cpumask *new_mask, u32 flags)
 +{
 +	unsigned long irq_flags;
 +	struct rq *rq;
@@ -2890,36 +2764,20 @@ index 000000000000..f5e9c01f9382
 +
 +	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
 +	rq = __task_access_lock(p, &lock);
-+	/*
-+	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
-+	 * flags are set.
-+	 */
-+	if (p->user_cpus_ptr &&
-+	    !(ctx->flags & SCA_USER) &&
-+	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
-+		ctx->new_mask = rq->scratch_mask;
-+
 +
-+	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
++	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
 +}
 +
 +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	struct affinity_context ac = {
-+		.new_mask = new_mask,
-+		.flags = 0,
-+	};
-+
-+	return __set_cpus_allowed_ptr(p, &ac);
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
 +}
 +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 +
 +/*
 + * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
-+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
-+ * affinity or use cpu_online_mask instead.
-+ *
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
++ * and pointing @p->user_cpus_ptr to a copy of the old mask.
 + * If the resulting mask is empty, leave the affinity unchanged and return
 + * -EINVAL.
 + */
@@ -2927,34 +2785,48 @@ index 000000000000..f5e9c01f9382
 +					struct cpumask *new_mask,
 +					const struct cpumask *subset_mask)
 +{
-+	struct affinity_context ac = {
-+		.new_mask = new_mask,
-+		.flags = 0,
-+	};
++	struct cpumask *user_mask = NULL;
 +	unsigned long irq_flags;
 +	raw_spinlock_t *lock;
 +	struct rq *rq;
 +	int err;
 +
++	if (!p->user_cpus_ptr) {
++		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
++		if (!user_mask)
++			return -ENOMEM;
++	}
++
 +	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
 +	rq = __task_access_lock(p, &lock);
 +
-+	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
++	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
 +		err = -EINVAL;
 +		goto err_unlock;
 +	}
 +
-+	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
++	/*
++	 * We're about to butcher the task affinity, so keep track of what
++	 * the user asked for in case we're able to restore it later on.
++	 */
++	if (user_mask) {
++		cpumask_copy(user_mask, p->cpus_ptr);
++		p->user_cpus_ptr = user_mask;
++	}
++
++	/*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
++	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
 +
 +err_unlock:
 +	__task_access_unlock(p, lock);
 +	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	kfree(user_mask);
 +	return err;
 +}
 +
 +/*
 + * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
 + * old affinity mask. If the resulting mask is empty, we warn and walk
 + * up the cpuset hierarchy until we find a suitable mask.
 + */
@@ -2998,29 +2870,34 @@ index 000000000000..f5e9c01f9382
 +}
 +
 +static int
-+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
 +
 +/*
 + * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr().
++ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
++ * @p->user_cpus_ptr.
 + *
 + * It is the caller's responsibility to serialise this with any calls to
 + * force_compatible_cpus_allowed_ptr(@p).
 + */
 +void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 +{
-+	struct affinity_context ac = {
-+		.new_mask = task_user_cpus(p),
-+		.flags = 0,
-+	};
-+	int ret;
++	struct cpumask *user_mask = p->user_cpus_ptr;
++	unsigned long flags;
 +
 +	/*
-+	 * Try to restore the old affinity mask with __sched_setaffinity().
-+	 * Cpuset masking will be done there too.
++	 * Try to restore the old affinity mask. If this fails, then
++	 * we free the mask explicitly to avoid it being inherited across
++	 * a subsequent fork().
 +	 */
-+	ret = __sched_setaffinity(p, &ac);
-+	WARN_ON_ONCE(ret);
++	if (!user_mask || !__sched_setaffinity(p, user_mask))
++		return;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	user_mask = clear_user_cpus_ptr(p);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	kfree(user_mask);
 +}
 +
 +#else /* CONFIG_SMP */
@@ -3032,9 +2909,9 @@ index 000000000000..f5e9c01f9382
 +
 +static inline int
 +__set_cpus_allowed_ptr(struct task_struct *p,
-+		       struct affinity_context *ctx)
++		       const struct cpumask *new_mask, u32 flags)
 +{
-+	return set_cpus_allowed_ptr(p, ctx->new_mask);
++	return set_cpus_allowed_ptr(p, new_mask);
 +}
 +
 +static inline bool rq_has_pinned_tasks(struct rq *rq)
@@ -3042,11 +2919,6 @@ index 000000000000..f5e9c01f9382
 +	return false;
 +}
 +
-+static inline cpumask_t *alloc_user_cpus_ptr(int node)
-+{
-+	return NULL;
-+}
-+
 +#endif /* !CONFIG_SMP */
 +
 +static void
@@ -3158,6 +3030,13 @@ index 000000000000..f5e9c01f9382
 +	if (!llist)
 +		return;
 +
++	/*
++	 * rq::ttwu_pending racy indication of out-standing wakeups.
++	 * Races such that false-negatives are possible, since they
++	 * are shorter lived that false-positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
 +	rq_lock_irqsave(rq, &rf);
 +	update_rq_clock(rq);
 +
@@ -3171,17 +3050,6 @@ index 000000000000..f5e9c01f9382
 +		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
 +	}
 +
-+	/*
-+	 * Must be after enqueueing at least once task such that
-+	 * idle_cpu() does not observe a false-negative -- if it does,
-+	 * it is possible for select_idle_siblings() to stack a number
-+	 * of tasks on this CPU during that window.
-+	 *
-+	 * It is ok to clear ttwu_pending when another task pending.
-+	 * We will receive IPI after local irq enabled and then enqueue it.
-+	 * Since now nr_running > 0, idle_cpu() will always get correct result.
-+	 */
-+	WRITE_ONCE(rq->ttwu_pending, 0);
 +	rq_unlock_irqrestore(rq, &rf);
 +}
 +
@@ -4767,9 +4635,7 @@ index 000000000000..f5e9c01f9382
 +	struct rq *rq = cpu_rq(cpu);
 +	u64 resched_latency;
 +
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		arch_scale_freq_tick();
-+
++	arch_scale_freq_tick();
 +	sched_clock_tick();
 +
 +	raw_spin_lock(&rq->lock);
@@ -4868,7 +4734,7 @@ index 000000000000..f5e9c01f9382
 +	int i;
 +
 +	for_each_cpu_wrap(i, &chk, cpu) {
-+		if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\
++		if (cpumask_subset(cpu_smt_mask(i), &chk) &&
 +		    sg_balance_trigger(i))
 +			return;
 +	}
@@ -4991,7 +4857,6 @@ index 000000000000..f5e9c01f9382
 +static void sched_tick_stop(int cpu)
 +{
 +	struct tick_work *twork;
-+	int os;
 +
 +	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
 +		return;
@@ -4999,10 +4864,7 @@ index 000000000000..f5e9c01f9382
 +	WARN_ON_ONCE(!tick_work_cpu);
 +
 +	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	/* There cannot be competing actions, but don't rely on stop-machine. */
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
-+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
-+	/* Don't cancel, as this would mess up the state machine. */
++	cancel_delayed_work_sync(&twork->work);
 +}
 +#endif /* CONFIG_HOTPLUG_CPU */
 +
@@ -5126,7 +4988,8 @@ index 000000000000..f5e9c01f9382
 +		pr_err("Preemption disabled at:");
 +		print_ip_sym(KERN_ERR, preempt_disable_ip);
 +	}
-+	check_panic_on_warn("scheduling while atomic");
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
 +
 +	dump_stack();
 +	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -5442,7 +5305,7 @@ index 000000000000..f5e9c01f9382
 +	prev->sched_contributes_to_load =
 +		(prev_state & TASK_UNINTERRUPTIBLE) &&
 +		!(prev_state & TASK_NOLOAD) &&
-+		!(prev_state & TASK_FROZEN);
++		!(prev->flags & TASK_FROZEN);
 +
 +	if (prev->sched_contributes_to_load)
 +		rq->nr_uninterruptible++;
@@ -6790,7 +6653,7 @@ index 000000000000..f5e9c01f9382
 +#endif
 +
 +static int
-+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 +{
 +	int retval;
 +	cpumask_var_t cpus_allowed, new_mask;
@@ -6804,12 +6667,9 @@ index 000000000000..f5e9c01f9382
 +	}
 +
 +	cpuset_cpus_allowed(p, cpus_allowed);
-+	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
-+
-+	ctx->new_mask = new_mask;
-+	ctx->flags |= SCA_CHECK;
-+
-+	retval = __set_cpus_allowed_ptr(p, ctx);
++	cpumask_and(new_mask, mask, cpus_allowed);
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
 +	if (retval)
 +		goto out_free_new_mask;
 +
@@ -6821,24 +6681,7 @@ index 000000000000..f5e9c01f9382
 +		 * cpuset's cpus_allowed
 +		 */
 +		cpumask_copy(new_mask, cpus_allowed);
-+
-+		/*
-+		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
-+		 * will restore the previous user_cpus_ptr value.
-+		 *
-+		 * In the unlikely event a previous user_cpus_ptr exists,
-+		 * we need to further restrict the mask to what is allowed
-+		 * by that old user_cpus_ptr.
-+		 */
-+		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
-+			bool empty = !cpumask_and(new_mask, new_mask,
-+						  ctx->user_mask);
-+
-+			if (WARN_ON_ONCE(empty))
-+				cpumask_copy(new_mask, cpus_allowed);
-+		}
-+		__set_cpus_allowed_ptr(p, ctx);
-+		retval = -EINVAL;
++		goto again;
 +	}
 +
 +out_free_new_mask:
@@ -6850,8 +6693,6 @@ index 000000000000..f5e9c01f9382
 +
 +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 +{
-+	struct affinity_context ac;
-+	struct cpumask *user_mask;
 +	struct task_struct *p;
 +	int retval;
 +
@@ -6886,27 +6727,7 @@ index 000000000000..f5e9c01f9382
 +	if (retval)
 +		goto out_put_task;
 +
-+	/*
-+	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
-+	 * alloc_user_cpus_ptr() returns NULL.
-+	 */
-+	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-+	if (user_mask) {
-+		cpumask_copy(user_mask, in_mask);
-+	} else if (IS_ENABLED(CONFIG_SMP)) {
-+		retval = -ENOMEM;
-+		goto out_put_task;
-+	}
-+
-+	ac = (struct affinity_context){
-+		.new_mask  = in_mask,
-+		.user_mask = user_mask,
-+		.flags     = SCA_USER,
-+	};
-+
-+	retval = __sched_setaffinity(p, &ac);
-+	kfree(ac.user_mask);
-+
++	retval = __sched_setaffinity(p, in_mask);
 +out_put_task:
 +	put_task_struct(p);
 +	return retval;
@@ -7662,12 +7483,6 @@ index 000000000000..f5e9c01f9382
 + */
 +void __init init_idle(struct task_struct *idle, int cpu)
 +{
-+#ifdef CONFIG_SMP
-+	struct affinity_context ac = (struct affinity_context) {
-+		.new_mask = cpumask_of(cpu),
-+		.flags = 0,
-+	};
-+#endif
 +	struct rq *rq = cpu_rq(cpu);
 +	unsigned long flags;
 +
@@ -7694,7 +7509,7 @@ index 000000000000..f5e9c01f9382
 +	 *
 +	 * And since this is boot we can forgo the serialisation.
 +	 */
-+	set_cpus_allowed_common(idle, &ac);
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
 +#endif
 +
 +	/* Silence PROVE_RCU */
@@ -8322,8 +8137,6 @@ index 000000000000..f5e9c01f9382
 +
 +		hrtick_rq_init(rq);
 +		atomic_set(&rq->nr_iowait, 0);
-+
-+		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
 +	}
 +#ifdef CONFIG_SMP
 +	/* Set rq->online for cpu 0 */
@@ -8840,10 +8653,10 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..0b563999d4c1
+index 000000000000..c32403ed82b6
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,671 @@
+@@ -0,0 +1,668 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -9090,9 +8903,6 @@ index 000000000000..0b563999d4c1
 +#endif
 +	atomic_t nohz_flags;
 +#endif /* CONFIG_NO_HZ_COMMON */
-+
-+	/* Scratch cpumask to be temporarily used under rq_lock */
-+	cpumask_var_t scratch_mask;
 +};
 +
 +extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
@@ -10064,7 +9874,7 @@ index 3a0e0dc28721..e8a7d84aa5a5 100644
  static inline int
  update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 771f8ddb7053..787a5069d69a 100644
+index a4a20046e586..c363693cd869 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -5,6 +5,10 @@
@@ -10078,7 +9888,7 @@ index 771f8ddb7053..787a5069d69a 100644
  #include <linux/sched/affinity.h>
  #include <linux/sched/autogroup.h>
  #include <linux/sched/cpufreq.h>
-@@ -3261,4 +3265,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
  	cgroup_account_cputime(curr, delta_exec);
  }
@@ -10120,7 +9930,7 @@ index 857f837f52cb..5486c63e4790 100644
  	}
  	return 0;
 diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 38f3698f5e5b..b9d597394316 100644
+index 84a188913cc9..53934e7ef5db 100644
 --- a/kernel/sched/stats.h
 +++ b/kernel/sched/stats.h
 @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
@@ -10195,7 +10005,7 @@ index 8739c2a5a54e..d8dd6c15eb47 100644
 +#endif /* CONFIG_NUMA */
 +#endif
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 137d4abe3eda..6bada3a6d571 100644
+index c6d9dec11b74..2bc42ce8b48e 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
 @@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
@@ -10209,7 +10019,7 @@ index 137d4abe3eda..6bada3a6d571 100644
  #ifdef CONFIG_PERF_EVENTS
  static const int six_hundred_forty_kb = 640 * 1024;
  #endif
-@@ -1934,6 +1938,17 @@ static struct ctl_table kern_table[] = {
+@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
  		.proc_handler	= proc_dointvec,
  	},
  #endif
@@ -10303,10 +10113,10 @@ index cb925e8ef9a8..67d823510f5c 100644
  		return false;
  	}
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index ff0536cea968..ce266990006d 100644
+index a2d301f58ced..2ccdede8585c 100644
 --- a/kernel/trace/trace_selftest.c
 +++ b/kernel/trace/trace_selftest.c
-@@ -1150,10 +1150,15 @@ static int trace_wakeup_test_thread(void *data)
+@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
  {
  	/* Make this a -deadline thread */
  	static const struct sched_attr attr = {
@@ -10322,6 +10132,3 @@ index ff0536cea968..ce266990006d 100644
  	};
  	struct wakeup_test_data *x = data;
---
-2.39.2
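Before building against a given kernel tree, it can be worth a dry run to confirm that whichever version of the prjc patch you end up with still applies. A sketch with placeholder paths (KSRC and the patch file location are assumptions, not paths given by this repository):

    # Dry-run the patch against a kernel checkout without modifying anything.
    cd "$KSRC"
    git apply --check /path/to/prjc.patch && echo "prjc applies cleanly"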