Compare commits


1 Commit

Author         SHA1        Message                   Date
Sravan Balaji  dff1fb46cc  PDS Kernel Configuration  2023-03-22 15:49:07 -04:00
7 changed files with 250 additions and 570 deletions

View File

@@ -1,15 +1,15 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.15.106 Kernel Configuration
+# Linux/x86 5.15.61 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
+CONFIG_CC_VERSION_TEXT="gcc (TkG-mostlyportable) 12.1.1 20220515"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120201
+CONFIG_GCC_VERSION=120101
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24000
+CONFIG_AS_VERSION=20244315
 CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24000
+CONFIG_LD_VERSION=20244315
 CONFIG_LLD_VERSION=0
 CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
@@ -18,7 +18,6 @@ CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
 CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
 CONFIG_CC_HAS_ASM_INLINE=y
 CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
-CONFIG_PAHOLE_VERSION=124
 CONFIG_IRQ_WORK=y
 CONFIG_BUILDTIME_TABLE_SORT=y
 CONFIG_THREAD_INFO_IN_TASK=y
@@ -438,7 +437,7 @@ CONFIG_I8K=m
 CONFIG_MICROCODE=y
 CONFIG_MICROCODE_INTEL=y
 CONFIG_MICROCODE_AMD=y
-# CONFIG_MICROCODE_LATE_LOADING is not set
+# CONFIG_MICROCODE_OLD_INTERFACE is not set
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
 CONFIG_X86_5LEVEL=y
@@ -1143,7 +1142,6 @@ CONFIG_INET_ESP=m
 CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
-CONFIG_INET_TABLE_PERTURB_ORDER=16
 CONFIG_INET_XFRM_TUNNEL=m
 CONFIG_INET_TUNNEL=m
 CONFIG_INET_DIAG=m
@@ -1709,6 +1707,7 @@ CONFIG_DEFAULT_NET_SCH="fq_codel"
 #
 CONFIG_NET_CLS=y
 CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
@@ -9899,7 +9898,6 @@ CONFIG_GCC_PLUGIN_STRUCTLEAK=y
 # Memory initialization
 #
 CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
-CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
 CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
 # CONFIG_INIT_STACK_NONE is not set
 # CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set
@@ -10353,8 +10351,6 @@ CONFIG_SYMBOLIC_ERRNAME=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # end of printk and dmesg options
 
-CONFIG_AS_HAS_NON_CONST_LEB128=y
-
 #
 # Compile-time checks and compiler options
 #
@@ -10364,7 +10360,6 @@ CONFIG_DEBUG_INFO=y
 # CONFIG_DEBUG_INFO_SPLIT is not set
 # CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
 CONFIG_DEBUG_INFO_DWARF4=y
-# CONFIG_DEBUG_INFO_DWARF5 is not set
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_PAHOLE_HAS_SPLIT_BTF=y
 CONFIG_DEBUG_INFO_BTF_MODULES=y
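
Note: config diffs like the one above are easier to audit with the kernel's own scripts/diffconfig helper, which reduces two full .config files to just the changed symbols. A minimal sketch — the file names are placeholders, not paths from this commit:

# Run from inside any kernel source tree; config.old/config.new stand in
# for the two revisions of the config being compared.
./scripts/diffconfig config.old config.new
# Prints one line per differing symbol, e.g.:
#   GCC_VERSION 120201 -> 120101
#  -PAHOLE_VERSION 124
#  +NET_CLS_TCINDEX m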

View File

@@ -1,15 +1,15 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.1.23 Kernel Configuration
+# Linux/x86 6.1.0-arch1 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120201
+CONFIG_GCC_VERSION=120200
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24000
+CONFIG_AS_VERSION=23900
 CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24000
+CONFIG_LD_VERSION=23900
 CONFIG_LLD_VERSION=0
 CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
@@ -206,7 +206,6 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
 CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
 CONFIG_CC_HAS_INT128=y
 CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
 CONFIG_GCC12_NO_ARRAY_BOUNDS=y
 CONFIG_CC_NO_ARRAY_BOUNDS=y
 CONFIG_ARCH_SUPPORTS_INT128=y
@@ -1785,6 +1784,7 @@ CONFIG_DEFAULT_NET_SCH="fq_codel"
 #
 CONFIG_NET_CLS=y
 CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
@@ -4456,6 +4456,7 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
 CONFIG_SERIAL_RP2=m
 CONFIG_SERIAL_RP2_NR_UARTS=32
 CONFIG_SERIAL_FSL_LPUART=m
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
 CONFIG_SERIAL_FSL_LINFLEXUART=m
 CONFIG_SERIAL_MEN_Z135=m
 CONFIG_SERIAL_SPRD=m
@@ -6526,6 +6527,7 @@ CONFIG_DRM=y
 CONFIG_DRM_MIPI_DBI=m
 CONFIG_DRM_MIPI_DSI=y
 # CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_USE_DYNAMIC_DEBUG=y
 CONFIG_DRM_KMS_HELPER=y
 CONFIG_DRM_FBDEV_EMULATION=y
 CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6596,6 +6598,7 @@ CONFIG_DRM_I915_FORCE_PROBE="*"
 CONFIG_DRM_I915_CAPTURE_ERROR=y
 CONFIG_DRM_I915_COMPRESS_ERROR=y
 CONFIG_DRM_I915_USERPTR=y
+CONFIG_DRM_I915_GVT=y
 CONFIG_DRM_I915_GVT_KVMGT=m
 CONFIG_DRM_I915_PXP=y
 CONFIG_DRM_I915_REQUEST_TIMEOUT=20000
@@ -6606,7 +6609,6 @@ CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
 CONFIG_DRM_I915_STOP_TIMEOUT=100
 CONFIG_DRM_I915_TIMESLICE_DURATION=1
-CONFIG_DRM_I915_GVT=y
 CONFIG_DRM_VGEM=m
 CONFIG_DRM_VKMS=m
 CONFIG_DRM_VMWGFX=m
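
Note: to confirm what a booted kernel actually enabled, rather than reading these diffs, the config can be queried at runtime — assuming the kernel was built with CONFIG_IKCONFIG_PROC=y so /proc/config.gz exists:

# Check a couple of the symbols touched above on the running kernel:
zgrep -E 'CONFIG_DRM_I915_GVT|CONFIG_DRM_USE_DYNAMIC_DEBUG' /proc/config.gz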

View File

@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.2.10 Kernel Configuration
+# Linux/x86 6.2.5-arch1 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
 CONFIG_CC_IS_GCC=y
@@ -4494,6 +4494,7 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
 CONFIG_SERIAL_RP2=m
 CONFIG_SERIAL_RP2_NR_UARTS=32
 CONFIG_SERIAL_FSL_LPUART=m
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
 CONFIG_SERIAL_FSL_LINFLEXUART=m
 CONFIG_SERIAL_MEN_Z135=m
 CONFIG_SERIAL_SPRD=m

View File

@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.3.0-rc6 Kernel Configuration
+# Linux/x86 6.3.0-rc1 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
 CONFIG_CC_IS_GCC=y
@@ -993,6 +993,7 @@ CONFIG_EFI_PARTITION=y
 # CONFIG_CMDLINE_PARTITION is not set
 # end of Partition Types
 
+CONFIG_BLOCK_COMPAT=y
 CONFIG_BLK_MQ_PCI=y
 CONFIG_BLK_MQ_VIRTIO=y
 CONFIG_BLK_MQ_RDMA=y
@@ -3332,7 +3333,6 @@ CONFIG_MICROSOFT_MANA=m
 CONFIG_NET_VENDOR_MYRI=y
 CONFIG_MYRI10GE=m
 CONFIG_MYRI10GE_DCA=y
-CONFIG_FEALNX=m
 CONFIG_NET_VENDOR_NI=y
 CONFIG_NI_XGE_MANAGEMENT_ENET=m
 CONFIG_NET_VENDOR_NATSEMI=y
@@ -4499,6 +4499,7 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
 CONFIG_SERIAL_RP2=m
 CONFIG_SERIAL_RP2_NR_UARTS=32
 CONFIG_SERIAL_FSL_LPUART=m
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
 CONFIG_SERIAL_FSL_LINFLEXUART=m
 CONFIG_SERIAL_MEN_Z135=m
 CONFIG_SERIAL_SPRD=m
@@ -8755,6 +8756,7 @@ CONFIG_RTLLIB_CRYPTO_WEP=m
 CONFIG_RTL8192E=m
 CONFIG_RTL8723BS=m
 CONFIG_R8712U=m
+CONFIG_R8188EU=m
 CONFIG_RTS5208=m
 CONFIG_VT6655=m
 CONFIG_VT6656=m

View File

@@ -252,7 +252,7 @@ _set_cpu_scheduler() {
     ["bore"]="BORE (Burst-Oriented Response Enhancer) CPU Scheduler"
   )
 
-  # CPU SCHED selector - _projectc_unoff=1 sets unofficial Project C revision flag for a given version
+  # CPU SCHED selector
   if [ "$_kver" = "504" ]; then
     _avail_cpu_scheds=("pds" "bmq" "muqss" "cacule" "cfs")
   elif [ "$_kver" = "507" ]; then
@@ -285,6 +285,7 @@ _set_cpu_scheduler() {
     _avail_cpu_scheds=("cfs" "pds" "bmq" "tt" "bore")
   elif [ "$_kver" = "602" ]; then
     _avail_cpu_scheds=("cfs" "pds" "bmq" "tt" "bore")
+    _projectc_unoff=1
   else
     _avail_cpu_scheds=("cfs")
   fi
@@ -725,8 +726,6 @@ _tkg_srcprep() {
       rev=2
     elif [ "$_kver" = "601" ]; then
       rev=1
-    elif [ "$_kver" = "602" ]; then
-      rev=2
     else
       rev=0
     fi
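
Note: _set_cpu_scheduler() above only builds the menu of schedulers valid for a given kernel version; the actual choice comes from the _cpusched option in linux-tkg's customization.cfg. A hedged sketch of requesting the PDS scheduler this commit configures:

# customization.cfg excerpt (sketch) - with the "602" branch above, valid
# values would be: cfs, pds, bmq, tt, bore.
_cpusched="pds"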

View File

@@ -1,3 +1,57 @@
From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Wed, 22 Feb 2023 19:24:36 +0100
Subject: [PATCH] prjc
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
.../admin-guide/kernel-parameters.txt | 6 +
Documentation/admin-guide/sysctl/kernel.rst | 10 +
Documentation/scheduler/sched-BMQ.txt | 110 +
fs/proc/base.c | 2 +-
include/asm-generic/resource.h | 2 +-
include/linux/sched.h | 33 +-
include/linux/sched/deadline.h | 20 +
include/linux/sched/prio.h | 26 +
include/linux/sched/rt.h | 2 +
include/linux/sched/topology.h | 3 +-
init/Kconfig | 34 +
init/init_task.c | 18 +
kernel/Kconfig.preempt | 2 +-
kernel/cgroup/cpuset.c | 4 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 4 +-
kernel/locking/rtmutex.c | 16 +-
kernel/sched/Makefile | 5 +
kernel/sched/alt_core.c | 8111 +++++++++++++++++
kernel/sched/alt_debug.c | 31 +
kernel/sched/alt_sched.h | 671 ++
kernel/sched/bmq.h | 110 +
kernel/sched/build_policy.c | 8 +-
kernel/sched/build_utility.c | 2 +
kernel/sched/cpufreq_schedutil.c | 10 +
kernel/sched/cputime.c | 10 +-
kernel/sched/debug.c | 10 +
kernel/sched/idle.c | 2 +
kernel/sched/pds.h | 127 +
kernel/sched/pelt.c | 4 +-
kernel/sched/pelt.h | 8 +-
kernel/sched/sched.h | 9 +
kernel/sched/stats.c | 4 +
kernel/sched/stats.h | 2 +
kernel/sched/topology.c | 17 +
kernel/sysctl.c | 15 +
kernel/time/hrtimer.c | 2 +
kernel/time/posix-cpu-timers.c | 10 +-
kernel/trace/trace_selftest.c | 5 +
39 files changed, 9445 insertions(+), 22 deletions(-)
create mode 100644 Documentation/scheduler/sched-BMQ.txt
create mode 100644 kernel/sched/alt_core.c
create mode 100644 kernel/sched/alt_debug.c
create mode 100644 kernel/sched/alt_sched.h
create mode 100644 kernel/sched/bmq.h
create mode 100644 kernel/sched/pds.h
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3996cf..1b6a407213da 100644 index 6cfa6e3996cf..1b6a407213da 100644
--- a/Documentation/admin-guide/kernel-parameters.txt --- a/Documentation/admin-guide/kernel-parameters.txt
@@ -632,10 +686,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..a122b1f8678e
+index 000000000000..f5e9c01f9382
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8120 @@
+@@ -0,0 +1,8111 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -705,12 +759,7 @@ index 000000000000..a122b1f8678e
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.2-r2"
-+
-+/*
-+ * Compile time debug macro
-+ * #define ALT_SCHED_DEBUG
-+ */
++#define ALT_SCHED_VERSION "v6.2-r0"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p) rt_prio((p)->prio)
@@ -765,9 +814,9 @@ index 000000000000..a122b1f8678e
 +#ifdef CONFIG_SMP
 +static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
 +
-+DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
 +
 +#ifdef CONFIG_SCHED_SMT
 +DEFINE_STATIC_KEY_FALSE(sched_smt_present);
@@ -861,13 +910,13 @@ index 000000000000..a122b1f8678e
 +
 +	if (prio < last_prio) {
 +		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			cpumask_clear_cpu(cpu, sched_idle_mask);
-+			last_prio -= 2;
 +#ifdef CONFIG_SCHED_SMT
 +			if (static_branch_likely(&sched_smt_present))
 +				cpumask_andnot(&sched_sg_idle_mask,
 +					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
 +#endif
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
 +		}
 +		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
 +
@@ -875,14 +924,18 @@ index 000000000000..a122b1f8678e
 +	}
 +	/* last_prio < prio */
 +	if (IDLE_TASK_SCHED_PRIO == prio) {
-+#ifdef CONFIG_SCHED_SMT
-+		if (static_branch_likely(&sched_smt_present) &&
-+		    cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
-+			cpumask_or(&sched_sg_idle_mask,
-+				   &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+#endif
 +		cpumask_set_cpu(cpu, sched_idle_mask);
 +		prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present)) {
++			cpumask_t tmp;
++
++			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
++			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++				cpumask_or(&sched_sg_idle_mask,
++					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++		}
++#endif
 +	}
 +	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
 +}
@@ -1423,13 +1476,11 @@ index 000000000000..a122b1f8678e
 +
 +static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
 +{
-+#ifdef ALT_SCHED_DEBUG
 +	lockdep_assert_held(&rq->lock);
 +
 +	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
 +	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
-+#endif
 +
 +	__SCHED_DEQUEUE_TASK(p, rq, flags);
 +	--rq->nr_running;
@@ -1443,13 +1494,11 @@ index 000000000000..a122b1f8678e
 +
 +static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
 +{
-+#ifdef ALT_SCHED_DEBUG
 +	lockdep_assert_held(&rq->lock);
 +
 +	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
 +	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
-+#endif
 +
 +	__SCHED_ENQUEUE_TASK(p, rq, flags);
 +	update_sched_preempt_mask(rq);
@@ -1464,12 +1513,10 @@ index 000000000000..a122b1f8678e
 +
 +static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
 +{
-+#ifdef ALT_SCHED_DEBUG
 +	lockdep_assert_held(&rq->lock);
 +	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
 +	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
 +		  cpu_of(rq), task_cpu(p));
-+#endif
 +
 +	list_del(&p->sq_node);
 +	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
@@ -1988,8 +2035,8 @@ index 000000000000..a122b1f8678e
 + */
 +static inline void deactivate_task(struct task_struct *p, struct rq *rq)
 +{
-+	p->on_rq = 0;
 +	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
 +	cpufreq_update_util(rq, 0);
 +}
 +
@@ -2206,7 +2253,7 @@ index 000000000000..a122b1f8678e
 +{
 +	lockdep_assert_held(&rq->lock);
 +
-+	p->on_rq = TASK_ON_RQ_MIGRATING;
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
 +	dequeue_task(p, rq, 0);
 +	update_sched_preempt_mask(rq);
 +	set_task_cpu(p, new_cpu);
@@ -4802,9 +4849,10 @@ index 000000000000..a122b1f8678e
 +/*
 + * sg_balance - slibing group balance check for run queue @rq
 + */
-+static inline void sg_balance(struct rq *rq, int cpu)
++static inline void sg_balance(struct rq *rq)
 +{
 +	cpumask_t chk;
++	int cpu = cpu_of(rq);
 +
 +	/* exit when cpu is offline */
 +	if (unlikely(!rq->online))
@@ -5118,6 +5166,11 @@ index 000000000000..a122b1f8678e
 +	schedstat_inc(this_rq()->sched_count);
 +}
 +
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
 +#ifdef ALT_SCHED_DEBUG
 +void alt_sched_debug(void)
 +{
@@ -5154,12 +5207,10 @@ index 000000000000..a122b1f8678e
 +	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
 +		skip = sched_rq_next_task(p, rq);
 +		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			p->on_rq = TASK_ON_RQ_MIGRATING;
 +			__SCHED_DEQUEUE_TASK(p, rq, 0);
 +			set_task_cpu(p, dest_cpu);
 +			sched_task_sanity_check(p, dest_rq);
 +			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
-+			p->on_rq = TASK_ON_RQ_QUEUED;
 +			nr_migrated++;
 +		}
 +		nr_tries--;
@@ -5456,21 +5507,19 @@ index 000000000000..a122b1f8678e
 +		 */
 +		++*switch_count;
 +
-+		psi_sched_switch(prev, next, deactivated);
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 +
 +		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
 +
 +		/* Also unlocks the rq: */
 +		rq = context_switch(rq, prev, next);
-+
-+		cpu = cpu_of(rq);
 +	} else {
 +		__balance_callbacks(rq);
 +		raw_spin_unlock_irq(&rq->lock);
 +	}
 +
 +#ifdef CONFIG_SCHED_SMT
-+	sg_balance(rq, cpu);
++	sg_balance(rq);
 +#endif
 +}
 +
@@ -5795,16 +5844,12 @@ index 000000000000..a122b1f8678e
 +
 +static inline void check_task_changed(struct task_struct *p, struct rq *rq)
 +{
-+	/* Trigger resched if task sched_prio has been modified. */
-+	if (task_on_rq_queued(p)) {
-+		int idx;
++	int idx;
 +
-+		update_rq_clock(rq);
-+		idx = task_sched_prio_idx(p, rq);
-+		if (idx != p->sq_idx) {
-+			requeue_task(p, rq, idx);
-+			check_preempt_curr(rq);
-+		}
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
++		requeue_task(p, rq, idx);
++		check_preempt_curr(rq);
 +	}
 +}
 +
@@ -5857,6 +5902,7 @@ index 000000000000..a122b1f8678e
 +		return;
 +
 +	rq = __task_access_lock(p, &lock);
++	update_rq_clock(rq);
 +	/*
 +	 * Set under pi_lock && rq->lock, such that the value can be used under
 +	 * either lock.
@@ -8229,8 +8275,7 @@ index 000000000000..a122b1f8678e
 +	int i;
 +	struct rq *rq;
 +
-+	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
-+	       " by Alfred Chen.\n");
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
 +
 +	wait_bit_init();
 +
@@ -8795,10 +8840,10 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..e9b93e63406a
+index 000000000000..0b563999d4c1
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,672 @@
+@@ -0,0 +1,671 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -8920,8 +8965,6 @@ index 000000000000..e9b93e63406a
 +};
 +
 +struct rq;
-+struct cpuidle_state;
-+
 +struct balance_callback {
 +	struct balance_callback *next;
 +	void (*func)(struct rq *rq);
@@ -9093,7 +9136,8 @@ index 000000000000..e9b93e63406a
 +	NR_CPU_AFFINITY_LEVELS
 +};
 +
-+DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
 +
 +static inline int
 +__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
@@ -9473,11 +9517,11 @@ index 000000000000..e9b93e63406a
 +#endif /* ALT_SCHED_H */
 diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
-index 000000000000..f29b8f3aa786
+index 000000000000..66b77291b9d0
 --- /dev/null
 +++ b/kernel/sched/bmq.h
 @@ -0,0 +1,110 @@
-+#define ALT_SCHED_NAME "BMQ"
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
 +
 +/*
 + * BMQ only routines
@@ -9816,15 +9860,14 @@ index f26ab2675f7d..480d4ad16d45 100644
 +#endif
 diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
 new file mode 100644
-index 000000000000..27e09b4feb8c
+index 000000000000..56a649d02e49
 --- /dev/null
 +++ b/kernel/sched/pds.h
-@@ -0,0 +1,133 @@
+@@ -0,0 +1,127 @@
-+#define ALT_SCHED_NAME "PDS"
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
 +
 +static int sched_timeslice_shift = 22;
 +
-+/* PDS assume NORMAL_PRIO_NUM is power of 2 */
 +#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
 +
 +/*
@@ -9841,43 +9884,38 @@ index 000000000000..27e09b4feb8c
 +{
 +	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
 +
-+#ifdef ALT_SCHED_DEBUG
 +	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
 +		      "pds: task_sched_prio_normal() delta %lld\n", delta))
 +		return NORMAL_PRIO_NUM - 1;
-+#endif
 +
-+	return max(0LL, delta);
++	return (delta < 0) ? 0 : delta;
 +}
 +
 +static inline int task_sched_prio(const struct task_struct *p)
 +{
-+	return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
++	return (p->prio < MAX_RT_PRIO) ? p->prio :
 +		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
 +}
 +
 +static inline int
 +task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
 +{
-+	u64 idx;
-+
-+	if (p->prio < MAX_RT_PRIO)
-+		return p->prio;
-+
-+	idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
-+	return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
 +}
 +
 +static inline int sched_prio2idx(int prio, struct rq *rq)
 +{
 +	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
-+		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
++		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++						  rq->time_edge);
 +}
 +
 +static inline int sched_idx2prio(int idx, struct rq *rq)
 +{
 +	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD(idx - rq->time_edge);
++		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++				NORMAL_PRIO_MOD(rq->time_edge));
 +}
 +
 +static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
@@ -9902,7 +9940,6 @@ index 000000000000..27e09b4feb8c
 +	if (now == old)
 +		return;
 +
-+	rq->time_edge = now;
 +	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
 +	INIT_LIST_HEAD(&head);
 +
@@ -9912,9 +9949,10 @@ index 000000000000..27e09b4feb8c
 +
 +	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
 +		rq->queue.bitmap[2] >> delta;
++	rq->time_edge = now;
 +	if (!list_empty(&head)) {
-+		struct task_struct *p;
 +		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++		struct task_struct *p;
 +
 +		list_for_each_entry(p, &head, sq_node)
 +			p->sq_idx = idx;
@@ -10284,363 +10322,6 @@ index ff0536cea968..ce266990006d 100644
 	};
 	struct wakeup_test_data *x = data;
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-index a122b1f8678e..78748ebb1d71 100644
---- a/kernel/sched/alt_core.c
-+++ b/kernel/sched/alt_core.c
-@@ -254,8 +254,7 @@ static inline void update_sched_preempt_mask(struct rq *rq)
-  */
- static inline struct task_struct *sched_rq_first_task(struct rq *rq)
- {
--	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
--	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+	const struct list_head *head = &rq->queue.heads[sched_prio2idx(rq->prio, rq)];
- 	return list_first_entry(head, struct task_struct, sq_node);
- }
-@@ -767,13 +766,15 @@ unsigned long get_wchan(struct task_struct *p)
-  * Add/Remove/Requeue task to/from the runqueue routines
-  * Context: rq->lock
-  */
--#define __SCHED_DEQUEUE_TASK(p, rq, flags) \
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
- 	sched_info_dequeue(rq, p); \
- 	psi_dequeue(p, flags & DEQUEUE_SLEEP); \
- 	\
- 	list_del(&p->sq_node); \
--	if (list_empty(&rq->queue.heads[p->sq_idx])) \
--		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+	if (list_empty(&rq->queue.heads[p->sq_idx])) { \
-+		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
-+		func; \
-+	}
- #define __SCHED_ENQUEUE_TASK(p, rq, flags) \
- 	sched_info_enqueue(rq, p); \
-@@ -788,12 +789,12 @@ static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
- #ifdef ALT_SCHED_DEBUG
- 	lockdep_assert_held(&rq->lock);
--	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
- 	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
- 		  task_cpu(p), cpu_of(rq));
- #endif
--	__SCHED_DEQUEUE_TASK(p, rq, flags);
-+	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
- 	--rq->nr_running;
- #ifdef CONFIG_SMP
- 	if (1 == rq->nr_running)
-@@ -808,7 +809,7 @@ static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
- #ifdef ALT_SCHED_DEBUG
- 	lockdep_assert_held(&rq->lock);
--	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	/*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
- 	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
- 		  task_cpu(p), cpu_of(rq));
- #endif
-@@ -828,7 +829,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
- {
- #ifdef ALT_SCHED_DEBUG
- 	lockdep_assert_held(&rq->lock);
--	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
- 	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
- 		  cpu_of(rq), task_cpu(p));
- #endif
-@@ -837,8 +838,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
- 	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
- 	if (idx != p->sq_idx) {
- 		if (list_empty(&rq->queue.heads[p->sq_idx]))
--			clear_bit(sched_idx2prio(p->sq_idx, rq),
--				  rq->queue.bitmap);
-+			clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
- 		p->sq_idx = idx;
- 		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
- 		update_sched_preempt_mask(rq);
-@@ -1350,8 +1350,8 @@ static void activate_task(struct task_struct *p, struct rq *rq)
-  */
- static inline void deactivate_task(struct task_struct *p, struct rq *rq)
- {
--	p->on_rq = 0;
- 	dequeue_task(p, rq, DEQUEUE_SLEEP);
-+	p->on_rq = 0;
- 	cpufreq_update_util(rq, 0);
- }
-@@ -1568,9 +1568,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
- {
- 	lockdep_assert_held(&rq->lock);
--	p->on_rq = TASK_ON_RQ_MIGRATING;
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
- 	dequeue_task(p, rq, 0);
--	update_sched_preempt_mask(rq);
- 	set_task_cpu(p, new_cpu);
- 	raw_spin_unlock(&rq->lock);
-@@ -4516,12 +4515,10 @@ migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
- 	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
- 		skip = sched_rq_next_task(p, rq);
- 		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
--			p->on_rq = TASK_ON_RQ_MIGRATING;
--			__SCHED_DEQUEUE_TASK(p, rq, 0);
-+			__SCHED_DEQUEUE_TASK(p, rq, 0, );
- 			set_task_cpu(p, dest_cpu);
- 			sched_task_sanity_check(p, dest_rq);
- 			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
--			p->on_rq = TASK_ON_RQ_QUEUED;
- 			nr_migrated++;
- 		}
- 		nr_tries--;
-@@ -4566,6 +4563,7 @@ static inline int take_other_rq_tasks(struct rq *rq, int cpu)
- 			if (rq->nr_running > 1)
- 				cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+			update_sched_preempt_mask(rq);
- 			cpufreq_update_util(rq, 0);
- 			return 1;
-@@ -4637,8 +4635,7 @@ choose_next_task(struct rq *rq, int cpu)
- #ifdef CONFIG_HIGH_RES_TIMERS
- 		hrtick_start(rq, next->time_slice);
- #endif
--		/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
--		 * next);*/
-+		/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
- 		return next;
- 	}
-@@ -4706,7 +4703,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- 	unsigned long prev_state;
- 	struct rq *rq;
- 	int cpu;
--	int deactivated = 0;
- 	cpu = smp_processor_id();
- 	rq = cpu_rq(cpu);
-@@ -4771,7 +4767,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- 			 */
- 			sched_task_deactivate(prev, rq);
- 			deactivate_task(prev, rq);
--			deactivated = 1;
- 			if (prev->in_iowait) {
- 				atomic_inc(&rq->nr_iowait);
-@@ -4791,11 +4786,10 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- #endif
- 	if (likely(prev != next)) {
--		if (deactivated)
--			update_sched_preempt_mask(rq);
- 		next->last_ran = rq->clock_task;
- 		rq->last_ts_switch = rq->clock;
-+		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
- 		rq->nr_switches++;
- 		/*
- 		 * RCU users of rcu_dereference(rq->curr) may not see
-@@ -4818,7 +4812,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- 		 */
- 		++*switch_count;
--		psi_sched_switch(prev, next, deactivated);
-+		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
- 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-index e9b93e63406a..60bbb4583d16 100644
---- a/kernel/sched/alt_sched.h
-+++ b/kernel/sched/alt_sched.h
-@@ -22,8 +22,8 @@
- #endif
- #ifdef CONFIG_SCHED_PDS
--/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
--#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
-+/* bits: RT(0-24), reserved(25-31), SCHED_NORMAL_PRIO_NUM(32), cpu idle task(1) */
-+#define SCHED_BITS (64 + 1)
- #endif /* CONFIG_SCHED_PDS */
- #define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
-@@ -142,7 +142,7 @@ struct rq {
- #ifdef CONFIG_SCHED_PDS
- 	u64 time_edge;
- #endif
--	unsigned long prio;
-+	unsigned long	prio;
- 	/* switch count */
- 	u64 nr_switches;
-diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
-index 27e09b4feb8c..5a94a98e19af 100644
---- a/kernel/sched/pds.h
-+++ b/kernel/sched/pds.h
-@@ -1,9 +1,15 @@
- #define ALT_SCHED_NAME "PDS"
--static int sched_timeslice_shift = 22;
-+#define MIN_SCHED_NORMAL_PRIO (32)
-+#define SCHED_NORMAL_PRIO_NUM (32)
-+#define SCHED_EDGE_DELTA (SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
- /* PDS assume NORMAL_PRIO_NUM is power of 2 */
- #define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
-+#define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))
-+
-+/* 4ms -> shift 22, 2 time slice slots -> shift 23 */
-+static int sched_timeslice_shift = 23;
- /*
-  * Common interfaces
-@@ -11,18 +17,18 @@ static int sched_timeslice_shift = 22;
- static inline void sched_timeslice_imp(const int timeslice_ms)
- {
- 	if (2 == timeslice_ms)
--		sched_timeslice_shift = 21;
-+		sched_timeslice_shift = 22;
- }
- static inline int
- task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
- {
--	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
-+	s64 delta = p->deadline - rq->time_edge + SCHED_EDGE_DELTA;
- #ifdef ALT_SCHED_DEBUG
- 	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
- 		      "pds: task_sched_prio_normal() delta %lld\n", delta))
--		return NORMAL_PRIO_NUM - 1;
-+		return SCHED_NORMAL_PRIO_NUM - 1;
- #endif
- 	return max(0LL, delta);
-@@ -30,8 +36,8 @@ task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
- static inline int task_sched_prio(const struct task_struct *p)
- {
--	return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
--		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
-+	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
-+		MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
- }
- static inline int
-@@ -39,30 +45,35 @@ task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
- {
- 	u64 idx;
--	if (p->prio < MAX_RT_PRIO)
--		return p->prio;
-+	if (p->prio < MIN_NORMAL_PRIO)
-+		return p->prio >> 2;
--	idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
--	return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
-+	idx = max(p->deadline + SCHED_EDGE_DELTA, rq->time_edge);
-+	/*printk(KERN_INFO "sched: task_sched_prio_idx edge:%llu, deadline=%llu idx=%llu\n", rq->time_edge, p->deadline, idx);*/
-+	return MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx);
- }
--static inline int sched_prio2idx(int prio, struct rq *rq)
-+static inline int sched_prio2idx(int sched_prio, struct rq *rq)
- {
--	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
--		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
-+	return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
-+		sched_prio :
-+		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
- }
--static inline int sched_idx2prio(int idx, struct rq *rq)
-+static inline int sched_idx2prio(int sched_idx, struct rq *rq)
- {
--	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
--		NORMAL_PRIO_MOD(idx - rq->time_edge);
-+	int ret;
-+	ret = (sched_idx < MIN_SCHED_NORMAL_PRIO) ? sched_idx :
-+		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
-+	/*printk(KERN_INFO "sched: sched_idx2prio edge:%llu, %d -> %d\n", rq->time_edge, sched_idx, ret);*/
-+
-+	return ret;
- }
- static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
- {
--	if (p->prio >= MAX_RT_PRIO)
--		p->deadline = (rq->clock >> sched_timeslice_shift) +
--			p->static_prio - (MAX_PRIO - NICE_WIDTH);
-+	if (p->prio >= MIN_NORMAL_PRIO)
-+		p->deadline = rq->time_edge + (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
- }
- int task_running_nice(struct task_struct *p)
-@@ -70,36 +81,48 @@ int task_running_nice(struct task_struct *p)
- 	return (p->prio > DEFAULT_PRIO);
- }
-+const u64 RT_MASK = 0xffffffffULL;
-+
- static inline void update_rq_time_edge(struct rq *rq)
- {
- 	struct list_head head;
- 	u64 old = rq->time_edge;
- 	u64 now = rq->clock >> sched_timeslice_shift;
- 	u64 prio, delta;
-+	DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
- 	if (now == old)
- 		return;
- 	rq->time_edge = now;
--	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
-+	delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
- 	INIT_LIST_HEAD(&head);
--	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
--		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
--				      NORMAL_PRIO_MOD(prio + old), &head);
-+	/*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx %llu\n", rq->queue.bitmap[0], delta);*/
-+	prio = MIN_SCHED_NORMAL_PRIO;
-+	for_each_set_bit_from(prio, &rq->queue.bitmap[0], MIN_SCHED_NORMAL_PRIO + delta)
-+		list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
-+				      SCHED_NORMAL_PRIO_MOD(prio + old), &head);
--	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
--		rq->queue.bitmap[2] >> delta;
-+	bitmap_shift_right(&normal[0], &rq->queue.bitmap[0], delta, SCHED_QUEUE_BITS);
- 	if (!list_empty(&head)) {
- 		struct task_struct *p;
--		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
-+		u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
- 		list_for_each_entry(p, &head, sq_node)
- 			p->sq_idx = idx;
- 		list_splice(&head, rq->queue.heads + idx);
--		rq->queue.bitmap[2] |= 1UL;
-+		set_bit(MIN_SCHED_NORMAL_PRIO, &normal[0]);
- 	}
-+	bitmap_replace(&rq->queue.bitmap[0], &normal[0], &rq->queue.bitmap[0],
-+		       (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
-+	/*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx 0x%016lx", rq->queue.bitmap[0], normal);*/
-+	if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
-+		return;
-+
-+	rq->prio = (rq->prio < MIN_SCHED_NORMAL_PRIO + delta) ?
-+		MIN_SCHED_NORMAL_PRIO : rq->prio - delta;
- }
- static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-@@ -112,7 +135,7 @@ static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
- static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
- {
--	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
-+	u64 max_dl = rq->time_edge + NICE_WIDTH / 2 - 1;
- 	if (unlikely(p->deadline > max_dl))
- 		p->deadline = max_dl;
- }
+-- 
+2.39.2

View File

@@ -182,11 +182,11 @@ index 4700d24e5d55..8f7a3d7fd9c1 100644
  /*
   * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
   * The device will throw a Link Down error on AER-capable systems and
-@@ -5102,6 +5102,7 @@
+@@ -4513,6 +4613,7 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ 	/* Zhaoxin Root/Downstream Ports */
  	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
- 	/* Wangxun nics */
- 	{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
 +	{ PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
  	{ 0 }
  };
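
Note: the { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides } entry is the classic ACS-override quirk: it matches every device, but only takes effect when the pcie_acs_override= parameter — added by this out-of-tree patch, not present in mainline — is passed on the kernel command line. A typical usage sketch for VFIO/passthrough setups:

# /etc/default/grub (sketch): split devices into separate IOMMU groups.
GRUB_CMDLINE_LINUX_DEFAULT="... pcie_acs_override=downstream,multifunction"
# Then regenerate the config; the path shown is the common Arch/GRUB layout:
grub-mkconfig -o /boot/grub/grub.cfg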