Compare commits

4 Commits
v6.2.9 ... v6.3

Author          SHA1        Message                                                             Date
Sravan Balaji   8d8d1f8f18  PDS Kernel Configuration                                            2023-04-14 06:04:13 -04:00
Tk-Glitch       24f561c816  linux 6.2.y: Update Project C to r2 with pds priority squeeze 0.5   2023-04-12 17:37:56 +02:00
                            (https://gitlab.com/alfredchen/projectc/-/tree/master/6.2)
Tk-Glitch       f033dc9a50  Defconfig refresh                                                   2023-04-12 17:35:37 +02:00
icxes           e64616ef3c  linux 6.3 RC: fix ACS override patch causing an error (#725)        2023-04-05 16:14:53 +02:00
8 changed files with 585 additions and 265 deletions

View File

@@ -3,7 +3,7 @@
# Linux distribution you are using, options are "Arch", "Ubuntu", "Debian", "Fedora", "Suse", "Gentoo", "Generic".
# It is automatically set to "Arch" when using PKGBUILD.
# If left empty, the script will prompt
_distro=""
_distro="Arch"
# Kernel Version - Options are "5.4", and from "5.7" to "5.19"
# you can also set a specific kernel version, e.g. "6.0-rc4" or "5.10.51",
@@ -46,7 +46,7 @@ CUSTOM_GCC_PATH=""
CUSTOM_LLVM_PATH=""
# Set to true to bypass makepkg.conf and use all available threads for compilation. False will respect your makepkg.conf options.
_force_all_threads="true"
_force_all_threads="false"
# Set to true to prevent ccache from being used and set CONFIG_GCC_PLUGINS=y (which needs to be disabled for ccache to work properly)
_noccache="false"
@@ -60,10 +60,10 @@ _modprobeddb="false"
_modprobeddb_db_path=~/.config/modprobed.db
# Set to "1" to call make menuconfig, "2" to call make nconfig, "3" to call make xconfig, before building the kernel. Set to false to disable and skip the prompt.
_menunconfig=""
_menunconfig="false"
# Set to true to generate a kernel config fragment from your changes in menuconfig/nconfig. Set to false to disable and skip the prompt.
_diffconfig=""
_diffconfig="false"
# Set to the file name where the generated config fragment should be written to. Only used if _diffconfig is active.
_diffconfig_name=""
@@ -97,11 +97,11 @@ _STRIP="true"
# LEAVE AN EMPTY VALUE TO BE PROMPTED ABOUT FOLLOWING OPTIONS AT BUILD TIME
# CPU scheduler - Options are "upds" (TkG's Undead PDS), "pds", "bmq", "muqss", "cacule", "tt", "bore" or "cfs" (kernel's default)
_cpusched=""
_cpusched="pds"
# Compiler to use - Options are "gcc" or "llvm".
# For advanced users.
_compiler=""
_compiler="gcc"
# Force the use of the LLVM Integrated Assembler whether using LLVM, LTO or not.
# Set to "1" to enable.
@@ -131,7 +131,7 @@ _preempt_rt_force=""
# For BMQ: 0: No yield.
# 1: Deboost and requeue task. (Default)
# 2: Set rq skip task.
_sched_yield_type=""
_sched_yield_type="0"
# Round Robin interval is the longest duration two tasks with the same nice level will be delayed for. When CPU time is requested by a task, it receives a time slice equal
# to the rr_interval in addition to a virtual deadline. When using yield_type 2, a low value can help offset the disadvantages of rescheduling a process that has yielded.
@@ -139,7 +139,7 @@ _sched_yield_type=""
# PDS default: 4ms"
# BMQ default: 2ms"
# Set to "1" for 2ms, "2" for 4ms, "3" for 6ms, "4" for 8ms, or "default" to keep the chosen scheduler defaults.
_rr_interval=""
_rr_interval="2"
# Set to "true" to disable FUNCTION_TRACER/GRAPH_TRACER, lowering overhead but limiting debugging and analyzing of kernel functions - Kernel default is "false"
_ftracedisable="false"
@@ -154,10 +154,10 @@ _misc_adds="true"
# Full tickless can give higher performance when you isolate CPUs for specific tasks,
# but it only takes effect with the nohz_full kernel parameter; otherwise it behaves like tickless idle.
# Plain tickless idle performs better on most platforms.
_tickless=""
_tickless="2"
# Set to "true" to use ACS override patch - https://wiki.archlinux.org/index.php/PCI_passthrough_via_OVMF#Bypassing_the_IOMMU_groups_.28ACS_override_patch.29 - Kernel default is "false"
_acs_override=""
_acs_override="false"
# Set to "true" to add Bcache filesystem support. You'll have to install bcachefs-tools-git from AUR for utilities - https://bcachefs.org/ - If in doubt, set to "false"
# This can be buggy and isn't recommended on a production machine; note that enabling this option prevents enabling MGLRU.
@@ -168,13 +168,13 @@ _bcachefs="false"
_winesync="false"
# Set to "true" to enable Binder and Ashmem, the kernel modules required to use the android emulator Anbox. ! This doesn't apply to 5.4.y !
_anbox=""
_anbox="false"
# A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience (ZENIFY) - Default is "true"
_zenify="true"
# compiler optimization level - 1. Optimize for performance (-O2); 2. Optimize harder (-O3); 3. Optimize for size (-Os) - Kernel default is "1"
_compileroptlevel="1"
_compileroptlevel="2"
# CPU compiler optimizations - Defaults to prompt at kernel config if left empty
# AMD CPUs : "k8" "k8sse3" "k10" "barcelona" "bobcat" "jaguar" "bulldozer" "piledriver" "steamroller" "excavator" "zen" "zen2" "zen3" "zen4" (zen3 opt support depends on GCC11) (zen4 opt support depends on GCC13)
@@ -188,7 +188,7 @@ _compileroptlevel="1"
# - "generic_v2" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v2
# - "generic_v3" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v3
# - "generic_v4" (depends on GCC11 - to share the package between machines with different CPU µarch supporting at least x86-64-v4
_processor_opt=""
_processor_opt="skylake"
# CacULE only - Enable Response Driven Balancer, an experimental load balancer for CacULE
_cacule_rdb="false"
@@ -201,13 +201,13 @@ _cacule_rdb_interval="19"
_tt_high_hz="false"
# MuQSS and PDS only - SMT (Hyperthreading) aware nice priority and policy support (SMT_NICE) - Kernel default is "true" - You can disable this on non-SMT/HT CPUs for lower overhead
_smt_nice=""
_smt_nice="true"
# Trust the CPU manufacturer to initialize Linux's CRNG (RANDOM_TRUST_CPU) - Kernel default is "false"
_random_trust_cpu="true"
# Timer frequency - "100" "250" "300" "500" "750" "1000" ("2000" is available for cacule cpusched only) - More options available in kernel config prompt when left empty depending on selected cpusched with the default option pointed with a ">" (2000 for cacule, 100 for muqss and 1000 for other cpu schedulers)
_timer_freq=""
_timer_freq="500"
# Default CPU governor - "performance", "ondemand", "schedutil" or leave empty for default (schedutil)
_default_cpu_gov="ondemand"

View File

@@ -1,15 +1,15 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.15.61 Kernel Configuration
+# Linux/x86 5.15.106 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (TkG-mostlyportable) 12.1.1 20220515"
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120101
+CONFIG_GCC_VERSION=120201
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=20244315
+CONFIG_AS_VERSION=24000
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=20244315
+CONFIG_LD_VERSION=24000
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
@@ -18,6 +18,7 @@ CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_PAHOLE_VERSION=124
CONFIG_IRQ_WORK=y
CONFIG_BUILDTIME_TABLE_SORT=y
CONFIG_THREAD_INFO_IN_TASK=y
@@ -437,7 +438,7 @@ CONFIG_I8K=m
CONFIG_MICROCODE=y
CONFIG_MICROCODE_INTEL=y
CONFIG_MICROCODE_AMD=y
-# CONFIG_MICROCODE_OLD_INTERFACE is not set
+# CONFIG_MICROCODE_LATE_LOADING is not set
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_X86_5LEVEL=y
@@ -1142,6 +1143,7 @@ CONFIG_INET_ESP=m
CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
CONFIG_INET_XFRM_TUNNEL=m
CONFIG_INET_TUNNEL=m
CONFIG_INET_DIAG=m
@@ -1707,7 +1709,6 @@ CONFIG_DEFAULT_NET_SCH="fq_codel"
#
CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
@@ -9898,6 +9899,7 @@ CONFIG_GCC_PLUGIN_STRUCTLEAK=y
# Memory initialization
#
CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
# CONFIG_INIT_STACK_NONE is not set
# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set
@@ -10351,6 +10353,8 @@ CONFIG_SYMBOLIC_ERRNAME=y
CONFIG_DEBUG_BUGVERBOSE=y
# end of printk and dmesg options
+CONFIG_AS_HAS_NON_CONST_LEB128=y
#
# Compile-time checks and compiler options
#
@@ -10360,6 +10364,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_INFO_SPLIT is not set
# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
CONFIG_DEBUG_INFO_DWARF4=y
# CONFIG_DEBUG_INFO_DWARF5 is not set
CONFIG_DEBUG_INFO_BTF=y
CONFIG_PAHOLE_HAS_SPLIT_BTF=y
CONFIG_DEBUG_INFO_BTF_MODULES=y

View File

@@ -1,15 +1,15 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.1.0-arch1 Kernel Configuration
+# Linux/x86 6.1.23 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120200
+CONFIG_GCC_VERSION=120201
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=23900
+CONFIG_AS_VERSION=24000
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=23900
+CONFIG_LD_VERSION=24000
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
@@ -206,6 +206,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC11_NO_ARRAY_BOUNDS=y
CONFIG_GCC12_NO_ARRAY_BOUNDS=y
CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_ARCH_SUPPORTS_INT128=y
@@ -1784,7 +1785,6 @@ CONFIG_DEFAULT_NET_SCH="fq_codel"
#
CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
@@ -4456,7 +4456,6 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
CONFIG_SERIAL_RP2_NR_UARTS=32
CONFIG_SERIAL_FSL_LPUART=m
-CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
CONFIG_SERIAL_FSL_LINFLEXUART=m
CONFIG_SERIAL_MEN_Z135=m
CONFIG_SERIAL_SPRD=m
@@ -6527,7 +6526,6 @@ CONFIG_DRM=y
CONFIG_DRM_MIPI_DBI=m
CONFIG_DRM_MIPI_DSI=y
# CONFIG_DRM_DEBUG_MM is not set
-CONFIG_DRM_USE_DYNAMIC_DEBUG=y
CONFIG_DRM_KMS_HELPER=y
CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_FBDEV_OVERALLOC=100
@@ -6598,7 +6596,6 @@ CONFIG_DRM_I915_FORCE_PROBE="*"
CONFIG_DRM_I915_CAPTURE_ERROR=y
CONFIG_DRM_I915_COMPRESS_ERROR=y
CONFIG_DRM_I915_USERPTR=y
-CONFIG_DRM_I915_GVT=y
CONFIG_DRM_I915_GVT_KVMGT=m
CONFIG_DRM_I915_PXP=y
CONFIG_DRM_I915_REQUEST_TIMEOUT=20000
@@ -6609,6 +6606,7 @@ CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
CONFIG_DRM_I915_STOP_TIMEOUT=100
CONFIG_DRM_I915_TIMESLICE_DURATION=1
+CONFIG_DRM_I915_GVT=y
CONFIG_DRM_VGEM=m
CONFIG_DRM_VKMS=m
CONFIG_DRM_VMWGFX=m

View File

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.2.5-arch1 Kernel Configuration
+# Linux/x86 6.2.10 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
CONFIG_CC_IS_GCC=y
@@ -4494,7 +4494,6 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
CONFIG_SERIAL_RP2_NR_UARTS=32
CONFIG_SERIAL_FSL_LPUART=m
-CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
CONFIG_SERIAL_FSL_LINFLEXUART=m
CONFIG_SERIAL_MEN_Z135=m
CONFIG_SERIAL_SPRD=m

View File

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.3.0-rc1 Kernel Configuration
+# Linux/x86 6.3.0-rc6 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230201"
CONFIG_CC_IS_GCC=y
@@ -993,7 +993,6 @@ CONFIG_EFI_PARTITION=y
# CONFIG_CMDLINE_PARTITION is not set
# end of Partition Types
CONFIG_BLOCK_COMPAT=y
CONFIG_BLK_MQ_PCI=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_BLK_MQ_RDMA=y
@@ -3333,6 +3332,7 @@ CONFIG_MICROSOFT_MANA=m
CONFIG_NET_VENDOR_MYRI=y
CONFIG_MYRI10GE=m
CONFIG_MYRI10GE_DCA=y
CONFIG_FEALNX=m
CONFIG_NET_VENDOR_NI=y
CONFIG_NI_XGE_MANAGEMENT_ENET=m
CONFIG_NET_VENDOR_NATSEMI=y
@@ -4499,7 +4499,6 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
CONFIG_SERIAL_RP2_NR_UARTS=32
CONFIG_SERIAL_FSL_LPUART=m
-CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
CONFIG_SERIAL_FSL_LINFLEXUART=m
CONFIG_SERIAL_MEN_Z135=m
CONFIG_SERIAL_SPRD=m
@@ -8756,7 +8755,6 @@ CONFIG_RTLLIB_CRYPTO_WEP=m
CONFIG_RTL8192E=m
CONFIG_RTL8723BS=m
CONFIG_R8712U=m
-CONFIG_R8188EU=m
CONFIG_RTS5208=m
CONFIG_VT6655=m
CONFIG_VT6656=m

View File

@@ -252,7 +252,7 @@ _set_cpu_scheduler() {
["bore"]="BORE (Burst-Oriented Response Enhancer) CPU Scheduler"
)
-# CPU SCHED selector
+# CPU SCHED selector - _projectc_unoff=1 sets unofficial Project C revision flag for a given version
if [ "$_kver" = "504" ]; then
_avail_cpu_scheds=("pds" "bmq" "muqss" "cacule" "cfs")
elif [ "$_kver" = "507" ]; then
@@ -285,7 +285,6 @@ _set_cpu_scheduler() {
_avail_cpu_scheds=("cfs" "pds" "bmq" "tt" "bore")
elif [ "$_kver" = "602" ]; then
_avail_cpu_scheds=("cfs" "pds" "bmq" "tt" "bore")
-_projectc_unoff=1
else
_avail_cpu_scheds=("cfs")
fi
@@ -726,6 +725,8 @@ _tkg_srcprep() {
rev=2
elif [ "$_kver" = "601" ]; then
rev=1
elif [ "$_kver" = "602" ]; then
rev=2
else
rev=0
fi

View File

@@ -1,57 +1,3 @@
From e44ef62b127f6a161a131c84db92a7527d8fc72d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Wed, 22 Feb 2023 19:24:36 +0100
Subject: [PATCH] prjc
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
.../admin-guide/kernel-parameters.txt | 6 +
Documentation/admin-guide/sysctl/kernel.rst | 10 +
Documentation/scheduler/sched-BMQ.txt | 110 +
fs/proc/base.c | 2 +-
include/asm-generic/resource.h | 2 +-
include/linux/sched.h | 33 +-
include/linux/sched/deadline.h | 20 +
include/linux/sched/prio.h | 26 +
include/linux/sched/rt.h | 2 +
include/linux/sched/topology.h | 3 +-
init/Kconfig | 34 +
init/init_task.c | 18 +
kernel/Kconfig.preempt | 2 +-
kernel/cgroup/cpuset.c | 4 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 4 +-
kernel/locking/rtmutex.c | 16 +-
kernel/sched/Makefile | 5 +
kernel/sched/alt_core.c | 8111 +++++++++++++++++
kernel/sched/alt_debug.c | 31 +
kernel/sched/alt_sched.h | 671 ++
kernel/sched/bmq.h | 110 +
kernel/sched/build_policy.c | 8 +-
kernel/sched/build_utility.c | 2 +
kernel/sched/cpufreq_schedutil.c | 10 +
kernel/sched/cputime.c | 10 +-
kernel/sched/debug.c | 10 +
kernel/sched/idle.c | 2 +
kernel/sched/pds.h | 127 +
kernel/sched/pelt.c | 4 +-
kernel/sched/pelt.h | 8 +-
kernel/sched/sched.h | 9 +
kernel/sched/stats.c | 4 +
kernel/sched/stats.h | 2 +
kernel/sched/topology.c | 17 +
kernel/sysctl.c | 15 +
kernel/time/hrtimer.c | 2 +
kernel/time/posix-cpu-timers.c | 10 +-
kernel/trace/trace_selftest.c | 5 +
39 files changed, 9445 insertions(+), 22 deletions(-)
create mode 100644 Documentation/scheduler/sched-BMQ.txt
create mode 100644 kernel/sched/alt_core.c
create mode 100644 kernel/sched/alt_debug.c
create mode 100644 kernel/sched/alt_sched.h
create mode 100644 kernel/sched/bmq.h
create mode 100644 kernel/sched/pds.h
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3996cf..1b6a407213da 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
@@ -686,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..f5e9c01f9382
+index 000000000000..a122b1f8678e
--- /dev/null
+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8111 @@
+@@ -0,0 +1,8120 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -759,7 +705,12 @@ index 000000000000..f5e9c01f9382
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
+#define ALT_SCHED_VERSION "v6.2-r0"
+#define ALT_SCHED_VERSION "v6.2-r2"
+
+/*
+ * Compile time debug macro
+ * #define ALT_SCHED_DEBUG
+ */
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -814,9 +765,9 @@ index 000000000000..f5e9c01f9382
+#ifdef CONFIG_SMP
+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
+
-+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
+
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
@@ -910,13 +861,13 @@ index 000000000000..f5e9c01f9382
+
+ if (prio < last_prio) {
+ if (IDLE_TASK_SCHED_PRIO == last_prio) {
+ cpumask_clear_cpu(cpu, sched_idle_mask);
+ last_prio -= 2;
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present))
+ cpumask_andnot(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+#endif
+ cpumask_clear_cpu(cpu, sched_idle_mask);
+ last_prio -= 2;
+ }
+ clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
+
@@ -924,18 +875,14 @@ index 000000000000..f5e9c01f9382
+ }
+ /* last_prio < prio */
+ if (IDLE_TASK_SCHED_PRIO == prio) {
+ cpumask_set_cpu(cpu, sched_idle_mask);
+ prio -= 2;
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present)) {
+ cpumask_t tmp;
+
+ cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
+ if (static_branch_likely(&sched_smt_present) &&
+ cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
+ cpumask_or(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+ }
+#endif
+ cpumask_set_cpu(cpu, sched_idle_mask);
+ prio -= 2;
+ }
+ set_recorded_preempt_mask(pr, last_prio, prio, cpu);
+}
@@ -1476,11 +1423,13 @@ index 000000000000..f5e9c01f9382
+
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+#endif
+
+ __SCHED_DEQUEUE_TASK(p, rq, flags);
+ --rq->nr_running;
@@ -1494,11 +1443,13 @@ index 000000000000..f5e9c01f9382
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+#endif
+
+ __SCHED_ENQUEUE_TASK(p, rq, flags);
+ update_sched_preempt_mask(rq);
@@ -1513,10 +1464,12 @@ index 000000000000..f5e9c01f9382
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
+{
+#ifdef ALT_SCHED_DEBUG
+ lockdep_assert_held(&rq->lock);
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
+ cpu_of(rq), task_cpu(p));
+#endif
+
+ list_del(&p->sq_node);
+ list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
@@ -2035,8 +1988,8 @@ index 000000000000..f5e9c01f9382
+ */
+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
+{
+ dequeue_task(p, rq, DEQUEUE_SLEEP);
+ p->on_rq = 0;
+ dequeue_task(p, rq, DEQUEUE_SLEEP);
+ cpufreq_update_util(rq, 0);
+}
+
@@ -2253,7 +2206,7 @@ index 000000000000..f5e9c01f9382
+{
+ lockdep_assert_held(&rq->lock);
+
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ dequeue_task(p, rq, 0);
+ update_sched_preempt_mask(rq);
+ set_task_cpu(p, new_cpu);
@@ -4849,10 +4802,9 @@ index 000000000000..f5e9c01f9382
+/*
+ * sg_balance - sibling group balance check for run queue @rq
+ */
+static inline void sg_balance(struct rq *rq)
+static inline void sg_balance(struct rq *rq, int cpu)
+{
+ cpumask_t chk;
+ int cpu = cpu_of(rq);
+
+ /* exit when cpu is offline */
+ if (unlikely(!rq->online))
@@ -5166,11 +5118,6 @@ index 000000000000..f5e9c01f9382
+ schedstat_inc(this_rq()->sched_count);
+}
+
+/*
+ * Compile time debug macro
+ * #define ALT_SCHED_DEBUG
+ */
+
+#ifdef ALT_SCHED_DEBUG
+void alt_sched_debug(void)
+{
@@ -5207,10 +5154,12 @@ index 000000000000..f5e9c01f9382
+ (p = sched_rq_next_task(skip, rq)) != rq->idle) {
+ skip = sched_rq_next_task(p, rq);
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ __SCHED_DEQUEUE_TASK(p, rq, 0);
+ set_task_cpu(p, dest_cpu);
+ sched_task_sanity_check(p, dest_rq);
+ __SCHED_ENQUEUE_TASK(p, dest_rq, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ nr_migrated++;
+ }
+ nr_tries--;
@@ -5507,19 +5456,21 @@ index 000000000000..f5e9c01f9382
+ */
+ ++*switch_count;
+
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+ psi_sched_switch(prev, next, deactivated);
+
+ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next);
+
+ cpu = cpu_of(rq);
+ } else {
+ __balance_callbacks(rq);
+ raw_spin_unlock_irq(&rq->lock);
+ }
+
+#ifdef CONFIG_SCHED_SMT
+ sg_balance(rq);
+ sg_balance(rq, cpu);
+#endif
+}
+
@@ -5844,13 +5795,17 @@ index 000000000000..f5e9c01f9382
+
+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
+{
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p)) {
+ int idx;
+
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
+ update_rq_clock(rq);
+ idx = task_sched_prio_idx(p, rq);
+ if (idx != p->sq_idx) {
+ requeue_task(p, rq, idx);
+ check_preempt_curr(rq);
+ }
+ }
+}
+
+static void __setscheduler_prio(struct task_struct *p, int prio)
@@ -5902,7 +5857,6 @@ index 000000000000..f5e9c01f9382
+ return;
+
+ rq = __task_access_lock(p, &lock);
+ update_rq_clock(rq);
+ /*
+ * Set under pi_lock && rq->lock, such that the value can be used under
+ * either lock.
@@ -8275,7 +8229,8 @@ index 000000000000..f5e9c01f9382
+ int i;
+ struct rq *rq;
+
-+ printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++ printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
++" by Alfred Chen.\n");
+
+ wait_bit_init();
+
@@ -8840,10 +8795,10 @@ index 000000000000..1212a031700e
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
-index 000000000000..0b563999d4c1
+index 000000000000..e9b93e63406a
--- /dev/null
+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,671 @@
+@@ -0,0 +1,672 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -8965,6 +8920,8 @@ index 000000000000..0b563999d4c1
+};
+
+struct rq;
+struct cpuidle_state;
+
+struct balance_callback {
+ struct balance_callback *next;
+ void (*func)(struct rq *rq);
@@ -9136,8 +9093,7 @@ index 000000000000..0b563999d4c1
+ NR_CPU_AFFINITY_LEVELS
+};
+
-+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+
+static inline int
+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
@@ -9517,11 +9473,11 @@ index 000000000000..0b563999d4c1
+#endif /* ALT_SCHED_H */
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
-index 000000000000..66b77291b9d0
+index 000000000000..f29b8f3aa786
--- /dev/null
+++ b/kernel/sched/bmq.h
@@ -0,0 +1,110 @@
+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+#define ALT_SCHED_NAME "BMQ"
+
+/*
+ * BMQ only routines
@@ -9860,14 +9816,15 @@ index f26ab2675f7d..480d4ad16d45 100644
+#endif
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
new file mode 100644
index 000000000000..56a649d02e49
index 000000000000..27e09b4feb8c
--- /dev/null
+++ b/kernel/sched/pds.h
-@@ -0,0 +1,127 @@
-+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+@@ -0,0 +1,133 @@
++#define ALT_SCHED_NAME "PDS"
+
+static int sched_timeslice_shift = 22;
+
+/* PDS assume NORMAL_PRIO_NUM is power of 2 */
+#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+
+/*
@@ -9884,38 +9841,43 @@ index 000000000000..56a649d02e49
+{
+ s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+
+#ifdef ALT_SCHED_DEBUG
+ if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
+ "pds: task_sched_prio_normal() delta %lld\n", delta))
+ return NORMAL_PRIO_NUM - 1;
+#endif
+
+ return (delta < 0) ? 0 : delta;
+ return max(0LL, delta);
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio :
+ return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
+ MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+}
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
+ u64 idx;
+
+ if (p->prio < MAX_RT_PRIO)
+ return p->prio;
+
+ idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
+ return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
+}
+
+static inline int sched_prio2idx(int prio, struct rq *rq)
+{
+ return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
+ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
+ rq->time_edge);
+ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
+}
+
+static inline int sched_idx2prio(int idx, struct rq *rq)
+{
+ return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
+ NORMAL_PRIO_MOD(rq->time_edge));
+ NORMAL_PRIO_MOD(idx - rq->time_edge);
+}
+
+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
@@ -9940,6 +9902,7 @@ index 000000000000..56a649d02e49
+ if (now == old)
+ return;
+
+ rq->time_edge = now;
+ delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
+ INIT_LIST_HEAD(&head);
+
@@ -9949,10 +9912,9 @@ index 000000000000..56a649d02e49
+
+ rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
+ rq->queue.bitmap[2] >> delta;
+ rq->time_edge = now;
+ if (!list_empty(&head)) {
+ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+ struct task_struct *p;
+ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+
+ list_for_each_entry(p, &head, sq_node)
+ p->sq_idx = idx;
@@ -10322,6 +10284,363 @@ index ff0536cea968..ce266990006d 100644
};
struct wakeup_test_data *x = data;
--
2.39.2
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
index a122b1f8678e..78748ebb1d71 100644
--- a/kernel/sched/alt_core.c
+++ b/kernel/sched/alt_core.c
@@ -254,8 +254,7 @@ static inline void update_sched_preempt_mask(struct rq *rq)
*/
static inline struct task_struct *sched_rq_first_task(struct rq *rq)
{
- unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
- const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
+ const struct list_head *head = &rq->queue.heads[sched_prio2idx(rq->prio, rq)];
return list_first_entry(head, struct task_struct, sq_node);
}
@@ -767,13 +766,15 @@ unsigned long get_wchan(struct task_struct *p)
* Add/Remove/Requeue task to/from the runqueue routines
* Context: rq->lock
*/
-#define __SCHED_DEQUEUE_TASK(p, rq, flags) \
+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
sched_info_dequeue(rq, p); \
psi_dequeue(p, flags & DEQUEUE_SLEEP); \
\
list_del(&p->sq_node); \
- if (list_empty(&rq->queue.heads[p->sq_idx])) \
- clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+ if (list_empty(&rq->queue.heads[p->sq_idx])) { \
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); \
+ func; \
+ }
#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
sched_info_enqueue(rq, p); \
@@ -788,12 +789,12 @@ static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);
- /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
task_cpu(p), cpu_of(rq));
#endif
- __SCHED_DEQUEUE_TASK(p, rq, flags);
+ __SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
--rq->nr_running;
#ifdef CONFIG_SMP
if (1 == rq->nr_running)
@@ -808,7 +809,7 @@ static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);
- /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
task_cpu(p), cpu_of(rq));
#endif
@@ -828,7 +829,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
{
#ifdef ALT_SCHED_DEBUG
lockdep_assert_held(&rq->lock);
- /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
cpu_of(rq), task_cpu(p));
#endif
@@ -837,8 +838,7 @@ static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
if (idx != p->sq_idx) {
if (list_empty(&rq->queue.heads[p->sq_idx]))
- clear_bit(sched_idx2prio(p->sq_idx, rq),
- rq->queue.bitmap);
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
p->sq_idx = idx;
set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
update_sched_preempt_mask(rq);
@@ -1350,8 +1350,8 @@ static void activate_task(struct task_struct *p, struct rq *rq)
*/
static inline void deactivate_task(struct task_struct *p, struct rq *rq)
{
- p->on_rq = 0;
dequeue_task(p, rq, DEQUEUE_SLEEP);
+ p->on_rq = 0;
cpufreq_update_util(rq, 0);
}
@@ -1568,9 +1568,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
{
lockdep_assert_held(&rq->lock);
- p->on_rq = TASK_ON_RQ_MIGRATING;
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
dequeue_task(p, rq, 0);
- update_sched_preempt_mask(rq);
set_task_cpu(p, new_cpu);
raw_spin_unlock(&rq->lock);
@@ -4516,12 +4515,10 @@ migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
(p = sched_rq_next_task(skip, rq)) != rq->idle) {
skip = sched_rq_next_task(p, rq);
if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
- p->on_rq = TASK_ON_RQ_MIGRATING;
- __SCHED_DEQUEUE_TASK(p, rq, 0);
+ __SCHED_DEQUEUE_TASK(p, rq, 0, );
set_task_cpu(p, dest_cpu);
sched_task_sanity_check(p, dest_rq);
__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
- p->on_rq = TASK_ON_RQ_QUEUED;
nr_migrated++;
}
nr_tries--;
@@ -4566,6 +4563,7 @@ static inline int take_other_rq_tasks(struct rq *rq, int cpu)
if (rq->nr_running > 1)
cpumask_set_cpu(cpu, &sched_rq_pending_mask);
+ update_sched_preempt_mask(rq);
cpufreq_update_util(rq, 0);
return 1;
@@ -4637,8 +4635,7 @@ choose_next_task(struct rq *rq, int cpu)
#ifdef CONFIG_HIGH_RES_TIMERS
hrtick_start(rq, next->time_slice);
#endif
- /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
- * next);*/
+ /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
return next;
}
@@ -4706,7 +4703,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
unsigned long prev_state;
struct rq *rq;
int cpu;
- int deactivated = 0;
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -4771,7 +4767,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*/
sched_task_deactivate(prev, rq);
deactivate_task(prev, rq);
- deactivated = 1;
if (prev->in_iowait) {
atomic_inc(&rq->nr_iowait);
@@ -4791,11 +4786,10 @@ static void __sched notrace __schedule(unsigned int sched_mode)
#endif
if (likely(prev != next)) {
- if (deactivated)
- update_sched_preempt_mask(rq);
next->last_ran = rq->clock_task;
rq->last_ts_switch = rq->clock;
+ /*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
rq->nr_switches++;
/*
* RCU users of rcu_dereference(rq->curr) may not see
@@ -4818,7 +4812,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*/
++*switch_count;
- psi_sched_switch(prev, next, deactivated);
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
index e9b93e63406a..60bbb4583d16 100644
--- a/kernel/sched/alt_sched.h
+++ b/kernel/sched/alt_sched.h
@@ -22,8 +22,8 @@
#endif
#ifdef CONFIG_SCHED_PDS
-/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
-#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
+/* bits: RT(0-24), reserved(25-31), SCHED_NORMAL_PRIO_NUM(32), cpu idle task(1) */
+#define SCHED_BITS (64 + 1)
#endif /* CONFIG_SCHED_PDS */
#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
@@ -142,7 +142,7 @@ struct rq {
#ifdef CONFIG_SCHED_PDS
u64 time_edge;
#endif
- unsigned long prio;
+ unsigned long prio;
/* switch count */
u64 nr_switches;
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
index 27e09b4feb8c..5a94a98e19af 100644
--- a/kernel/sched/pds.h
+++ b/kernel/sched/pds.h
@@ -1,9 +1,15 @@
#define ALT_SCHED_NAME "PDS"
-static int sched_timeslice_shift = 22;
+#define MIN_SCHED_NORMAL_PRIO (32)
+#define SCHED_NORMAL_PRIO_NUM (32)
+#define SCHED_EDGE_DELTA (SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
/* PDS assume NORMAL_PRIO_NUM is power of 2 */
#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+#define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))
+
+/* 4ms -> shift 22, 2 time slice slots -> shift 23 */
+static int sched_timeslice_shift = 23;
/*
* Common interfaces
@@ -11,18 +17,18 @@ static int sched_timeslice_shift = 22;
static inline void sched_timeslice_imp(const int timeslice_ms)
{
if (2 == timeslice_ms)
- sched_timeslice_shift = 21;
+ sched_timeslice_shift = 22;
}
static inline int
task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
{
- s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+ s64 delta = p->deadline - rq->time_edge + SCHED_EDGE_DELTA;
#ifdef ALT_SCHED_DEBUG
if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
"pds: task_sched_prio_normal() delta %lld\n", delta))
- return NORMAL_PRIO_NUM - 1;
+ return SCHED_NORMAL_PRIO_NUM - 1;
#endif
return max(0LL, delta);
@@ -30,8 +36,8 @@ task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
static inline int task_sched_prio(const struct task_struct *p)
{
- return (p->prio < MIN_NORMAL_PRIO) ? p->prio :
- MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+ return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
+ MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
}
static inline int
@@ -39,30 +45,35 @@ task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
{
u64 idx;
- if (p->prio < MAX_RT_PRIO)
- return p->prio;
+ if (p->prio < MIN_NORMAL_PRIO)
+ return p->prio >> 2;
- idx = max(p->deadline + NORMAL_PRIO_NUM - NICE_WIDTH, rq->time_edge);
- return MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(idx);
+ idx = max(p->deadline + SCHED_EDGE_DELTA, rq->time_edge);
+ /*printk(KERN_INFO "sched: task_sched_prio_idx edge:%llu, deadline=%llu idx=%llu\n", rq->time_edge, p->deadline, idx);*/
+ return MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx);
}
-static inline int sched_prio2idx(int prio, struct rq *rq)
+static inline int sched_prio2idx(int sched_prio, struct rq *rq)
{
- return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
- MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(prio + rq->time_edge);
+ return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
+ sched_prio :
+ MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
}
-static inline int sched_idx2prio(int idx, struct rq *rq)
+static inline int sched_idx2prio(int sched_idx, struct rq *rq)
{
- return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
- NORMAL_PRIO_MOD(idx - rq->time_edge);
+ int ret;
+ ret = (sched_idx < MIN_SCHED_NORMAL_PRIO) ? sched_idx :
+ MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
+ /*printk(KERN_INFO "sched: sched_idx2prio edge:%llu, %d -> %d\n", rq->time_edge, sched_idx, ret);*/
+
+ return ret;
}
static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
{
- if (p->prio >= MAX_RT_PRIO)
- p->deadline = (rq->clock >> sched_timeslice_shift) +
- p->static_prio - (MAX_PRIO - NICE_WIDTH);
+ if (p->prio >= MIN_NORMAL_PRIO)
+ p->deadline = rq->time_edge + (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
}
int task_running_nice(struct task_struct *p)
@@ -70,36 +81,48 @@ int task_running_nice(struct task_struct *p)
return (p->prio > DEFAULT_PRIO);
}
+const u64 RT_MASK = 0xffffffffULL;
+
static inline void update_rq_time_edge(struct rq *rq)
{
struct list_head head;
u64 old = rq->time_edge;
u64 now = rq->clock >> sched_timeslice_shift;
u64 prio, delta;
+ DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
if (now == old)
return;
rq->time_edge = now;
- delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
+ delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
INIT_LIST_HEAD(&head);
- for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
- list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
- NORMAL_PRIO_MOD(prio + old), &head);
+ /*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx %llu\n", rq->queue.bitmap[0], delta);*/
+ prio = MIN_SCHED_NORMAL_PRIO;
+ for_each_set_bit_from(prio, &rq->queue.bitmap[0], MIN_SCHED_NORMAL_PRIO + delta)
+ list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
+ SCHED_NORMAL_PRIO_MOD(prio + old), &head);
- rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
- rq->queue.bitmap[2] >> delta;
+ bitmap_shift_right(&normal[0], &rq->queue.bitmap[0], delta, SCHED_QUEUE_BITS);
if (!list_empty(&head)) {
struct task_struct *p;
- u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+ u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
list_for_each_entry(p, &head, sq_node)
p->sq_idx = idx;
list_splice(&head, rq->queue.heads + idx);
- rq->queue.bitmap[2] |= 1UL;
+ set_bit(MIN_SCHED_NORMAL_PRIO, &normal[0]);
}
+ bitmap_replace(&rq->queue.bitmap[0], &normal[0], &rq->queue.bitmap[0],
+ (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
+ /*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx 0x%016lx", rq->queue.bitmap[0], normal);*/
+ if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
+ return;
+
+ rq->prio = (rq->prio < MIN_SCHED_NORMAL_PRIO + delta) ?
+ MIN_SCHED_NORMAL_PRIO : rq->prio - delta;
}
static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
@@ -112,7 +135,7 @@ static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
{
- u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
+ u64 max_dl = rq->time_edge + NICE_WIDTH / 2 - 1;
if (unlikely(p->deadline > max_dl))
p->deadline = max_dl;
}
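
The pds.h rework above is the "pds priority squeeze 0.5" named in the commit message: RT priorities are compressed by four (p->prio >> 2) and the 40 nice levels fold into 32 ring buckets indexed relative to rq->time_edge. What follows is a self-contained userspace model of the resulting mapping — constants and formulas taken from the hunks above, simplified; a sketch, not the kernel code:

#include <stdio.h>
#include <stdint.h>

#define MIN_SCHED_NORMAL_PRIO  32
#define SCHED_NORMAL_PRIO_NUM  32
#define NICE_WIDTH             40
#define SCHED_EDGE_DELTA       (SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)  /* 12 */
#define IDLE_TASK_SCHED_PRIO   64  /* SCHED_BITS(64 + 1) - 1 */

/* x & (N - 1) == x % N because SCHED_NORMAL_PRIO_NUM is a power of two. */
#define SCHED_NORMAL_PRIO_MOD(x) ((x) & (SCHED_NORMAL_PRIO_NUM - 1))

/* Stable queue priority -> ring slot: the ring rotates as time_edge grows,
 * so the slot is (prio + edge) mod 32, offset past the 32 squeezed RT bits. */
static int sched_prio2idx(int prio, uint64_t time_edge)
{
    return (prio == IDLE_TASK_SCHED_PRIO || prio < MIN_SCHED_NORMAL_PRIO)
        ? prio
        : (int)(MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(prio + time_edge));
}

/* Ring slot -> stable queue priority: the inverse rotation. */
static int sched_idx2prio(int idx, uint64_t time_edge)
{
    return (idx < MIN_SCHED_NORMAL_PRIO)
        ? idx
        : (int)(MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx - time_edge));
}

/* Virtual deadline -> priority inside the normal band, clamped at 0; the
 * kernel additionally warns when delta overflows the 32-slot band. */
static int task_sched_prio_normal(uint64_t deadline, uint64_t time_edge)
{
    int64_t delta = (int64_t)(deadline - time_edge) + SCHED_EDGE_DELTA;
    return delta < 0 ? 0 : (int)delta;
}

int main(void)
{
    uint64_t edge = 1000;  /* arbitrary time_edge */

    /* The rotation must be invertible across the whole normal band. */
    for (int p = MIN_SCHED_NORMAL_PRIO;
         p < MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_NUM; p++)
        if (sched_idx2prio(sched_prio2idx(p, edge), edge) != p)
            printf("round-trip broken at %d\n", p);

    /* A deadline 5 edges out lands SCHED_EDGE_DELTA + 5 slots into the band. */
    printf("prio_normal = %d\n", task_sched_prio_normal(edge + 5, edge));
    return 0;
}

Compiled with plain cc, this prints prio_normal = 17 and no round-trip mismatches — the invariant the requeue paths above rely on when translating between bitmap positions and task priorities.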

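The reworked update_rq_time_edge() above also stops special-casing bitmap word [2] and instead shifts the whole normal band with bitmap_shift_right()/bitmap_replace(). Below is a compact model of just that bookkeeping, with one 64-bit word standing in for priority bits 0-63 (the idle bit lives in a further word and is untouched) — a sketch under this diff's layout, not the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define RT_MASK 0xffffffffULL  /* bits 0-31: squeezed RT priorities */

/* Advance the time edge from old to now and return the updated bitmap.
 * Bits 32-63 are the 32 normal-priority ring slots. */
static uint64_t advance_edge(uint64_t bitmap, uint64_t old, uint64_t now,
                             int *spliced)
{
    uint64_t delta = now - old;
    if (delta > 32)
        delta = 32;  /* min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old) */

    /* Slots whose bit falls in [32, 32 + delta) are now in the past; the
     * kernel splices their task lists onto the new head bucket. The model
     * only tracks whether any such slot was occupied. */
    uint64_t stale = bitmap & (((1ULL << delta) - 1) << 32);
    *spliced = stale != 0;

    /* bitmap_shift_right(normal, bitmap, delta, SCHED_QUEUE_BITS) */
    uint64_t normal = bitmap >> delta;
    if (*spliced)
        normal |= 1ULL << 32;  /* set_bit(MIN_SCHED_NORMAL_PRIO, normal) */

    /* bitmap_replace(bitmap, normal, bitmap, RT_MASK, ...): the RT half is
     * kept from the old word, the normal half taken from the shifted copy. */
    return (bitmap & RT_MASK) | (normal & ~RT_MASK);
}

int main(void)
{
    int spliced;
    /* One task queued at the old edge (bit 32), one RT task at prio 5. */
    uint64_t bm = (1ULL << 32) | (1ULL << 5);
    uint64_t out = advance_edge(bm, 100, 103, &spliced);

    /* RT bit 5 survives; the stale bucket re-appears at the new head. */
    printf("0x%016" PRIx64 " spliced=%d\n", out, spliced);
    return 0;
}
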
View File

@@ -182,10 +182,10 @@ index 4700d24e5d55..8f7a3d7fd9c1 100644
/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
@@ -4513,6 +4613,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
/* Zhaoxin Root/Downstream Ports */
@@ -5102,6 +5102,7 @@
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
/* Wangxun nics */
{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
+ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
{ 0 }
};