Moved Zenify CFS tweaks to cfs-additions to prevent conflicts. Squashed from https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/?h=sched/eevdf&id=d07f09a1f99cabbc86bc5c97d962eb8a466106b5
143 lines
4.4 KiB
Diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b3b59cc51d6..2a0072192c3d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_wakeup_granularity = 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
+#else
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif
 
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 051aaf65c..705df5511 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -208,7 +208,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
-static unsigned int sysctl_sched_energy_aware = 1;
+static unsigned int sysctl_sched_energy_aware = 0;
 static DEFINE_MUTEX(sched_energy_mutex);
 static bool sched_energy_update;
 
From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
From: Etienne Juvigny <Ti3noU@gmail.com>
Date: Mon, 3 Sep 2018 17:36:25 +0200
Subject: [PATCH] Zenify & stuff

---
 kernel/sched/fair.c | 25 +++++++++++++++++++++++++
 mm/page-writeback.c |  8 ++++++++
 2 files changed, 33 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b3b59cc51d6..2a0072192c3d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_latency = 3000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
+#else
 unsigned int sysctl_sched_latency = 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+#endif
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -58,21 +63,34 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_min_granularity = 300000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
+#else
 unsigned int sysctl_sched_min_granularity = 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+#endif
 
 /*
  * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
  * Applies only when SCHED_IDLE tasks compete with normal tasks.
  *
  * (default: 0.75 msec)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_idle_min_granularity = 300000ULL;
+#else
 unsigned int sysctl_sched_idle_min_granularity = 750000ULL;
+#endif
 
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
+#ifdef CONFIG_ZENIFY
+static unsigned int sched_nr_latency = 10;
+#else
 static unsigned int sched_nr_latency = 8;
+#endif
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
+#ifdef CONFIG_ZENIFY
+static unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
+#else
 static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
+#endif
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table sched_fair_sysctls[] = {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 28b3e7a67565..01a1aef2b9b1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
 /*
  * Start background writeback (via writeback threads) at this percentage
  */
+#ifdef CONFIG_ZENIFY
+static int dirty_background_ratio = 20;
+#else
 static int dirty_background_ratio = 10;
+#endif
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
+#ifdef CONFIG_ZENIFY
+static int vm_dirty_ratio = 50;
+#else
 static int vm_dirty_ratio = 20;
+#endif
 
 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
-- 
2.28.0