sdiff -w 180 /usr/lib/tuned/throughput-performance/tuned.conf /usr/lib/tuned/latency-performance/tuned.conf
# #
# tuned configuration # tuned configuration
# #
[main] [main]
summary=Broadly applicable tuning that provides excellent performance across a variet | summary=Optimize for deterministic performance at the cost of increased power consump
[cpu] [cpu]
> force_latency=1
governor=performance governor=performance
energy_perf_bias=performance energy_perf_bias=performance
min_perf_pct=100 min_perf_pct=100
[disk] <
readahead=>4096 <
<
[sysctl] [sysctl]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput # ktune sysctl settings for rhel6 servers, maximizing i/o throughput
# #
# Minimal preemption granularity for CPU-bound tasks: # Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) # (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
kernel.sched_min_granularity_ns = 10000000 | kernel.sched_min_granularity_ns=10000000
<
# SCHED_OTHER wake-up granularity. <
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) <
# <
# This option delays the preemption effects of decoupled workloads <
# and reduces their over-scheduling. Synchronous workloads will still <
# have immediate wakeup/sleep latencies. <
kernel.sched_wakeup_granularity_ns = 15000000 <
# If a workload mostly uses anonymous memory and it hits this limit, the entire # If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require # working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads # swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values. # that mostly use file mappings may be able to use even higher values.
# #
# The generator of dirty data starts writeback at this percentage (system default # The generator of dirty data starts writeback at this percentage (system default
# is 20%) # is 20%)
vm.dirty_ratio = 40 | vm.dirty_ratio=10
# Start background writeback (via writeback threads) at this percentage (system # Start background writeback (via writeback threads) at this percentage (system
# default is 10%) # default is 10%)
vm.dirty_background_ratio = 10 | vm.dirty_background_ratio=3
<
# PID allocation wrap value. When the kernel's next PID value <
# reaches this value, it wraps back to a minimum PID value. <
# PIDs of value pid_max or larger are not allocated. <
# <
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system> <
# e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus, <
# 65536, for 4096 cpus, 4194304 (which is the upper limit possible). <
#kernel.pid_max = 65536 <
# The swappiness parameter controls the tendency of the kernel to move # The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk. # processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory # 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible # for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory # 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache # and move them to swap cache
vm.swappiness=10 vm.swappiness=10
>
> # The total time the scheduler will consider a migrated process
> # "cache hot" and thus less likely to be re-migrated
> # (system default is 500000, i.e. 0.5 ms)
> kernel.sched_migration_cost_ns=5000000