Directory: /lib/tuned/accelerator-performance/
Current File: /lib/tuned/accelerator-performance/tuned.conf
#
# tuned configuration
#

[main]
summary=Throughput performance based tuning with disabled higher latency STOP states

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
force_latency=99

[acpi]
platform_profile=performance

[disk]
readahead=>4096

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value. When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., for a box with 32 cpus, the default of 32768 is reasonable; for 64 cpus,
# 65536; for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

[scheduler]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
sched_wakeup_granularity_ns = 15000000
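
For illustration only, here is a minimal Python sketch (not part of tuned) that applies the pid_max rule of thumb from the comment above and cross-checks a few of the values this profile's [sysctl] section sets against the live kernel state under /proc/sys. It assumes a Linux host and that the tunables are exposed at their usual /proc/sys paths.

    #!/usr/bin/env python3
    # Sketch: verify a running system against the accelerator-performance profile.
    # Assumes Linux; reads /proc/sys directly rather than shelling out to sysctl(8).
    import os
    from pathlib import Path

    # Rule of thumb from the profile comment: pid_max ~= 1024 * number of CPU threads.
    suggested_pid_max = 1024 * (os.cpu_count() or 1)
    print(f"suggested kernel.pid_max for this machine: {suggested_pid_max}")

    # Values the profile sets via its [sysctl] section.
    expected = {
        "vm/dirty_ratio": "40",
        "vm/dirty_background_ratio": "10",
        "vm/swappiness": "10",
    }

    for rel_path, want in expected.items():
        node = Path("/proc/sys") / rel_path
        got = node.read_text().strip()
        status = "ok" if got == want else f"differs (profile sets {want})"
        print(f"{rel_path.replace('/', '.')} = {got}  [{status}]")

Running the script on a host where the profile is active (e.g. after `tuned-adm profile accelerator-performance`) should report "ok" for each entry; on an untuned host it shows how far the defaults diverge.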