Archive: tuned.tar

==> recommend.d/50-tuned.conf <==
# Tuned rules for recommend_profile.
#
# Syntax:
# [PROFILE1]
# KEYWORD11=RE11
# KEYWORD21=RE12
#
# [PROFILE2]
# KEYWORD21=RE21
# KEYWORD22=RE22

# KEYWORD can be:
# virt            - for RE to match output of virt-what
# system          - for RE to match content of /etc/system-release-cpe
# process         - for RE to match running processes. It can have an
#                   arbitrary suffix; all process* lines have to match for the
#                   PROFILE to match (i.e. the AND operator)
# /FILE           - for RE to match content of the FILE, e.g.:
#                   '/etc/passwd=.+'. If file doesn't exist, its RE will not
#                   match.
# chassis_type    - for RE to match the chassis type as reported by dmidecode
# syspurpose_role - for RE to match the system role as reported by syspurpose

# All REs for all KEYWORDs have to match for PROFILE to match (i.e. the AND operator).
# If 'virt' or 'system' is not specified, it matches for every string.
# If 'virt' or 'system' is empty, i.e. 'virt=', it matches only empty string (alias for '^$').
# If several profiles matched, the first match is taken.
#
# Limitation:
# Each profile can be specified only once, because there cannot be
# multiple sections in the configuration file with the same name
# (ConfigObj limitation).
# If there is a need to specify the profile multiple times, a unique
# suffix like ',ANYSTRING' can be used. Everything after the last ','
# is stripped by the parser, e.g.:
#
# [balanced,1]
# /FILE1=RE1
#
# [balanced,2]
# /FILE2=RE2
#
# This will select the 'balanced' profile if the content of FILE1 matches RE1,
# the content of FILE2 matches RE2, or both.
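
# Illustrative example (not part of the shipped rules): a hypothetical
# 'my-db' profile that would be recommended only on a physical machine
# (empty 'virt') where a mysqld process is running; both REs must match:
#
# [my-db]
# virt=
# process=.*mysqld.*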

[atomic-host]
virt=
syspurpose_role=.*atomic.*

[atomic-guest]
virt=.+
syspurpose_role=.*atomic.*

[virtual-guest]
virt=.+

[balanced]
syspurpose_role=(.*(desktop|workstation).*)|^$
chassis_type=.*(Notebook|Laptop|Portable).*

[throughput-performance]

==> virtual-host/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for running KVM guests
include=throughput-performance

[sysctl]
# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 5

[cpu]
# Setting C3 state sleep mode/power savings
force_latency=cstate.id_no_zero:3|70

[scheduler]
runtime=0
# The total time the scheduler will consider a migrated process
# "cache hot" and thus less likely to be re-migrated
# (system default is 500000, i.e. 0.5 ms)
sched_migration_cost_ns = 5000000

==> hpc-compute/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for HPC compute workloads
description=Configures virtual memory, CPU governors, and network settings for HPC compute workloads.
include=latency-performance

[vm]
# Most HPC applications can take advantage of hugepages. Force them on.
transparent_hugepages=always

[disk]
# Increase the readahead value to support large, contiguous files.
readahead=>4096

[sysctl]
# Keep a reasonable amount of memory free to support large mem requests
vm.min_free_kbytes=135168

# Most HPC applications are NUMA aware. Enabling zone reclaim ensures
# memory is reclaimed and reallocated from local pages. Disabling
# automatic NUMA balancing prevents unwanted memory unmapping.
vm.zone_reclaim_mode=1
kernel.numa_balancing=0

# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# A busy_read value greater than 0 enables busy polling; the recommended
# net.core.busy_read value is 50. A busy_poll value greater than 0 enables
# polling globally; the recommended net.core.busy_poll value is 50.
net.core.busy_read=50
net.core.busy_poll=50

# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3



==> balanced/tuned.conf <==
#
# tuned configuration
#

[main]
summary=General non-specialized tuned profile

[modules]
cpufreq_conservative=+r

[cpu]
priority=10
governor=conservative|powersave
energy_perf_bias=normal
energy_performance_preference=balance_performance

[acpi]
platform_profile=balanced

[audio]
timeout=10

[video]
radeon_powersave=dpm-balanced, auto

[disk]
# Comma separated list of devices, all devices if commented out.
# devices=sda

[scsi_host]
alpm=medium_power

==> aws/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for aws ec2 instances
include=throughput-performance

# Marvell ThunderX
[bootloader.thunderx]
# rhbz:1836058
type=bootloader
uname_regex=aarch64
cmdline=+iommu.strict=0

[bootloader]
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
# set nvme_core.io_timeout to 4294967295
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena.html
# set net.ifnames to 0
cmdline=+net.ifnames=0 nvme_core.io_timeout=4294967295

==> epyc-eda/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for EDA compute workloads on AMD EPYC CPUs
description=Configures virtual memory, CPU governors, and network settings for EDA compute workloads.
include=throughput-performance

# AMD
[scheduler.amd]
type=scheduler
# Allow processes to rapidly move between cores to avoid idle time and maximize CPU usage
sched_migration_cost_ns=10000

==> functions <==
#
# This is a library of helper functions that can be used in scripts in tuned profiles.
#
# The API provided by this library is under heavy development and may change at any time.
#

#
# Config
#
STORAGE=/run/tuned
STORAGE_PERSISTENT=/var/lib/tuned
STORAGE_SUFFIX=".save"

#
# Helpers
#

# Save value
# $0 STORAGE_NAME VALUE
save_value() {
	[ "$#" -ne 2 ] && return
	[ "$2" -a -e "${STORAGE}" ] && echo "$2" > "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Parse a sysfs value, e.g. for "val1 [val2] val3" return "val2"
# $0 SYSFS_NAME
parse_sys() {
	local V1 V2
	[ -r "$1" ] || return
	V1=`cat "$1"`
	V2="${V1##*[}"
	V2="${V2%%]*}"
	echo "${V2:-$V1}"
}
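
# Illustrative usage (not part of the library API): for a sysfs file such as
# /sys/kernel/mm/transparent_hugepage/enabled containing
# "always [madvise] never", parse_sys prints the bracketed selection:
#   parse_sys /sys/kernel/mm/transparent_hugepage/enabled   # -> "madvise"
# Files without brackets are printed unchanged.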

# Save sysfs value
# $0 STORAGE_NAME SYSFS_NAME
save_sys() {
	[ "$#" -ne 2 ] && return
	[ -r "$2" -a ! -e "${STORAGE}/${1}${STORAGE_SUFFIX}" ] && parse_sys "$2" > "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Set sysfs value
# $0 SYSFS_NAME VALUE
set_sys() {
	[ "$#" -ne 2 ] && return
	[ -w "$1" ] && echo "$2" > "$1"
}

# Save and set sysfs value
# $0 STORAGE_NAME SYSFS_NAME VALUE
save_set_sys() {
	[ "$#" -ne 3 ] && return
	save_sys "$1" "$2"
	set_sys "$2" "$3"
}

# Get stored sysfs value from storage
# $0 STORAGE_NAME
get_stored_sys() {
	[ "$#" -ne 1 ] && return
	[ -r "${STORAGE}/${1}${STORAGE_SUFFIX}" ] && cat "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Restore value from storage
# $0 STORAGE_NAME
restore_value() {
	[ "$#" -ne 1 ] && return
	_rs_value="`get_stored_sys \"$1\"`"
	unlink "${STORAGE}/${1}${STORAGE_SUFFIX}" >/dev/null 2>&1
	[ "$_rs_value" ] && echo "$_rs_value"
}

# Restore sysfs value from storage, if nothing is stored, use VALUE
# $0 STORAGE_NAME SYSFS_NAME [VALUE]
restore_sys() {
	[ "$#" -lt 2 -o "$#" -gt 3 ] && return
	_rs_value="`get_stored_sys \"$1\"`"
	unlink "${STORAGE}/${1}${STORAGE_SUFFIX}" >/dev/null 2>&1
	[ "$_rs_value" ] || _rs_value="$3"
	[ "$_rs_value" ] && set_sys "$2" "$_rs_value"
}
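
# Illustrative example (not part of the upstream library): a profile script
# sourcing this file would typically pair save_set_sys in its start() action
# with restore_sys in stop(). The storage key and sysfs path below are
# arbitrary examples.
example_laptop_mode_start() {
	save_set_sys laptop_mode /proc/sys/vm/laptop_mode 5
}

example_laptop_mode_stop() {
	# restores the saved value, falling back to 0 if nothing was stored
	restore_sys laptop_mode /proc/sys/vm/laptop_mode 0
}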


#
# DISK tuning
#

DISKS_DEV="$(command ls -d1 /dev/[shv]d*[a-z] 2>/dev/null)"
DISKS_SYS="$(command ls -d1 /sys/block/{sd,cciss,dm-,vd,dasd,xvd}* 2>/dev/null)"

_check_elevator_override()
{
	/bin/fgrep -q 'elevator=' /proc/cmdline
}

# $0 OPERATOR DEVICES ELEVATOR
_set_elevator_helper() {
	_check_elevator_override && return
	SYS_BLOCK_SDX=""
	[ "$2" ] && SYS_BLOCK_SDX=$(eval LANG=C /bin/ls -1 "${2}" 2>/dev/null)

	# if there is no kernel command line elevator settings, apply the elevator
	if [ "$1" -a "$SYS_BLOCK_SDX" ]; then
		for i in $SYS_BLOCK_SDX; do
			se_dev="`echo \"$i\" | sed 's|/sys/block/\([^/]\+\)/queue/scheduler|\1|'`"
			$1 "elevator_${se_dev}" "$i" "$3"
		done
	fi
}

# $0 DEVICES ELEVATOR
set_elevator() {
	_set_elevator_helper save_set_sys "$1" "$2"
}

# $0 DEVICES [ELEVATOR]
restore_elevator() {
	re_elevator="$2"
	[ "$re_elevator" ] || re_elevator=cfq
	_set_elevator_helper restore_sys "$1" "$re_elevator"
}
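
# Illustrative usage (hypothetical values): set the 'deadline' elevator on all
# SCSI disks and later restore whatever was saved (falling back to 'cfq'):
#   set_elevator "/sys/block/sd*/queue/scheduler" deadline
#   restore_elevator "/sys/block/sd*/queue/scheduler"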

# SATA Aggressive Link Power Management
# usage: set_disk_alpm policy
set_disk_alpm() {
	policy=$1

        for host in /sys/class/scsi_host/*; do
                if [ -f $host/ahci_port_cmd ]; then
                        port_cmd=`cat $host/ahci_port_cmd`;
                        if [ $((0x$port_cmd & 0x240000)) = 0 -a -f $host/link_power_management_policy ]; then
                                echo $policy >$host/link_power_management_policy;
                        else
                                echo "max_performance" >$host/link_power_management_policy;
                        fi
                fi
        done
}

# usage: set_disk_apm level
set_disk_apm() {
	level=$1
	for disk in $DISKS_DEV; do
		hdparm -B $level $disk &>/dev/null
	done
}

# usage: set_disk_spindown level
set_disk_spindown() {
	level=$1
	for disk in $DISKS_DEV; do
		hdparm -S $level $disk &>/dev/null
	done
}

# usage: multiply_disk_readahead by
multiply_disk_readahead() {
	by=$1

	# float multiplication not supported in bash
	# bc might not be installed, python is available for sure

	for disk in $DISKS_SYS; do
		control="${disk}/queue/read_ahead_kb"
		old=$(cat $control)
		new=$(echo "print int($old*$by)" | python)

		(echo $new > $control) &>/dev/null
	done
}

# usage: remount_disk options partition1 partition2 ...
remount_partitions() {
	options=$1
	shift

	for partition in $@; do
		mount -o remount,$options $partition >/dev/null 2>&1
	done
}

remount_all_no_rootboot_partitions() {
	[ "$1" ] || return
	# Find non-root and non-boot partitions, disable barriers on them
	rootvol=$(df -h / | grep "^/dev" | awk '{print $1}')
	bootvol=$(df -h /boot | grep "^/dev" | awk '{print $1}')
	volumes=$(df -hl --exclude=tmpfs | grep "^/dev" | awk '{print $1}')
	nobarriervols=$(echo "$volumes" | grep -v $rootvol | grep -v $bootvol)
	remount_partitions "$1" $nobarriervols
}


DISK_QUANTUM_SAVE="${STORAGE}/disk_quantum${STORAGE_SUFFIX}"

set_disk_scheduler_quantum() {
	value=$1
	rm -f "$DISK_QUANTUM_SAVE"
	for disk in $DISKS_SYS; do
		control="${disk}/queue/iosched/quantum"
		echo "echo $(cat $control) > $control" >> "$DISK_QUANTUM_SAVE" 2>/dev/null
		(echo $value > $control) &>/dev/null
	done
}

restore_disk_scheduler_quantum() {
	if [ -r "$DISK_QUANTUM_SAVE" ]; then
		/bin/bash "$DISK_QUANTUM_SAVE" &>/dev/null
		rm -f "$DISK_QUANTUM_SAVE"
	fi
}
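
# Illustrative content of the save file after `set_disk_scheduler_quantum 64`
# on a machine with sda and sdb whose previous quantum was 8 (values are
# hypothetical); restore_disk_scheduler_quantum simply replays these lines:
#   echo 8 > /sys/block/sda/queue/iosched/quantum
#   echo 8 > /sys/block/sdb/queue/iosched/quantum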

#
# CPU tuning
#

CPUSPEED_SAVE_FILE="${STORAGE}/cpuspeed${STORAGE_SUFFIX}"
CPUSPEED_ORIG_GOV="${STORAGE}/cpuspeed-governor-%s${STORAGE_SUFFIX}"
CPUSPEED_STARTED="${STORAGE}/cpuspeed-started"
CPUSPEED_CFG="/etc/sysconfig/cpuspeed"
CPUSPEED_INIT="/etc/rc.d/init.d/cpuspeed"
# do not use cpuspeed
CPUSPEED_USE="0"
CPUS="$(ls -d1 /sys/devices/system/cpu/cpu* | sed 's;^.*/;;' |  grep "cpu[0-9]\+")"

# set CPU governor setting and store the old settings
# usage: set_cpu_governor governor
set_cpu_governor() {
	governor=$1

	# always patch the cpuspeed configuration if it exists; if it doesn't
	# exist but its use is enabled, disable its use and print a hint
	if [ -e $CPUSPEED_INIT ]; then
		if [ ! -e $CPUSPEED_SAVE_FILE -a -e $CPUSPEED_CFG ]; then
			cp -p $CPUSPEED_CFG $CPUSPEED_SAVE_FILE
			sed -e 's/^GOVERNOR=.*/GOVERNOR='$governor'/g' $CPUSPEED_SAVE_FILE > $CPUSPEED_CFG
		fi
	else
		if [ "$CPUSPEED_USE" = "1" ]; then
			echo >&2
			echo "Suggestion: install 'cpuspeed' package to get best tuning results." >&2
			echo "Falling back to sysfs control." >&2
			echo >&2
		fi

		CPUSPEED_USE="0"
	fi

	if [ "$CPUSPEED_USE" = "1" ]; then
		service cpuspeed status &> /dev/null
		[ $? -eq 3 ] && touch $CPUSPEED_STARTED || rm -f $CPUSPEED_STARTED

		service cpuspeed restart &> /dev/null

	# direct change using sysfs
	elif [ -e /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor ]; then

		for cpu in $CPUS; do
			gov_file=/sys/devices/system/cpu/$cpu/cpufreq/scaling_governor
			save_file=$(printf $CPUSPEED_ORIG_GOV $cpu)
			rm -f $save_file
			if [ -e $gov_file ]; then
				cat $gov_file > $save_file
				echo $governor > $gov_file
			fi
		done
	fi
}

# re-enable previous CPU governor settings
# usage: restore_cpu_governor
restore_cpu_governor() {
	if [ -e $CPUSPEED_INIT ]; then
		if [ -e $CPUSPEED_SAVE_FILE ]; then
			cp -fp $CPUSPEED_SAVE_FILE $CPUSPEED_CFG
			rm -f $CPUSPEED_SAVE_FILE
		fi

		if [ "$CPUSPEED_USE" = "1" ]; then
			if [ -e $CPUSPEED_STARTED ]; then
				service cpuspeed stop &> /dev/null
			else
				service cpuspeed restart &> /dev/null
			fi
		fi
		if [ -e $CPUSPEED_STARTED ]; then
			rm -f $CPUSPEED_STARTED
		fi
	else
		CPUSPEED_USE="0"
	fi

	if [ "$CPUSPEED_USE" != "1" -a -e /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor ]; then
		for cpu in $CPUS; do
			cpufreq_dir=/sys/devices/system/cpu/$cpu/cpufreq
			save_file=$(printf $CPUSPEED_ORIG_GOV $cpu)

			if [ -e $cpufreq_dir/scaling_governor ]; then
				if [ -e $save_file ]; then
					cat $save_file > $cpufreq_dir/scaling_governor
					rm -f $save_file
				else
					echo userspace > $cpufreq_dir/scaling_governor
					cat $cpufreq_dir/cpuinfo_max_freq > $cpufreq_dir/scaling_setspeed
				fi
			fi
		done
	fi
}
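
# Illustrative usage (hypothetical profile script): a powersave-oriented
# profile could call `set_cpu_governor ondemand` from its start() action and
# `restore_cpu_governor` from stop() to put the previous governor back.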

_cpu_multicore_powersave() {
	value=$1
	[ -e /sys/devices/system/cpu/sched_mc_power_savings ] && echo $value > /sys/devices/system/cpu/sched_mc_power_savings
}

# enable multi core power savings for low wakeup systems
enable_cpu_multicore_powersave() {
	_cpu_multicore_powersave 1
}

disable_cpu_multicore_powersave() {
	_cpu_multicore_powersave 0
}

#
# MEMORY tuning
#

THP_ENABLE="/sys/kernel/mm/transparent_hugepage/enabled"
THP_SAVE="${STORAGE}/thp${STORAGE_SUFFIX}"

[ -e "$THP_ENABLE" ] || THP_ENABLE="/sys/kernel/mm/redhat_transparent_hugepage/enabled"

enable_transparent_hugepages() {
	if [ -e $THP_ENABLE ]; then
		cut -f2 -d'[' $THP_ENABLE  | cut -f1 -d']' > $THP_SAVE
		(echo always > $THP_ENABLE) &> /dev/null
	fi
}

restore_transparent_hugepages() {
	if [ -e $THP_SAVE ]; then
		(echo $(cat $THP_SAVE) > $THP_ENABLE) &> /dev/null
		rm -f $THP_SAVE
	fi
}

#
# WIFI tuning
#

# usage: _wifi_set_power_level level
_wifi_set_power_level() {
	# 0    auto, PM enabled
	# 1-5  least savings and lowest latency - most savings and highest latency
	# 6    disable power savings
	level=$1

	# do not report errors on systems with no wireless
	[ -e /proc/net/wireless ] || return 0

	# apply the settings using iwpriv
	ifaces=$(cat /proc/net/wireless | grep -v '|' | sed 's@^ *\([^:]*\):.*@\1@')
	for iface in $ifaces; do
		iwpriv $iface set_power $level
	done

	# some adapters may rely on sysfs
	for i in /sys/bus/pci/devices/*/power_level; do
		(echo $level > $i) &> /dev/null
	done
}

enable_wifi_powersave() {
	_wifi_set_power_level 5
}

disable_wifi_powersave() {
	_wifi_set_power_level 0
}

#
# BLUETOOTH tuning
#

disable_bluetooth() {
	hciconfig hci0 down >/dev/null 2>&1
	lsmod | grep -q btusb && rmmod btusb
}

enable_bluetooth() {
	modprobe btusb
	hciconfig hci0 up >/dev/null 2>&1
}

#
# USB tuning
#

_usb_autosuspend() {
	value=$1
	for i in /sys/bus/usb/devices/*/power/autosuspend; do echo $value > $i; done &> /dev/null
}

enable_usb_autosuspend() {
	_usb_autosuspend 1
}

disable_usb_autosuspend() {
	_usb_autosuspend 0
}

#
# SOUND CARDS tuning
#

enable_snd_ac97_powersave() {
	save_set_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save Y
}

disable_snd_ac97_powersave() {
	save_set_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save N
}

restore_snd_ac97_powersave() {
	restore_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save $1
}

set_hda_intel_powersave() {
	save_set_sys hda_intel /sys/module/snd_hda_intel/parameters/power_save $1
}

restore_hda_intel_powersave() {
	restore_sys hda_intel /sys/module/snd_hda_intel/parameters/power_save $1
}

#
# VIDEO CARDS tuning
#

# Power savings settings for Radeon
# usage: set_radeon_powersave dynpm | default | low | mid | high
set_radeon_powersave () {
	[ "$1" ] || return
	[ -e /sys/class/drm/card0/device/power_method ] || return
	if [ "$1" = default -o "$1" = auto -o "$1" = low -o "$1" = med -o "$1" = high ]; then
		[ -w /sys/class/drm/card0/device/power_profile ] || return
		save_sys radeon_profile /sys/class/drm/card0/device/power_profile
		save_set_sys radeon_method /sys/class/drm/card0/device/power_method profile
		set_sys /sys/class/drm/card0/device/power_profile "$1"
	elif [ "$1" = dynpm ]; then
		save_sys radeon_profile /sys/class/drm/card0/device/power_profile
		save_set_sys radeon_method /sys/class/drm/card0/device/power_method dynpm
	fi
}

restore_radeon_powersave () {
  restore_sys radeon_method /sys/class/drm/card0/device/power_method profile
  _rrp_method="`get_stored_sys radeon_method`"
  [ -z "$_rrp_method" -o _rrp_method="profile" ] && restore_sys radeon_profile /sys/class/drm/card0/device/power_profile default
}

#
# SOFTWARE tuning
#

RSYSLOG_CFG="/etc/rsyslog.conf"
RSYSLOG_SAVE="${STORAGE}/rsyslog${STORAGE_SUFFIX}"

disable_logs_syncing() {
	cp -p $RSYSLOG_CFG $RSYSLOG_SAVE
	sed -i 's/ \/var\/log/-\/var\/log/' $RSYSLOG_CFG
}

restore_logs_syncing() {
	mv -Z $RSYSLOG_SAVE $RSYSLOG_CFG || mv $RSYSLOG_SAVE $RSYSLOG_CFG
}

irqbalance_banned_cpus_clear() {
    sed -i '/^IRQBALANCE_BANNED_CPUS=/d' /etc/sysconfig/irqbalance || return
    if [ ${1:-restart} = restart ]; then
        systemctl try-restart irqbalance
    fi
}

irqbalance_banned_cpus_setup() {
    irqbalance_banned_cpus_clear norestart
    if [ -n "$1" ]; then
        echo "IRQBALANCE_BANNED_CPUS=$1" >> /etc/sysconfig/irqbalance
    fi
    systemctl try-restart irqbalance
}
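
# Illustrative usage (hypothetical mask): keep irqbalance away from CPUs 0-3
# (hex bitmask "f") and restart it, then later remove the restriction:
#   irqbalance_banned_cpus_setup "f"
#   irqbalance_banned_cpus_clear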

#
# HARDWARE SPECIFIC tuning
#

# Asus EEE with Intel Atom
_eee_fsb_control() {
	value=$1
	if [ -e /sys/devices/platform/eeepc/she ]; then
		echo $value > /sys/devices/platform/eeepc/she
	elif [ -e /sys/devices/platform/eeepc/cpufv ]; then
		echo $value > /sys/devices/platform/eeepc/cpufv
	elif [ -e /sys/devices/platform/eeepc-wmi/cpufv ]; then
		echo $value > /sys/devices/platform/eeepc-wmi/cpufv
	fi
}

eee_set_reduced_fsb() {
	_eee_fsb_control 2
}

eee_set_normal_fsb() {
	_eee_fsb_control 1
}

#
# modprobe configuration handling
#

kvm_modprobe_file=/etc/modprobe.d/kvm.rt.tuned.conf

teardown_kvm_mod_low_latency()
{
	rm -f $kvm_modprobe_file
}

setup_kvm_mod_low_latency()
{
	local HAS_KPS=""
	local HAS_NX_HP=""
	local HAS_PLE_GAP=""
	local WANTS_KPS=""
	local WANTS_NX_HP=""
	local WANTS_PLE_GAP=""

	modinfo -p kvm | grep -q kvmclock_periodic_sync && HAS_KPS=1
	modinfo -p kvm | grep -q nx_huge_pages && HAS_NX_HP=1
	modinfo -p kvm_intel | grep -q ple_gap && HAS_PLE_GAP=1
	grep -qs kvmclock_periodic_sync "$kvm_modprobe_file" && WANTS_KPS=1
	grep -qs nx_huge_pages "$kvm_modprobe_file" && WANTS_NX_HP=1
	grep -qs ple_gap "$kvm_modprobe_file" && WANTS_PLE_GAP=1

	if [ "$HAS_KPS" != "$WANTS_KPS" -o "$HAS_PLE_GAP" != "$WANTS_PLE_GAP" -o \
	     "$HAS_NX_HP" != "$WANTS_NX_HP" ]; then
		teardown_kvm_mod_low_latency
		[ "$HAS_KPS" ] && echo "options kvm kvmclock_periodic_sync=0" > $kvm_modprobe_file
		[ "$HAS_NX_HP" ] && echo "options kvm nx_huge_pages=0" >> $kvm_modprobe_file
		[ "$HAS_PLE_GAP" ] && echo "options kvm_intel ple_gap=0" >> $kvm_modprobe_file
	fi
	return 0
}
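
# Illustrative result: on a host whose kvm/kvm_intel modules expose all three
# parameters, the generated /etc/modprobe.d/kvm.rt.tuned.conf would contain:
#   options kvm kvmclock_periodic_sync=0
#   options kvm nx_huge_pages=0
#   options kvm_intel ple_gap=0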

#
# KSM
#

KSM_SERVICES="ksm ksmtuned"
KSM_RUN_PATH=/sys/kernel/mm/ksm/run
KSM_MASK_FILE="${STORAGE_PERSISTENT}/ksm-masked"

disable_ksm()
{
	if [ ! -f $KSM_MASK_FILE ]; then
		# Always create $KSM_MASK_FILE, since we don't want to
		# run any systemctl commands during boot
		if ! touch $KSM_MASK_FILE; then
			die "failed to create $KSM_MASK_FILE"
		fi
		# Do not run any systemctl commands if $KSM_SERVICES units do not exist
		systemctl cat -- $KSM_SERVICES &> /dev/null || return 0
		systemctl --now --quiet mask $KSM_SERVICES
		# Unmerge all shared pages
		test -f $KSM_RUN_PATH && echo 2 > $KSM_RUN_PATH
	fi
}

# Should only be called when full_rollback == true
enable_ksm()
{
	if [ -f $KSM_MASK_FILE ]; then
		# Do not run any systemctl commands if $KSM_SERVICES units do not exist
		systemctl cat -- $KSM_SERVICES &> /dev/null || return 0
		if systemctl --quiet unmask $KSM_SERVICES; then
			rm -f $KSM_MASK_FILE
		fi
	fi
}

die() {
	echo "$@" >&2
	exit 1
}

#
# ACTION PROCESSING
#

error_not_implemented() {
	echo "tuned: script function '$1' is not implemented." >&2
}

# default action implementations, used if not provided by the profile script:
#
# * start    must be implemented by the profile script
# * stop     must be implemented by the profile script

start() {
	error_not_implemented start
	return 16
}

stop() {
	error_not_implemented stop
	return 16
}

#
# main processing
#

process() {
	ARG="$1"
	shift
	case "$ARG" in
	start)
		start "$@"
		RETVAL=$?
		;;
	stop)
		stop "$@"
		RETVAL=$?
		;;
	verify)
		if declare -f verify &> /dev/null;
		then
			verify "$@"
		else
			:
		fi
		RETVAL=$?
		;;
	*)
		echo $"Usage: $0 {start|stop|verify}"
		RETVAL=2
		;;
	esac

	exit $RETVAL
}

==> intel-sst/tuned.conf <==
[main]
summary=Configure for Intel Speed Select Base Frequency

[bootloader]
cmdline_intel_sst=-intel_pstate=disable

==> latency-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption

[cpu]
force_latency=cstate.id_no_zero:1|3
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[acpi]
platform_profile=performance

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio=10

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio=3

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

[scheduler]
runtime=0
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 3000000
sched_wakeup_granularity_ns = 4000000

# The total time the scheduler will consider a migrated process
# "cache hot" and thus less likely to be re-migrated
# (system default is 500000, i.e. 0.5 ms)
sched_migration_cost_ns = 5000000

==> throughput-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Broadly applicable tuning that provides excellent performance across a variety of common server workloads

[variables]
thunderx_cpuinfo_regex=CPU part\s+:\s+(0x0?516)|(0x0?af)|(0x0?a[0-3])|(0x0?b8)\b
amd_cpuinfo_regex=model name\s+:.*\bAMD\b

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
energy_performance_preference=performance

[acpi]
platform_profile=performance

# Marvell ThunderX
[vm.thunderx]
type=vm
uname_regex=aarch64
cpuinfo_regex=${thunderx_cpuinfo_regex}
transparent_hugepages=never

[disk]
# The default unit for readahead is KiB.  This can be adjusted to sectors
# by specifying the relevant suffix, e.g. (readahead => 8192 s). There must
# be at least one space between the number and suffix (if a suffix is specified).
readahead=>4096

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value.  When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., for a box with 32 cpus the default of 32768 is reasonable; for 64 cpus,
# 65536; for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

# The kernel default of 128 dates back more than twenty years;
# kernel 5.4 increased it to 4096, so do not set it lower than 2048
# on older kernels.
net.core.somaxconn=>2048

[scheduler]
runtime=0
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
sched_wakeup_granularity_ns = 15000000

# Marvell ThunderX
[sysctl.thunderx]
type=sysctl
uname_regex=aarch64
cpuinfo_regex=${thunderx_cpuinfo_regex}
kernel.numa_balancing=0

# AMD
[scheduler.amd]
type=scheduler
uname_regex=x86_64
cpuinfo_regex=${amd_cpuinfo_regex}
runtime=0
sched_migration_cost_ns=5000000

==> powersave/script.sh <==
#!/bin/bash

. /usr/lib/tuned/functions

start() {
    [ "$USB_AUTOSUSPEND" = 1 ] && enable_usb_autosuspend
    enable_wifi_powersave
    return 0
}

stop() {
    [ "$USB_AUTOSUSPEND" = 1 ] && disable_usb_autosuspend
    disable_wifi_powersave
    return 0
}

process $@

==> powersave/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for low power consumption

[cpu]
governor=ondemand|powersave
energy_perf_bias=powersave|power
energy_performance_preference=power

[acpi]
platform_profile=low-power|quiet

[eeepc_she]

[vm]

[audio]
timeout=10

[video]
radeon_powersave=dpm-battery, auto

[disk]
# Comma separated list of devices, all devices if commented out.
# devices=sda

[net]
# Comma separated list of devices, all devices if commented out.
# devices=eth0

[scsi_host]
alpm=min_power

[sysctl]
vm.laptop_mode=5
vm.dirty_writeback_centisecs=1500
kernel.nmi_watchdog=0

[script]
script=${i:PROFILE_DIR}/script.sh

==> virtual-guest/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for running inside a virtual guest
include=throughput-performance

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 30

# Filesystem I/O is usually much more efficient than swapping, so try to keep
# swapping low.  It's usually safe to go even lower than this on systems with
# server-grade storage.
vm.swappiness = 30

==> network-throughput/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for streaming network throughput, generally only necessary on older CPUs or 40G+ networks
include=throughput-performance

[sysctl]
# Increase kernel buffer size maximums.  Currently this seems only necessary at 40Gb speeds.
#
# The buffer tuning values below do not account for any potential hugepage allocation.
# Ensure that you do not oversubscribe system memory.
net.ipv4.tcp_rmem="4096 131072 16777216"
net.ipv4.tcp_wmem="4096 16384 16777216"

==> accelerator-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Throughput performance based tuning with disabled higher latency STOP states

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
force_latency=99

[acpi]
platform_profile=performance

[disk]
readahead=>4096

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value.  When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., for a box with 32 cpus the default of 32768 is reasonable; for 64 cpus,
# 65536; for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

[scheduler]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
sched_wakeup_granularity_ns = 15000000

==> network-latency/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance
include=latency-performance

[vm]
transparent_hugepages=never

[sysctl]
net.core.busy_read=50
net.core.busy_poll=50
net.ipv4.tcp_fastopen=3
kernel.numa_balancing=0
kernel.hung_task_timeout_secs = 600
kernel.nmi_watchdog = 0
vm.stat_interval = 10
kernel.timer_migration = 0

[bootloader]
cmdline_network_latency=skew_tick=1 tsc=reliable rcupdate.rcu_normal_after_boot=1

[rtentsk]

==> optimize-serial-console/tuned.conf <==
#
# tuned configuration
#
# This tuned configuration optimizes for serial console performance at the
# expense of reduced debug information to the console.

[main]
summary=Optimize for serial console use.

[sysctl]
kernel.printk="4 4 1 7"

==> desktop/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for the desktop use-case
include=balanced

[sysctl]
kernel.sched_autogroup_enabled=1