summaryrefslogtreecommitdiff
path: root/linux-basics/linux-processes/pres_linux-processes_en.tex
diff options
context:
space:
mode:
Diffstat (limited to 'linux-basics/linux-processes/pres_linux-processes_en.tex')
-rw-r--r--linux-basics/linux-processes/pres_linux-processes_en.tex70
1 file changed, 27 insertions, 43 deletions
diff --git a/linux-basics/linux-processes/pres_linux-processes_en.tex b/linux-basics/linux-processes/pres_linux-processes_en.tex
index a23a881..8d47f98 100644
--- a/linux-basics/linux-processes/pres_linux-processes_en.tex
+++ b/linux-basics/linux-processes/pres_linux-processes_en.tex
@@ -137,7 +137,7 @@ renice [-n] prio -u|--user user [.. user]
\frametitle{SCHED\_IDLE and SCHED\_BATCH}
\begin{itemize}
\item SCHED\_BATCH: The scheduler will always assume the process to be CPU
-intensive and therefor will apply a penalty when calculating the dynamic
+intensive and therefore will apply a penalty when calculating the dynamic
priority.
\item SCHED\_IDLE: For very low prio processes. Even the nice value is
ignored. The resulting priority will be \textbf{below} SCHED\_OTHER and SCHED\_BATCH
@@ -261,8 +261,8 @@ $ cat /proc/irq/default_smp_affinity
# Set default IRQ affinity to CPU0
$ echo 1 > /proc/irq/default_smp_affinity
-# Set affinity for specific IRQ to CPU1
-$ echo 2 > /proc/irq/
+# Set affinity for IRQ19 to CPU1
+$ echo 2 > /proc/irq/19/smp_affinity
\end{verbatim}
\end{frame}
@@ -278,50 +278,37 @@ CPU time, a specific set of CPUs, ...
\begin{frame}[fragile]
\frametitle{Using CGROUPS}
\begin{verbatim}
-$ mount -t cgroup -o defaults none /sys/fs/cgroup
+$ mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
+
+$ mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
\frametitle{CGROUPS: Creating new groups}
\begin{verbatim}
-$ cd /sys/fs/cgroup
-
-$ mkdir group_01
+$ mkdir /sys/fs/cgroup/cpu,cpuacct/group_nice
-$ mkdir group_02
+$ mkdir /sys/fs/cgroup/cpuset/group_cpu0only
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
\frametitle{CGROUPS: Setting up groups}
\begin{verbatim}
-$ cd group_01
-
-# Set allowed CPUs
-$ echo 0 > cpuset.cpus
-
-# Set allowed memory nodes
-$ echo 0 > cpuset.mems
-
# Limit CPU time
-$ echo 100 > cpu.shares
+$ echo 100 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.shares
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
\frametitle{CGROUPS: Setting up groups}
\begin{verbatim}
-$ cd group_02
-
# Set allowed CPUs
-$ echo 0 > cpuset.cpus
+$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.cpus
# Set allowed memory nodes
-$ echo 0 > cpuset.mems
-
-# Limit CPU time
-$ echo 900 > cpu.shares
+$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.mems
\end{verbatim}
\end{frame}
@@ -329,23 +316,22 @@ $ echo 900 > cpu.shares
\frametitle{CGROUPS: Setting up groups}
Open two shells. In shell 1 do:
\begin{verbatim}
-$ cd group_01
+# Add the current task to the cpu0only group
+$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
-# Add the current task to the group
-$ echo $$ > tasks
+# Add the current task to the nice group
+$ echo $$ > /sys/fs/cgroup/cpu,cpuacct/group_nice/tasks
# Create some noise
-$ while [ 1 ]; do echo bla > /dev/null; done;
+$ while true; do echo -n; done
\end{verbatim}
In shell 2:
\begin{verbatim}
-$ cd group_02
-
-# Add the current task to the group
-$ echo $$ > tasks
+# Add the current task to the cpu0only group
+$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
# Create some noise
-$ while [ 1 ]; do echo bla > /dev/null; done;
+$ while true; do echo -n; done
\end{verbatim}
\end{frame}
@@ -355,26 +341,24 @@ Now check the CPU usage with top
\begin{verbatim}
$ top
[...]
-4285 root 20 0 4708 1864 1524 R 89 0.2 8:59.68 bash
-4152 root 20 0 4720 1904 1548 R 10 0.2 4:02.00 bash
+PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+871 root 20 0 22588 3480 3036 R 90.7 0.2 0:56.10 bash
+872 root 20 0 22588 3484 3044 R 9.0 0.2 0:05.45 bash
[...]
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
\frametitle{CGROUPS: Who is using CGROUPS?}
-SystemD uses CGROUPS! Boot with SystemD and check SysFS:
+systemd uses CGROUPS! Boot with systemd and check SysFS:
\begin{verbatim}
-$ cd /sys/kernel/cgroup
+$ cd /sys/fs/cgroup/systemd
$ ls
-blkio cpu cpuacct cpu,cpuacct cpuset devices freezer net_cls systemd
+... system.slice ... user.slice
-$ ls -1 /sys/fs/cgroup/cpu,cpuacct/system/
-cron.service
-dbus.service
-exim4.service
-[...]
+$ ls /sys/fs/cgroup/systemd/system.slice
+... networking.service ... systemd-user-sessions.service ...
\end{verbatim}
\end{frame}