Diffstat (limited to 'linux-basics')
-rw-r--r--  linux-basics/linux-processes/pres_linux-processes_en.tex | 98
1 file changed, 53 insertions, 45 deletions
diff --git a/linux-basics/linux-processes/pres_linux-processes_en.tex b/linux-basics/linux-processes/pres_linux-processes_en.tex
index 8d47f98..56dd760 100644
--- a/linux-basics/linux-processes/pres_linux-processes_en.tex
+++ b/linux-basics/linux-processes/pres_linux-processes_en.tex
@@ -232,6 +232,15 @@ working directory and so on...)
\end{frame}
\begin{frame}[fragile]
+\frametitle{Setting the CPU Affinity}
+The CPU affinity of a process can be set with the taskset command:
+\begin{verbatim}
+taskset [options] mask command [arg]...
+taskset [options] -p [mask] pid
+\end{verbatim}
+\end{frame}
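+
+\begin{frame}[fragile]
+\frametitle{Setting the CPU Affinity}
+Some examples (1234 stands for the PID of a running process):
+\begin{verbatim}
+# Run md5sum pinned to CPU0 (mask 0x1)
+taskset 0x1 md5sum /dev/zero
+# Query the current affinity of PID 1234
+taskset -p 1234
+# Pin PID 1234 to CPU0 and CPU1 (mask 0x3)
+taskset -p 0x3 1234
+\end{verbatim}
+\end{frame}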
+
+\begin{frame}[fragile]
\frametitle{CPU affinity}
\begin{lstlisting}
#define _GNU_SOURCE
@@ -253,90 +262,83 @@ sched_setaffinity(pid, CPU_SETSIZE, &set);
\frametitle{SMP and interrupt routing}
\begin{verbatim}
$ ls /proc/irq/
-0 1 10 11 12 13 14 15 17 18 19 2 3 4 5 6 7 8 9 default_smp_affinity
+0 1 10 11 12 13 14 15 17 18 19 ... default_smp_affinity
$ cat /proc/irq/default_smp_affinity
3
-
-# Set default IRQ affinity to CPU0
-$ echo 1 > /proc/irq/default_smp_affinity
-
-# Set affinity for IRQ19 to CPU1
-$ echo 2 > /proc/irq/19/smp_affinity
+\end{verbatim}
+Set the default IRQ affinity to CPU0:
+\begin{verbatim}
+echo 1 > /proc/irq/default_smp_affinity
+\end{verbatim}
+Set the affinity for IRQ19 to CPU1:
+\begin{verbatim}
+echo 2 > /proc/irq/19/smp_affinity
\end{verbatim}
\end{frame}
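+\begin{frame}[fragile]
+\frametitle{SMP and interrupt routing}
+Verify the setting and watch where the interrupts land
+(IRQ19 is just an example, pick one that fires on your machine):
+\begin{verbatim}
+$ cat /proc/irq/19/smp_affinity
+$ grep '19:' /proc/interrupts
+\end{verbatim}
+\end{frame}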
-\subsection{Control Groups: CGROUPS}
+\subsection{Control Groups: cgroups}
\begin{frame}
-\frametitle{CGROUPS}
-CGROUPS are a mechanism for partitioning and aggregating tasks
+\frametitle{What are cgroups}
+Control groups (cgroups) are a mechanism for partitioning and aggregating tasks
into hierarchical groups. Each group can be constrained in several
ways, e.g. in the CPU time it gets or the set of CPUs it may run on.
\end{frame}
\begin{frame}[fragile]
-\frametitle{Using CGROUPS}
+\frametitle{Setting up cgroups}
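+Create the mount points if they do not exist yet (this assumes a
+tmpfs is already mounted on /sys/fs/cgroup):
+\begin{verbatim}
+mkdir -p /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuset
+\end{verbatim}
+Mount the cpu, cpuacct and cpuset controllers: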
\begin{verbatim}
-$ mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
+mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
-$ mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset
+mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
-\frametitle{CGROUPS: Creating new groups}
+\frametitle{Setting up cgroups}
+Create the groups group\_nice and group\_cpu0only:
\begin{verbatim}
-$ mkdir /sys/fs/cgroup/cpu,cpuacct/group_nice
+mkdir /sys/fs/cgroup/cpu,cpuacct/group_nice
-$ mkdir /sys/fs/cgroup/cpuset/group_cpu0only
+mkdir /sys/fs/cgroup/cpuset/group_cpu0only
\end{verbatim}
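+The kernel populates each new directory with control files, e.g.:
+\begin{verbatim}
+$ ls /sys/fs/cgroup/cpuset/group_cpu0only
+cpuset.cpus  cpuset.mems  tasks  ...
+\end{verbatim}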
\end{frame}
\begin{frame}[fragile]
-\frametitle{CGROUPS: Setting up groups}
+\frametitle{Setting up cgroups}
+Limit the CPU time of group\_nice (cpu.shares is a relative
+weight; the default is 1024):
\begin{verbatim}
-# Limit CPU time
-$ echo 100 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.shares
+echo 100 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.shares
\end{verbatim}
-\end{frame}
-
-\begin{frame}[fragile]
-\frametitle{CGROUPS: Setting up groups}
+Restrict group\_cpu0only to CPU0 and memory node 0:
\begin{verbatim}
-# Set allowed CPUs
-$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.cpus
+echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.cpus
-# Set allowed memory nodes
-$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.mems
+echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.mems
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
-\frametitle{CGROUPS: Setting up groups}
-Open two shells. In shell 1 do:
+\frametitle{Testing cgroups}
+Open two shells. In shell 1, add the current shell to both
+group\_cpu0only and group\_nice, then burn the CPU:
\begin{verbatim}
-# Add the current task to the cpu0only group
-$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
+echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
-# Add the current task to the nice group
-$ echo $$ > /sys/fs/cgroup/cpu,cpuacct/group_nice/tasks
+echo $$ > /sys/fs/cgroup/cpu,cpuacct/group_nice/tasks
-# Create some noise
-$ while true; do echo -n; done
+while true; do echo -n; done
\end{verbatim}
-In shell 2:
+In shell 2, add the current shell only to group\_cpu0only, then
+burn the CPU:
\begin{verbatim}
-# Add the current task to the cpu0only group
-$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
+echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks
-# Create some noise
-$ while true; do echo -n; done
+while true; do echo -n; done
\end{verbatim}
\end{frame}
\begin{frame}[fragile]
-\frametitle{CGROUPS: Setting up groups}
+\frametitle{Testing cgroups}
Now check the CPU usage with top. Both loops run on CPU0, and the
loop in group\_nice should get noticeably less CPU time:
\begin{verbatim}
$ top
@@ -349,12 +351,18 @@ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
\end{frame}
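+\begin{frame}[fragile]
+\frametitle{Testing cgroups}
+ps can show which CPU a task last ran on (the PSR column); 1234
+stands for a PID taken from the group's tasks file:
+\begin{verbatim}
+$ ps -o pid,psr,comm -p 1234
+\end{verbatim}
+\end{frame}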
\begin{frame}[fragile]
-\frametitle{CGROUPS: Who is using CGROUPS?}
-systemd uses CGROUPS! Boot with systemd and check SysFS:
+\frametitle{CPU shares vs. quota}
+What is the difference between cpu.shares and cpu.cfs\_quota\_us?
\begin{verbatim}
-$ cd /sys/fs/cgroup/systemd
+echo 10000 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.cfs_quota_us
+\end{verbatim}
+Shares are relative weights that only matter while CPUs are
+contended; the quota is an absolute cap: 10000 means 10\,ms of CPU
+time per (default) 100\,ms period, i.e. 10\% of one CPU, even on an
+otherwise idle system.
+\end{frame}
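+
+\begin{frame}[fragile]
+\frametitle{Cleaning up}
+A cgroup can only be removed when no tasks are left in it, so move
+the shells back to the root groups before removing the directories:
+\begin{verbatim}
+echo $$ > /sys/fs/cgroup/cpuset/tasks
+echo $$ > /sys/fs/cgroup/cpu,cpuacct/tasks
+rmdir /sys/fs/cgroup/cpuset/group_cpu0only
+rmdir /sys/fs/cgroup/cpu,cpuacct/group_nice
+\end{verbatim}
+\end{frame}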
-$ ls
+\begin{frame}[fragile]
+\frametitle{Who is using cgroups?}
+systemd uses cgroups! Boot with systemd and check sysfs:
+\begin{verbatim}
+$ ls /sys/fs/cgroup/systemd
... system.slice ... user.slice
$ ls /sys/fs/cgroup/systemd/system.slice