diff options
| author | John Ogness <john.ogness@linutronix.de> | 2018-01-30 22:35:20 -0600 |
|---|---|---|
| committer | John Ogness <john.ogness@linutronix.de> | 2018-02-20 14:58:34 +0100 |
| commit | 2bcff18bcc8cdfa92b5384f3b14b79ea9e74c9bf (patch) | |
| tree | 706d7edb25db3ec4c6ff4316a154f3f00befe3d5 | |
| parent | 70e4b1c68e9ae9b0735c62939f7070c6bff23f7d (diff) | |
update/cleanup earlyprintk and cgroups
Update slides for latest kernels. Clean up slide contents. Add slides
about manually setting the CPU affinity.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
| -rw-r--r-- | kernel-devel/kernel-debugging/pres_kernel-debugging_en.tex | 2 | ||||
| -rw-r--r-- | linux-basics/linux-processes/pres_linux-processes_en.tex | 98 |
2 files changed, 54 insertions, 46 deletions
diff --git a/kernel-devel/kernel-debugging/pres_kernel-debugging_en.tex b/kernel-devel/kernel-debugging/pres_kernel-debugging_en.tex index 82dc25b..240eb5a 100644 --- a/kernel-devel/kernel-debugging/pres_kernel-debugging_en.tex +++ b/kernel-devel/kernel-debugging/pres_kernel-debugging_en.tex @@ -216,7 +216,7 @@ Kernel hacking ---> [*] Early printk \end{verbatim} \begin{verbatim} -earlyprintk=serial,ttyAMA0,115200,keep \ +earlyprintk \ console=ttyAMA0,115200 \end{verbatim} \end{frame} diff --git a/linux-basics/linux-processes/pres_linux-processes_en.tex b/linux-basics/linux-processes/pres_linux-processes_en.tex index 8d47f98..56dd760 100644 --- a/linux-basics/linux-processes/pres_linux-processes_en.tex +++ b/linux-basics/linux-processes/pres_linux-processes_en.tex @@ -232,6 +232,15 @@ working directory and so on...) \end{frame} \begin{frame}[fragile] +\frametitle{Setting the CPU Affinity} +The cpu affinity can be set using the taskset command: +\begin{verbatim} +taskset [options] mask command [arg]... +taskset [options] -p [mask] pid +\end{verbatim} +\end{frame} + +\begin{frame}[fragile] \frametitle{CPU affinity} \begin{lstlisting} #define _GNU_SOURCE @@ -253,90 +262,83 @@ sched_setaffinity(pid, CPU_SETSIZE, &set); \frametitle{SMP and interrupt routing} \begin{verbatim} $ ls /proc/irq/ -0 1 10 11 12 13 14 15 17 18 19 2 3 4 5 6 7 8 9 default_smp_affinity +0 1 10 11 12 13 14 15 17 18 19 ... 
default_smp_affinity $ cat /proc/irq/default_smp_affinity 3 - -# Set default IRQ affinity to CPU0 -$ echo 1 > /proc/irq/default_smp_affinity - -# Set affinity for IRQ19 to CPU1 -$ echo 2 > /proc/irq/19/smp_affinity +\end{verbatim} +Set default IRQ affinity to CPU0 +\begin{verbatim} +echo 1 > /proc/irq/default_smp_affinity +\end{verbatim} +Set affinity for IRQ19 to CPU1 +\begin{verbatim} +echo 2 > /proc/irq/19/smp_affinity \end{verbatim} \end{frame} -\subsection{Control Groups: CGROUPS} +\subsection{Control Groups: cgroups} \begin{frame} -\frametitle{CGROUPS} -CGROUPS are a mechanism for partitioning and aggregating tasks +\frametitle{What are cgroups} +Control groups (cgroups) are a mechanism for partitioning and aggregating tasks into hierarchical groups. Each group has several options like CPU time, a specific set of CPUs, ... \end{frame} \begin{frame}[fragile] -\frametitle{Using CGROUPS} +\frametitle{Setting up cgroups} \begin{verbatim} -$ mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct +mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct -$ mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset +mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset \end{verbatim} \end{frame} \begin{frame}[fragile] -\frametitle{CGROUPS: Creating new groups} +\frametitle{Setting up cgroups} +Create groups nice and cpu0only \begin{verbatim} -$ mkdir /sys/fs/cgroup/cpu,cpuacct/group_nice +mkdir /sys/fs/cgroup/cpu,cpuacct/group_nice -$ mkdir /sys/fs/cgroup/cpuset/group_cpu0only +mkdir /sys/fs/cgroup/cpuset/group_cpu0only \end{verbatim} \end{frame} \begin{frame}[fragile] -\frametitle{CGROUPS: Setting up groups} +\frametitle{Setting up cgroups} +Limit CPU time (relative to other tasks) for the nice group \begin{verbatim} -# Limit CPU time -$ echo 100 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.shares +echo 100 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.shares \end{verbatim} -\end{frame} - -\begin{frame}[fragile] -\frametitle{CGROUPS: Setting up groups} +Set 
allowed CPUs and memory nodes for the cpu0only group \begin{verbatim} -# Set allowed CPUs -$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.cpus +echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.cpus -# Set allowed memory nodes -$ echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.mems +echo 0 > /sys/fs/cgroup/cpuset/group_cpu0only/cpuset.mems \end{verbatim} \end{frame} \begin{frame}[fragile] -\frametitle{CGROUPS: Setting up groups} -Open two shells. In shell 1 do: +\frametitle{Testing cgroups} +Open two shells. In shell 1 add the current task to the cpu0only and nice groups and burn the cpu: \begin{verbatim} -# Add the current task to the cpu0only group -$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks +echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks -# Add the current task to the nice group -$ echo $$ > /sys/fs/cgroup/cpu,cpuacct/group_nice/tasks +echo $$ > /sys/fs/cgroup/cpu,cpuacct/group_nice/tasks -# Create some noise -$ while true; do echo -n; done +while true; do echo -n; done \end{verbatim} -In shell 2: +In shell 2 add the current task to the cpu0only group and burn the cpu: \begin{verbatim} -# Add the current task to the cpu0only group -$ echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks +echo $$ > /sys/fs/cgroup/cpuset/group_cpu0only/tasks -# Create some noise -$ while true; do echo -n; done +while true; do echo -n; done \end{verbatim} \end{frame} \begin{frame}[fragile] -\frametitle{CGROUPS: Setting up groups} +\frametitle{Testing cgroups} Now check the CPU usage with top \begin{verbatim} $ top @@ -349,12 +351,18 @@ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND \end{frame} \begin{frame}[fragile] -\frametitle{CGROUPS: Who is using CGROUPS?} -systemd uses CGROUPS! Boot with systemd and check SysFS: +\frametitle{CPU shares vs. quota} +What is the difference between cpu.shares and cpu.cfs\_quota\_us? 
\begin{verbatim} -$ cd /sys/fs/cgroup/systemd +echo 10000 > /sys/fs/cgroup/cpu,cpuacct/group_nice/cpu.cfs_quota_us +\end{verbatim} +\end{frame} -$ ls +\begin{frame}[fragile] +\frametitle{Who is using cgroups?} +systemd uses cgroups! Boot with systemd and check sysfs: +\begin{verbatim} +$ ls /sys/fs/cgroup/systemd ... system.slice ... user.slice $ ls /sys/fs/cgroup/systemd/system.slice |
