\UseRawInputEncoding
%\documentclass[hyperref={pdfpagelabels=false}]{beamer}
\documentclass[hyperref={pdfpagelabels=false},aspectratio=169]{beamer}
% The hyperref option pdfpagelabels=false suppresses the warning:
% Package hyperref Warning: Option `pdfpagelabels' is turned off
% (hyperref) because \thepage is undefined.
% Hyperref stopped early
%

\usepackage{lmodern}
% The lmodern package avoids the following warnings:
% LaTeX Font Warning: Font shape `OT1/cmss/m/n' in size <4> not available
% (Font) size <5> substituted on input line 22.
% LaTeX Font Warning: Size substitutions with differences
% (Font) up to 1.0pt have occurred.
%

% If \title{\ldots} \author{\ldots} only appear after \begin{document},
% the following warning is raised:
% Package hyperref Warning: Option `pdfauthor' has already been used,
% (hyperref) ...
% They are therefore placed before \begin{document}.
\title[Anomaly Detection and AutoML]{Let's go deeper!}
\author{Simon Kluettermann}
\date{\today}

\institute{LS9, TU Dortmund}

% This prevents the navigation bar from being displayed.
\setbeamertemplate{navigation symbols}{}
% Additionally, \usepackage{beamerthemeshadow} is included.
\usepackage{beamerthemeshadow}

\hypersetup{pdfstartview={Fit}} % fits the presentation to the window when first displayed

\usepackage{appendixnumberbeamer}
\usepackage{listings}

\usetheme{CambridgeUS}
\usepackage{ngerman}
\usecolortheme{dolphin}

% \beamersetuncovermixins{\opaqueness<1>{25}}{\opaqueness<2->{15}}
% makes elements that are still to come (on later overlays)
% appear only faintly.
%\beamersetuncovermixins{\opaqueness<1>{25}}{\opaqueness<2->{15}}%here disabled
% Also works for tables when teTeX is used\ldots
\renewcommand{\figurename}{}
\setbeamertemplate{footline}
{
\leavevmode%
\hbox{%
\begin{beamercolorbox}[wd=.4\paperwidth,ht=2.25ex,dp=1ex,center]{author in head/foot}%
\usebeamerfont{author in head/foot}\insertshorttitle
\end{beamercolorbox}%
\begin{beamercolorbox}[wd=.25\paperwidth,ht=2.25ex,dp=1ex,center]{title in head/foot}%
\usebeamerfont{title in head/foot}\insertsection
\end{beamercolorbox}%
\begin{beamercolorbox}[wd=.3499\paperwidth,ht=2.25ex,dp=1ex,right]{date in head/foot}%
\usebeamerfont{date in head/foot}\insertshortdate{}\hspace*{2em}
\hyperlink{toc}{\insertframenumber{} / \inserttotalframenumber\hspace*{2ex}}
\end{beamercolorbox}}%
\vskip0pt%
}

\usepackage[absolute,overlay]{textpos}
\usepackage{graphicx}

\newcommand{\source}[1]{\begin{textblock*}{9cm}(0.1cm,8.9cm)
\begin{beamercolorbox}[ht=0.5cm,left]{framesource}
\usebeamerfont{framesource}\usebeamercolor[fg!66]{framesource} Source: {#1}
\end{beamercolorbox}
\end{textblock*}}

\begin{document}
%from file ../case3/data/000.txt
\begin{frame}[label=]
\frametitle{}
\begin{titlepage}

\centering
{\huge\bfseries \par}
\vspace{2cm}
{\LARGE\itshape Simon Kluettermann\par}
\vspace{1.5cm}
{\scshape\Large Master's Thesis in Physics\par}
\vspace{0.2cm}
{\Large submitted to the \par}
\vspace{0.2cm}
{\scshape\Large Faculty of Mathematics, Computer Science and Natural Sciences \par}
\vspace{0.2cm}
{\Large \par}
\vspace{0.2cm}
{\scshape\Large RWTH Aachen University}
\vspace{1cm}

\vfill
{\scshape\Large Department of Physics\par}
\vspace{0.2cm}
{\scshape\Large Institute for Theoretical Particle Physics and Cosmology\par}
\vspace{0.2cm}
{ \Large\par}
\vspace{0.2cm}
{\Large First Referee: Prof. Dr. Michael Kraemer \par}
{\Large Second Referee: Prof. Dr. Felix Kahlhoefer}

\vfill

% Bottom of the page
{\large November 2020 \par}
\end{titlepage}
\pagenumbering{roman}
\thispagestyle{empty}
\null
\newpage
\setcounter{page}{1}
\pagenumbering{arabic}
\end{frame}
%from file ../case3/data/001reminder.txt
\begin{frame}[label=reminder]
\frametitle{reminder}
\begin{itemize}

\item No Case Study next week

\begin{itemize}

\item neither Tuesday (29.11) nor Thursday (01.12)

\item if you need help: just write me an email!

\end{itemize}

\item In two weeks: Case Study switched

\begin{itemize}

\item Q+A Tuesday (6.12, 14:00) online only

\item Case Study Meeting Thursday (08.12, 14:00-16:00), in OH12 Room 3.032

\end{itemize}

\end{itemize}
\end{frame}
%from file ../case3/data/002Big Picture.txt
\begin{frame}[label=Big Picture]
\frametitle{Big Picture}
\begin{itemize}

\item Goal for this case study: Have better hyperparameters than pyod!

\item So each of you gets assigned two algorithms!

\begin{itemize}

\item One fairly simple one before

\item One more complicated one today

\end{itemize}

\item Try to find the best possible hyperparameters for your algorithms

\begin{itemize}

\item Try to be clever (for example, PCA: $n_\mathrm{components}<n_\mathrm{features}$. Maybe keep $\frac{n_\mathrm{components}}{n_\mathrm{features}}$ constant?)

\end{itemize}

\item Afterwards

\begin{itemize}

\item Write down your findings as a simple function (given data, what are my best hyperparameters? see the sketch on the next slide)

\item Write down your findings in a report (together, double column, max. 6 pages per student, plus a comparison of the algorithms to each other)

\item One final presentation together in front of my colleagues. About 10 min per student.

\end{itemize}

\end{itemize}
\end{frame}
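
\begin{frame}[fragile]
\frametitle{Big Picture: sketch}
A minimal sketch of such a function, assuming a PCA-style algorithm; the function name and the 0.5 ratio are illustrative, not tuned values:
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import numpy as np

def best_pca_params(X):
    """Given a data matrix X, suggest PCA hyperparameters."""
    n_samples, n_features = X.shape
    # heuristic from the slide: keep n_components/n_features roughly constant
    return {"n_components": max(1, int(0.5 * n_features))}

print(best_pca_params(np.ones((100, 8))))  # {'n_components': 4}
\end{lstlisting}
\end{frame}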

%from file ../case3/data/003Evaluating your hyperparameter.txt
\begin{frame}[label=Evaluating your hyperparameter]
\frametitle{Evaluating your hyperparameter}
\begin{itemize}

\item My suggestion: Compare against the default parameters.

\item This means you get two lists of AUC scores

\item Your params: [0.80,0.75,0.73,....,0.95]

\item Pyod params: [0.82,0.71,0.48,....,0.95]

\item Look at two values (both are sketched on the next slide)

\item $\sum_i (\mathrm{your}_i-\mathrm{pyod}_i)$

\begin{itemize}

\item Total improvement. If positive, then your parameters help;)

\item But hard to see if this is significant

\end{itemize}

\item Fraction of $\mathrm{your}_i>\mathrm{pyod}_i$

\begin{itemize}

\item Quantised, so it does not reward improving your parameters further

\item But easy to see if this is significant

\begin{itemize}

\item 0.5 $\Rightarrow$ probably just random

\item 0.9 $\Rightarrow$ probably quite significant

\end{itemize}

\end{itemize}

\end{itemize}
\end{frame}
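
\begin{frame}[fragile]
\frametitle{Evaluating your hyperparameter: sketch}
A minimal sketch of both metrics, assuming two equally long AUC lists over the same datasets:
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import numpy as np

def compare_auc(yours, pyod):
    yours, pyod = np.asarray(yours), np.asarray(pyod)
    total_improvement = float(np.sum(yours - pyod))  # >0: your params help
    win_fraction = float(np.mean(yours > pyod))      # ~0.5 random, ~0.9 significant
    return total_improvement, win_fraction

# toy values from the previous slide (lists truncated)
print(compare_auc([0.80, 0.75, 0.73, 0.95], [0.82, 0.71, 0.48, 0.95]))
\end{lstlisting}
\end{frame}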

%from file ../case3/data/004How to continue.txt
\begin{frame}[label=How to continue]
\frametitle{How to continue}
\begin{itemize}

\item See how far you can improve this.

\item Treat this as a supervised optimisation problem: given a dataset, find the best hyperparameters (one possible reading is sketched on the next slide)

\item Might be useful to look at more input parameters

\item Might help to formulate your parameters differently

\item But be aware of \textbf{overfitting}!

\end{itemize}
\end{frame}
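
\begin{frame}[fragile]
\frametitle{How to continue: sketch}
One way to read ``supervised'' here: collect (dataset meta-features, best hyperparameter) pairs from your experiments and fit a simple model on them. The meta-features and numbers below are illustrative assumptions:
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import numpy as np
from sklearn.tree import DecisionTreeRegressor

# per dataset: meta-features (n_samples, n_features)
# and the best hyperparameter you found for it
meta = np.array([[1831, 21], [683, 9], [3062, 30]])
best = np.array([10, 4, 15])          # e.g. best n_components

model = DecisionTreeRegressor(max_depth=2).fit(meta, best)
print(model.predict([[1000, 16]]))    # suggestion for a new dataset
\end{lstlisting}
But be aware of \textbf{overfitting}: with only a few datasets, keep this model simple.
\end{frame}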
%from file ../case3/data/005Intro to Deep Learning.txt
\begin{frame}[label=Intro to Deep Learning]
\frametitle{Intro to Deep Learning}
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{../prep/05Intro_to_Deep_Learning/adsasda.png}
\label{fig:prep05Intro_to_Deep_Learningadsasdapng}
\end{figure}
\end{frame}

%from file ../case3/data/006Intro to Deep Learning.txt
\begin{frame}[label=Intro to Deep Learning]
\frametitle{Intro to Deep Learning}
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{../prep/06Intro_to_Deep_Learning/adsasd.png}
\label{fig:prep06Intro_to_Deep_Learningadsasdpng}
\end{figure}
\end{frame}
%from file ../case3/data/007Intro to Deep Learning.txt
\begin{frame}[label=Intro to Deep Learning]
\frametitle{Intro to Deep Learning}
\begin{itemize}

\item The idea is always the same:

\begin{itemize}

\item Define a complicated model to learn (often millions of parameters)

\item Define a loss function that this model should minimize (example: $\sum_i (y_i-f(x_i))^2$)

\item Find parameters that minimize the loss ($\Rightarrow$ backpropagation)

\end{itemize}

\item Usually neural networks (a minimal sketch follows on the next slide):

\begin{itemize}

\item $f(x)=f_n(x)$, with $f_k(x)=\mathrm{activation}(A_k\cdot f_{k-1}(x)+b_k)$

\item $f_0(x)=x$

\end{itemize}

\item Powerful: one can show that with 3+ layers (and arbitrarily large matrices) you can approximate any function

\item $\Rightarrow$ So specifying a model comes down to specifying a loss function

\end{itemize}
\end{frame}
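
\begin{frame}[fragile]
\frametitle{Intro to Deep Learning: sketch}
A minimal NumPy sketch of the recursion above; tanh is just one example activation, and the layer sizes are illustrative:
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import numpy as np

def forward(x, weights, biases):
    """f_0(x) = x;  f_k(x) = activation(A_k @ f_{k-1}(x) + b_k)"""
    f = x
    for A, b in zip(weights, biases):
        f = np.tanh(A @ f + b)
    return f

# two layers: 4 -> 3 -> 2
rng = np.random.default_rng(0)
ws = [rng.normal(size=(3, 4)), rng.normal(size=(2, 3))]
bs = [np.zeros(3), np.zeros(2)]
print(forward(np.ones(4), ws, bs))
\end{lstlisting}
\end{frame}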

%from file ../case3/data/008Autoencoder.txt
\begin{frame}[label=Autoencoder]
\frametitle{Autoencoder}
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{../prep/08Autoencoder/ae.png}
\label{fig:prep08Autoencoderaepng}
\end{figure}
\end{frame}
%from file ../case3/data/009Autoencoder.txt
\begin{frame}[label=Autoencoder]
\frametitle{Autoencoder}
\begin{itemize}

\item Let's look at some of its hyperparameters (a Keras sketch follows on the next slide)

\item Autoencoder specific

\begin{itemize}

\item Compression factor (latent space size)

\item Loss function (MSE?)

\end{itemize}

\item Neural network architecture

\begin{itemize}

\item Number of layers

\item Number of neurons in each layer (shape of the matrices $A_k$)

\end{itemize}

\item Optimisation parameters

\begin{itemize}

\item Learning rate

\begin{itemize}

\item Controls how fast the parameters are adapted

\item Too high a value makes the training unstable

\end{itemize}

\item Batch size

\begin{itemize}

\item Controls how many samples are averaged together.

\item Higher values make the training more stable, but can make the result less optimal

\end{itemize}

\end{itemize}

\end{itemize}
\end{frame}
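
\begin{frame}[fragile]
\frametitle{Autoencoder: sketch}
A minimal Keras sketch marking where each hyperparameter enters; the sizes and values are illustrative assumptions, not tuned:
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
from tensorflow import keras

n_features = 21                      # e.g. cardio
latent_dim = 5                       # compression factor
model = keras.Sequential([
    keras.Input(shape=(n_features,)),
    keras.layers.Dense(16, activation="relu"),          # layer sizes/count
    keras.layers.Dense(latent_dim, activation="relu"),  # bottleneck
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(n_features),  # reconstruct the input
])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss="mse")            # loss function
# model.fit(X, X, batch_size=32, epochs=50)   # batch size
\end{lstlisting}
\end{frame}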

%from file ../case3/data/010For next time.txt
\begin{frame}[label=For next time]
\frametitle{For next time}
\begin{itemize}

\item (if you have not finished finding good parameters for your old algorithm, continue searching for them)

\item Take a look at your new algorithm

\item Run it once on cardio and take a look at which parameters it has

\item Prepare a presentation similar to last time's (include your cardio result)

\end{itemize}
\end{frame}

\end{document}