\begin{frame}
  \frametitle{Topic 2: Adversarial Examples}
  {\Large \textbf{Practical Black-Box Attacks against Machine Learning} (Papernot, McDaniel, et al., 2016)}\\
  {\LARGE \textbf{Chapter:} 10.4 \quad \textbf{Supervisor:} Benedikt Böing (benedikt.böing@cs.uni-dortmund.de)}\\
  \begin{columns}
    \begin{column}{.575\textwidth}
      \begin{center}
        \includegraphics[height=3cm]{illustrations/benedikt.png}
      \end{center}
    \end{column}
    \begin{column}{.375\textwidth}
      \begin{itemize}
        \item Slight changes to a neural network's input can change its output drastically (see the sketch on the next slide)
      \end{itemize}
    \end{column}
  \end{columns}
\end{frame}
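% Hedged illustration, not part of the original slide: one standard way such
% slight input changes are crafted is the fast gradient sign method (FGSM) of
% Goodfellow et al. (2015), which Papernot et al. apply to a locally trained
% substitute model in their black-box attack. Symbols: $x$ clean input, $y$ its
% label, $\theta$ model parameters, $J$ training loss, $\epsilon$ perturbation budget.
\begin{frame}
  \frametitle{Topic 2: Adversarial Examples (FGSM Sketch)}
  A minimal sketch of how a slight input change can be computed: the fast
  gradient sign method, which Papernot et al.\ apply to their substitute model:
  \[
    x' = x + \epsilon \cdot \operatorname{sign}\!\bigl(\nabla_x J(\theta, x, y)\bigr)
  \]
  \begin{itemize}
    \item $x$: clean input, $y$: its true label, $\theta$: model parameters
    \item $J(\theta, x, y)$: training loss, $\epsilon$: perturbation budget
    \item The resulting $x'$ is visually close to $x$ but is often misclassified
  \end{itemize}
\end{frame}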