% adm-pres/pres.tex
% (Removed GitHub web-export artifacts -- line/size stats, "Raw Blame History",
%  and Unicode warning banners -- which are not LaTeX and would break
%  compilation if left before \documentclass.)

\documentclass{beamer}
\usetheme[progressbar=frametitle]{metropolis}
% g \in G is explanation as a model
% f is the model we're trying to explain
% does, being model agnostic, means we do not care about specifics of f.
% We use Locally Weighted Square Loss as L, where I suspect pi is the weight and we thus estimate the difference between the actual model
% and our explanation, and multiply this with the proximity of the data point z, to x.
% Ask Lasse why min(L(f,g,pi_x(z)) + omega(g)) becomes intractable when omega(g) is a constant!
\usepackage{bbm}
\usepackage{setspace}
\usepackage[T1]{fontenc}
\usepackage[sfdefault,scaled=.85]{FiraSans}
%\usepackage{newtxsf}
\usepackage[ruled, linesnumbered]{algorithm2e}
\SetKwInput{kwRequire}{Require}
\SetKw{kwExpl}{explain}
\title{Why Should I Trust You?}
\subtitle{Explaining the Predictions of Any Classifier}
\author{Casper Vestergaard Kristensen \and Alexander Munch-Hansen}
\institute{Aarhus University}
\date{\today}
\begin{document}
\begin{frame}
\titlepage
\end{frame}
\begin{frame}
\setbeamertemplate{section in toc}[sections numbered]
\frametitle{Outline}
\setstretch{0.5}
\tableofcontents
\end{frame}
\section{Meta information}
%\subsection{Authors}
\begin{frame}
\frametitle{Authors}
\begin{itemize}
\item Marco Tulio Ribeiro
\item Sameer Singh
\item Carlos Guestrin
\end{itemize}
\end{frame}
%\subsection{Publishing}
\begin{frame}[fragile]
\frametitle{Publishing}
\begin{itemize}
\item Conference Paper, Research
\item KDD '16 Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining
\begin{itemize}
\item A premier interdisciplinary conference, brings together researchers and practitioners from data science, data mining, knowledge discovery, large-scale data analytics, and big data.
\item SIGKDD has the highest h5-index of any conference involving databases or data in general
\item Highly trusted source
\end{itemize}
\end{itemize}
\end{frame}
\section{Article}
%\subsection{Problem}
\begin{frame}
\frametitle{Problem definition}
\begin{itemize}
\item People often use Machine Learning models for predictions
\item Blindly trusting a prediction can lead to poor decision making
\item We seek to understand the reasons behind predictions
\begin{itemize}
\item As well as the model doing the predictions
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Problem definition}
\begin{itemize}
\item People often use Machine Learning models for predictions
\item Blindly trusting a prediction can lead to poor decision making
\item We seek to understand the reasons behind predictions
\begin{itemize}
\item As well as the model doing the predictions
\end{itemize}
\end{itemize}
\center
\includegraphics[scale=0.2]{graphics/doctor_pred.png}
\end{frame}
%\subsection{Previous Solutions}
\begin{frame}
\frametitle{Previous Solutions}
\begin{itemize}
% Practitioners consistently overestimate their models accuracy [20], propagate feedback loops [23], or fail to notice data leaks
\item Relying on accuracy based on validation set
\item Gestalt
\item Modeltracker
\begin{itemize}
\item Help users navigate individual instances.
\item Complementary to LIME in terms of explaining models, since they do not address the problem of explaining individual predictions.
\item The submodular pick procedure of LIME can be incorporated in such tools to aid users in navigating larger datasets.
\end{itemize}
\item Recognizing the utility of explanations in assessing trust, many have proposed using interpretable models
\begin{itemize}
\item May generalize poorly, if data can't be explained in few dimensions
\item So interpretability, in these cases, comes at the cost of flexibility, accuracy, or efficiency
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
% It becomes clear the dataset has issues, as there is a fake correlation between the header information and the class Atheism. It is also clear what the problems are, and the steps that can be taken to fix these issues and train a more trustworthy classifier.
\frametitle{A look into two predictions}
\includegraphics[scale=0.25]{graphics/christ_vs_atheism.png}
\end{frame}
\begin{frame}
\frametitle{A look into two predictions}
\includegraphics[scale=0.25]{graphics/christ_vs_atheism_annotated_1.png}
\end{frame}
\begin{frame}
\frametitle{A look into two predictions}
\includegraphics[scale=0.25]{graphics/christ_vs_atheism_annotated_2.png}
\end{frame}
\subsection{The LIME framework}
\begin{frame}
\frametitle{LIME}
\begin{itemize}
\item The algorithm created
\item Explains the predictions of \emph{any} classifier or regressor in a faithful way, by approximating it locally with an \emph{interpretable} model.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Properties of a good explanation}
\begin{itemize}
\item It should be \emph{interpretable}:
\begin{itemize}
\item They must provide qualitative understanding between the input variables and the response
\item They must take into account the user's limitations
\item Use a representation understandable to humans
\item Could be a binary vector indicating presence or absence of a word
\item Could be a binary vector indicating presence or absence of super-pixels in an image
\end{itemize}
\item It should have \emph{fidelity}:
\begin{itemize}
\item Essentially means the model should be faithful.
\item Local fidelity does not imply global fidelity
\item The explanation should aim to correspond to how the model behaves in the vicinity of the instance being predicted
\end{itemize}
\item It should be \emph{model-agnostic}:
\begin{itemize}
\item The explanation should be blind to what model is underneath
\end{itemize}
\end{itemize}
\end{frame}
\subsection{Explaining Predictions}
\begin{frame}[shrink=20]
\frametitle{The Fidelity-Interpretability Trade-off}
We want a simple explanation, still capable of displaying fidelity
\begin{itemize}
\item Let an explanation be defined as a model $g \in G$ acting over $\{0,1\}^{d^{\prime}}$, where $G$ is a class of \emph{potentially interpretable} models
\begin{itemize}
\item Linear models, decision trees
\item $g$ is a vector showing presence or absence of \emph{interpretable components}
\end{itemize}
\item $\Omega(g)$ explains the \emph{complexity} of an explanation $g$
\begin{itemize}
\item Could be height of a decision tree or number of non-zero weights of a linear model
\end{itemize}
\item The model we try to explain is $f : \mathbb{R}^d \rightarrow \mathbb{R}$
\begin{itemize}
\item In classification, $f(x)$ is the probability or binary indicator that x belongs to a certain class
\end{itemize}
\item $\pi_x(z)$ is a proximity measure between instance $z$ and $x$ and defines the locality around $x$
\item $\mathcal{L}(f,g,\pi_x)$ defines how \emph{unfaithful} $g$ is in approximating $f$ in the locality defined by $\pi_x$.
\item Ensuring both \emph{interpretability} and \emph{local fidelity}, we minimize $\mathcal{L}$ while having $\Omega(g)$ be low as well
\end{itemize}
% So a more complex g will achieve a more faithful interpretation (a lower L), but will increase the value of Omega(g)
\[\xi(x) = \operatornamewithlimits{argmin}_{g \in G} \mathcal{L}(f,g,\pi_x) + \Omega(g)\]
\end{frame}
\begin{frame}
% Note: WTF is x' here? - An interpretable version of x
% Note: g acts in d' while f acts in d, so when we say that we have z' in dimension d', it's the model g, we can recover the z in the original representation i.e. explained by f in dimension d.
\frametitle{Sampling for Local Exploration}
Goal: Minimizing $\mathcal{L}(f,g,\pi_x)$ without making assumptions on $f$
\begin{itemize}
\item For a sample $x$, we need to draw samples around $x$
\item Accomplished by drawing non-zero elements of $x$, resulting in perturbed samples $z^\prime$
\item Given $z^\prime \in \{0,1\}^{d^\prime}$, we compute the unperturbed $z \in \mathbb{R}^d$ and $f(z)$, so we have a label for $z^\prime$.
\end{itemize}
\center
\includegraphics[scale=0.15]{graphics/sample_points.png}
\end{frame}
% \subsubsection{Examples}
\begin{frame}
\frametitle{More definitions}
\begin{itemize}
\item $G = $ Class of linear models: $g(z^\prime) = w_g \cdot z^\prime$
\item $\mathcal{L} = $ The locally weighted square loss
\item $\pi_x(z) = \exp(-D(x,z)^2 / \sigma^2)$
\begin{itemize}
\item An exponential kernel function based on some distance function D (could be L2 distance for images)
\end{itemize}
\item Thus; $\mathcal{L}(f, g, \pi_x) = \sum\limits_{z,z^\prime \in \mathcal{Z}} \pi_x(z) (f(z) - g(z^\prime))^2$
\end{itemize}
\end{frame}
\begin{frame}
% \frametitle{Sparse Linear Explanations}
% Talk through the algorithm, discussing the sampling and K-Lasso (least absolute shrinkage and selection operator), which is used for feature selection
\frametitle{Explaining an individual prediction}
\begin{algorithm}[H]
\setstretch{0.9}
\SetAlgoLined
\kwRequire{Classifier $f$, Number of samples $N$}
\kwRequire{Instance $x$, and its interpretable version $x^{\prime}$}
\kwRequire{Similarity kernel $\pi_x$, Length of explanation $K$}
\Indp
$\mathcal{Z} \leftarrow \{\}$ \\
\For{$i \in \{1,2,3,\dots, N\}$}{
$z_i^{\prime} \leftarrow \text{sample\_around}(x^{\prime})$ \\
$\mathcal{Z} \leftarrow \mathcal{Z} \cup \langle z_i^{\prime}, f(z_i), \pi_{x}(z_i) \rangle$ \\
}
$w \leftarrow \text{K-Lasso}(\mathcal{Z},K) \vartriangleright \text{with } z_i^{\prime} \text{ as features, } f(z) \text{ as target}$ \\
\Return $w$
\caption{Sparse Linear Explanations using LIME}
\end{algorithm}
% This algorithm approximates the minimization problem of computing a single individual explanation of a prediction.
% K-Lasso is the procedure of learning the weights via least squares. Wtf are these weights??? - The features
\end{frame}
\subsection{Explaining Models}
\begin{frame}
\frametitle{Explaining models}
Idea: We give a global understanding of the model by explaining a set of individual instances
\begin{itemize}
\item Still model agnostic (since the individual explanations are)
\item Instances need to be selected in a clever way, as people won't have time to look through all explanations
\item Some definitions
\begin{itemize}
\item Time/patience of humans is explained by a budget \emph{B} which denotes number of explanations a human will sit through.
\item Given a set of instances \textbf{X}, we define the \emph{pick step} as the task of selecting \emph{B} instances for the user to inspect.
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The pick step}
The task of selecting \emph{B} instances for the user to inspect
\begin{itemize}
\item Not dependent on the existence of explanations
\item So it should not assist users in selecting instances themselves
\item Looking at raw data is not enough to understand predictions and get insights
\item Should take into account the explanations that accompany each prediction
\item Should pick a diverse, representative set of explanations to show the user, so non-redundant explanations that represent how the model behaves globally.
\end{itemize}
\end{frame}
% This is a matrix explaining instances and their features explained by a binary list s.t. an instance either has a feature or does not. The blue line explains the most inherent feature, which is important, as it is found in omst of the instances. The red lines indicate the two samples which are most important in explaining the model. Thus, explaining importance, is done by: I_j = sqrt(sum_i=1^n W_ij)
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.81]{graphics/picker_first.png}
\end{frame}
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_second.png}
\end{frame}
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_third.png}
\end{frame}
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_final.png}
\end{frame}
\begin{frame}
\frametitle{Definitions}
\begin{itemize}
\item $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
% c is a coverage function, which computes the total importance of the features that appear in at least one instance in a set V .
% NOte: maximizing a weighted coverage function is NP-hard, but the version used in the algorithm is iterativily greedy, so it just adds the one with the maximum gain, which offers a constant-factor approximation guarantee of 11/e to the optimum.
\item $c(V,W,I) = \sum\limits_{j=1}^{d^\prime} \mathbbm{1}_{[\exists i \in V : W_{ij} > 0]}\ I_j$
\item $Pick(W,I) = \operatornamewithlimits{argmax}\limits_{V,|V| \leq B} c(V,W,I)$
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Explanation of algorithm 2}
\begin{itemize}
\item Given explanations for set of instances $X$, $(|X| = n)$. Construct $n \times d^\prime$ \emph{explanation matrix} $W$
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Submodular Picks}
\begin{algorithm}[H]
\setstretch{0.9}
\SetAlgoLined
\kwRequire{Instances $X$, Budget $B$}
\Indp
\ForAll{$x_i \in X$}{
$W_i \leftarrow \mathbf{explain}(x_i, x_i^{\prime})$ \qquad \qquad $\vartriangleright$ Using Algorithm 1
}
\For{$j \in \{1 \dots d^{\prime}\}$} {
$I_j \leftarrow \sqrt{\sum_{i=1}^n |W_{ij}|}$ \qquad \qquad \quad $\vartriangleright$ Compute feature importances
}
$V \leftarrow \{\}$ \\
\While(\qquad \qquad \qquad \quad \ \ $\vartriangleright$ Greedy optimisation of Eq 4){$|V| < B$} {
$V \leftarrow V \cup \text{argmax}_i \ c(V \cup \{i\}, W, I)$
}
\Return $V$
\caption{Submodular pick (SP) algorithm}
\end{algorithm}
\end{frame}
\section{Experiments}
%\subsection{Simulated User Experiments}
%\subsubsection{Setup}
\begin{frame}
\frametitle{Experiments}
Interested in three questions:
\begin{itemize}
\item Are the explanations faithful to the model?
\item Can the explanations aid users in ascertaining trust in the predictions?
\item Are the explanations useful for evaluating the model as a whole?
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Setup}
\begin{itemize}
\item Use two datasets, \emph{books} and \emph{DVDs}, both of $2000$ instances.
\begin{itemize}
\item Task is to classify reviews as \emph{positive} or \emph{negative}
\end{itemize}
\item Decision Trees (\textbf{DT}), Logistic Regression (\textbf{LR}), Nearest Neighbours (\textbf{NN}), and SVMs with RBF kernel (\textbf{SVM}), all used BoW as features, are trained.
\begin{itemize}
% Note, random forest will make no sense without any explanation system, such as LIME
\item Also train random forest (\textbf{RF}) with $1000$ trees.
\end{itemize}
\item Each dataset used for training will consist of $1600$ instances and $400$ will be used for testing.
\item Explanations of \textbf{LIME} is compared with \textbf{parzen}
\begin{itemize}
\item \textbf{parzen} approximates black box classifier globally and explains individual predictions by taking the gradient of the prediction probability function.
\item Both are also compared to a greedy method where features are picked by removing most contributing ones until prediction change, as well as a random procedure.
% K explains the amount of words in the BoW model and the complexity of the model. Higher K => More complex but more faithful, lower k => Less complex, potentially less faithful
\item $K = 10$ for the experiments
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Faithfulness}
\begin{itemize}
\item Faithfulness of explanations is measured on classifiers that are interpretable, \textbf{LR} and \textbf{DT}.
\begin{itemize}
\item Both are trained s.t.\ the max no.\ of features which they can find is $10$, so features found by these are the \emph{gold standard} of features with regard to which features are important.
\end{itemize}
\item For each prediction on the test set, explanations are produced and the fraction of the gold features found, is computed.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Faithfulness}
% We observe that the greedy approach is comparable to parzen on logistic regression, but is substantially worse on decision trees since changing a single feature at a time often does not have an effect on the prediction. The overall recall by parzen is low, likely due to the difficulty in approximating the original highdimensional classifier. LIME consistently provides > 90% recall for both classifiers on both datasets, demonstrating that LIME explanations are faithful to the models.
\centering
% Books faithfulness
\includegraphics[height=0.35\textheight]{graphics/books_dt_lr.png}{ }
% Dvds faithfulness
\includegraphics[height=0.35\textheight]{graphics/dvds_dt_lr.png}
\end{frame}
\begin{frame}
\frametitle{Should I trust this prediction?}
% In statistical analysis of binary classification, the F1 score (also F-score or F-measure) is a measure of a test's accuracy. It considers both the precision p and the recall r of the test to compute the score: p is the number of correct positive results divided by the number of all positive results returned by the classifier, and r is the number of correct positive results divided by the number of all relevant samples (all samples that should have been identified as positive). The F1 score is the harmonic mean of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0.
% Seems kind of unfair, that random and greedy is mistrusted by simply having an unstrutworthy feature in their explanation, while LIME and parzen just have to not change, when these untrustworthy are removed.
\includegraphics[scale=0.4]{graphics/F1_trust.png}
\end{frame}
\begin{frame}
\frametitle{Can I trust this model?}
\begin{itemize}
\item Evaluate if explanations can be used for model selection
\item They add 10 artificially ``noisy'' features s.t.
\begin{itemize}
\item Each artificial feature appears in 10\% of the examples in one class, and 20\% of the other in the training/validation data.
\item While on the test instances, each artificial feature appears in 10\% of the examples in each class.
\end{itemize}
\item Results in models both using actual informative features, but also ones creating random correlations.
\item Pairs of competing classifiers are computed by repeatedly training pairs of random forests with 30 trees until their validation accuracy is within 0.1\% of each other, but their test accuracy differs by at least 5\%.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Can I trust this model?}
% They evaluate whether the explanations can be used for model selection, simulating the case where a human has to decide between two competing models with similar accuracy on validation data.
% Accomplished by "marking" the artificial features found within the B instances seen, as unstrustworthy. We then evaluate how many total predictions in the validation set should be trusted (as in the previous section, treating only marked features as untrustworthy).
% SP-parzen and RP-parzen are omitted from the figure since they did not produce useful explanations, performing only slightly better than random. Is this ok?
\includegraphics[scale=0.4]{graphics/graph_trust.png}
\end{frame}
\begin{frame}
\frametitle{Can humans pick the best classifier?}
\includegraphics[scale=0.35]{graphics/avg_acc_humans.png}
\end{frame}
\begin{frame}
\frametitle{Can non-experts improve a classifier?}
\includegraphics[scale=0.4]{graphics/picking_features_human.png}
\end{frame}
\begin{frame}
\frametitle{Can we learn something from the explanations?}
% Hand picked images to create the correlation between wolf and snow, s.t. the classifier mispredicts whenever a husky is in snow or a wolf is without snow
\center
\includegraphics[scale=0.2]{graphics/husky_wolf_img.png}
\end{frame}
\begin{frame}
\frametitle{Can we learn something from the explanations?}
\begin{itemize}
\item Present 10 predictions without explanations % Such as the previous image (a)
\begin{itemize}
\item 2 are mispredictions, with a husky in snow and a wolf without snow; the rest are correct
\end{itemize}
\item Ask three questions:
\begin{enumerate}
\item Do you trust this algorithm to generalize?
\item Why?
\item How do you think the algorithm distinguishes?
\end{enumerate}
\item Results shown in table, before and after having seen the explanations.
\end{itemize}
\center
\includegraphics[scale=0.3]{graphics/husky_wolf_expla.png}
\end{frame}
%\subsection{Human Subjects}
\section{Conclusion}
\begin{frame}
\frametitle{Conclusion}
\begin{itemize}
\item They argue that trust is crucial for effective human interaction with machine learning systems
\item Explaining individual predictions is important in assessing trust
\item They proposed LIME, a modular and extensible approach to faithfully explain the predictions of any model in an interpretable manner
\item They introduced SP-LIME, a method to select representative and non-redundant predictions, providing a global view of the model to users.
\item Experiments demonstrated that explanations are useful for a variety of models in trust-related tasks in the text and image domains
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Future work}
\begin{itemize}
\item They use only sparse linear models as explanations; their framework supports the exploration of a variety of explanation families, such as DTs.
\begin{itemize}
\item This estimate of faithfulness can also be used for selecting an appropriate family of explanations from a set of multiple interpretable model classes, thus adapting to the given dataset and the classifier.
\end{itemize}
\item One issue that they do not mention in this work was how to perform the pick step for images.
\item They would like to investigate potential uses in speech, video, and medical domains, as well as recommendation systems.
\item They would like to explore theoretical properties (such as the appropriate number of samples) and computational optimizations (such as using parallelization and GPU processing)
\end{itemize}
\end{frame}
\section{Recap}
\begin{frame}
\frametitle{Recap}
\end{frame}
\end{document}