This commit is contained in:
Alexander Munch-Hansen 2019-11-27 18:32:40 +01:00
parent 92b57a6016
commit 261b37419d
2 changed files with 56 additions and 79 deletions

BIN
pres.pdf

Binary file not shown.

135
pres.tex

@ -13,6 +13,8 @@
% Ask Lasse why min(L(f,g,pi_x(z)) + omega(g)) becomes intractable when omega(g) is a constant!
\usepackage{dirtytalk}
\usepackage{bbm}
\usepackage{setspace}
@ -111,15 +113,8 @@
\begin{frame}
\frametitle{Previous Solutions}
\begin{itemize}
\note{ Practitioners consistently overestimate their models' accuracy [20], propagate feedback loops [23], or fail to notice data leaks }
\item Relying on accuracy based on validation set
\item Gestalt
\item Modeltracker
\begin{itemize}
\item Help users navigate individual instances.
\item Complementary to LIME in terms of explaining models, since they do not address the problem of explaining individual predictions.
\item The submodular pick procedure of LIME can be incorporated in such tools to aid users in navigating larger datasets.
\end{itemize}
\item Recognizing the utility of explanations in assessing trust, many have proposed using interpretable models
\begin{itemize}
\item May generalize poorly, if data can't be explained in few dimensions
@ -245,8 +240,9 @@
% \subsubsection{Examples}
\begin{frame}
\frametitle{More definitions}
\frametitle{Specifics for linear models}
\begin{itemize}
\item They focus only on linear explanations
\item $G = $ Class of linear models: $g(z^\prime) = w_g \cdot z^\prime$
\item $L = $ The locally weighted square loss
\item $\pi_x(z) = \exp(-D(x,z)^2 / \sigma^2)$
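Not from the paper's code, but a minimal sketch of how these pieces combine into a locally weighted linear fit; the Euclidean distance, the binary perturbation scheme, and the omission of the $\Omega(g)$ sparsity term (the K-Lasso step) are simplifications of mine:

import numpy as np

def exponential_kernel(x, z, sigma=1.0):
    # pi_x(z) = exp(-D(x, z)^2 / sigma^2); Euclidean D is an assumption here
    d = np.linalg.norm(x - z)
    return np.exp(-(d ** 2) / sigma ** 2)

def fit_local_linear_explanation(x, predict_fn, n_samples=500, sigma=1.0, seed=0):
    # Sketch of fitting g(z') = w_g . z' by minimising the locally weighted
    # square loss sum_z pi_x(z) (f(z) - g(z'))^2.
    rng = np.random.default_rng(seed)
    d = x.shape[0]
    mask = rng.integers(0, 2, size=(n_samples, d))   # interpretable samples z' in {0,1}^d
    Z = mask * x                                     # perturbed points in the original space
    y = np.array([predict_fn(z) for z in Z])         # black-box predictions f(z)
    w = np.array([exponential_kernel(x, z, sigma) for z in Z])
    sw = np.sqrt(w)[:, None]
    # Weighted least squares: argmin_beta || sqrt(w) * (Z' beta - y) ||^2
    beta, *_ = np.linalg.lstsq(mask * sw, y * sw.ravel(), rcond=None)
    return beta                                      # one weight per interpretable feature

Points far from x get an exponentially small weight, so the linear model only has to be accurate in the neighbourhood of the instance being explained.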
@ -300,7 +296,7 @@
\item Some definitions
\begin{itemize}
\item Time/patience of humans is captured by a budget \emph{B}, which denotes the number of explanations a human will sit through.
\item Given a set of instances \textbf{X}, we define the \emph{pick step} as the task of selecting \emph{B} instances for the user to inspect.
\item Given a set of instances \textbf{X}, we define the \emph{pick step} as the task of selecting \textbf{B} instances for the user to inspect.
\end{itemize}
\end{itemize}
\end{frame}
@ -308,8 +304,7 @@
\frametitle{The pick step}
The task of selecting \emph{B} instances for the user to inspect
\begin{itemize}
\item Not dependent on the existence of explanations
\item Should not assist users in selecting instances themselves
\item Should return the instances which best explain the model
\item Looking at raw data is not enough to understand predictions and get insights
\item Should take into account the explanations that accompany each prediction
\note{Should pick a diverse, representative set of explanations to show the user, so non-redundant explanations that represent how the model behaves globally.}
@ -320,7 +315,8 @@
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.81]{graphics/picker_first.png}
\includegraphics[scale=0.68]{graphics/picker_first.png} \\
\hspace{1cm}
\end{frame}
\note[itemize] {
\item This is a matrix of instances and their features, represented as binary values s.t. an instance either has a feature or does not.
@ -331,7 +327,10 @@
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_second.png}
\includegraphics[scale=0.27]{graphics/picker_second.png}
\begin{itemize}
\item $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
\end{itemize}
\end{frame}
\note[itemize] {
\item This is a matrix of instances and their features, represented as binary values s.t. an instance either has a feature or does not.
@ -342,48 +341,33 @@
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_third.png}
\includegraphics[scale=0.27]{graphics/picker_third.png}
\begin{itemize}
\item $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
\end{itemize}
\end{frame}
\note[itemize] {
\item This is a matrix of instances and their features, represented as binary values s.t. an instance either has a feature or does not.
\item The blue line marks the most prevalent feature, which is important, as it is found in most of the instances.
\item The red lines indicate the two samples which are most important in explaining the model.
\item Thus, feature importance is computed as: $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
}
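For intuition, a tiny worked example of this importance score (the matrix values below are made up purely for illustration):

import numpy as np

# Toy binary explanation matrix W: 5 instances (rows) x 4 features (columns).
W = np.array([[1, 0, 1, 0],
              [1, 1, 1, 0],
              [0, 0, 1, 0],
              [1, 0, 1, 1],
              [0, 1, 1, 0]])

# I_j = sqrt(sum_i W_ij): features that appear in many instances score highest.
I = np.sqrt(W.sum(axis=0))
print(I)  # approx. [1.73, 1.41, 2.24, 1.0] -> the third feature is the most important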
\begin{frame}
\frametitle{Picking instances}
\center
\includegraphics[scale=0.32]{graphics/picker_final.png}
\end{frame}
\note[itemize] {
\item This is a matrix of instances and their features, represented as binary values s.t. an instance either has a feature or does not.
\item The blue line marks the most prevalent feature, which is important, as it is found in most of the instances.
\item The red lines indicate the two samples which are most important in explaining the model.
\item Thus, feature importance is computed as: $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
}
\begin{frame}
\frametitle{Definitions}
\begin{itemize}
\item $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
\item $c(V,W,I) = \sum\limits_{j=1}^{d^\prime} \mathbbm{1}_{[\exists i \in V : W_{ij} > 0]}\ I_j$
\item $Pick(W,I) = \operatornamewithlimits{argmax}\limits_{V,|V| \leq B} c(V,W,I)$
\end{itemize}
\end{frame}
\includegraphics[scale=0.27]{graphics/picker_final.png}
\begin{itemize}
\item $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
\end{itemize}
\note[itemize] {
\item c is a coverage function, which computes the total importance of the features that appear in at least one instance in a set V.
\item Note: maximizing a weighted coverage function is NP-hard, but the version used in the algorithm is iteratively greedy: it adds the instance with the maximum coverage gain, which offers a constant-factor approximation guarantee of $1 - 1/e$ to the optimum.
}
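A step the note glosses over: the $1 - 1/e$ factor comes from $c$ being monotone and submodular, i.e. for $V \subseteq V^\prime$ and an instance $i \notin V^\prime$:
\[
c(V \cup \{i\}, W, I) - c(V, W, I) \;\geq\; c(V^\prime \cup \{i\}, W, I) - c(V^\prime, W, I),
\]
and greedy maximisation of a monotone submodular function under a cardinality budget $|V| \leq B$ attains at least $(1 - 1/e)$ of the optimal coverage (the classical greedy guarantee for submodular maximization).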
\begin{frame}
\frametitle{Explanation of algorithm 2}
\begin{itemize}
\item Given explanations for a set of instances $X$ $(|X| = n)$, construct an $n \times d^\prime$ \emph{explanation matrix} $W$
\end{itemize}
\end{frame}
\note[itemize] {
\item This is a matrix of instances and their features, represented as binary values s.t. an instance either has a feature or does not.
\item The blue line marks the most prevalent feature, which is important, as it is found in most of the instances.
\item The red lines indicate the two samples which are most important in explaining the model.
\item Thus, feature importance is computed as: $I_j = \sqrt{\sum_{i=1}^n W_{ij}}$
}
\begin{frame}
\frametitle{Submodular Picks}
$c(V,W,I) = \sum\limits_{j=1}^{d^\prime} \mathbbm{1}_{[\exists i \in V : W_{ij} > 0]}\ I_j$
@ -405,7 +389,9 @@
\Return $V$
\caption{Submodular pick (SP) algorithm}
\end{algorithm}
\end{frame}
\note{ Note: maximizing a weighted coverage function is NP-hard, but the version used in the algorithm is iteratively greedy: it adds the instance with the maximum coverage gain, which offers a constant-factor approximation guarantee of $1 - 1/e$ to the optimum.}
\end{frame}
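A compact sketch of the greedy pick in Algorithm 2, assuming the explanation matrix W is already computed; the function and variable names are mine, not the paper's:

import numpy as np

def coverage(V, W, I):
    # c(V, W, I): total importance of features appearing in at least one instance in V.
    if not V:
        return 0.0
    covered = (W[list(V)] > 0).any(axis=0)
    return float(I[covered].sum())

def submodular_pick(W, budget):
    # Greedy maximisation of the coverage function under the budget |V| <= B.
    I = np.sqrt(W.sum(axis=0))                  # global feature importances I_j
    V = set()
    while len(V) < min(budget, W.shape[0]):
        gains = {i: coverage(V | {i}, W, I) - coverage(V, W, I)
                 for i in range(W.shape[0]) if i not in V}
        best = max(gains, key=gains.get)
        if gains[best] <= 0:                    # nothing new can be covered
            break
        V.add(best)
    return sorted(V)

Each iteration adds the instance whose explanation covers the most as-yet-uncovered importance, which is exactly the greedy behaviour the note describes.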
\section{Experiments}
\subsection{Simulated User Experiments}
@ -415,43 +401,22 @@
Interested in three questions:
\begin{itemize}
\item Are the explanations faithful to the model?
\item Can the explanations aid users in ascertaining trust in the predictions?
\item Are the explanations useful for evaluating the model as a whole?
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Setup}
\frametitle{Faithfulness and gold standard}
\begin{itemize}
\item Use two datasets, \emph{books} and \emph{DVDs}, each with $2000$ instances.
\begin{itemize}
\item The task is to classify reviews as \emph{positive} or \emph{negative}
\end{itemize}
\item Decision Trees (\textbf{DT}), Logistic Regression (\textbf{LR}), Nearest Neighbours (\textbf{NN}), and SVMs with RBF kernel (\textbf{SVM}) are trained, all using BoW features.
\begin{itemize}
% Note, random forest will make no sense without any explanation system, such as LIME
\item Also train random forest (\textbf{RF}) with $1000$ trees.
\end{itemize}
\item Each dataset is split into $1600$ instances for training and $400$ for testing.
\item Explanations of \textbf{LIME} are compared with \textbf{parzen} as well as greedy and random algorithms.
\begin{itemize}
\item \textbf{parzen} approximates the black box classifier globally and explains individual predictions by taking the gradient of the prediction probability function.
\end{itemize}
\item $K = 10$ for the experiments
\end{itemize}
\end{frame}
\note[itemize] {
\item Both are also compared to a greedy method, where the most contributing features are removed one by one until the prediction changes, as well as a random procedure.
\item K is the number of words kept from the BoW model and controls the complexity of the explanation. Higher K => more complex but more faithful; lower K => less complex, potentially less faithful.
}
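Not from the paper's code, but the setup roughly corresponds to an sklearn sketch like the following; the data loading, the binary BoW choice, and the hyperparameters beyond those stated in the slides are assumptions:

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# reviews: list of str, labels: 0/1 (negative/positive) -- assumed to be loaded elsewhere
def train_classifiers(reviews, labels):
    X = CountVectorizer(binary=True).fit_transform(reviews)   # bag-of-words features
    X_tr, X_te, y_tr, y_te = train_test_split(X, labels, train_size=1600, test_size=400)
    models = {
        "DT": DecisionTreeClassifier(),
        "LR": LogisticRegression(max_iter=1000),
        "NN": KNeighborsClassifier(),
        "SVM": SVC(kernel="rbf", probability=True),
        "RF": RandomForestClassifier(n_estimators=1000),
    }
    return {name: m.fit(X_tr, y_tr) for name, m in models.items()}, (X_te, y_te)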
\begin{frame}
\frametitle{Faithfulness}
\begin{itemize}
\item Faithfulness of explanations is measured on classifiers that are interpretable: \textbf{Logistic Regression} and \textbf{Decision Tree}.
\begin{itemize}
\item Both are trained s.t. they use at most $10$ features; the features they find are the \emph{gold standard} of important features
\end{itemize}
\item For each prediction on the test set, explanations are produced and the fraction of the gold features found is computed.
@ -469,7 +434,7 @@
\end{frame}
\note[itemize] {
\item We observe that the greedy approach is comparable to parzen on logistic regression, but is significantly worse on decision trees, since changing a single feature at a time often does not have an effect on the prediction.
\item The overall recall by parzen is low, likely due to the difficulty in approximating the original high-dimensional classifier.
\item LIME consistently provides > 90\% recall for both classifiers on both datasets, demonstrating that LIME explanations are faithful to the models.
}
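The faithfulness measurement amounts to a recall over the gold features; a minimal sketch under that reading, with illustrative names:

def gold_feature_recall(explanations, gold_features):
    # explanations: one set of explanation features per test instance (e.g. the K words picked);
    # gold_features: the set of features the interpretable model actually uses for that instance.
    recalls = [len(expl & gold) / len(gold)
               for expl, gold in zip(explanations, gold_features) if gold]
    return sum(recalls) / len(recalls) if recalls else 0.0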
@ -477,12 +442,20 @@
\begin{frame}
\frametitle{Should I trust this prediction?}
\includegraphics[scale=0.4]{graphics/F1_trust.png}
\begin{itemize}
\item Compute the F-measure over trustworthy predictions, where whether a prediction is deemed trustworthy is based on:
\begin{itemize}
\item Random and greedy explanations are untrustworthy if they contain any untrustworthy features
\item Parzen and LIME explanations are untrustworthy if the prediction of the linear approximation changes when the untrustworthy features are removed from the explanation
\end{itemize}
\end{itemize}
\includegraphics[width=0.5\linewidth]{graphics/F1_trust.png}
\includegraphics[width=0.5\linewidth]{graphics/sample_points.png}
\end{frame}
\note[itemize] {
\item In statistical analysis of binary classification, the F1 score (also F-score or F-measure) is a measure of a test's accuracy. It considers both the precision p and the recall r of the test to compute the score: p is the number of correct positive results divided by the number of all positive results returned by the classifier, and r is the number of correct positive results divided by the number of all relevant samples (all samples that should have been identified as positive). The F1 score is the harmonic mean of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0.
\item Seems kind of unfair that random and greedy are mistrusted simply for having an untrustworthy feature in their explanation, while LIME and parzen just have to not change when these untrustworthy features are removed.
}
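A sketch of how the simulated trust evaluation could be scored, assuming we already know which predictions the oracle deems trustworthy and which the explanation method deems trustworthy (both inputs are hypothetical):

def f1_over_trust(oracle_trust, predicted_trust):
    # Both arguments: lists of booleans, one per test prediction.
    tp = sum(o and p for o, p in zip(oracle_trust, predicted_trust))
    fp = sum((not o) and p for o, p in zip(oracle_trust, predicted_trust))
    fn = sum(o and (not p) for o, p in zip(oracle_trust, predicted_trust))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0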
@ -638,6 +611,10 @@
\begin{frame}
\frametitle{Discussion}
\begin{itemize}
\item Is it fair that the authors create their data in such a way that \emph{Parzen} becomes unusable in their tests?
\item What do you expect to happen if the data is very non-linear, even locally around the predictions?
\item The \emph{K-Lasso} algorithm used in \emph{Algorithm 1} is explicitly a regression method, and as such it should only work when linear models are used for the explanations. Is this okay?
\end{itemize}
\end{frame}
\end{document}