@inproceedings{strout-etal-2019-human,
title = "Do Human Rationales Improve Machine Explanations?",
author = "Strout, Julia and
Zhang, Ye and
Mooney, Raymond",
editor = "Linzen, Tal and
Chrupa{\l}a, Grzegorz and
Belinkov, Yonatan and
Hupkes, Dieuwke",
booktitle = "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-4807",
doi = "10.18653/v1/W19-4807",
pages = "56--62",
abstract = "Work on {``}learning with rationales{''} shows that humans providing explanations to a machine learning system can improve the system{'}s predictive accuracy. However, this work has not been connected to work in {``}explainable AI{''} which concerns machines explaining their reasoning to humans. In this work, we show that learning with rationales can also improve the quality of the machine{'}s explanations as evaluated by human judges. Specifically, we present experiments showing that, for CNN-based text classification, explanations generated using {``}supervised attention{''} are judged superior to explanations generated using normal unsupervised attention.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="strout-etal-2019-human">
    <titleInfo>
      <title>Do Human Rationales Improve Machine Explanations?</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Julia</namePart>
      <namePart type="family">Strout</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ye</namePart>
      <namePart type="family">Zhang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Raymond</namePart>
      <namePart type="family">Mooney</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Tal</namePart>
        <namePart type="family">Linzen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Grzegorz</namePart>
        <namePart type="family">Chrupała</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yonatan</namePart>
        <namePart type="family">Belinkov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Dieuwke</namePart>
        <namePart type="family">Hupkes</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Work on “learning with rationales” shows that humans providing explanations to a machine learning system can improve the system’s predictive accuracy. However, this work has not been connected to work in “explainable AI” which concerns machines explaining their reasoning to humans. In this work, we show that learning with rationales can also improve the quality of the machine’s explanations as evaluated by human judges. Specifically, we present experiments showing that, for CNN-based text classification, explanations generated using “supervised attention” are judged superior to explanations generated using normal unsupervised attention.</abstract>
    <identifier type="citekey">strout-etal-2019-human</identifier>
    <identifier type="doi">10.18653/v1/W19-4807</identifier>
    <location>
      <url>https://aclanthology.org/W19-4807</url>
    </location>
    <part>
      <date>2019-08</date>
      <extent unit="page">
        <start>56</start>
        <end>62</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Do Human Rationales Improve Machine Explanations?
%A Strout, Julia
%A Zhang, Ye
%A Mooney, Raymond
%Y Linzen, Tal
%Y Chrupała, Grzegorz
%Y Belinkov, Yonatan
%Y Hupkes, Dieuwke
%S Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F strout-etal-2019-human
%X Work on “learning with rationales” shows that humans providing explanations to a machine learning system can improve the system’s predictive accuracy. However, this work has not been connected to work in “explainable AI” which concerns machines explaining their reasoning to humans. In this work, we show that learning with rationales can also improve the quality of the machine’s explanations as evaluated by human judges. Specifically, we present experiments showing that, for CNN-based text classification, explanations generated using “supervised attention” are judged superior to explanations generated using normal unsupervised attention.
%R 10.18653/v1/W19-4807
%U https://aclanthology.org/W19-4807
%U https://doi.org/10.18653/v1/W19-4807
%P 56-62
Markdown (Informal)
[Do Human Rationales Improve Machine Explanations?](https://aclanthology.org/W19-4807) (Strout et al., BlackboxNLP 2019)
ACL
Julia Strout, Ye Zhang, and Raymond Mooney. 2019. Do Human Rationales Improve Machine Explanations?. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 56–62, Florence, Italy. Association for Computational Linguistics.