@inproceedings{kim-etal-2023-cotever,
title = "{C}o{TEV}er: Chain of Thought Prompting Annotation Toolkit for Explanation Verification",
author = "Kim, Seungone and
Joo, Se June and
Jang, Yul and
Chae, Hyungjoo and
Yeo, Jinyoung",
editor = "Croce, Danilo and
Soldaini, Luca",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-demo.23",
doi = "10.18653/v1/2023.eacl-demo.23",
pages = "195--208",
abstract = "Chain-of-thought (CoT) prompting enables large language models (LLMs) to solve complex reasoning tasks by generating an explanation before the final prediction. Despite it{'}s promising ability, a critical downside of CoT prompting is that the performance is greatly affected by the factuality of the generated explanation. To improve the correctness of the explanations, fine-tuning language models with explanation data is needed. However, there exists only a few datasets that can be used for such approaches, and no data collection tool for building them. Thus, we introduce CoTEVer, a tool-kit for annotating the factual correctness of generated explanations and collecting revision data of wrong explanations. Furthermore, we suggest several use cases where the data collected with CoTEVer can be utilized for enhancing the faithfulness of explanations. Our toolkit is publicly available at \url{https://github.com/SeungoneKim/CoTEVer}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kim-etal-2023-cotever">
<titleInfo>
<title>CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Seungone</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Se</namePart>
<namePart type="given">June</namePart>
<namePart type="family">Joo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yul</namePart>
<namePart type="family">Jang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hyungjoo</namePart>
<namePart type="family">Chae</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinyoung</namePart>
<namePart type="family">Yeo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Danilo</namePart>
<namePart type="family">Croce</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luca</namePart>
<namePart type="family">Soldaini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Chain-of-thought (CoT) prompting enables large language models (LLMs) to solve complex reasoning tasks by generating an explanation before the final prediction. Despite its promising ability, a critical downside of CoT prompting is that its performance is greatly affected by the factuality of the generated explanation. To improve the correctness of the explanations, fine-tuning language models with explanation data is needed. However, only a few datasets exist that can be used for such approaches, and there is no data collection tool for building them. Thus, we introduce CoTEVer, a toolkit for annotating the factual correctness of generated explanations and for collecting revision data for wrong explanations. Furthermore, we suggest several use cases where the data collected with CoTEVer can be utilized to enhance the faithfulness of explanations. Our toolkit is publicly available at https://github.com/SeungoneKim/CoTEVer.</abstract>
<identifier type="citekey">kim-etal-2023-cotever</identifier>
<identifier type="doi">10.18653/v1/2023.eacl-demo.23</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-demo.23</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>195</start>
<end>208</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification
%A Kim, Seungone
%A Joo, Se June
%A Jang, Yul
%A Chae, Hyungjoo
%A Yeo, Jinyoung
%Y Croce, Danilo
%Y Soldaini, Luca
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F kim-etal-2023-cotever
%X Chain-of-thought (CoT) prompting enables large language models (LLMs) to solve complex reasoning tasks by generating an explanation before the final prediction. Despite its promising ability, a critical downside of CoT prompting is that its performance is greatly affected by the factuality of the generated explanation. To improve the correctness of the explanations, fine-tuning language models with explanation data is needed. However, only a few datasets exist that can be used for such approaches, and there is no data collection tool for building them. Thus, we introduce CoTEVer, a toolkit for annotating the factual correctness of generated explanations and for collecting revision data for wrong explanations. Furthermore, we suggest several use cases where the data collected with CoTEVer can be utilized to enhance the faithfulness of explanations. Our toolkit is publicly available at https://github.com/SeungoneKim/CoTEVer.
%R 10.18653/v1/2023.eacl-demo.23
%U https://aclanthology.org/2023.eacl-demo.23
%U https://doi.org/10.18653/v1/2023.eacl-demo.23
%P 195-208
Markdown (Informal)
[CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification](https://aclanthology.org/2023.eacl-demo.23) (Kim et al., EACL 2023)
ACL
Seungone Kim, Se June Joo, Yul Jang, Hyungjoo Chae, and Jinyoung Yeo. 2023. [CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification](https://aclanthology.org/2023.eacl-demo.23). In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations, pages 195–208, Dubrovnik, Croatia. Association for Computational Linguistics.