@inproceedings{wuebker-etal-2018-compact,
    title = "Compact Personalized Models for Neural Machine Translation",
    author = "Wuebker, Joern  and
      Simianer, Patrick  and
      DeNero, John",
    editor = "Riloff, Ellen  and
      Chiang, David  and
      Hockenmaier, Julia  and
      Tsujii, Jun{'}ichi",
    booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
    month = oct # "-" # nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D18-1104",
    doi = "10.18653/v1/D18-1104",
    pages = "881--886",
    abstract = "We propose and compare methods for gradient-based domain adaptation of self-attentive neural machine translation models. We demonstrate that a large proportion of model parameters can be frozen during adaptation with minimal or no reduction in translation quality by encouraging structured sparsity in the set of offset tensors during learning via group lasso regularization. We evaluate this technique for both batch and incremental adaptation across multiple data sets and language pairs. Our system architecture{--}combining a state-of-the-art self-attentive model with compact domain adaptation{--}provides high quality personalized machine translation that is both space and time efficient.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wuebker-etal-2018-compact">
    <titleInfo>
        <title>Compact Personalized Models for Neural Machine Translation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Joern</namePart>
        <namePart type="family">Wuebker</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Patrick</namePart>
        <namePart type="family">Simianer</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">John</namePart>
        <namePart type="family">DeNero</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2018-oct-nov</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Ellen</namePart>
            <namePart type="family">Riloff</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">David</namePart>
            <namePart type="family">Chiang</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Julia</namePart>
            <namePart type="family">Hockenmaier</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Jun’ichi</namePart>
            <namePart type="family">Tsujii</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Brussels, Belgium</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We propose and compare methods for gradient-based domain adaptation of self-attentive neural machine translation models. We demonstrate that a large proportion of model parameters can be frozen during adaptation with minimal or no reduction in translation quality by encouraging structured sparsity in the set of offset tensors during learning via group lasso regularization. We evaluate this technique for both batch and incremental adaptation across multiple data sets and language pairs. Our system architecture–combining a state-of-the-art self-attentive model with compact domain adaptation–provides high quality personalized machine translation that is both space and time efficient.</abstract>
    <identifier type="citekey">wuebker-etal-2018-compact</identifier>
    <identifier type="doi">10.18653/v1/D18-1104</identifier>
    <location>
        <url>https://aclanthology.org/D18-1104</url>
    </location>
    <part>
        <date>2018-oct-nov</date>
        <extent unit="page">
            <start>881</start>
            <end>886</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Compact Personalized Models for Neural Machine Translation
%A Wuebker, Joern
%A Simianer, Patrick
%A DeNero, John
%Y Riloff, Ellen
%Y Chiang, David
%Y Hockenmaier, Julia
%Y Tsujii, Jun’ichi
%S Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing
%D 2018
%8 oct nov
%I Association for Computational Linguistics
%C Brussels, Belgium
%F wuebker-etal-2018-compact
%X We propose and compare methods for gradient-based domain adaptation of self-attentive neural machine translation models. We demonstrate that a large proportion of model parameters can be frozen during adaptation with minimal or no reduction in translation quality by encouraging structured sparsity in the set of offset tensors during learning via group lasso regularization. We evaluate this technique for both batch and incremental adaptation across multiple data sets and language pairs. Our system architecture–combining a state-of-the-art self-attentive model with compact domain adaptation–provides high quality personalized machine translation that is both space and time efficient.
%R 10.18653/v1/D18-1104
%U https://aclanthology.org/D18-1104
%U https://doi.org/10.18653/v1/D18-1104
%P 881-886
Markdown (Informal)
[Compact Personalized Models for Neural Machine Translation](https://aclanthology.org/D18-1104) (Wuebker et al., EMNLP 2018)
ACL
Joern Wuebker, Patrick Simianer, and John DeNero. 2018. [Compact Personalized Models for Neural Machine Translation](https://aclanthology.org/D18-1104). In *Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing*, pages 881–886, Brussels, Belgium. Association for Computational Linguistics.
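The abstract describes adapting a frozen base model by learning additive offset tensors, with a group lasso penalty encouraging whole offset tensors to vanish so the personalized model stays compact. Below is a minimal PyTorch sketch of that idea, not the authors' implementation: the `OffsetLinear` module and `group_lasso_penalty` function are hypothetical names, and the penalty is added directly to the loss as a simple (sub)gradient approximation rather than the proximal update a full treatment might use.

```python
# Sketch only: frozen base weights + trainable offsets + group lasso.
import torch
import torch.nn.functional as F


class OffsetLinear(torch.nn.Module):
    """A linear layer whose frozen base weight is adapted by a learned offset."""

    def __init__(self, base: torch.nn.Linear):
        super().__init__()
        # Base parameters are stored as buffers, so they receive no gradients.
        self.register_buffer("base_weight", base.weight.detach().clone())
        self.register_buffer(
            "base_bias",
            base.bias.detach().clone() if base.bias is not None else None,
        )
        # The only trainable parameter: an additive offset on the weight tensor.
        self.delta = torch.nn.Parameter(torch.zeros_like(self.base_weight))

    def forward(self, x):
        return F.linear(x, self.base_weight + self.delta, self.base_bias)


def group_lasso_penalty(offsets, lam=1e-4):
    """Group lasso with one group per offset tensor: lam * sum of L2 norms.

    Penalizing the L2 norm of each whole tensor (rather than individual
    entries) drives entire offset tensors toward zero, so they can be
    dropped from the stored personalized model.
    """
    return lam * sum(delta.norm(p=2) for delta in offsets)


# Usage sketch: only the offsets are trained; the penalty is added to the
# task loss so sparsity is encouraged during adaptation.
layer = OffsetLinear(torch.nn.Linear(512, 512))
x = torch.randn(8, 512)
task_loss = layer(x).pow(2).mean()  # stand-in for the actual NMT loss
loss = task_loss + group_lasso_penalty([layer.delta])
loss.backward()
```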