@inproceedings{a390ccffc1774faea12dd6830efba9ea,
title = "{UniLMv2}: Pseudo-Masked Language Models for Unified Language Model Pre-Training",
abstract = "We propose to pre-train a unified language model for both autoencoding and partially autoregressive language modeling tasks using a novel training procedure, referred to as a pseudo-masked language model (PMLM). Given an input text with masked tokens, we rely on conventional masks to learn inter-relations between corrupted tokens and context via autoencoding, and pseudo masks to learn intra-relations between masked spans via partially autoregressive modeling. With well-designed position embeddings and self-attention masks, the context encodings are reused to avoid redundant computation. Moreover, conventional masks used for autoencoding provide global masking information, so that all the position embeddings are accessible in partially autoregressive language modeling. In addition, the two tasks pre-train a unified language model as a bidirectional encoder and a sequence-to-sequence decoder, respectively. Our experiments show that the unified language models pre-trained using PMLM achieve new state-of-the-art results on a wide range of language understanding and generation tasks across several widely used benchmarks. The code and pre-trained models are available at https://github.com/microsoft/unilm.",
author = "Hangbo Bao and Li Dong and Furu Wei and Wenhui Wang and Nan Yang and Xiaodong Liu and Yu Wang and Songhao Piao and Jianfeng Gao and Ming Zhou and Hon, {Hsiao-Wuen}",
note = "Publisher Copyright: {\textcopyright} ICML 2020. All rights reserved.; 37th International Conference on Machine Learning, ICML 2020; Conference date: 13-07-2020 through 18-07-2020",
year = "2020",
language = "English",
series = "37th International Conference on Machine Learning, ICML 2020",
publisher = "International Machine Learning Society (IMLS)",
pages = "619--629",
editor = "Hal Daum{\'e} III and Aarti Singh",
booktitle = "37th International Conference on Machine Learning, ICML 2020",
}