@inproceedings{oai:repository.ninjal.ac.jp:00003618,
  author    = {Kanashiro Pereira, Lis and Duh, Kevin and Cheng, Fei and Asahara, Masayuki and Kobayashi, Ichiro},
  title     = {Attention-Focused Adversarial Training for Robust Temporal Reasoning},
  booktitle = {Proceedings of the 13th Conference on Language Resources and Evaluation (LREC 2022)},
  publisher = {European Language Resources Association},
  pages     = {7352--7359},
  year      = {2022},
  note      = {Author affiliations: Ochanomizu University; Johns Hopkins University; Kyoto University; National Institute for Japanese Language and Linguistics / Tokyo University of Foreign Studies; Ochanomizu University},
  abstract  = {We propose an enhanced adversarial training algorithm for fine-tuning transformer-based language models (i.e., RoBERTa) and apply it to the temporal reasoning task. Current adversarial training approaches for NLP add the adversarial perturbation only to the embedding layer, ignoring the other layers of the model, which might limit the generalization power of adversarial training. Instead, our algorithm searches for the best combination of layers to which to add the adversarial perturbation. We add the perturbation to multiple hidden states or attention representations of the model layers; adding it to the attention representations performed best in our experiments. Our model improves performance on several temporal reasoning benchmarks and establishes new state-of-the-art results.}
}