{"created":"2023-05-15T14:25:00.582599+00:00","id":3618,"links":{},"metadata":{"_buckets":{"deposit":"430f575e-c0a5-4e1c-89f6-957a310c7c3b"},"_deposit":{"created_by":3,"id":"3618","owners":[3],"pid":{"revision_id":0,"type":"depid","value":"3618"},"status":"published"},"_oai":{"id":"oai:repository.ninjal.ac.jp:00003618","sets":["320:324"]},"author_link":["12111","12114","12110","12113","12112"],"item_10003_biblio_info_32":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2022","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"7359","bibliographicPageStart":"7352","bibliographic_titles":[{},{"bibliographic_title":"Proceedings of the 13th Conference on Language Resources and Evaluation(LREC 2022)","bibliographic_titleLang":"en"}]}]},"item_10003_description_43":{"attribute_name":"フォーマット","attribute_value_mlt":[{"subitem_description":"application/pdf","subitem_description_type":"Other"}]},"item_10003_description_52":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_description":"Ochanomizu University","subitem_description_type":"Other"},{"subitem_description":"John Hopkins University","subitem_description_type":"Other"},{"subitem_description":"Kyoto University","subitem_description_type":"Other"},{"subitem_description":"National Institute for Japanese Language and Linguistics / Tokyo University of Foreign Studies","subitem_description_type":"Other"},{"subitem_description":"Ochanomizu University","subitem_description_type":"Other"}]},"item_10003_description_53":{"attribute_name":"抄録(英)","attribute_value_mlt":[{"subitem_description":"We propose an enhanced adversarial training algorithm for fine-tuning transformer-based language models (i.e., RoBERTa) and apply it to the temporal reasoning task. Current adversarial training approaches for NLP add the adversarial perturbation only to the embedding layer, ignoring the other layers of the model, which might limit the generalization power of adversarial training. Instead, our algorithm searches for the best combination of layers to add the adversarial perturbation. We add the adversarial perturbation to multiple hidden states or attention representations of the model layers. Adding the perturbation to the attention representations performed best in our experiments. 
Our model can improve performance on several temporal reasoning benchmarks, and establishes new state-of-the-art results.","subitem_description_type":"Other"}]},"item_10003_publisher_45":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"European Language Resources Association"}]},"item_10003_version_type_44":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_970fb48d4fbd8a85","subitem_version_type":"VoR"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Kanashiro Pereira, Lis","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Duh, Kevin","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Cheng, Fei","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Asahara, Masayuki","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kobayashi, Ichiro","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2022-08-17"}],"displaytype":"detail","filename":"lrec2022_7352.pdf","filesize":[{"value":"329.0 kB"}],"format":"application/pdf","licensetype":"license_9","mimetype":"application/pdf","url":{"label":"lrec2022_7352.pdf","url":"https://repository.ninjal.ac.jp/record/3618/files/lrec2022_7352.pdf"},"version_id":"4c4623d0-bf1c-49bb-bb3c-44b9681c97c7"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"adversarial training","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"robustness","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"temporal reasoning","subitem_subject_language":"en","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"conference paper","resourceuri":"http://purl.org/coar/resource_type/c_5794"}]},"item_title":"Attention-Focused Adversarial Training for Robust Temporal Reasoning","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Attention-Focused Adversarial Training for Robust Temporal Reasoning","subitem_title_language":"en"}]},"item_type_id":"10003","owner":"3","path":["324"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-08-17"},"publish_date":"2022-08-17","publish_status":"0","recid":"3618","relation_version_is_last":true,"title":["Attention-Focused Adversarial Training for Robust Temporal Reasoning"],"weko_creator_id":"3","weko_shared_id":3},"updated":"2023-05-15T14:47:43.863447+00:00"}