@comment{This file was generated by bib2html.pl version 0.94}
@comment{written by Patrick Riley}
@comment{This file came from Sanjit Seshia's publication pages at http://www.eecs.berkeley.edu/~sseshia}

@inproceedings{ak-aaai22,
  author    = {Azad, Abdus Salam and Kim, Edward and Wu, Mark and Lee, Kimin and Stoica, Ion and Abbeel, Pieter and Sangiovanni-Vincentelli, Alberto and Seshia, Sanjit A.},
  title     = {Programmatic Modeling and Generation of Real-time Strategic Soccer Environments for Reinforcement Learning},
  booktitle = {Thirty-Sixth {AAAI} Conference on Artificial Intelligence ({AAAI})},
  publisher = {{AAAI} Press},
  year      = {2022},
  month     = feb,
  abstract  = {The capability of a reinforcement learning (RL) agent heavily depends on the diversity of the learning scenarios generated by the environment. Generation of diverse realistic scenarios is challenging for real-time strategy (RTS) environments. The RTS environments are characterized by intelligent entities/non-RL agents cooperating and competing with the RL agents with large state and action spaces over a long period of time, resulting in an infinite space of feasible, but not necessarily realistic, scenarios involving complex interaction among different RL and non-RL agents. Yet, most of the existing simulators rely on randomly generating the environments based on predefined settings/layouts and offer limited flexibility and control over the environment dynamics for researchers to generate diverse, realistic scenarios as per their demand. To address this issue, for the first time, we formally introduce the benefits of adopting an existing formal scenario specification language, Scenic, to assist researchers to \textit{model} and \textit{generate} diverse scenarios in an RTS environment in a flexible, systematic, and programmatic manner. To showcase the benefits, we interfaced Scenic to an existing RTS environment Google Research Football (GRF) simulator and introduced a benchmark consisting of 32 realistic scenarios, encoded in Scenic, to train RL agents and testing their generalization capabilities. We also show how researchers/RL practitioners can incorporate their domain knowledge to expedite the training process by intuitively modeling stochastic programmatic policies with Scenic.},
}