@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Sanjit Seshia's publication pages at http://www.eecs.berkeley.edu/~sseshia
@inproceedings{dreossi-nfm17,
  author    = {Dreossi, Tommaso and Donz{\'e}, Alexandre and Seshia, Sanjit A.},
  title     = {Compositional Falsification of Cyber-Physical Systems with Machine Learning Components},
  booktitle = {Proceedings of the {NASA} Formal Methods Conference ({NFM})},
  month     = may,
  year      = {2017},
  pages     = {357--372},
  abstract  = {Cyber-physical systems (CPS), such as automotive systems, are starting to include
sophisticated machine learning (ML) components. Their correctness, therefore, depends on
properties of the inner ML
modules. While learning algorithms aim to generalize from examples, they are only
as good as the examples provided, and
recent efforts have shown that they can produce inconsistent output under small
adversarial perturbations. This raises the question: can the output from learning components
lead to a failure of the entire CPS? In this work, we address this question by
formulating it as a problem of
falsifying signal temporal logic (STL) specifications for CPS with ML components.
We propose a compositional falsification framework where a temporal
logic falsifier and a machine learning analyzer cooperate with the aim
of finding falsifying executions of the considered model. The efficacy
of the proposed technique is shown on an automatic emergency braking system
model with a perception component based on deep neural networks.},
}