@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215502,
  title   = "2020 59th IEEE Conference on Decision and Control (CDC)",
  chapter = "Adaptive Control for Linearizable Systems Using On-Policy Reinforcement Learning",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215502",
  isbn    = "978-1-7281-7447-1",
  doi     = "10.1109/CDC42340.2020.9304242"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215409,
  title   = "2020 59th IEEE Conference on Decision and Control (CDC)",
  chapter = "High Confidence Sets for Trajectories of Stochastic Time-Varying Nonlinear Systems",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215409",
  isbn    = "978-1-7281-7447-1",
  doi     = "10.1109/CDC42340.2020.9304491"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215578,
  title   = "2020 59th IEEE Conference on Decision and Control (CDC)",
  chapter = "Expert Selection in High-Dimensional Markov Decision Processes",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215578",
  isbn    = "978-1-7281-7447-1",
  doi     = "10.1109/CDC42340.2020.9303788"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215650,
  title   = "2020 IEEE International Conference on Robotics and Automation (ICRA)",
  chapter = "Feedback Linearization for Uncertain Systems via Reinforcement Learning",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215650",
  isbn    = "978-1-7281-7395-5",
  doi     = "10.1109/ICRA40945.2020.9197158"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215800,
  title   = "2019 IEEE 58th Conference on Decision and Control (CDC)",
  chapter = "Local Nash Equilibria are Isolated, Strict Local Nash Equilibria in 'Almost All' Zero-Sum Continuous Games",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215800",
  isbn    = "978-1-7281-1398-2",
  doi     = "10.1109/CDC40024.2019.9030203"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215867,
  title   = "2018 IEEE Conference on Decision and Control (CDC)",
  chapter = "On the Analysis of Cyclic Drug Schedules for Cancer Treatment using Switched Dynamical Systems",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215867",
  isbn    = "978-1-5386-1395-5",
  doi     = "10.1109/CDC.2018.8619490"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215940,
  title   = "2017 IEEE 56th Annual Conference on Decision and Control (CDC)",
  chapter = "Gradient-based inverse risk-sensitive reinforcement learning",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215940",
  isbn    = "978-1-5090-2873-3",
  doi     = "10.1109/CDC.2017.8264535"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222216008,
  chapter = "Understanding the impact of parking on urban mobility via routing games on queue-flow networks",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222216008",
  doi     = "10.1109/CDC.2016.7799444"
}

@inbook{https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215263,
  title   = "2016 IEEE 55th Conference on Decision and Control (CDC)",
  chapter = "To observe or not to observe: Queuing game framework for urban parking",
  url     = "https://resolver.caltech.edu/CaltechAUTHORS:20210903-222215263",
  isbn    = "978-1-5090-1837-6",
  doi     = "10.1109/CDC.2016.7799079"
}