@phdthesis{10.7907/cdf6-0w78, author = {Li, Tongxin}, title = {Learning-Augmented Control and Decision-Making: Theory and Applications in Smart Grids}, school = {California Institute of Technology}, year = {2023}, doi = {10.7907/cdf6-0w78}, url = {https://resolver.caltech.edu/CaltechTHESIS:07202022-040725024}, abstract = {

Achieving carbon neutrality by 2050 not only leads to the increasing penetration of renewable energy, but also to an explosive growth of smart meter data. Recently, augmenting classical methods in real-world cyber-physical systems such as smart grids with black-box AI tools, forecasts, and ML algorithms has attracted growing interest. Integrating AI techniques into smart grids, on the one hand, provides a new approach to handle the uncertainties caused by renewable resources and human behaviors, but on the other hand, creates practical issues such as reliability, stability, privacy, and scalability for the AI-integrated algorithms.

This dissertation focuses on solving problems raised in designing learning-augmented control and decision-making algorithms.

The results presented in this dissertation are three-fold. We first study a problem in linear quadratic control, where imperfect/untrusted AI predictions of system perturbations are available. We show that it is possible to design a learning-augmented algorithm with performance guarantees that is aggressive if the predictions are accurate and conservative if they are imperfect. Machine-learned black-box policies are ubiquitous for nonlinear control problems. Meanwhile, crude model information is often available for these problems from, e.g., linear approximations of nonlinear dynamics. We next study the problem of equipping a black-box control policy with model-based advice for nonlinear control on a single trajectory. We first show a general negative result that a naive convex combination of a black-box policy and a linear model-based policy can lead to instability, even if the two policies are both stabilizing. We then propose an adaptive λ-confident policy, with a coefficient λ indicating the confidence in a black-box policy, and prove its stability. With bounded nonlinearity, in addition, we show that the adaptive λ-confident policy achieves a bounded competitive ratio when a black-box policy is near-optimal. Finally, we propose an online learning approach to implement the adaptive λ-confident policy and verify its efficacy in case studies about the Cart-Pole problem and a real-world electric vehicle (EV) charging problem with data bias due to COVID-19.

Aggregators have emerged as crucial tools for the coordination of distributed, controllable loads. To be used effectively, an aggregator must be able to communicate the available flexibility of the loads it controls, known as the aggregate flexibility, to a system operator. However, most existing aggregate flexibility measures are slow-timescale estimations and much less attention has been paid to real-time coordination between an aggregator and an operator. In the second part of this dissertation, we consider solving an online decision-making problem in a closed-loop system and present a design of real-time aggregate flexibility feedback, termed the maximum entropy feedback (MEF). In addition to deriving analytic properties of the MEF, combining learning and control, we show that it can be approximated using reinforcement learning and used as a penalty term in a novel control algorithm, the penalized predictive control (PPC), that enables efficient communication, fast computation, and lower costs. We show that under certain regularity assumptions, the PPC is optimal. We illustrate the efficacy of the PPC using a dataset from an adaptive electric vehicle charging network and show that PPC outperforms classical model predictive control (MPC). From a theoretical perspective, a two-controller problem is formulated. A central controller chooses an action from a feasible set that is determined by time-varying and coupling constraints, which depend on all past actions and states. The central controller’s goal is to minimize the cumulative cost; however, the controller has access to neither the feasible set nor the dynamics directly, which are determined by a remote local controller. 
Instead, the central controller receives only an aggregate summary of the feasibility information from the local controller, which does not know the system costs. We show that it is possible for an online algorithm using feasibility information to nearly match the dynamic regret of an online algorithm using perfect information whenever the feasible sets satisfy some criterion, which is satisfied by inventory and tracking constraints.

The third part of this dissertation consists of examples of learning, inference, and data analysis methods for power system identification and electric charging. We present a power system identification problem with noisy nodal measurements and efficient algorithms, based on fundamental trade-offs between the number of measurements, the complexity of the graph class, and the probability of error. Next, we specifically consider prediction and unsupervised learning tasks in EV charging. We provide basic data analysis results of a public dataset released by Caltech and develop a novel iterative clustering method for classifying time series of EV charging rates.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/8817-xy25, author = {Liang, Chen}, title = {Cascading Failures in Power Systems: Modeling, Characterization, and Mitigation}, school = {California Institute of Technology}, year = {2022}, doi = {10.7907/8817-xy25}, url = {https://resolver.caltech.edu/CaltechTHESIS:06032022-035416994}, abstract = {

Reliability is a critical goal for power systems. Due to the connectivity of power grids, an initial failure may trigger a cascade of failures and eventually lead to a large-scale blackout, causing significant economic and social impacts. Cascading failure analysis thus draws wide attention from power system practitioners and researchers. A well-known observation is that cascading failures in power systems propagate non-locally because of the complex mechanism of power grids. Such non-local propagation makes it particularly challenging to model, analyze and control the failure process. In this thesis, we tackle these challenges by establishing a mathematical theory to model and characterize failure patterns, discover structural properties of failure propagation, and design novel techniques for failure mitigation.

First, we propose a failure propagation model considering both fast-timescale system frequency dynamics and the slow-timescale line tripping process. This model provides mathematical justifications to the widely used static DC model and can be generalized to capture a variety of failure propagation patterns induced by different control mechanisms of the power grid. More importantly, this model provides flexibility to design real-time control algorithms for failure mitigation and localization.

Second, we provide a complete characterization of line failures under the static DC model. Our results unveil a deep connection between the power redistribution patterns and the network block decomposition. More specifically, we show that a non-cut line failure in a block will only impact the branch power flows on the transmission lines within the block. In contrast, a cut set line failure will propagate globally depending on both the power balancing rules and the network topological structure. Further, we discuss three types of interface networks to connect the sub-grids, all achieving better failure localization performance.

Third, we study corrective control algorithms for failure mitigation. We integrate a distributed frequency control strategy with the network block decomposition to provide provable failure mitigation and localization guarantees on line failures. This strategy operates on the frequency control timescale and supplements existing corrective mechanisms, improving grid reliability and operational efficiency. We further explore the failure mitigation approach with direct post-contingency injection adjustments. Specifically, we propose an optimization-based control method with strong structural properties, which is highly desirable in large-scale power networks.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H. and Wierman, Adam C.}, } @phdthesis{10.7907/tg26-9857, author = {Zhou, Fengyu}, title = {Optimization of Distribution Power Networks: from Single-Phase to Multi-Phase}, school = {California Institute of Technology}, year = {2022}, doi = {10.7907/tg26-9857}, url = {https://resolver.caltech.edu/CaltechTHESIS:06012022-005449566}, abstract = {

Distributed energy resources play an important role in today’s distribution power system. The Optimal Power Flow (OPF) problem is fundamental in power systems as many important applications such as economic dispatch, battery displacement, unit commitment, and voltage control can be formulated as an OPF. A paradoxical observation is the problem’s complexity in theory but simplicity in practice. On the one hand, the problem is well known to be non-convex and NP-hard, so it is likely that no simple algorithms can solve all problem instances efficiently. On the other hand, there are many known algorithms which perform extremely well in practice for both standard test cases and real-world systems. This thesis attempts to reconcile this seeming contradiction.

Specifically, this thesis focuses on two types of properties that may underlie the simplicity in practice of OPF problems. The first property is the exactness of relaxations, meaning that one can find a convex relaxation of the original non-convex problem such that the two problems share the same optimal solution. This property would allow us to convexify the non-convex problem without altering the optimal solution and cost. The second property is that all locally optimal solutions of the non-convex problem are also globally optimal. This property allows us to apply local algorithms such as gradient descent without being trapped at some spurious local optima. We focus on distribution systems with radial networks (i.e., the underlying graphs are trees). We consider both single-phase models and unbalanced multi-phase models, since most real-world distribution systems are multi-phase unbalanced, and distributed energy resources (DERs) can be connected in either Wye or Delta configurations.

The main results of this thesis are two-fold. In the first half, we propose a class of sufficient conditions for a non-convex problem to simultaneously have exact relaxation and no spurious local optima. Then we apply the result to single-phase systems and conclude that if all buses have no injection lower bounds, then both properties (exactness and global optimality) can be achieved. While the same condition is already known to be sufficient for exactness, our work is the first to extend it to global optimality. In the second half, we focus on the exactness property for multi-phase systems. For systems without Delta connections, the exactness can be guaranteed if 1) the binding constraints are sparse in the network at optimality; or 2) all nodal prices fall within a narrow range. Using the DC model as an approximation, we further analyze the OPF sensitivity and explain why nodal prices tend to be close to each other. In the presence of Delta connections, we conclude that the inexactness can be resolved by either postprocessing an optimal solution, or adding a new regularization term in the cost function. Both methods achieve global optimality for IEEE standard test cases.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/8eqg-e110, author = {Lee, Zachary Jordan}, title = {The Adaptive Charging Network Research Portal: Systems, Tools, and Algorithms}, school = {California Institute of Technology}, year = {2021}, doi = {10.7907/8eqg-e110}, url = {https://resolver.caltech.edu/CaltechTHESIS:05282021-174411678}, abstract = {

Millions of electric vehicles (EVs) will enter service in the next decade, generating gigawatt-hours of additional energy demand. Charging these EVs cleanly, affordably, and without excessive stress on the grid will require advances in charging system design, hardware, monitoring, and control. Collectively, we refer to these advances as smart charging. While researchers have explored smart charging for over a decade, very few smart charging systems have been deployed in practice, leaving a sizeable gap between the research literature and the real world. In particular, we find that research is often based on simplified theoretical models. These simple models make analysis tractable but do not account for the complexities of physical systems. Moreover, researchers often lack the data needed to evaluate the performance of their algorithms on real workloads or apply techniques like machine learning. Even when promising algorithms are developed, they are rarely deployed since field tests can be costly and time-consuming.

The goal of this thesis is to develop systems, tools, and algorithms to bridge these gaps between theory and practice.

First, we describe the architecture of a first-of-its-kind smart charging system we call the Adaptive Charging Network (ACN). Next, we use data and models from the ACN to develop a suite of tools to help researchers. These tools include ACN-Data, a public dataset of over 80,000 charging sessions; ACN-Sim, an open-source simulator based on realistic models; and ACN-Live, a platform for field testing algorithms on the ACN. Finally, we describe the algorithms we have developed using these tools. For example, we propose a practical and robust algorithm based on model predictive control, which can reduce infrastructure requirements by over 75%, increase operator profits by up to 3.4 times, and significantly reduce strain on the electric power grid. Other examples include a pricing scheme that fairly allocates costs to users considering time-of-use tariffs and demand charges and a data-driven approach to optimally size on-site solar generation with smart EV charging systems.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/EN8K-W872, author = {Guo, Linqi}, title = {Impact of Transmission Network Topology on Electrical Power Systems}, school = {California Institute of Technology}, year = {2019}, doi = {10.7907/EN8K-W872}, url = {https://resolver.caltech.edu/CaltechTHESIS:05312019-191005982}, abstract = {

Power system reliability is a crucial component in the development of sustainable infrastructure. Because of the intricate interactions among power system components, it is often difficult to make general inferences on how the transmission network topology impacts the performance of the grid in different scenarios. This complexity poses significant challenges for research in the modeling, control, and management of power systems.

In this work, we develop a theory that aims to address this challenge from both the fast-timescale and steady state aspects of power grids. Our analysis builds upon the transmission network Laplacian matrix, and reveals new properties of this well-studied concept in spectral graph theory that are specifically tailored to the power system context. A common theme of this work is the representation of certain physical quantities in terms of graphical structures, which allows us to establish algebraic results on power grid performance using purely topological information. This view is particularly powerful and often leads to surprisingly simple characterizations of complicated system behaviors. Depending on the timescale of the underlying problem, our results can be roughly categorized into the study of frequency regulation and the study of cascading failures.

Fast-timescale: Frequency Regulation. We first study how the transmission network impacts power system robustness against disturbances in transient phase. Towards this goal, we develop a framework based on the Laplacian spectrum that captures the interplay among network topology, system inertia, and generator/load damping. This framework shows that the impact of network topology in frequency regulation can be quantified through the network Laplacian eigenvalues, and that such eigenvalues fully determine the grid robustness against low frequency perturbations. Moreover, we can explicitly decompose the frequency signal along scaled Laplacian eigenvectors when damping-inertia ratios are uniform across the buses. The insights revealed by this framework explain why load-side participation in frequency regulation not only makes the system respond faster, but also helps lower the system nadir after a disturbance, providing useful guidelines in the controller design. We simulate an improved controller reverse engineered from our results on the IEEE 39-bus New England interconnection system, and illustrate its robustness against high frequency oscillations compared to both the conventional droop control and a recent controller design.

We then switch to a more combinatorial problem that seeks to characterize the controllability and observability of the power system in frequency regulation if only a subset of buses are equipped with controllers/sensors. Our results show that the controllability/observability of the system depends on two orthogonal conditions: (a) intrinsic structure of the system graph, and (b) algebraic coverage of buses with controllers/sensors. Condition (a) encodes information on graph symmetry and is shown to hold for almost all practical systems. Condition (b) captures how buses interact with each other through the network and can be verified using the eigenvectors of the graph Laplacian matrix. Based on this characterization, the optimal placement of controllers and sensors in the network can be formulated as a set cover problem. We demonstrate how our results identify the critical buses in real systems using a simulation in the IEEE 39-bus New England interconnection test system. In particular, for this testbed a single well chosen bus is capable of providing full controllability and observability.

Steady State: Cascading Failures. Cascading failures in power systems exhibit non-monotonic, non-local propagation patterns which make the analysis and mitigation of failures difficult. By studying the transmission network Laplacian matrix, we reveal two useful structures that make the analysis of this complex evolution more tractable: (a) In contrast to the lack of monotonicity in the physical system, there is a rich collection of monotonicity we can explore in the spectrum of the Laplacian matrix. This allows us to systematically design topological measures that are monotonic over the cascading event. (b) Power redistribution patterns are closely related to the distribution of different types of trees in the power network topology. Such a graphical interpretation captures Kirchhoff’s Law in a precise way and naturally suggests that we can eliminate long-distance propagation of system disturbances by forming a tree-partition.

We then show that the tree-partition of transmission networks provides a precise analytical characterization of line failure localizability. Specifically, when a non-bridge line is tripped, the impact of this failure only propagates within well-defined components, which we refer to as cells, of the tree-partition defined by the bridges. In contrast, when a bridge line is tripped, the impact of this failure propagates globally across the network, affecting the power flow on all remaining transmission lines. This characterization suggests that it is possible to improve the system robustness by switching off certain transmission lines, so as to create more, smaller components in the tree-partition; thus spatially localizing line failures and making the grid less vulnerable to large-scale outages. We illustrate this approach using the IEEE 118-bus test system and demonstrate that switching off a negligible portion of transmission lines allows the impact of line failures to be significantly more localized without substantial changes in line congestion.

Unified Controller on Tree-partitions. Combining our results from both the fast-timescale and steady state behaviors of power grids, we propose a distributed control strategy that offers strong guarantees in both the mitigation and localization of cascading failures in power systems. This control strategy leverages a new controller design known as Unified Controller (UC) from frequency regulation literature, and revolves around the powerful properties that emerge when the management areas that UC operates over form a tree-partition. After an initial failure, the proposed strategy always prevents successive failures from happening, and regulates the system to the desired steady state where the impact of initial failures is localized as much as possible. For extreme failures that cannot be localized, the proposed framework has a configurable design that progressively involves and coordinates across more control areas for failure mitigation and, as a last resort, imposes minimal load shedding. We compare the proposed control framework with the classical Automatic Generation Control (AGC) on the IEEE 118-bus test system. Simulation results show that our novel control greatly improves the system robustness in terms of the N-1 security standard, and localizes the impact of initial failures in the majority of the load profiles that are examined. Moreover, the proposed framework incurs significantly less load loss, if any, compared to AGC, in all of our case studies.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/6N9W-3J20, author = {Tang, Yujie}, title = {Time-Varying Optimization and Its Application to Power System Operation}, school = {California Institute of Technology}, year = {2019}, doi = {10.7907/6N9W-3J20}, url = {https://resolver.caltech.edu/CaltechTHESIS:01222019-221628111}, abstract = {The main topic of this thesis is time-varying optimization, which studies algorithms that can track optimal trajectories of optimization problems that evolve with time. A typical time-varying optimization algorithm is implemented in a running fashion in the sense that the underlying optimization problem is updated during the iterations of the algorithm, and is especially suitable for optimizing large-scale fast varying systems. Motivated by applications in power system operation, we propose and analyze first-order and second-order running algorithms for time-varying nonconvex optimization problems. The first-order algorithm we propose is the regularized proximal primal-dual gradient algorithm, and we develop a comprehensive theory on its tracking performance. Specifically, we provide analytical results in terms of tracking a KKT point, and derive bounds for the tracking error defined as the distance between the algorithmic iterates and a KKT trajectory. We then provide sufficient conditions under which there exists a set of algorithmic parameters that guarantee that the tracking error bound holds. Qualitatively, the sufficient conditions for the existence of feasible parameters suggest that the problem should be “sufficiently convex” around a KKT trajectory to overcome the nonlinearity of the nonconvex constraints. 
The study of feasible algorithmic parameters motivates us to analyze the continuous-time limit of the discrete-time algorithm, which we formulate as a system of differential inclusions; results on its tracking performance as well as feasible and optimal algorithmic parameters are also derived. Finally, we derive conditions under which the KKT points for a given time instant will always be isolated so that bifurcations or merging of KKT trajectories do not happen. The second-order algorithms we develop are approximate Newton methods that incorporate second-order information. We first propose the approximate Newton method for a special case where there are no explicit inequality or equality constraints. It is shown that good estimation of second-order information is important for achieving satisfactory tracking performance. We also propose a specific version of the approximate Newton method based on L-BFGS-B that handles box constraints. Then, we propose two variants of the approximate Newton method that handle explicit inequality and equality constraints. The first variant employs penalty functions to obtain a modified version of the original problem, so that the approximate Newton method for the special case can be applied. The second variant can be viewed as an extension of the sequential quadratic program in the time-varying setting. Finally, we discuss application of the proposed algorithms to power system operation. We formulate the time-varying optimal power flow problem, and introduce partition of the decision variables that enables us to model the power system by an implicit power flow map. The implicit power flow map allows us to incorporate real-time feedback measurements naturally in the algorithm. The use of real-time feedback measurement is a central idea in real-time optimal power flow algorithms, as it helps reduce the computation burden and potentially improve robustness against model mismatch. 
We then present in detail two real-time optimal power flow algorithms, one based on the regularized proximal primal-dual gradient algorithm, and the other based on the approximate Newton method with the penalty approach.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/Z95M63W4, author = {Chen, Niangjun}, title = {Online Algorithms: From Prediction to Decision}, school = {California Institute of Technology}, year = {2018}, doi = {10.7907/Z95M63W4}, url = {https://resolver.caltech.edu/CaltechTHESIS:10182017-210853845}, abstract = {

Making use of predictions is a crucial, but under-explored, area of sequential decision problems with limited information. While in practice most online algorithms rely on predictions to make real-time decisions, in theory their performance is only analyzed in simplified models of prediction noise, either adversarial or i.i.d. The goal of this thesis is to bridge this divide between theory and practice: to study online algorithms under more practical prediction models, gain a better understanding of the value of prediction, and design online algorithms that make the best use of predictions.

This thesis makes three main contributions. First, we propose a stochastic prediction error model that generalizes prior models in the learning and stochastic control communities, incorporates correlation among prediction errors, and captures the fact that predictions improve as time passes. Using this general prediction model, we prove that Averaging Fixed Horizon Control (AFHC) can simultaneously achieve sublinear regret and constant competitive ratio in expectation using only a constant-sized prediction window, overcoming the hardness results in adversarial prediction models. Second, to understand the optimal use of noisy prediction, we introduce a new class of policies, Committed Horizon Control (CHC), that generalizes both popular policies Receding Horizon Control (RHC) and Averaging Fixed Horizon Control (AFHC). Our results explicitly characterize the optimal use of prediction in a CHC policy as a function of properties of the prediction noise, e.g., variance and correlation structure. Third, we apply the general prediction model and algorithm design framework to the deferrable load control problem in power systems. Our proposed model predictive algorithm provides significant reduction in variance of total load in the power system. Throughout this thesis, we provide both average-case analysis and concentration results for our proposed online algorithms, highlighting that the typical performance is tightly concentrated around the average-case performance.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Wierman, Adam C.}, } @phdthesis{10.7907/Z99C6VBW, author = {Peng, Qiuyu}, title = {Distributed Control and Optimization for Communication and Power Systems}, school = {California Institute of Technology}, year = {2016}, doi = {10.7907/Z99C6VBW}, url = {https://resolver.caltech.edu/CaltechTHESIS:01262016-194420781}, abstract = {

We are at the cusp of a historic transformation of both communication systems and electricity systems. This creates challenges as well as opportunities for the study of networked systems. Problems of these systems typically involve a huge number of end points that require intelligent coordination in a distributed manner. In this thesis, we develop models, theories, and scalable distributed optimization and control algorithms to overcome these challenges.

This thesis focuses on two specific areas: multi-path TCP (Transmission Control Protocol) and electricity distribution system operation and control. Multi-path TCP (MP-TCP) is a TCP extension that allows a single data stream to be split across multiple paths. MP-TCP has the potential to greatly improve reliability as well as efficiency of communication devices. We propose a fluid model for a large class of MP-TCP algorithms and identify design criteria that guarantee the existence, uniqueness, and stability of system equilibrium. We clarify how algorithm parameters impact TCP-friendliness, responsiveness, and window oscillation and demonstrate an inevitable tradeoff among these properties. We discuss the implications of these properties on the behavior of existing algorithms and motivate a new algorithm Balia (balanced linked adaptation) which generalizes existing algorithms and strikes a good balance among TCP-friendliness, responsiveness, and window oscillation. We have implemented Balia in the Linux kernel. We use our prototype to compare the new proposed algorithm Balia with existing MP-TCP algorithms.

Our second focus is on designing computationally efficient algorithms for electricity distribution system operation and control. First, we develop efficient algorithms for feeder reconfiguration in distribution networks. The feeder reconfiguration problem chooses the on/off status of the switches in a distribution network in order to minimize a certain cost such as power loss. It is a mixed integer nonlinear program and hence hard to solve. We propose a heuristic algorithm that is based on the recently developed convex relaxation of the optimal power flow problem. The algorithm is efficient and can successfully compute an optimal configuration on all networks that we have tested. Moreover, we prove that the algorithm solves the feeder reconfiguration problem optimally under certain conditions. We also propose a more efficient algorithm and it incurs a loss in optimality of less than 3% on the test networks.

Second, we develop efficient distributed algorithms that solve the optimal power flow (OPF) problem on distribution networks. The OPF problem determines a network operating point that minimizes a certain objective such as generation cost or power loss. Traditionally OPF is solved in a centralized manner. With increasing penetration of volatile renewable energy resources in distribution systems, we need faster and distributed solutions for real-time feedback control. This is difficult because power flow equations are nonlinear and Kirchhoff’s law is global. We propose solutions for both balanced and unbalanced radial distribution networks. They exploit recent results that suggest solving for a globally optimal solution of OPF over a radial network through a second-order cone program (SOCP) or semi-definite program (SDP) relaxation. Our distributed algorithms are based on the alternating direction method of multipliers (ADMM), but unlike standard ADMM-based distributed OPF algorithms that require solving optimization subproblems using iterative methods, the proposed solutions exploit the problem structure, which greatly reduces the computation time. Specifically, for balanced networks, our decomposition allows us to derive closed form solutions for these subproblems and it speeds up the convergence by 1000x in simulations. For unbalanced networks, the subproblems reduce to either closed form solutions or eigenvalue problems whose size remains constant as the network scales up and computation time is reduced by 100x compared with iterative methods.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/Z9BG2KZG, author = {Cai, Wuhan Desmond}, title = {Electricity Markets for the Smart Grid: Networks, Timescales, and Integration with Control}, school = {California Institute of Technology}, year = {2016}, doi = {10.7907/Z9BG2KZG}, url = {https://resolver.caltech.edu/CaltechTHESIS:05262016-112813537}, abstract = {

We are at the dawn of a significant transformation in the electric industry. Renewable generation and customer participation in grid operations and markets have been growing at tremendous rates in recent years and these trends are expected to continue. These trends are likely to be accompanied by both engineering and market integration challenges. Therefore, to incorporate these resources efficiently into the grid, it is important to deal with the inefficiencies in existing markets. The goal of this thesis is to contribute new insights towards improving the design of electricity markets.

This thesis makes three main contributions. First, we provide insights into how the economic dispatch mechanism could be designed to account for price-anticipating participants. We study this problem in the context of a networked Cournot competition with a market maker and we give an algorithm to find improved market clearing designs. Our findings illustrate the potential inefficiencies in existing markets and provide a framework for improving the design of the markets. Second, we provide insights into the strategic interactions between generation flexibility and forward markets. Our key insight is an observation that spot market capacity constraints can significantly impact the efficiency and existence of equilibrium in forward markets, as they give producers incentives to strategically withhold offers from the markets. Third, we provide insights into how optimization decomposition theory can guide optimal design of the architecture of power systems control. In particular, we illustrate a context where decomposition theory enables us to jointly design market and control mechanisms to allocate resources efficiently across both the economic dispatch and frequency regulation timescales.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/Z9JW8BSM, author = {Farivar, Masoud}, title = {Optimization and Control of Power Flow in Distribution Networks}, school = {California Institute of Technology}, year = {2016}, doi = {10.7907/Z9JW8BSM}, url = {https://resolver.caltech.edu/CaltechTHESIS:12092015-021431773}, abstract = {

Climate change is arguably the most critical issue facing our generation and the next. As we move towards a sustainable future, the grid is rapidly evolving with the integration of more and more renewable energy resources and the emergence of electric vehicles. In particular, large scale adoption of residential and commercial solar photovoltaics (PV) plants is completely changing the traditional slowly-varying unidirectional power flow nature of distribution systems. A high share of intermittent renewables poses several technical challenges, including voltage and frequency control. But along with these challenges, renewable generators also bring with them millions of new DC-AC inverter controllers each year. These fast power electronic devices can provide an unprecedented opportunity to increase energy efficiency and improve power quality, if combined with well-designed inverter control algorithms. The main goal of this dissertation is to develop scalable power flow optimization and control methods that achieve system-wide efficiency, reliability, and robustness for power distribution networks of the future with high penetration of distributed inverter-based renewable generators.

Proposed solutions to power flow control problems in the literature range from fully centralized to fully local ones. In this thesis, we will focus on the two ends of this spectrum. In the first half of this thesis (chapters 2 and 3), we seek optimal solutions to voltage control problems provided a centralized architecture with complete information. These solutions are particularly important for better understanding the overall system behavior and can serve as a benchmark to compare the performance of other control methods against. To this end, we first propose a branch flow model (BFM) for the analysis and optimization of radial and meshed networks. This model leads to a new approach to solve optimal power flow (OPF) problems using a two step relaxation procedure, which has proven to be both reliable and computationally efficient in dealing with the non-convexity of power flow equations in radial and weakly-meshed distribution networks. We will then apply the results to the fast time-scale inverter var control problem and evaluate the performance on real-world circuits in Southern California Edison’s service territory.

The second half (chapters 4 and 5), however, is dedicated to studying local control approaches, as they are the only options available for immediate implementation on today’s distribution networks that lack sufficient monitoring and communication infrastructure. In particular, we will follow a reverse and forward engineering approach to study the recently proposed piecewise linear volt/var control curves. It is the aim of this dissertation to tackle some key problems in these two areas and contribute by providing a rigorous theoretical basis for future work.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Hassibi, Babak}, } @phdthesis{10.7907/Z9RN35TJ, author = {Zhao, Changhong}, title = {Real-Time Load-Side Control of Electric Power Systems}, school = {California Institute of Technology}, year = {2016}, doi = {10.7907/Z9RN35TJ}, url = {https://resolver.caltech.edu/CaltechTHESIS:05232016-160307020}, abstract = {

Two trends are emerging from modern electric power systems: the growth of renewable (e.g., solar and wind) generation, and the integration of information technologies and advanced power electronics. The former introduces large, rapid, and random fluctuations in power supply, demand, frequency, and voltage, which become a major challenge for real-time operation of power systems. The latter creates a tremendous number of controllable intelligent endpoints such as smart buildings and appliances, electric vehicles, energy storage devices, and power electronic devices that can sense, compute, communicate, and actuate. Most of these endpoints are distributed on the load side of power systems, in contrast to traditional control resources such as centralized bulk generators. This thesis focuses on controlling power systems in real time, using these load side resources. Specifically, it studies two problems.

(1) Distributed load-side frequency control: We establish a mathematical framework to design distributed frequency control algorithms for flexible electric loads. In this framework, we formulate a category of optimization problems, called optimal load control (OLC), to incorporate the goals of frequency control, such as balancing power supply and demand, restoring frequency to its nominal value, restoring inter-area power flows, etc., in a way that minimizes total disutility for the loads to participate in frequency control by deviating from their nominal power usage. By exploiting distributed algorithms to solve OLC and analyzing convergence of these algorithms, we design distributed load-side controllers and prove stability of closed-loop power systems governed by these controllers. This general framework is adapted and applied to different types of power systems described by different models, or to achieve different levels of control goals under different operation scenarios. We first consider a dynamically coherent power system which can be equivalently modeled with a single synchronous machine. We then extend our framework to a multi-machine power network, where we consider primary and secondary frequency controls, linear and nonlinear power flow models, and the interactions between generator dynamics and load control.

(2) Two-timescale voltage control: The voltage of a power distribution system must be maintained closely around its nominal value in real time, even in the presence of highly volatile power supply or demand. For this purpose, we jointly control two types of reactive power sources: a capacitor operating at a slow timescale, and a power electronic device, such as a smart inverter or a D-STATCOM, operating at a fast timescale. Their control actions are solved from optimal power flow problems at two timescales. Specifically, the slow-timescale problem is a chance-constrained optimization, which minimizes power loss and regulates the voltage at the current time instant while limiting the probability of future voltage violations due to stochastic changes in power supply or demand. This control framework forms the basis of an optimal sizing problem, which determines the installation capacities of the control devices by minimizing the sum of power loss and capital cost. We develop computationally efficient heuristics to solve the optimal sizing problem and implement real-time control. Numerical experiments show that the proposed sizing and control schemes significantly improve the reliability of voltage control with a moderate increase in cost.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/Z9FQ9TJ0, author = {Gan, Lingwen}, title = {Distributed Load Control in Multiphase Radial Networks}, school = {California Institute of Technology}, year = {2015}, doi = {10.7907/Z9FQ9TJ0}, url = {https://resolver.caltech.edu/CaltechTHESIS:01272015-214848277}, abstract = {

The current power grid is on the cusp of modernization due to the emergence of distributed generation and controllable loads, as well as renewable energy. On one hand, distributed and renewable generation is volatile and difficult to dispatch. On the other hand, controllable loads provide significant potential for compensating for the uncertainties. In a future grid where there are thousands or millions of controllable loads and a large portion of the generation comes from volatile sources like wind and solar, distributed control that shifts or reduces the power consumption of electric loads in a reliable and economic way would be highly valuable.

Load control needs to be conducted with network awareness. Otherwise, voltage violations and overloading of circuit devices are likely. To model these effects, network power flows and voltages have to be considered explicitly. However, the physical laws that determine power flows and voltages are nonlinear. Furthermore, while distributed generation and controllable loads are mostly located in distribution networks that are multiphase and radial, most of the power flow studies focus on single-phase networks.

This thesis focuses on distributed load control in multiphase radial distribution networks. In particular, we first study distributed load control without considering network constraints, and then consider network-aware distributed load control.

Distributed implementation of load control is the main challenge if network constraints can be ignored. In this case, we first ignore the uncertainties in renewable generation and load arrivals, and propose a distributed load control algorithm, Algorithm 1, that optimally schedules the deferrable loads to shape the net electricity demand. Deferrable loads refer to loads whose total energy consumption is fixed, but energy usage can be shifted over time in response to network conditions. Algorithm 1 is a distributed gradient descent algorithm, and empirically converges to optimal deferrable load schedules within 15 iterations.

We then extend Algorithm 1 to a real-time setup where deferrable loads arrive over time, and only imprecise predictions about future renewable generation and load are available at the time of decision making. The real-time algorithm Algorithm 2 is based on model-predictive control: Algorithm 2 uses updated predictions on renewable generation as the true values, and computes a pseudo load to simulate future deferrable load. The pseudo load consumes 0 power at the current time step, and its total energy consumption equals the expectation of future deferrable load total energy request.

Network constraints, e.g., transformer loading constraints and voltage regulation constraints, bring significant challenges to the load control problem since power flows and voltages are governed by nonlinear physical laws. Remarkably, distribution networks are usually multiphase and radial. Two approaches are explored to overcome this challenge: one based on convex relaxation and the other that seeks a locally optimal load schedule.

To explore the convex relaxation approach, a novel but equivalent power flow model, the branch flow model, is developed, and a semidefinite programming relaxation, called BFM-SDP, is obtained using the branch flow model. BFM-SDP is mathematically equivalent to a standard convex relaxation proposed in the literature, but numerically is much more stable. Empirical studies show that BFM-SDP is numerically exact for the IEEE 13-, 34-, 37-, 123-bus networks and a real-world 2065-bus network, while the standard convex relaxation is numerically exact for only two of these networks.

Theoretical guarantees on the exactness of convex relaxations are provided for two types of networks: single-phase radial alternative-current (AC) networks, and single-phase mesh direct-current (DC) networks. In particular, for single-phase radial AC networks, we prove that a second-order cone program (SOCP) relaxation is exact if voltage upper bounds are not binding; we also modify the optimal load control problem so that its SOCP relaxation is always exact. For single-phase mesh DC networks, we prove that an SOCP relaxation is exact if 1) voltage upper bounds are not binding, or 2) voltage upper bounds are uniform and power injection lower bounds are strictly negative; we also modify the optimal load control problem so that its SOCP relaxation is always exact.

To seek a locally optimal load schedule, a distributed gradient-descent algorithm, Algorithm 9, is proposed. The suboptimality gap of the algorithm is rigorously characterized and close to 0 for practical networks. Furthermore, unlike the convex relaxation approach, Algorithm 9 ensures a feasible solution. The gradients used in Algorithm 9 are estimated based on a linear approximation of the power flow, which is derived with the following assumptions: 1) line losses are negligible; and 2) voltages are reasonably balanced. Both assumptions are satisfied in practical distribution networks. Empirical results show that Algorithm 9 obtains a 70+ times speedup over the convex relaxation approach, at the cost of a suboptimality within numerical precision.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/FRGW-AF26, author = {Bose, Subhonmesh}, title = {An Integrated Design Approach to Power Systems: From Power Flows to Electricity Markets}, school = {California Institute of Technology}, year = {2014}, doi = {10.7907/FRGW-AF26}, url = {https://resolver.caltech.edu/CaltechTHESIS:06012014-040224456}, abstract = {Power system is at the brink of change. Engineering needs, economic forces and environmental factors are the main drivers of this change. The vision is to build a smart electrical grid and a smarter market mechanism around it to fulfill mandates on clean energy. Looking at engineering and economic issues in isolation is no longer an option today; it needs an integrated design approach. In this thesis, I shall revisit some of the classical questions on the engineering operation of power systems that deals with the nonconvexity of power flow equations. Then I shall explore some issues of the interaction of these power flow equations on the electricity markets to address the fundamental issue of market power in a deregulated market environment. Finally, motivated by the emergence of new storage technologies, I present an interesting result on the investment decision problem of placing storage over a power network. The goal of this study is to demonstrate that modern optimization and game theory can provide unique insights into this complex system. Some of the ideas carry over to applications beyond power systems.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @mastersthesis{10.7907/493H-BD56, author = {Chen, Niangjun}, title = {Model Predictive Control for Deferrable Loads Scheduling}, school = {California Institute of Technology}, year = {2014}, doi = {10.7907/493H-BD56}, url = {https://resolver.caltech.edu/CaltechTHESIS:06022014-205438709}, abstract = {

Real-time demand response is essential for handling the uncertainties of renewable generation. Traditionally, demand response has been focused on large industrial and commercial loads; however, it is expected that a large number of small residential loads such as air conditioners, dish washers, and electric vehicles will also participate in the coming years. The electricity consumption of these smaller loads, which we call deferrable loads, can be shifted over time, and thus be used (in aggregate) to compensate for the random fluctuations in renewable generation.

In this thesis, we propose a real-time distributed deferrable load control algorithm to reduce the variance of aggregate load (load minus renewable generation) by shifting the power consumption of deferrable loads to periods with high renewable generation. The algorithm is model predictive in nature, i.e., at every time step, the algorithm minimizes the expected variance to go with updated predictions. We prove that suboptimality of this model predictive algorithm vanishes as time horizon expands in the average case analysis. Further, we prove strong concentration results on the distribution of the load variance obtained by model predictive deferrable load control. These concentration results highlight that the typical performance of model predictive deferrable load control is tightly concentrated around the average-case performance. Finally, we evaluate the algorithm via trace-based simulations.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, } @phdthesis{10.7907/296T-HR79, author = {Liu, Zhenhua}, title = {Sustainable IT and IT for Sustainability}, school = {California Institute of Technology}, year = {2014}, doi = {10.7907/296T-HR79}, url = {https://resolver.caltech.edu/CaltechTHESIS:05312014-215801543}, abstract = {

Energy and sustainability have become one of the most critical issues of our generation. While the abundant potential of renewable energy such as solar and wind provides a real opportunity for sustainability, their intermittency and uncertainty present a daunting operating challenge. This thesis aims to develop analytical models, deployable algorithms, and real systems to enable efficient integration of renewable energy into complex distributed systems with limited information.

The first thrust of the thesis is to make IT systems more sustainable by facilitating the integration of renewable energy into these systems. IT represents one of the fastest growing sectors in energy usage and greenhouse gas pollution. Over the last decade there have been dramatic improvements in the energy efficiency of IT systems, but the efficiency improvements do not necessarily lead to reduction in energy consumption because more servers are demanded. Further, little effort has been put in making IT more sustainable, and most of the improvements are from improved “engineering” rather than improved “algorithms”. In contrast, my work focuses on developing algorithms with rigorous theoretical analysis that improve the sustainability of IT. In particular, this thesis seeks to exploit the flexibilities of cloud workloads both (i) in time by scheduling delay-tolerant workloads and (ii) in space by routing requests to geographically diverse data centers. These opportunities allow data centers to adaptively respond to renewable availability, varying cooling efficiency, and fluctuating energy prices, while still meeting performance requirements. The design of the enabling algorithms is however very challenging because of limited information, non-smooth objective functions and the need for distributed control. Novel distributed algorithms are developed with theoretically provable guarantees to enable the “follow the renewables” routing. Moving from theory to practice, I helped HP design and implement industry’s first Net-zero Energy Data Center.

The second thrust of this thesis is to use IT systems to improve the sustainability and efficiency of our energy infrastructure through data center demand response. The main challenges as we integrate more renewable sources to the existing power grid come from the fluctuation and unpredictability of renewable generation. Although energy storage and reserves can potentially solve the issues, they are very costly. One promising alternative is to make the cloud data centers demand responsive. The potential of such an approach is huge.

To realize this potential, we need adaptive and distributed control of cloud data centers and new electricity market designs for distributed electricity resources. My work is progressing in both directions. In particular, I have designed online algorithms with theoretically guaranteed performance for data center operators to deal with uncertainties under popular demand response programs. Based on local control rules of customers, I have further designed new pricing schemes for demand response to align the interests of customers, utility companies, and the society to improve social welfare.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Wierman, Adam C.}, } @phdthesis{10.7907/NHVJ-FX37, author = {Li, Na (Lina)}, title = {Distributed Optimization in Power Networks and General Multi-agent Systems}, school = {California Institute of Technology}, year = {2013}, doi = {10.7907/NHVJ-FX37}, url = {https://resolver.caltech.edu/CaltechTHESIS:05312013-122007615}, abstract = {

The dissertation studies the general area of complex networked systems that consist of interconnected and active heterogeneous components and usually operate in uncertain environments and with incomplete information. Problems associated with those systems are typically large-scale and computationally intractable, yet they are also very well-structured and have features that can be exploited by appropriate modeling and computational methods. The goal of this thesis is to develop foundational theories and tools to exploit those structures that can lead to computationally-efficient and distributed solutions, and apply them to improve systems operations and architecture.

Specifically, the thesis focuses on two concrete areas. The first one is to design distributed rules to manage distributed energy resources in the power network. The power network is undergoing a fundamental transformation. The future smart grid, especially on the distribution system, will be a large-scale network of distributed energy resources (DERs), each introducing random and rapid fluctuations in power supply, demand, voltage and frequency. These DERs provide a tremendous opportunity for sustainability, efficiency, and power reliability. However, there are daunting technical challenges in managing these DERs and optimizing their operation. The focus of this dissertation is to develop scalable, distributed, and real-time control and optimization to achieve system-wide efficiency, reliability, and robustness for the future power grid. In particular, we will present how to explore the power network structure to design efficient and distributed market and algorithms for the energy management. We will also show how to connect the algorithms with physical dynamics and existing control mechanisms for real-time control in power networks.

The second focus is to develop distributed optimization rules for general multi-agent engineering systems. A central goal in multiagent systems is to design local control laws for the individual agents to ensure that the emergent global behavior is desirable with respect to the given system level objective. Ideally, a system designer seeks to satisfy this goal while conditioning each agent’s control on the least amount of information possible. Our work focused on achieving this goal using the framework of game theory. In particular, we derived a systematic methodology for designing local agent objective functions that guarantees (i) an equivalence between the resulting game-theoretic equilibria and the system level design objective and (ii) that the resulting game possesses an inherent structure that can be exploited for distributed learning, e.g., potential games. The control design can then be completed by applying any distributed learning algorithm that guarantees convergence to the game-theoretic equilibrium. One main advantage of this game theoretic approach is that it provides a hierarchical decomposition between the decomposition of the systemic objective (game design) and the specific local decision rules (distributed learning algorithms). This decomposition provides the system designer with tremendous flexibility to meet the design objectives and constraints inherent in a broad class of multiagent systems. Furthermore, in many settings the resulting controllers will be inherently robust to a host of uncertainties including asynchronous clock rates, delays in information, and component failures.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Doyle, John Comstock}, } @phdthesis{10.7907/AAXJ-EX10, author = {Nair, Jayakrishnan U.}, title = {Scheduling for Heavy-Tailed and Light-Tailed Workloads in Queueing Systems}, school = {California Institute of Technology}, year = {2012}, doi = {10.7907/AAXJ-EX10}, url = {https://resolver.caltech.edu/CaltechTHESIS:06012012-134536732}, abstract = {

In much of classical queueing theory, workloads are assumed to be light-tailed, with job sizes being described using exponential or phase type distributions. However, over the past two decades, studies have shown that several real-world workloads exhibit heavy-tailed characteristics. As a result, there has been a strong interest in studying queues with heavy-tailed workloads. So at this stage, there is a large body of literature on queues with light-tailed workloads, and a large body of literature on queues with heavy-tailed workloads. However, heavy-tailed workloads and light-tailed workloads differ considerably in their behavior, and these two types of workloads are rarely studied jointly.

In this thesis, we design scheduling policies for queueing systems, considering both heavy-tailed as well as light-tailed workloads. The motivation for this line of work is twofold. First, since real world workloads can be heavy-tailed or light-tailed, it is desirable to design schedulers that are robust in their performance to distributional assumptions on the workload. Second, there might be scenarios where a heavy-tailed and a light-tailed workload interact in a queueing system. In such cases, it is desirable to design schedulers that guarantee fairness in resource allocation for both workload types.

In this thesis, we study three models involving the design of scheduling disciplines for both heavy-tailed as well as light-tailed workloads. In Chapters 3 and 4, we design schedulers that guarantee robust performance across heavy-tailed and light-tailed workloads. In Chapter 5, we consider a setting in which a heavy-tailed and a light-tailed workload compete for service. In this setting, we design scheduling policies that guarantee good response time tail performance for both workloads, while also maintaining throughput optimality.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H. and Wierman, Adam C.}, } @mastersthesis{10.7907/QGF7-4K56, author = {Liu, Zhenhua}, title = {Greening Geographical Load Balancing}, school = {California Institute of Technology}, year = {2011}, doi = {10.7907/QGF7-4K56}, url = {https://resolver.caltech.edu/CaltechTHESIS:05252011-221926445}, abstract = {

Energy expenditure has become a significant fraction of data center operating costs. Recently, “geographical load balancing” has been suggested to reduce energy cost by exploiting the electricity price differences across regions. However, this reduction of cost can paradoxically increase total energy use. This work explores whether the geographical diversity of internet-scale systems can additionally be used to provide environmental gains.

We first focus on geographical load balancing, which is modeled as a convex optimization problem. We derive two distributed algorithms for achieving optimal geographical load balancing and characterize the optimal solutions.

Then we continue to use the framework and algorithms to investigate whether geographical load balancing can encourage use of “green” renewable energy and reduce use of “brown” fossil fuel energy. Here we consider two approaches, namely, dynamic pricing and local renewables.

For the dynamic pricing case, our numeric results show that if electricity is dynamically priced in proportion to the instantaneous fraction of the total energy that is brown, then geographical load balancing provides significant reductions in brown energy use. However, the benefits depend strongly on the degree to which systems accept dynamic energy pricing and the form of pricing used.

For the local renewable case, we perform a trace-based study to evaluate three issues related to achieving this goal: the impact of geographical load balancing, the role of storage, and the optimal mix of renewables. Our results highlight that geographical load balancing can significantly reduce the required capacity of renewable energy by using the energy more efficiently with “follow the renewables” routing. Further, our results show that small-scale storage can be useful, especially in combination with geographical load balancing, and that an optimal mix of renewables includes significantly more wind than photovoltaic solar.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, } @mastersthesis{10.7907/HPA7-2B52, author = {Hu, Cheng}, title = {Concurrent System Design Using Flow}, school = {California Institute of Technology}, year = {2007}, doi = {10.7907/HPA7-2B52}, url = {https://resolver.caltech.edu/CaltechETD:etd-05252007-140855}, abstract = {

We define a formal model for concurrent systems named Flow. The goal of this formal model is to be both practical in allowing users to build the types of concurrent systems they want to build in real life and practical in allowing users to quickly prove correctness properties about the systems they build.

We define the model formally as the asynchronous product of extended state automata. Extended state automata and their asynchronous products are used to define other modeling languages as well, such as the state space exploration verification tool SPIN. Thus an offshoot of our formal definition is that we see it would not be difficult to use automated verification tools to verify Flow model properties.

Using the formal definition, we show a set of theorems that users will be able to reuse to prove correctness properties about Flow models. One category of theorems deals with showing and constructing Flow models for which all executions are guaranteed to be finite. Another category of theorems deals with showing or constructing Flow models for which all executions from a start state are guaranteed to have the same end state. This is useful because it allows users to know how all concurrent executions from a start state terminate by looking at just one execution. Another category of theorems deals with dividing complex Flow models into smaller sub-models, and showing the properties of the full model by showing the properties of the sub-models, allowing for a divide and conquer strategy to be used with Flow model proofs. The last category deals with using Hoare triples to prove properties about all executions from a set of possible start states as characterized by some pre-condition by proving properties about a set of representative executions from those start states. In the best case, we can use a combination of these techniques to show that all executions of a Flow model with start states that satisfy some pre-condition have end states that satisfy some post-condition by considering just one feasible execution of the model.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/Q9EV-S167, author = {Mehyar, Mortada}, title = {Distributed Averaging and Efficient File Sharing on Peer-to-Peer Networks}, school = {California Institute of Technology}, year = {2007}, doi = {10.7907/Q9EV-S167}, url = {https://resolver.caltech.edu/CaltechETD:etd-01102007-010550}, abstract = {

The work presented in this thesis is mainly divided in two parts. In the first part we study the problem of distributed averaging, which has attracted a lot of interest in the research community in recent years. Our work focuses on the issues of implementing distributed averaging algorithms on peer-to-peer networks such as the Internet. We present algorithms that eliminate the need for global coordination or synchronization, as many other algorithms require, and show mathematical analysis of their convergence.

Discrete-event simulations that verify the theoretical results are presented. We show that the algorithms proposed converge rapidly in practical scenarios. Real-world experiments are also presented to further corroborate these results. We present experiments conducted on the PlanetLab research network. Finally, we present several promising applications of distributed averaging that can be implemented in a wide range of areas of interest.

The second part of this thesis, also related to peer-to-peer networking, is about modelling and understanding peer-to-peer file sharing. The BitTorrent protocol has become one of the most popular peer-to-peer file sharing systems in recent years. Theoretical understanding of the global behavior of BitTorrent and similar peer-to-peer file sharing systems is however not very complete yet. We study a model that requires very simple assumptions yet exhibits a lot of structure. We show that it is possible to consider a wide range of performance criteria within the framework, and that the model captures many of the important issues of peer-to-peer file sharing.

We believe the results provide fundamental insights to practical peer-to-peer file sharing systems. We show that many optimization criteria can be studied within our framework. Many new directions of research are also opened up.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/W5E3-9N04, author = {Wei, Xiaoliang (David)}, title = {Microscopic Behavior of Internet Congestion Control}, school = {California Institute of Technology}, year = {2007}, doi = {10.7907/W5E3-9N04}, url = {https://resolver.caltech.edu/CaltechETD:etd-05292007-223200}, abstract = {

The Internet research community has focused on the macroscopic behavior of Transmission Control Protocol (TCP) and overlooked its microscopic behavior for years. This thesis studies the microscopic behavior of TCP and its effects on performance. We go into the packet-level details of TCP control algorithms and explore the behavior in short time scales within one round-trip time. We find that the burstiness effects in such small time scales have significant impacts on both delay-based TCP and loss-based TCP.

For delay-based TCP algorithms, the micro-burst leads to much faster queue convergence than what the traditional macroscopic models predict. With such fast queue convergence, some delay-based congestion control algorithms are much more stable in reality than in the analytical results from existing macroscopic models. This observation allows us to design more responsive yet stable algorithms which would otherwise be impossible.

For loss-based TCP algorithms, the sub-RTT burstiness in TCP packet transmission process has significant impacts on the loss synchronization rate, an important parameter which affects the efficiency, fairness and convergence of loss-based TCP congestion control algorithms.

Our findings explain several long-standing controversial problems and have inspired new algorithms that achieve better TCP performance.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @phdthesis{10.7907/9G3P-7F13, author = {Li, Lun}, title = {Topologies of Complex Networks: Functions and Structures}, school = {California Institute of Technology}, year = {2007}, doi = {10.7907/9G3P-7F13}, url = {https://resolver.caltech.edu/CaltechETD:etd-05282007-223415}, abstract = {

During the last decade, significant efforts have been made toward improving our understanding of the topological structures underlying complex networks and illuminating some of the intriguing large-scale properties exhibited by these systems. The dominant theme of these efforts has been on studying the graph-theoretic properties of the corresponding connectivity structures and on developing universal theories and models that transcend system-specific details and describe the different systems well in a statistical sense.

However, in this thesis we argue that these efforts have had limited success and are in need of substantial correction. Using a highly engineered system, the Internet, as a case study we demonstrate that networks are designed for a purpose, and ignoring that aspect or obscuring it with the use of some generic but random mechanism can result in models that misrepresent what matters for system functions. By accounting in a minimal manner for both the functional requirements and structural features inherent in the design of an engineered system, we propose an alternative, optimization-based modeling approach that highlights the necessary trade-offs between system performance and the technological and economic constraints that are crucial when designing the system. We show that our proposed approach yields network models that not only match the large-scale graph-theoretic properties of measured router-level topologies well but are also fully consistent with engineering intuition and networking reality, especially as far as their performance aspects and robustness properties are concerned. In fact, we show that our design-inspired network models can be easily distinguished from previously considered probabilistic network models and efficiently achieve the level of performance for which they were designed in the first place.

While this thesis focuses on the Internet, it has much broader implications for complex networks and graph theory generally. To better differentiate between different graphs that are identical in certain graph statistics, we introduce a structural metric, the s-metric, and demonstrate that it provides insights into the diversity of graphs constrained by certain common properties and sheds new light on many classic graph concepts such as the various notions of self-similarity, likelihood, and assortativity. Our s-metric clarifies much of the confusion surrounding the sensational qualitative claims in the current graph theory literature for complex networks and offers a rigorous and quantitative alternative.

Moreover, to examine the space of graphs that satisfy certain common properties, we propose a new approach that is based on establishing a link between two graphs if and only if one can be obtained from the other via a local transformation. Exploring the resulting connected space of graphs by dividing it into countable subspaces provides a much clearer picture on the whole space. We also show that this space of graphs has a rich and interesting structure and that some properties of the latter can be related to features of the individual graphs in this space (e.g., degree variability of a node g in the space of graphs and the s-metric for g).

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Doyle, John Comstock}, } @phdthesis{10.7907/TEVD-PK95, author = {Chen, Lijun}, title = {Wireless Network Design and Control}, school = {California Institute of Technology}, year = {2007}, doi = {10.7907/TEVD-PK95}, url = {https://resolver.caltech.edu/CaltechETD:etd-12282006-181735}, abstract = {

Optimization theory and game theory provide a suite of tools that are flexible in modelling various network systems, and a rich series of equilibrium solution concepts and convergent algorithms. In this thesis, we view network protocols as distributed algorithms achieving the corresponding network equilibria, and study wireless network design and control in optimization and game-theoretic frameworks.

Specifically, we first take a holistic approach and design an overall framework for the protocol architecture in ad hoc wireless networks. The goal is to integrate various protocol layers into a unified framework, by regarding them as distributed computations over the network to solve some optimization problem. Our current theory integrates three functions—congestion control, routing and scheduling—in transport, network and link layers into a coherent framework. These three functions interact through and are regulated by congestion price so as to achieve a global optimality, even in a time-varying environment. This framework is promising to be extended to provide a mathematical theory for network architecture, and to allow us to systematically carry out cross-layer design.

We then develop a general game-theoretic framework for contention control. We define a general game-theoretic model, called random access game, to study the contention/interaction among wireless nodes, and propose a novel medium access method derived from carrier sensing multiple access with collision avoidance in which each node estimates its conditional collision probability and adjusts its persistence probability or contention window, according to a distributed strategy update mechanism achieving the Nash equilibrium of random access game. This results in simple dynamics, controllable performance objectives, good short-term fairness, low collision, and high throughput. As wireless nodes can estimate conditional collision probabilities by observing consecutive idle slots between transmissions, we can decouple contention control from handling failed transmissions. This also opens up other opportunities such as rate adaptation to channel variations. In addition to providing a general and systematic design methodology for medium access control, the random access game model also provides an analytical framework to understand the equilibrium properties such as throughput, loss and fairness, and dynamic properties of different medium access protocols and their interactions.

Finally, we conclude this work with some suggestions for future research.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Doyle, John Comstock and Low, Steven H.}, } @phdthesis{10.7907/4DQ0-GA49, author = {Wang, Jiantao}, title = {A Theoretical Study of Internet Congestion Control: Equilibrium and Dynamics}, school = {California Institute of Technology}, year = {2006}, doi = {10.7907/4DQ0-GA49}, url = {https://resolver.caltech.edu/CaltechETD:etd-11122005-082753}, abstract = {

In the last several years, significant progress has been made in modelling the Internet congestion control using theories from convex optimization and feedback control. In this dissertation, the equilibrium and dynamics of various congestion control schemes are rigorously studied using these mathematical frameworks.

First, we study the dynamics of TCP/AQM systems. We demonstrate that the dynamics of queue and average window in Reno/RED networks are determined predominantly by the protocol stability, not by AIMD probing nor noise traffic. Our study shows that Reno/RED becomes unstable when delay increases and more strikingly, when link capacity increases. Therefore, TCP Reno is ill suited for the future high-speed network, which has motivated the design of FAST TCP. Using a continuous-time model, we prove that FAST TCP is globally stable without feedback delays and provide a sufficient condition for local stability when feedback delays are present. We also introduce a discrete-time model for FAST TCP that fully captures the effect of self-clocking and derive the local stability condition for general networks with feedback delays.

Second, the equilibrium properties (i.e., fairness, throughput, and capacity) of TCP/AQM systems are studied using the utility maximization framework. We quantitatively capture the variations in network throughput with changes in link capacity and allocation fairness. We clarify the open conjecture of whether a fairer allocation is always more efficient. The effects of changes in routing are studied using a joint optimization problem over both source rates and their routes. We investigate whether minimal-cost routing with proper link costs can solve this joint optimization problem in a distributed way. We also identify the tradeoff between achievable utility and routing stability.

At the end, two other related projects are briefly described.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Doyle, John Comstock and Low, Steven H.}, } @phdthesis{10.7907/eh43-pa83, author = {Tang, Ao (Kevin)}, title = {Heterogeneous Congestion Control Protocols}, school = {California Institute of Technology}, year = {2006}, doi = {10.7907/eh43-pa83}, url = {https://resolver.caltech.edu/CaltechETD:etd-05242006-170918}, abstract = {

Homogeneity of price is an implicit yet fundamental assumption underlying price based resource allocation theory. In this thesis, we study the effects of relaxing this assumption by examining a concrete engineering system (network with heterogeneous congestion control protocols). The behavior of the system turns out to be very different from the homogeneous case and can potentially be much more complicated. A systematic theory is developed that includes all major properties of equilibrium of the system such as existence, uniqueness, optimality, and stability. In addition to analysis, we also present numerical examples, simulations, and experiments to illustrate the theory and verify its predictions.

When heterogeneous congestion control protocols that react to different pricing signals share the same network, the resulting equilibrium can no longer be interpreted as a solution to the standard utility maximization problem as the current theory suggests. After introducing a mathematical formulation of network equilibrium for multi-protocol networks, we prove the existence of equilibrium under mild assumptions. For almost all networks, the equilibria are locally unique. They are finite and odd in number. They cannot all be locally stable unless the equilibrium is globally unique. We also derive two conditions for global uniqueness. By identifying an optimization problem associated with every equilibrium, we show that every equilibrium is Pareto efficient and provide an upper bound on efficiency loss due to pricing heterogeneity. Both intra-protocol and inter-protocol fairness are then discussed. On dynamics, various stability results are provided. In particular it is shown that if the degree of pricing heterogeneity is small enough, the network equilibrium is not only unique but also locally stable. Finally, a distributed algorithm is proposed to steer a network to the unique equilibrium that maximizes the aggregate utility, by only updating a linear parameter in the sources’ algorithms in a slow timescale.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, } @mastersthesis{10.7907/262Z-0J78, author = {Pongsajapan, John}, title = {Optimization and Stability of TCP/IP with Delay-Sensitive Utility Functions}, school = {California Institute of Technology}, year = {2006}, doi = {10.7907/262Z-0J78}, url = {https://resolver.caltech.edu/CaltechETD:etd-06022006-162638}, abstract = {

TCP/IP can be interpreted as a distributed primal-dual algorithm to maximize aggregate utility over source rates. It has recently been shown that an equilibrium of TCP/IP, if it exists, maximizes the same aggregate utility function over both source rates and routes, provided pure congestion prices are used as link costs in the shortest-path calculation of IP. Moreover, the utility functions are delay-insensitive, i.e., they are functions of rates only. We extend this result in several ways.

First, we show that if utility functions are delay-insensitive, then there are networks for which TCP/IP optimizes aggregate utility only if routing is based on pure congestion prices. Routing based on the weighted sum of congestion prices and propagation delays optimizes aggregate utility for general networks only if the utility functions are delay-sensitive. Moreover, we identify such a class of delay-sensitive utility functions that is implicitly optimized by TCP/IP. As for the delay-insensitive case, we show that, for this class of utility functions, an equilibrium of TCP/IP exists if and only if the optimization problem has zero duality gap. In that case, there is no penalty for not splitting the traffic. We exhibit some counter-intuitive properties of this class of utility functions. We also prove that any class of delay-sensitive utility functions that are optimized by TCP/IP necessarily possesses some strange properties.

We prove that, for general networks, if the weight on congestion prices is small enough, only minimum-propagation-delay paths are selected. Hence if all source-destination pairs have unique minimum-propagation-delay paths, then equilibrium of TCP/IP exists and is asymptotically stable. For general networks, their equilibrium properties are the same as a modified network where paths with non-minimum propagation delays are deleted and routing is based on pure congestion prices.

It is commonly believed that there is generally an inevitable tradeoff between utility maximization and stability in TCP/IP networks. In particular, as the weight on congestion prices increases, the routing will change from stable to unstable. We exhibit a counterexample where routing changes from stable to unstable and then to stable again, as the weight on congestion prices increases. Moreover, one can construct a network with any given utility profile as a function of the weight on congestion prices.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Low, Steven H.}, }