article_herbst.bib

@article{HuKoAm2013-CCPE-WorkloadClassificationAndForecasting,
  abstract = {As modern enterprise software systems become increasingly dynamic, workload forecasting techniques are gaining in importance as a foundation for online capacity planning and resource management. Time series analysis covers a broad spectrum of methods to calculate workload forecasts based on history monitoring data. Related work in the field of workload forecasting mostly concentrates on evaluating specific methods and their individual optimisation potential or on predicting Quality-of-Service (QoS) metrics directly. As a basis, we present a survey on established forecasting methods of the time series analysis concerning their benefits and drawbacks and group them according to their computational overheads. In this paper, we propose a novel self-adaptive approach that selects suitable forecasting methods for a given context based on a decision tree and direct feedback cycles together with a corresponding implementation. The user needs to provide only his general forecasting objectives. In several experiments and case studies based on real world workload traces, we show that our implementation of the approach provides continuous and reliable forecast results at run-time. The results of this extensive evaluation show that the relative error of the individual forecast points is significantly reduced compared to statically applied forecasting methods, e.g. in an exemplary scenario on average by 37\%. In a case study, between 55\% and 75\% of the violations of a given service level agreement can be prevented by applying proactive resource provisioning based on the forecast results of our implementation.},
  author = {Nikolas Roman Herbst and Nikolaus Huber and Samuel Kounev and Erich Amrehn},
  doi = {10.1002/cpe.3224},
  issn = {1532-0634},
  journal = {Concurrency and Computation: Practice and Experience},
  note = {Special Issue with extended versions of the best papers from ICPE 2013},
  publisher = {John Wiley and Sons, Ltd.},
  keywords = {workload forecasting, arrival rate, time series analysis, proactive resource provisioning, assurance of service level objectives},
  title = {{Self-Adaptive Workload Classification and Forecasting for Proactive Resource Provisioning}},
  url = {http://dx.doi.org/10.1002/cpe.3224},
  year = {2014}
}
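
The entry above describes a self-adaptive selection of time series forecasting methods, driven by a decision tree over the user's forecasting objectives and the characteristics of the monitored arrival-rate trace. As a rough, hypothetical illustration of that selection idea only (the method names, inputs, and thresholds below are assumptions, not the paper's decision tree or implementation), such a selection step might look like this in Python:

    # Illustrative sketch only: picks a forecasting method from simple, assumed
    # rules over the forecast horizon, the available history, and an overhead
    # preference. Thresholds and method names are placeholders.
    def select_forecasting_method(horizon: int, history_length: int, low_overhead: bool) -> str:
        """Return the name of a forecasting method for the given context."""
        if history_length < 3 * horizon:
            # Too little history for seasonal models; fall back to a cheap smoother.
            return "exponential_smoothing"
        if low_overhead:
            # The user prefers fast results over maximum accuracy.
            return "simple_arima"
        # Enough history and no overhead constraint: use a seasonal model.
        return "seasonal_decomposition_forecast"

    if __name__ == "__main__":
        print(select_forecasting_method(horizon=30, history_length=50, low_overhead=False))
        print(select_forecasting_method(horizon=30, history_length=500, low_overhead=True))
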
@article{KiHeKo-TAAS17-ModelExtractLoadProfiles,
  author = {J\'oakim von Kistowski and Nikolas Herbst and Samuel Kounev and Henning Groenda and Christian Stier and Sebastian Lehrig},
  title = {{Modeling and Extracting Load Intensity Profiles}},
  journal = {ACM Transactions on Autonomous and Adaptive Systems (TAAS)},
  issue_date = {2017},
  year = {2017},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {Load Intensity Variation, Load Profile, Open Workloads, Meta-Modeling, Transformation, Model Extraction},
  abstract = {Today's system developers and operators face the challenge of creating software systems that make efficient use of dynamically allocated resources under highly variable and dynamic load profiles, while at the same time delivering reliable performance. Autonomic controllers, e.g., an advanced auto-scaling mechanism in a cloud computing context, can benefit from an abstracted load model as knowledge to reconfigure on time and precisely. Existing workload characterization approaches have limited support to capture variations in the inter-arrival times of incoming work units over time (i.e., a variable load profile). For example, industrial and scientific benchmarks support constant or stepwise increasing load, or inter-arrival times defined by statistical distributions or recorded traces. These options show shortcomings either in representative character of load variation patterns or in abstraction and flexibility of their format. In this article, we present the Descartes Load Intensity Model (DLIM) approach addressing these issues. DLIM provides a modeling formalism for describing load intensity variations over time. A DLIM instance is a compact formal description of a load intensity trace. DLIM-based tools provide features for benchmarking, performance and recorded load intensity trace analysis. As manually obtaining and maintaining DLIM instances becomes time consuming, we contribute three automated extraction methods and devised metrics for comparison and method selection. We discuss how these features are used to enhance system management approaches for adaptations during run-time, and how they are integrated into simulation contexts and enable benchmarking of elastic or adaptive behavior. We show that automatically extracted DLIM instances exhibit an average modeling error of 15.2\% over ten different real-world traces that cover between two weeks and seven months. These results underline DLIM model expressiveness. In terms of accuracy and processing speed, our proposed extraction methods for the descriptive models are comparable to existing time series decomposition methods. Additionally, we illustrate DLIM applicability by outlining approaches of workload modeling in systems engineering that employ or rely on our proposed load intensity modeling formalism.},
  note = {{To Appear}}
}
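
DLIM, as summarized in the entry above, describes a load intensity trace as a composition of time-dependent components such as seasonal patterns, trends, bursts, and noise. As a loose, hypothetical illustration of that idea only (not the DLIM metamodel, its element names, or its tooling; all parameters below are assumptions), a synthetic load intensity profile can be sketched as a sum of such components:

    # Illustrative sketch only: composes a synthetic load intensity profile from
    # a daily seasonal component, a linear trend, and random noise. The component
    # shapes and parameters are assumptions for illustration, not DLIM elements.
    import math
    import random

    def load_intensity(t: float) -> float:
        """Arrival rate (work units per time unit) at time t, as a sum of components."""
        seasonal = 100.0 * (1.0 + math.sin(2.0 * math.pi * t / 24.0))  # daily pattern
        trend = 0.5 * t                                                # slow growth
        noise = random.gauss(0.0, 5.0)                                 # random jitter
        return max(0.0, seasonal + trend + noise)

    if __name__ == "__main__":
        # Print one simulated day of hourly arrival rates.
        for hour in range(24):
            print(hour, round(load_intensity(hour), 1))
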