@comment{inproceedings_westermann.bib}

@inproceedings{Westermann2011b,
  author = {Westermann, Dennis and Krebs, Rouven and Happe, Jens},
  booktitle = {Proceedings of the Computer Performance Engineering - 8th European Performance Engineering Workshop (EPEW 2011)},
  day = {12--13},
  location = {Borrowdale, UK},
  month = oct,
  pages = {325--339},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/Westermann2011b.pdf},
  publisher = {Springer},
  title = {Efficient Experiment Selection in Automated Software Performance Evaluations},
  year = {2011}
}
@inproceedings{happe2010a,
  abstract = {Software performance engineering (SPE) enables software architects to ensure high performance standards for their applications. However, applying SPE in practice is still challenging. Most enterprise applications include a large software basis, such as middleware and legacy systems. In many cases, the software basis is the determining factor of the system's overall timing behavior, throughput, and resource utilization. To capture these influences on the overall system's performance, established performance prediction methods (modelbased and analytical) rely on models that describe the performance-relevant aspects of the system under study. Creating such models requires detailed knowledge on the system's structure and behavior that, in most cases, is not available. In this paper, we abstract from the internal structure of the system under study. We focus our efforts on message-oriented middleware and analyze the dependency between the MOM's usage and its performance. We use statistical inference to conclude these dependencies from observations. For ActiveMQ 5.3, the resulting functions predict the performance with an relative mean square error 0.1.},
  author = {Happe, Jens and Westermann, Dennis and Sachs, Kai and Kapova, Lucia},
  booktitle = {Research into Practice - Reality and Gaps (Proceedings of {QoSA} 2010)},
  editor = {Heineman, George and Kofron, Jan and Plasil, Frantisek},
  pages = {20--35},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/happe2010a.pdf},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science (LNCS)},
  title = {Statistical Inference of Software Performance Models for Parametric Performance Completions},
  volume = {6093},
  year = {2010}
}
@inproceedings{weiss2013,
  acmid = {2479934},
  address = {New York, NY, USA},
  author = {Weiss, Christian and Westermann, Dennis and Heger, Christoph and Moser, Martin},
  booktitle = {Proceedings of the 4th ACM/SPEC International Conference on Performance Engineering},
  doi = {10.1145/2479871.2479934},
  isbn = {978-1-4503-1636-1},
  keywords = {benchmarking, database, java persistence api, performance},
  location = {Prague, Czech Republic},
  note = {Industrial Track},
  numpages = {10},
  pages = {411--420},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/weiss2013.pdf},
  publisher = {ACM},
  series = {ICPE '13},
  title = {Systematic Performance Evaluation Based on Tailored Benchmark Applications},
  url = {http://doi.acm.org/10.1145/2479871.2479934},
  year = {2013}
}
@inproceedings{wert2012integrating,
  author = {Wert, Alexander and Happe, Jens and Westermann, Dennis},
  booktitle = {Proceedings of the Third Joint {WOSP/SIPEW} International Conference on Performance Engineering},
  organization = {ACM},
  pages = {283--286},
  title = {Integrating Software Performance Curves with the {Palladio} Component Model},
  url = {http://dl.acm.org/citation.cfm?id=2188339},
  year = {2012}
}
@inproceedings{westermann2012a,
  address = {New York, NY, USA},
  author = {Westermann, Dennis},
  booktitle = {Proceedings of the 34th International Conference on Software Engineering (ICSE 2012), Doctoral Symposium},
  location = {Z{\"u}rich, Switzerland},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/westermann2012a.pdf},
  publisher = {ACM},
  title = {A Generic Methodology to Derive Domain-Specific Performance Feedback for Developers},
  year = {2012}
}
@inproceedings{westermann2010b,
  abstract = {Understanding the performance characteristics of enterprise applications, such as response time, throughput, and resource utilization, is crucial for satisfying customer expectations and minimizing costs of application hosting. Enterprise applications are usually based on a large set of existing software (e.g. middleware, legacy applications, and third party services). Furthermore, they continuously evolve due to changing market requirements and short innovation cycles. Software performance engineering in its essence is not directly applicable to such scenarios. Many approaches focus on early lifecycle phases assuming that a software system is built from scratch and all its details are known. These approaches neglect influences of already existing middleware, legacy applications, and third party services. For performance prediction, detailed information about the internal structure of the systems is necessary. However, such information may not be available or accessible due to the complexity of existing software. In this paper, we propose a combined approach of model based and measurement based performance evaluation techniques to handle the complexity of large enterprise applications. We outline open research questions that have to be answered in order to put performance engineering in industrial practice. For validation, we plan to apply our approach to different real-world scenarios that involve current SAP enterprise solutions such as SAP Business ByDesign and the SAP Business Suite.},
  address = {Karlsruhe, Germany},
  author = {Westermann, Dennis and Happe, Jens},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {B{\"{u}}hnov{\'{a}}, Barbora and Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  issn = {1432-7864},
  month = jun,
  pages = {71--78},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  title = {Towards Performance Prediction of Large Enterprise Applications Based on Systematic Measurements},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000018464},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{westermann2011a,
  address = {New York, NY, USA},
  author = {Westermann, Dennis and Happe, Jens},
  booktitle = {ICPE'11: Proceedings of the 2nd ACM/SPEC International Conference on Performance Engineering},
  location = {Karlsruhe, Germany},
  publisher = {ACM},
  title = {Performance Cockpit: Systematic Measurements and Analyses},
  url = {http://icpe2011.ipd.kit.edu},
  year = {2011}
}
@inproceedings{westermann2013a,
  author = {Westermann, Dennis and Happe, Jens and Farahbod, Roozbeh},
  booktitle = {Proc. of the ACM Symposium on Applied Computing, {SAC 2013}},
  note = {To appear},
  timestamp = {2012.12.13},
  title = {An Experiment Specification Language for Goal-Driven, Automated Performance Evaluations},
  year = {2013}
}
@inproceedings{westermann2010a,
  abstract = {Evaluating the performance (timing behavior, throughput, and resource utilization) of a software system becomes more and more challenging as today's enterprise applications are built on a large basis of existing software (e.g. middleware, legacy applications, and third party services). As the performance of a system is affected by multiple factors on each layer of the system, performance analysts require detailed knowledge about the system under test and have to deal with a huge number of tools for benchmarking, monitoring, and analyzing. In practice, performance analysts try to handle the complexity by focusing on certain aspects, tools, or technologies. However, these isolated solutions are inefficient due to the small reuse and knowledge sharing. The Performance Cockpit presented in this paper is a framework that encapsulates knowledge about performance engineering, the system under test, and analyses in a single application by providing a flexible, plug-in based architecture. We demonstrate the value of the framework by means of two different case studies.},
  author = {Westermann, Dennis and Happe, Jens and Hauck, Michael and Heupel, Christian},
  booktitle = {Proceedings of the 36th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA 2010)},
  pages = {31--38},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/westermann2010a.pdf},
  publisher = {IEEE Computer Society},
  title = {The Performance Cockpit Approach: A Framework for Systematic Performance Evaluations},
  year = {2010}
}
@inproceedings{westermann2012b,
  author = {Westermann, Dennis and Happe, Jens and Krebs, Rouven and Farahbod, Roozbeh},
  booktitle = {Proceedings of the 27th IEEE/ACM International Conference On Automated Software Engineering (ASE 2012)},
  day = {3--7},
  location = {Essen, Germany},
  month = sep,
  title = {Automated Inference of Goal-oriented Performance Prediction Functions},
  year = {2012}
}
@inproceedings{westermann2010c,
  abstract = {The upcoming business model of providing software as a service (SaaS) bears a lot of challenges to a service provider. On the one hand, service providers have to guarantee a certain quality of service (QoS) and ensure that they adhere to these guarantees at runtime. On the other hand, they have to minimize the total cost of ownership (TCO) of their IT landscape in order to offer competitive prices. The performance of a system is a critical attribute that affects QoS as well as TCO. However, the evaluation of performance characteristics is a complex task. Many existing solutions do not provide the accuracy required for offering dependable guarantees. One major reason for this is that the dependencies between the usage profile (provided by the service consumers) and the performance of the actual system is barely described sufficiently. Software Performance Curves are performance models that are derived by goal-oriented systematic measurements of the actual software service. In this paper, we describe how Software Performance Curves can be derived by a service provider that hosts a multi-tenant system. Moreover, we illustrate how Software Performance Curves can be used to derive feasible performance guarantees, develop pricing functions, and minimize hardware resources.},
  acmid = {1858267},
  address = {New York, NY, USA},
  articleno = {3},
  author = {Westermann, Dennis and Momm, Christof},
  booktitle = {Proceedings of the 2nd International Workshop on the Quality of Service-Oriented Software Systems},
  doi = {10.1145/1858263.1858267},
  isbn = {978-1-4503-0239-5},
  location = {Oslo, Norway},
  numpages = {6},
  pages = {3:1--3:6},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/westermann2010c.pdf},
  publisher = {ACM},
  series = {QUASOSS '10},
  title = {Using Software Performance Curves for Dependable and Cost-Efficient Service Hosting},
  url = {http://doi.acm.org/10.1145/1858263.1858267},
  year = {2010}
}