inproceedings_kuperberg.bib

@inproceedings{hauck2011b,
  address = {New York, NY, USA},
  author = {Michael Hauck and Michael Kuperberg and Nikolaus Huber and Ralf Reussner},
  booktitle = {Proceedings of the 7th ACM SIGSOFT International Conference on the Quality of Software Architectures (QoSA 2011)},
  day = {20--24},
  doi = {10.1145/2000259.2000269},
  isbn = {978-1-4503-0724-6},
  month = {June},
  numpages = {10},
  pages = {53--62},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/hauck2011b.pdf},
  publisher = {ACM},
  title = {{Ginpex: Deriving Performance-relevant Infrastructure Properties Through Goal-oriented Experiments}},
  url = {http://doi.acm.org/10.1145/2000259.2000269},
  year = {2011}
}
@inproceedings{hauck2009b,
  abstract = {Software architects often use model-based techniques to analyse performance (e.g. response times), reliability and other extra-functional properties of software systems. These techniques operate on models of software architecture and execution environment, and are applied at design time for early evaluation of design alternatives, especially to avoid implementing systems with insufficient quality. Virtualisation (such as operating system hypervisors or virtual machines) and multiple layers in execution environments (e.g. RAID disk array controllers on top of hard disks) are becoming increasingly popular in practice and need to be reflected in the models of execution environments. However, current component meta-models do not support virtualisation and cannot model individual layers of execution environments. This means that the entire monolithic model must be recreated when different implementations of a layer must be compared to make a design decision, e.g. when comparing different Java Virtual Machines. In this paper, we present an extension of an established model-based performance prediction approach and associated tools which allow modelling and predicting state-of-the-art layered execution environments, such as disk arrays, virtual machines, and application servers. The evaluation of the presented approach shows its applicability and the resulting accuracy of the performance prediction while respecting the structure of the modelled resource environment.},
  author = {Michael Hauck and Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 12th International Symposium on Component Based Software Engineering (CBSE 2009)}},
  doi = {10.1007/978-3-642-02414-6_12},
  ee = {http://dx.doi.org/10.1007/978-3-642-02414-6_12},
  isbn = {978-3-642-02413-9},
  volume = {5582},
  pages = {191--208},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/hauck2009b.pdf},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {{Modelling Layered Component Execution Environments for Performance Prediction}},
  url = {http://www.comparch-events.org/pages/present.html},
  year = {2009}
}
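% Note on hauck2009b: a minimal, hypothetical sketch of the core idea of
% layered execution environment models -- a resource demand is translated
% layer by layer (e.g. JVM -> hypervisor -> disk array), so a single layer
% can be swapped without rebuilding the whole model. All class names and
% overhead factors below are invented for illustration; the paper's actual
% meta-model is considerably richer.

  import java.util.List;

  interface ExecutionLayer {
      // Maps a resource demand (e.g. seconds of CPU work) to the demand
      // observed when executed on top of this layer, including overhead.
      double translateDemand(double demand);
  }

  class ConstantOverheadLayer implements ExecutionLayer {
      private final double factor; // multiplicative slowdown of this layer
      ConstantOverheadLayer(double factor) { this.factor = factor; }
      public double translateDemand(double demand) { return demand * factor; }
  }

  class LayeredEnvironment {
      private final List<ExecutionLayer> layers; // innermost layer first
      LayeredEnvironment(List<ExecutionLayer> layers) { this.layers = layers; }

      // Predicted duration of a demand executed through all layers.
      double predict(double demand) {
          double d = demand;
          for (ExecutionLayer layer : layers) d = layer.translateDemand(d);
          return d;
      }

      public static void main(String[] args) {
          // Comparing two JVMs only means replacing the first layer.
          LayeredEnvironment env = new LayeredEnvironment(List.of(
                  new ConstantOverheadLayer(1.10),   // JVM
                  new ConstantOverheadLayer(1.25),   // hypervisor
                  new ConstantOverheadLayer(1.05))); // disk array controller
          System.out.printf("predicted demand: %.3f s%n", env.predict(2.0));
      }
  }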
@inproceedings{krogmann2008b,
  abstract = {Integrating heterogeneous software systems becomes increasingly important. It requires combining existing components to form new applications. Such new applications are required to satisfy non-functional properties, such as performance. Design-time performance prediction of new applications built from existing components helps to compare design decisions before actually implementing them in full, avoiding costly prototype and glue code creation. But design-time performance prediction requires understanding and modeling of data flow and control flow across component boundaries, which is not given for most black-box components. If, for example, one component processes and forwards files to other components, this effect should be an explicit model parameter to correctly capture its performance impact. This impact should also be parameterised over data, but no reverse engineering approach exists to recover such dependencies. In this paper, we present an approach that allows reverse engineering of such behavioural models, which is applicable for black-box components. By runtime monitoring and application of genetic programming, we recover functional dependencies in code, which then are expressed as parameterisation in the output model. We successfully validated our approach in a case study on a file sharing application, showing that all dependencies could correctly be reverse engineered from black-box components.},
  address = {Oldenburg},
  author = {Klaus Krogmann and Michael Kuperberg and Ralf Reussner},
  booktitle = {MDD, SOA und IT-Management (MSI 2008)},
  editor = {Steffens, Ulrike and Addicks, Jan Stefan and Streekmann, Niels},
  month = {September},
  pages = {57--71},
  publisher = {GITO Verlag},
  title = {{Reverse Engineering of Parametric Behavioural Service Performance Models from Black-Box Components}},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2008b.pdf},
  year = {2008}
}
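% Note on krogmann2008b: the paper recovers parametric dependencies from
% runtime monitoring via genetic programming. As a deliberately simplified
% stand-in for that search, the sketch below fits a linear dependency
% demand = a * inputSize + b to monitored observations using ordinary least
% squares; all names and data points are invented.

  final class DependencyFit {
      public static void main(String[] args) {
          double[] inputSize = {1, 2, 4, 8, 16};           // e.g. file size in MB
          double[] demand    = {1.1, 2.0, 4.2, 7.9, 16.3}; // monitored demand

          double n = inputSize.length, sx = 0, sy = 0, sxx = 0, sxy = 0;
          for (int i = 0; i < inputSize.length; i++) {
              sx  += inputSize[i];
              sy  += demand[i];
              sxx += inputSize[i] * inputSize[i];
              sxy += inputSize[i] * demand[i];
          }
          double a = (n * sxy - sx * sy) / (n * sxx - sx * sx);
          double b = (sy - a * sx) / n;
          // The recovered expression would be attached to the behaviour model
          // as a parameterisation over the input, as the abstract describes.
          System.out.printf("demand(size) = %.3f * size + %.3f%n", a, b);
      }
  }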
@inproceedings{krogmann2009a,
  abstract = {Software performance engineering provides techniques to analyze and predict the performance (e.g., response time or resource utilization) of software systems to avoid implementations with insufficient performance. These techniques operate on models of software, often at an architectural level, to enable early, design-time predictions for evaluating design alternatives. Current software performance engineering approaches allow the prediction of performance at design time, but often provide cryptic results (e.g., lengths of queues). These prediction results can hardly be mapped back to the software architecture by humans, making it hard to derive the right design decisions. In this paper, we integrate software cartography (a map technique) with software performance engineering to overcome the limited interpretability of raw performance prediction results. Our approach is based on model transformations and a general software visualization approach. It provides an intuitive mapping of prediction results to the software architecture which simplifies design decisions. We successfully evaluated our approach in a quasi experiment involving 41 participants by comparing the correctness of performance-improving design decisions and participants' time effort when using our novel approach versus an existing software performance visualization.},
  author = {Klaus Krogmann and Christian M. Schweda and Sabine Buckl and Michael Kuperberg and Anne Martens and Florian Matthes},
  booktitle = {{Architectures for Adaptive Systems (Proceedings of QoSA 2009)}},
  doi = {10.1007/978-3-642-02351-4_4},
  editor = {Raffaela Mirandola and Ian Gorton and Christine Hofmeister},
  note = {Best Paper Award},
  pages = {52--69},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {{Improved Feedback for Architectural Performance Prediction using Software Cartography Visualizations}},
  url = {http://www.springerlink.com/content/m0325512hl4857v1},
  volume = {5581},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/krogmann2009a.pdf},
  year = {2009}
}
@inproceedings{kuperberg2009b,
  author = {Michael Kuperberg},
  booktitle = {Proceedings of the 2008 Dependability Metrics Research Workshop, Technical Report TR-2009-002},
  editor = {Felix C. Freiling and Irene Eusgeld and Ralf Reussner},
  location = {November 10, 2008, Mannheim, Germany},
  month = {May},
  organization = {Department of Computer Science, University of Mannheim},
  pages = {7--11},
  title = {{FOBIC: A Platform-Independent Performance Metric based on Dynamic Java Bytecode Counts}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009b.pdf},
  year = {2009}
}
@inproceedings{kuperberg2006a,
  abstract = {Performance prediction of component-based software systems is needed for systematic evaluation of design decisions, but also when an application's execution system is changed. Often, the entire application cannot be benchmarked in advance on its new execution system due to high costs or because some required services cannot be provided there. In this case, performance of bytecode instructions or other atomic building blocks of components can be used for performance prediction. However, the performance of bytecode instructions depends not only on the execution system they use, but also on their parameters, which are not considered by most existing research. In this paper, we demonstrate that parameters cannot be ignored when considering Java bytecode. Consequently, we outline a suitable benchmarking approach and the accompanying challenges.},
  author = {Michael Kuperberg},
  booktitle = {Proceedings of the 2nd International Research Training Groups Workshop, Dagstuhl, Germany, November 6--8, 2006},
  editor = {Jens Happe and Heiko Koziolek and Matthias Rohr},
  series = {Trustworthy Software Systems},
  title = {{Influence of Execution Environments on the Performance of Software Components}},
  url = {http://www.gito.de/impress/produkte.nsf/0/81B3A5D1DBB12943C125738B00762D3C},
  volume = {3},
  year = {2006}
}
@inproceedings{kuperberg2007a,
  abstract = {Performance prediction of component-based software systems is needed for systematic evaluation of design decisions, but also when an application's execution system is changed. Often, the entire application cannot be benchmarked in advance on its new execution system due to high costs or because some required services cannot be provided there. In this case, performance of bytecode instructions or other atomic building blocks of components can be used for performance prediction. However, the performance of bytecode instructions depends not only on the execution system they use, but also on their parameters, which are not considered by most existing research. In this paper, we demonstrate that parameters cannot be ignored when considering Java bytecode. Consequently, we outline a suitable benchmarking approach and the accompanying challenges.},
  author = {Michael Kuperberg and Steffen Becker},
  booktitle = {Proceedings of the 12th International Workshop on Component Oriented Programming (WCOP 2007)},
  editor = {Ralf Reussner and Clemens Szyperski and Wolfgang Weck},
  month = {July},
  title = {{Predicting Software Component Performance: On the Relevance of Parameters for Benchmarking Bytecode and APIs}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2007a.pdf},
  year = {2007}
}
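% Note on kuperberg2006a/kuperberg2007a: a tiny illustration of the papers'
% observation that the cost of one and the same operation depends on its
% parameters. The same call (System.arraycopy) is timed for different
% argument sizes; this is an ad-hoc demonstration, not the benchmarking
% approach the papers outline.

  final class ParameterDependence {
      public static void main(String[] args) {
          for (int size : new int[] {1_000, 10_000, 100_000, 1_000_000}) {
              int[] src = new int[size], dst = new int[size];
              long best = Long.MAX_VALUE;
              for (int run = 0; run < 100; run++) { // repeat, keep the minimum
                  long t0 = System.nanoTime();
                  System.arraycopy(src, 0, dst, 0, size);
                  best = Math.min(best, System.nanoTime() - t0);
              }
              System.out.printf("size=%9d  best=%8d ns%n", size, best);
          }
      }
  }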
@inproceedings{kuperberg2008c,
  abstract = {In component-based software engineering, the response time of an entire application is often predicted from the execution durations of individual component services. However, these execution durations are specific for an execution platform (i.e. its resources such as CPU) and for a usage profile. Reusing an existing component on different execution platforms up to now required repeated measurements of the concerned components for each relevant combination of execution platform and usage profile, leading to high effort. This paper presents a novel integrated approach that overcomes these limitations by reconstructing behaviour models with platform-independent resource demands of bytecode components. The reconstructed models are parameterised over input parameter values. Using platform-specific results of bytecode benchmarking, our approach is able to translate the platform-independent resource demands into predictions for execution durations on a certain platform. We validate our approach by predicting the performance of a file sharing application.},
  author = {Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {Proceedings of the 11th International Symposium on Component Based Software Engineering (CBSE 2008), Karlsruhe, Germany, 14th-17th October 2008},
  month = {October},
  pages = {48--63},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {{Performance Prediction for Black-Box Components using Reengineered Parametric Behaviour Models}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008c.pdf},
  volume = {5282},
  year = {2008}
}
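% Note on kuperberg2008c: a hedged sketch of the final prediction step the
% abstract describes -- combining platform-independent resource demands
% (bytecode instruction counts) with platform-specific benchmark results
% (per-instruction durations). Opcode names and all numbers are invented.

  import java.util.Map;

  final class BytecodePrediction {
      public static void main(String[] args) {
          // Counts taken from the reconstructed behaviour model for one call.
          Map<String, Long> counts = Map.of(
                  "IADD", 1_200_000L, "ALOAD", 3_400_000L, "INVOKEVIRTUAL", 250_000L);
          // Durations (ns) from bytecode benchmarking on the target platform.
          Map<String, Double> nsPerInstruction = Map.of(
                  "IADD", 0.4, "ALOAD", 0.3, "INVOKEVIRTUAL", 2.1);
          double totalNs = 0;
          for (Map.Entry<String, Long> e : counts.entrySet()) {
              totalNs += e.getValue() * nsPerInstruction.getOrDefault(e.getKey(), 0.0);
          }
          System.out.printf("predicted duration: %.2f ms%n", totalNs / 1e6);
      }
  }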
@inproceedings{kuperberg2011a,
  abstract = {Performance measurements are often concerned with accurate recording of timing values, which requires timer methods of high quality. Evaluating the quality of a given timer method or performance counter involves analysing several properties, such as accuracy, invocation cost and timer stability. These properties are metrics with platform-dependent values, and ranking and selecting timer methods requires comparisons using multidimensional metric sets, which makes the comparisons ambiguous and unnecessarily complex. To solve this problem, this paper proposes a new unified metric that allows for a simpler comparison. The one-dimensional metric is designed to capture fine-granular differences between timer methods, and normalises accuracy and other quality attributes by using CPU cycles instead of time units. The proposed metric is evaluated on all timer methods provided by Java and .NET platform APIs.},
  address = {New York, NY, USA},
  author = {Michael Kuperberg and Martin Krogmann and Ralf Reussner},
  booktitle = {Proceedings of the 2nd ACM/SPEC International Conference on Performance Engineering},
  doi = {10.1145/1958746.1958770},
  isbn = {978-1-4503-0519-8},
  location = {Karlsruhe, Germany},
  numpages = {6},
  pages = {151--156},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2011a.pdf},
  publisher = {ACM},
  series = {ICPE '11},
  title = {{Metric-based Selection of Timer Methods for Accurate Measurements}},
  url = {http://doi.acm.org/10.1145/1958746.1958770},
  year = {2011}
}
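% Note on kuperberg2011a: a simplified sketch of two of the timer quality
% attributes the unified metric builds on -- invocation cost and accuracy
% (the smallest observable positive increment). The paper additionally
% normalises by CPU cycles, which is not attempted here.

  final class TimerQuality {
      public static void main(String[] args) {
          final int samples = 1_000_000;
          long minPositiveDelta = Long.MAX_VALUE;
          long start = System.nanoTime();
          long prev = start;
          for (int i = 0; i < samples; i++) {
              long now = System.nanoTime();
              if (now > prev) minPositiveDelta = Math.min(minPositiveDelta, now - prev);
              prev = now;
          }
          double avgCostNs = (double) (prev - start) / samples;
          System.out.printf("invocation cost: %.1f ns, smallest positive delta: %d ns%n",
                  avgCostNs, minPositiveDelta);
      }
  }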
@inproceedings{kuperberg2009c,
  author = {Michael Kuperberg and Martin Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 6th International Conference on Quantitative Evaluation of SysTems (QEST) 2009}},
  location = {September 13-16, 2009, Budapest, Hungary},
  title = {{TimerMeter: Quantifying Accuracy of Software Timers for System Analysis}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009c.pdf},
  year = {2009}
}
@inproceedings{kuperberg2008a,
  abstract = {For bytecode-based applications, runtime instruction counts can be used as a platform-independent application execution metric, and also can serve as the basis for bytecode-based performance prediction. However, different instruction types have different execution durations, so they must be counted separately, and method invocations should be identified and counted because of their substantial contribution to the total application performance. For Java bytecode, most JVMs and profilers do not provide such functionality at all, and existing bytecode analysis frameworks require expensive JVM instrumentation for instruction-level counting. In this paper, we present ByCounter, a lightweight approach for exact runtime counting of executed bytecode instructions and method invocations. ByCounter significantly reduces total counting costs by instrumenting only the application bytecode and not the JVM, and it can be used without modifications on any JVM. We evaluate the presented approach by successfully applying it to multiple Java applications on different JVMs, and discuss the runtime costs of applying ByCounter to these cases.},
  author = {Michael Kuperberg and Martin Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 3rd International Workshop on Bytecode Semantics, Verification, Analysis and Transformation, Budapest, Hungary, 5th April 2008 (ETAPS 2008, 11th European Joint Conferences on Theory and Practice of Software)}},
  keywords = {Java, bytecode, counting, portable, runtime, instrumentation, fine-grained},
  title = {{ByCounter: Portable Runtime Counting of Bytecode Instructions and Method Invocations}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008a.pdf},
  year = {2008}
}
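% Note on kuperberg2008a: ByCounter instruments application bytecode to count
% instructions *executed at runtime*. As a small self-contained cousin of that
% idea, the sketch below statically tallies opcodes per class using the ASM
% bytecode library (org.ow2.asm); only a few visitor callbacks are shown, so
% several instruction kinds are ignored. "Override" annotations are left out
% to keep this note parseable inside a .bib file.

  import java.io.IOException;
  import java.util.Map;
  import java.util.TreeMap;
  import org.objectweb.asm.ClassReader;
  import org.objectweb.asm.ClassVisitor;
  import org.objectweb.asm.MethodVisitor;
  import org.objectweb.asm.Opcodes;

  final class OpcodeHistogram {
      public static void main(String[] args) throws IOException {
          final Map<Integer, Long> counts = new TreeMap<>();
          ClassReader reader = new ClassReader("java.lang.String");
          reader.accept(new ClassVisitor(Opcodes.ASM9) {
              public MethodVisitor visitMethod(int access, String name,
                      String desc, String sig, String[] exceptions) {
                  return new MethodVisitor(Opcodes.ASM9) {
                      public void visitInsn(int opcode) { tally(opcode); }
                      public void visitVarInsn(int opcode, int slot) { tally(opcode); }
                      public void visitMethodInsn(int opcode, String owner,
                              String method, String mDesc, boolean itf) { tally(opcode); }
                      private void tally(int opcode) { counts.merge(opcode, 1L, Long::sum); }
                  };
              }
          }, 0);
          counts.forEach((op, n) -> System.out.println("opcode " + op + ": " + n));
      }
  }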
@inproceedings{kuperberg2010a,
  abstract = {Performance is an extra-functional property of software systems which is often critical for achieving sufficient scalability or efficient resource utilisation. As many applications are built using application programmer interfaces (APIs) of execution platforms and external components, the performance of the used API implementations has a strong impact on the performance of the application itself. Yet the sheer size and complexity of today's APIs make it hard to manually benchmark them, while many semantic constraints and requirements (on method parameters, etc.) make it complicated to automate the creation of API benchmarks. Benchmarking the whole API is necessary since it is in the majority of cases hard to specify exactly which parts of the API would be used by a given application. Additionally, modern execution platforms such as the Java Virtual Machine perform extensive nondeterministic runtime optimisations, which need to be considered and quantified for realistic benchmarking. In this paper, we present an automated solution for benchmarking any large API that is written in the Java programming language, not just the Java Platform API. Our implementation induces the optimisations of the Just-In-Time compiler to obtain realistic benchmarking results. We evaluate the approach on a large subset of the Java Platform API exposed by the base libraries of the Java Virtual Machine.},
  author = {Michael Kuperberg and Fouad Omri},
  booktitle = {{Proceedings of Software Engineering 2010 (SE2010)}},
  month = {February},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2010a.pdf},
  title = {{Automated Benchmarking of Java APIs}},
  year = {2010}
}
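% Note on kuperberg2010a: a generic harness sketch for the one detail the
% abstract highlights -- deliberately inducing Just-In-Time compilation with
% a warmup phase before measuring, so the steady state is benchmarked. The
% workload and the iteration counts are arbitrary placeholders, not the
% paper's tool.

  final class WarmedUpBenchmark {
      static long work() { // example workload under test
          long acc = 0;
          for (int i = 0; i < 10_000; i++) acc += Integer.bitCount(i);
          return acc;
      }

      public static void main(String[] args) {
          long sink = 0;
          for (int i = 0; i < 20_000; i++) sink += work(); // warmup: trigger JIT
          final int runs = 10_000;
          long t0 = System.nanoTime();
          for (int i = 0; i < runs; i++) sink += work();   // measured phase
          long perRunNs = (System.nanoTime() - t0) / runs;
          System.out.println("avg " + perRunNs + " ns/run (sink=" + sink + ")");
      }
  }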
@inproceedings{kuperberg2009a,
  abstract = {Automated generation of method parameters is needed in benchmarking scenarios where manual or random generation of parameters is not suitable, does not scale, or is too costly. However, for a method to execute correctly, the generated input parameters must not violate implicit semantic constraints, such as ranges of numeric parameters or the maximum length of a collection. For most methods, such constraints have no formal documentation, and human-readable documentation of them is usually incomplete and ambiguous. Random search of appropriate parameter values is possible but extremely ineffective and does not respect such implicit constraints. Also, the role of polymorphism and of the method invocation targets is often not taken into account. Most existing approaches that claim automation focus on a single method and ignore the structure of the surrounding APIs where those exist. In this paper, we present HEURIGENJ, a novel heuristics-based approach for automatically finding legal and appropriate method input parameters and invocation targets, by approximating the implicit constraints imposed on them. Our approach is designed to support systematic benchmarking of API methods written in the Java language. We evaluate the presented approach by applying it to two frequently-used packages of the Java platform API, and demonstrating its coverage and effectiveness.},
  author = {Michael Kuperberg and Fouad Omri and Ralf Reussner},
  booktitle = {Proceedings of the 6th International Workshop on Formal Engineering approaches to Software Components and Architectures, York, UK, 28th March 2009 (ETAPS 2009, 12th European Joint Conferences on Theory and Practice of Software)},
  keywords = {Heuristics, parameter generation, exception handling, Java, benchmarking},
  title = {{Using Heuristics to Automate Parameter Generation for Benchmarking of Java Methods}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009a.pdf},
  year = {2009}
}
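% Note on kuperberg2009a: a toy illustration of the problem the paper
% attacks -- generating legal parameters for API methods via reflection.
% The per-type defaults and the retry-on-exception idea below are invented
% placeholders; HEURIGENJ's heuristics (numeric ranges, collection lengths,
% invocation targets, polymorphism) are far richer.

  import java.lang.reflect.Method;

  final class NaiveParamGen {
      static Object guess(Class<?> type) { // crude per-type heuristic
          if (type == int.class) return 1;
          if (type == boolean.class) return true;
          if (type == String.class || type == CharSequence.class) return "a";
          return null; // give up for everything else
      }

      public static void main(String[] args) throws Exception {
          Method m = String.class.getMethod("substring", int.class, int.class);
          Object[] params = {guess(int.class), guess(int.class)};
          try {
              // The invocation target "benchmark" is also just a guessed value.
              System.out.println("result: " + m.invoke("benchmark", params));
          } catch (Exception e) { // an implicit range constraint was violated
              System.out.println("retry with other values: " + e.getCause());
          }
      }
  }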
@inproceedings{kuperberg2011b,
  abstract = {Performance evaluation requires accurate and dependable measurements of timing values. Such measurements are usually made using timer methods, but these methods are often too coarse-grained and too inaccurate. Thus, hardware performance counters are frequently accessed directly for fine-granular measurements due to their higher accuracy. However, direct access to these counters can be unreliable on multicore computers because cores can be paused or core affinity can be changed by the operating system, resulting in misleading counter values. The contribution of this paper is the demonstration of an additional, significant flaw arising from the direct use of hardware performance counters. We demonstrate that using JNI and assembler instructions to access the Timestamp Counter from Java applications can result in grossly wrong values, even in single-threaded scenarios.},
  author = {Michael Kuperberg and Ralf Reussner},
  booktitle = {Proceedings of the 2nd ACM/SPEC International Conference on Performance Engineering (ICPE'11), March 14--16, 2011, Karlsruhe, Germany},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2011b.pdf},
  title = {{Analysing the Fidelity of Measurements Performed With Hardware Performance Counters}},
  year = {2011}
}
@inproceedings{simovici2004a,
  abstract = {We present an algorithm for clustering nominal data that is based on a metric on the set of partitions of a finite set of objects; this metric is defined starting from a lower valuation of the lattice of partitions. The proposed algorithm seeks to determine a clustering partition such that the total distance between this partition and the partitions determined by the attributes of the objects has a local minimum. The resulting clustering is quite stable relative to the ordering of the objects.},
  address = {Brighton, UK},
  author = {Dan A. Simovici and Namita Singla and Michael Kuperberg},
  booktitle = {Proceedings of the Fourth IEEE International Conference on Data Mining (ICDM 2004)},
  pages = {523--526},
  title = {{Metric Incremental Clustering of Nominal Data}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/simovici2004a.pdf},
  year = {2004}
}
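% Note on simovici2004a: the paper derives its partition metric from a lower
% valuation of the partition lattice; the exact valuation used there is not
% restated here. A classical instance of such a metric, for orientation, is
% the conditional-entropy (Rokhlin) distance; with it, the clustering goal
% from the abstract reads as a local minimisation problem:
%
%   d(\pi, \sigma) = H(\pi \mid \sigma) + H(\sigma \mid \pi)
%
%   \kappa^{*} = \arg\min_{\kappa} \sum_{i=1}^{m} d(\kappa, \pi_{A_i})
%
% where \pi_{A_i} is the partition of the object set induced by attribute A_i.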