2010.bib

@inproceedings{becker2010a,
  abstract = {Legacy applications are still widely spread. If a need to change deployment or update its functionality arises, it becomes difficult to estimate the performance impact of such modifications due to absence of corresponding models. In this paper, we present an extendable integrated environment based on Eclipse developed in the scope of the Q-ImPrESS project for reverse engineering of legacy applications (in C/C++/Java). The Q-ImPrESS project aims at modeling quality attributes at an architectural level and allows for choosing the most suitable variant for implementation of a desired modification. The main contributions of the project include i) a high integration of all steps of the entire process into a single tool, a beta version of which has been already successfully tested on a case study, ii) integration of multiple research approaches to performance modeling, and iii) an extendable underlying meta-model for different quality dimensions.},
  author = {Steffen Becker and Michael Hauck and Mircea Trifu and Klaus Krogmann and Jan Kofron},
  booktitle = {Proceedings of the 14th European Conference on Software Maintenance and Reengineering, European Projects Track},
  keywords = {Q-ImPrESS},
  pages = {199--202},
  publisher = {IEEE},
  title = {{Reverse Engineering Component Models for Quality Predictions}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/becker2010a.pdf},
  year = {2010}
}
@mastersthesis{bender2010,
  address = {Karlsruhe, Germany},
  author = {Konstantin Bender},
  month = {May},
  school = {Karlsruhe Institute of Technology, Karlsruhe, Germany},
  title = {{Automated Performance Model Extraction of Enterprise Data Fabrics}},
  year = {2010}
}
@inproceedings{brosch2010a,
  abstract = {Software failures can lead to substantial costs for the user. Existing models for software reliability prediction do not provide much insight into this financial impact. Our approach presents a first step towards the integration of reliability prediction from the IT perspective and the business perspective. We show that failure impact should be taken into account not only at their date of occurrence but already in the design stage of the development. First we model cost relevant business processes as well as the associated IT layer and then connect them to failure probabilities. Based on this we conduct a reliability and cost estimation. The method is illustrated by a case study.},
  author = {Brosch, Franz and Gitzel, Ralf and Koziolek, Heiko and Krug, Simone},
  booktitle = {International Workshop on Formal Engineering approaches to Software Components and Architectures (FESCA)},
  doi = {10.1016/j.entcs.2010.07.002},
  issn = {1571-0661},
  pages = {3--17},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/brosch2010a.pdf},
  publisher = {Elsevier},
  series = {ENTCS},
  title = {Combining Architecture-based Software Reliability Predictions with Financial Impact Calculations},
  volume = {264},
  year = {2010}
}
@inproceedings{brosch2010b,
  abstract = {Critical properties of software systems, such as reliability, should be considered early in the development, when they can govern crucial architectural design decisions. A number of design-time reliability-analysis methods has been developed to support this task. However, the methods are often based on very low-level formalisms, and the connection to different architectural aspects (e.g., the system usage profile) is either hidden in the constructs of a formal model (e.g., transition probabilities of a Markov chain), or even neglected (e.g., resource availability). This strongly limits the applicability of the methods to effectively support architectural design. Our approach, based on the Palladio Component Model (PCM), integrates the reliability-relevant architectural aspects in a highly parameterized UML-like model, which allows for transparent evaluation of architectural design options. It covers the propagation of the system usage profile throughout the architecture, and the impact of the execution environment, which are neglected in most of the existing approaches. Before analysis, the model is automatically transformed into a formal Markov model in order to support effective analytical techniques to be employed. The approach has been validated against a reliability simulation of a distributed Business Reporting System.},
  author = {Franz Brosch and Heiko Koziolek and Barbora Buhnova and Ralf Reussner},
  booktitle = {International Conference on the Quality of Software Architectures (QoSA)},
  doi = {10.1007/978-3-642-13821-8_5},
  pages = {36--51},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/brosch2010b.pdf},
  publisher = {Springer},
  series = {LNCS},
  title = {{Parameterized Reliability Prediction for Component-based Software Architectures}},
  volume = {6093},
  year = {2010}
}
@article{broy2010a,
  address = {Los Alamitos, CA, USA},
  author = {Manfred Broy and Ralf Reussner},
  doi = {10.1109/MC.2010.277},
  issn = {0018-9162},
  journal = {Computer},
  pages = {88--91},
  publisher = {IEEE Computer Society},
  title = {Architectural Concepts in Programming Languages},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/broy2010a.pdf},
  volume = {43},
  year = {2010}
}
@inproceedings{burger2010a,
  abstract = {The evolution of software systems often produces incompatibilities with existing data and applications. To prevent incompatibilities, changes have to be well-planned, and developers should know the impact of changes on a software system. This consideration also applies to the field of model-driven development, where changes occur with the modification of the underlying metamodels. Models that are instantiated from an earlier metamodel version may not be valid instances of the new version of a metamodel. In contrast to other metamodeling standards like the Eclipse Modeling Framework (EMF), no classification of metamodel changes has been performed yet for the Meta Object Facility (MOF). The contribution of this paper is the evaluation of the impact of metamodel changes on models. For the formalisation of changes to MOF-based metamodels, a ChangeMetamodel is introduced to describe the transformation of one version of a metamodel to another. The changes are then classified by their impact on the compatibility to existing model data. The classification is formalised using OCL constraints. The ChangeMetamodel and the change classifications presented in this paper lay the foundation for the implementation of a mechanism that allows metamodel editors to estimate the impact of metamodel changes semi-automatically.},
  author = {Erik Burger and Boris Gruschko},
  booktitle = {Proceedings of Modellierung 2010},
  day = {26},
  editor = {Gregor Engels and Dimitris Karagiannis and Heinrich C. Mayr},
  location = {Klagenfurt, Austria},
  month = {March},
  series = {GI-LNI},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2010a_slides.pdf},
  title = {{A Change Metamodel for the Evolution of MOF-Based Metamodels}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2010a.pdf},
  volume = {P-161},
  year = {2010},
  pages = {285--300},
  tags = {Vitruv}
}
@inproceedings{burger2010b,
  abstract = {Software certification as it is practised today guarantees that certainstandards are kept in the process of software development. However, thisdoes not make any statements about the actual quality of implemented code.We propose an approach to certify the non-functional properties of component-based software which is based on a formal refinement calculus, using the performance abstractions of the Palladio Component Model.The certification process guarantees the conformance of a component implementationto its specification regarding performance properties, without having toexpose the source code of the product to a certification authority. Instead,the provable refinement of an abstract performance specification to the performance description of the implementation, together with evidence that the performance description reflects the propertiesof the component implementation, yields the certification seal.The refinement steps are described as Prolog rules so that the validity ofrefinement between two performance descriptions can be checked automatically.},
  address = {Karlsruhe, Germany},
  author = {Erik Burger},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {Barbora B{\"{u}}hnov{\'{a}} and Ralf H. Reussner and Clemens Szyperski and Wolfgang Weck},
  issn = {1432-7864},
  month = {June},
  pages = {15--22},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2010b_slides.pdf},
  title = {Towards Formal Certification of Software Components},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2010b.pdf},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{comuzzi2009,
  abstract = {Service-Oriented Architectures (SOA) represent an architectural shift for building business applications based on loosely-coupled services. In a multi-layered SOA environment the exact conditions under which services are to be delivered can be formally specified by Service Level Agreements (SLAs). However, typical SLAs are just specified at the customer-level and do not allow service providers to manage their IT stack accordingly as they have no insight on how customer-level SLAs translate to metrics or parameters at the various layers of the IT stack. In this paper we present a technical architecture for a multi-level SLA management framework. We discuss the fundamental components and in- terfaces in this architecture and explain the developed integrated framework. Furthermore, we show results from a qualitative evaluation of the framework in the context of an open reference case.},
  address = {Berlin, Heidelberg},
  author = {Comuzzi, Marco and Kotsokalis, Constantinos and Rathfelder, Christoph and Theilmann, Wolfgang and Winkler, Ulrich and Zacco, Gabriele},
  booktitle = {Service-Oriented Computing. ICSOC/ServiceWave 2009 Workshops},
  day = {23--27},
  doi = {10.1007/978-3-642-16132-2_18},
  editor = {Dan, Asit and Gittler, Fr{\'e}d{\'e}ric and Toumani, Farouk},
  isbn = {978-3-642-16131-5},
  keywords = {Computer Science},
  location = {Stockholm, Sweden},
  month = {November},
  pages = {187--196},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/comuzzi2009.pdf},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {A Framework for Multi-level SLA Management},
  url = {http://dx.doi.org/10.1007/978-3-642-16132-2_18},
  volume = {6275},
  year = {2010}
}
@inproceedings{cortellessa2010a,
  abstract = {The problem of interpreting the results of software performance analysis is very critical. Software developers expect feedbacks in terms of architectural design alternatives (e.g., split a software component in two components and re-deploy one of them), whereas the results of performance analysis are either pure numbers (e.g. mean values) or functions (e.g. probability distributions). Support to the interpretation of such results that helps to fill the gap between numbers/functions and software alternatives is still lacking. Performance antipatterns can play a key role in the search of performance problems and in the formulation of their solutions. In this paper we tackle the problem of identifying, among a set of detected performance antipatterns, the ones that are the real causes of problems (i.e. the guilty ones). To this goal we introduce a process to elaborate the performance analysis results and to score performance requirements, model entities and performance antipatterns. The cross observation of such scores allows to classify the level of guiltiness of each antipattern. An example modeled in Palladio is provided to demonstrate the validity of our approach by comparing the performance improvements obtained after removal of differently scored antipatterns.},
  author = {Vittorio Cortellessa and Anne Martens and Ralf Reussner and Catia Trubiani},
  series = {LNCS},
  booktitle = {Fundamental Approaches to Software Engineering, 13th International Conference, FASE 2010},
  doi = {10.1007/978-3-642-12029-9_26},
  editor = {Rosenblum, David and Taentzer, Gabriele},
  location = {Paphos, Cyprus},
  pages = {368--382},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/cortellessa2010a.pdf},
  publisher = {Springer-Verlag Berlin Heidelberg},
  title = {A Process to Effectively Identify Guilty Performance Antipatterns},
  url = {http://www.springerlink.com/content/wl11718486334174},
  year = {2010}
}
@misc{vaupelpatent3,
  author = {Dritschler, G. and Vaupel, Robert and Vater, G. and Yocom, P.},
  howpublished = {Patent No. 7734676, United States},
  month = {June},
  title = {{Method for Controlling the Number of Servers in a Hierarchical Resource Environment}},
  year = {2010}
}
@inproceedings{durdik2010a,
  abstract = {Agile methods and architectural modelling have been considered to be mutually exclusive. On the one hand, agile methods try to reduce overheads by avoiding activities that do not directly contribute to the immediate needs of the current project. This often leads to bad cross-project reuse. On the other hand, architectural modelling is considered a pre-requisite for the systematic cross-project reuse and for the resulting increase in software developer productivity. In this paper, I discuss the relationship between agile methods and architectural modelling and propose a novel process for agile architectural modelling, which drives requirements elicitation through the use of patterns and components. This process is in-line with agile principles and is illustrated on an example application.},
  address = {Karlsruhe, Germany},
  author = {Durdik, Zoya},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {B{\"{u}}hnov{\'{a}}, Barbora and Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  issn = {1432-7864},
  month = {June},
  note = { CompArch Young Investigator Award },
  pages = {23--30},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  title = {Architectural Modeling in Agile Methods},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000018464},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{eichinger10on,
  abstract = {Frequent subgraph mining is an important data-mining technique. In this paper we look at weighted graphs, which are ubiquitous in the real world. The analysis of weights in combination with mining for substructures might yield more precise results. In particular, we study frequent subgraph mining in the presence of weight-based constraints and explain how to integrate them into mining algorithms. While such constraints only yield approximate mining results in most cases, we demonstrate that such results are useful nevertheless and explain this effect. To do so, we both assess the completeness of the approximate result sets, and we carry out application-oriented studies with real-world data-analysis problems: software-defect localization and explorative mining in transportation logistics. Our results are that the runtime can improve by a factor of up to 3.5 in defect localization and 7 in explorative mining. At the same time, we obtain an even slightly increased defect-localization precision and obtain good explorative mining results.},
  address = {Cambridge, UK},
  author = {Frank Eichinger and Matthias Huber and Klemens B{\"o}hm},
  booktitle = {{P}roceedings of the 30th {BCS} {SGAI} {I}nternational {C}onference on {I}nnovative {T}echniques and {A}pplications of {A}rtificial {I}ntelligence ({AI})},
  editor = {Max Bramer and Miltos Petridis and Adrian Hopgood},
  month = {December},
  organization = {BCS SGAI},
  publisher = {Springer London, UK},
  title = {{O}n the {U}sefulness of {W}eight-{B}ased {C}onstraints in {F}requent {S}ubgraph {M}ining},
  url = {http://www.ipd.kit.edu/~eichi/papers/eichinger10on.pdf},
  year = {2010}
}
@inproceedings{eichinger2010a,
  abstract = {Defect localisation is essential in software engineering and is an important task in domain-specific data mining. Existing techniques building on call-graph mining can localise different kinds of defects. However, these techniques focus on defects that affect the control flow and are agnostic regarding the data flow. In this paper, we introduce data flow enabled call graphs that incorporate abstractions of the data flow. Building on these graphs, we present an approach for defect localisation. The creation of the graphs and the defect localisation are essentially data mining problems, making use of discretisation, frequent subgraph mining and feature selection. We demonstrate the defect-localisation qualities of our approach with a study on defects introduced into Weka. As a result, defect localisation now works much better, and a developer has to investigate on average only 1.5 out of 30 methods to fix a defect.},
  address = {Barcelona, Spain},
  author = {Frank Eichinger and Klaus Krogmann and Roland Klug and Klemens B{\"o}hm},
  booktitle = {Proceedings of the 10th European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML PKDD)},
  title = {{Software-Defect Localisation by Mining Dataflow-Enabled Call Graphs}},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000019636},
  year = {2010}
}
@inproceedings{goldschmidt2010a,
  abstract = {Model-Driven Engineering (MDE) aims at improving the development of complex computer systems. Within this context textual concrete syntaxes for models are beneficial for many reasons. They foster usability and productivity because of their fast editing style, their usage of error markers, autocompletion and quick fixes. Several frameworks and tools from different communities for creating concrete textual syntaxes for models emerged during recent years. However, there are still cases where no solution has been published yet. Open issues are incremental parsing and model updating as well as partial and federated views. On the other hand incremental parsing and the handling of abstract syntaxes as leading entities has been investigated within the compiler construction communities many years ago. In this paper we present an approach for concrete textual syntaxes that makes use of incremental parsing and transformation techniques. Thus, we circumvent problems that occur when dealing with concrete textual syntaxes in a UUID based environment including multiple partial and federated views. We validated our approach using a proof of concept implementation including a case study.},
  author = {Goldschmidt, Thomas and Becker, Steffen and Uhl, Axel},
  booktitle = {Proceedings of the 15th IEEE International Conference on Engineering of Complex Computer Systems (ICECCS 2010) - Poster Paper},
  publisher = {IEEE},
  timestamp = {2010.01.08},
  title = {{Incremental Updates for Textual Modeling of Large Scale Models}},
  year = {2010}
}
@inproceedings{groenda2010,
  abstract = {Assessing providable service levels based on model-driven prediction approaches requires valid service behavior specifications. Such specifications must be suitable for the requested usage profile and available hardware to make correct predictions and decisions on providable service levels. Assessing the validity of given parameterized performance specifications is often done manually in an ad-hoc way based on the experience of the performance engineer. In this paper, we show how model-based testing can be applied to validate a specification's accuracy and how the attachment of validation settings to specifications can ease validity assessments. The applicability of the approach is shown on a case study. We demonstrate how our approach allows usage profile and platform independent performance validations, as well as point out how validity assessments are eased.},
  acmid = {1858271},
  address = {New York, NY, USA},
  articleno = {6},
  author = {Henning Groenda},
  booktitle = {Proceedings of the 2nd International Workshop on the Quality of Service-Oriented Software Systems},
  doi = {10.1145/1858263.1858271},
  isbn = {978-1-4503-0239-5},
  location = {Oslo, Norway},
  numpages = {6},
  pages = {6:1--6:6},
  publisher = {ACM},
  series = {QUASOSS '10},
  title = {Usage profile and platform independent automated validation of service behavior specifications},
  url = {http://doi.acm.org/10.1145/1858263.1858271},
  year = {2010}
}
@article{happe2009a,
  abstract = {Performance prediction methods can help software architects to identify potential performance problems, such as bottlenecks, in their software systems during the design phase. In such early stages of the software life-cycle, only a little information is available about the system's implementation and execution environment. However, these details are crucial for accurate performance predictions. Performance completions close the gap between available high-level models and required low-level details. Using model-driven technologies, transformations can include details of the implementation and execution environment into abstract performance models. However, existing approaches do not consider the relation of actual implementations and performance models used for prediction. Furthermore, they neglect the broad variety of possible implementations and middleware platforms, possible configurations, and possible usage scenarios. In this paper, we (i) establish a formal relation between generated performance models and generated code, (ii) introduce a design and application process for parametric performance completions, and (iii) develop a parametric performance completion for Message-oriented Middleware according to our method. Parametric performance completions are independent of a specific platform, reflect performance-relevant software configurations, and capture the influence of different usage scenarios. To evaluate the prediction accuracy of the completion for Message-oriented Middleware, we conducted a real-world case study with the SPECjms2007 Benchmark [http://www.spec.org/jms2007/]. The observed deviation of measurements and predictions was below 10% to 15%},
  author = {Jens Happe and Steffen Becker and Christoph Rathfelder and Holger Friedrich and Ralf H. Reussner},
  doi = {10.1016/j.peva.2009.07.006},
  journal = {Performance Evaluation (PE)},
  month = {August},
  number = {8},
  pages = {694--716},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/happe2009a.pdf},
  publisher = {Elsevier},
  title = {{P}arametric {P}erformance {C}ompletions for {M}odel-{D}riven {P}erformance {P}rediction},
  url = {http://dx.doi.org/10.1016/j.peva.2009.07.006},
  volume = {67},
  year = {2010}
}
@inproceedings{happe2010b,
  abstract = {The broad introduction of multi-core processors made symmetric multiprocessing (SMP) environments mainstream. The additional cores can significantly increase software performance. However, their actual benefit depends on the operating system scheduler's capabilities, the system's workload, and the software's degree of concurrency. The load distribution on the available processors (or cores) strongly influences response times and throughput of software applications. Hence, understanding the operating system scheduler's influence on performance and scalability is essential for the accurate prediction of software performance (response time, throughput, and resource utilisation). Existing prediction approaches tend to approximate the influence of operating system schedulers by abstract policies such as processor sharing and its more sophisticated extensions. However, these abstractions often fail to accurately capture software performance in SMP environments. In this paper, we present a performance Model for general-purpose Operating System Schedulers (MOSS). It allows analyses of software performance taking the influences of schedulers in SMP environments into account. The model is defined in terms of timed Coloured Petri Nets and predicts the effect of different operating system schedulers (e.g., Windows 7, Vista, Server 2003, and Linux 2.6) on software performance. We validated the prediction accuracy of MOSS in a case study using a business information system. In our experiments, the deviation of predictions and measurements was below 10% in most cases and did not exceed 30%.},
  acmid = {1906836},
  address = {Washington, DC, USA},
  author = {Jens Happe and Henning Groenda and Michael Hauck and Ralf H. Reussner},
  booktitle = {Proceedings of the 2010 7th International Conference on the Quantitative Evaluation of Systems},
  doi = {10.1109/QEST.2010.15},
  isbn = {978-0-7695-4188-4},
  numpages = {10},
  pages = {59--68},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/happe2010b.pdf},
  publisher = {IEEE Computer Society},
  series = {QEST '10},
  title = {{A Prediction Model for Software Performance in Symmetric Multiprocessing Environments}},
  url = {http://dx.doi.org/10.1109/QEST.2010.15},
  year = {2010}
}
@inproceedings{happe2010a,
  abstract = {Software performance engineering (SPE) enables software architects to ensure high performance standards for their applications. However, applying SPE in practice is still challenging. Most enterprise applications include a large software basis, such as middleware and legacy systems. In many cases, the software basis is the determining factor of the system's overall timing behavior, throughput, and resource utilization. To capture these influences on the overall system's performance, established performance prediction methods (model-based and analytical) rely on models that describe the performance-relevant aspects of the system under study. Creating such models requires detailed knowledge on the system's structure and behavior that, in most cases, is not available. In this paper, we abstract from the internal structure of the system under study. We focus our efforts on message-oriented middleware and analyze the dependency between the MOM's usage and its performance. We use statistical inference to conclude these dependencies from observations. For ActiveMQ 5.3, the resulting functions predict the performance with a relative mean square error 0.1.},
  author = {Jens Happe and Dennis Westermann and Kai Sachs and Lucia Kapova},
  booktitle = {{Research into Practice - Reality and Gaps (Proceedings of QoSA 2010)}},
  editor = {George Heineman and Jan Kofron and Frantisek Plasil},
  pages = {20--35},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/happe2010a.pdf},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science (LNCS)},
  title = {{Statistical Inference of Software Performance Models for Parametric Performance Completions}},
  volume = {6093},
  year = {2010}
}
@inproceedings{hauck2010a,
  abstract = {In symmetric multiprocessing environments, the performance of a software system heavily depends on the application's parallelism, the scheduling and load-balancing policies of the operating system, and the infrastructure it is running on. The scheduling of tasks can influence the response time of an application by several orders of magnitude. Thus, detailed models of the operating system scheduler are essential for accurate performance predictions. However, building such models for schedulers and including them into performance prediction models involves a lot of effort. For this reason, simplified scheduler models are used for the performance evaluation of business information systems in general. In this work, we present an approach to derive load-balancing properties of general-purpose operating system (GPOS) schedulers automatically. Our approach uses goal-oriented measurements to derive performance models based on observations. Furthermore, the derived performance model is plugged into the Palladio Component Model (PCM), a model-based performance prediction approach. We validated the applicability of the approach and its prediction accuracy in a case study on different operating systems.},
  author = {Michael Hauck and Jens Happe and Ralf H. Reussner},
  booktitle = {Proceedings of the 18th IEEE International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS'10)},
  doi = {10.1109/MASCOTS.2010.44},
  isbn = {978-0-7695-4197-6},
  issn = {1526-7539},
  numpages = {9},
  pages = {361--369},
  publisher = {IEEE Computer Society},
  title = {{Automatic Derivation of Performance Prediction Models for Load-balancing Properties Based on Goal-oriented Measurements}},
  url = {http://dx.doi.org/10.1109/MASCOTS.2010.44},
  year = {2010}
}
@techreport{hauck2010b,
  author = {Michael Hauck and Matthias Huber and Markus Klems and Samuel Kounev and J{\"o}rn M{\"u}ller-Quade and Alexander Pretschner and Ralf Reussner and Stefan Tai},
  institution = {Karlsruhe Institute of Technology, Faculty of Informatics},
  number = {2010-19},
  title = {{Challenges and Opportunities of Cloud Computing -- Trade-off Decisions in Cloud Computing Architecture}},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000020328},
  year = {2010}
}
@inproceedings{HeidingerBHBM10,
  author = {Clemens Heidinger and Erik Buchmann and Matthias Huber and Klemens B{\"o}hm and J{\"o}rn M{\"u}ller-Quade},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  booktitle = {ECDL},
  ee = {http://dx.doi.org/10.1007/978-3-642-15464-5_17},
  pages = {156--167},
  title = {Privacy-Aware Folksonomies},
  year = {2010}
}
@inproceedings{henss2010a,
  abstract = {Currently more and more highly distributed systems emerge, ranging from classic client-server architectures to peer-to-peer systems. With the vast introduction of cloud computing this trend has even accelerated. Single software services are relocated to remote server farms. The communication with the services has to use uncertain network connections over the internet. Performance of such distributed systems is not easy to predict as many performance relevant factors, including network performance impacts, have to be considered. Current software performance prediction approaches, based on analytical and simulative methods, lack the support for detailed network models. Hence an integrated software and network performance prediction is required. In this paper general techniques for the model integration of differently targeted simulation domains are presented. At plus design alternatives for the coupling of simulation frameworks are discussed. Finally this paper presents a model driven approach for an integrated simulation of software and network aspects, based on the palladio component model and the OMNeT++ simulation framework.},
  address = {Karlsruhe, Germany},
  author = {J{\"o}rg Henss},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {B{\"{u}}hnov{\'{a}}, Barbora and Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  issn = {1432-7864},
  month = {June},
  pages = {39--46},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  title = {Performance Prediction for Highly Distributed Systems},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000018464},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{huber2010a,
  abstract = {Software services offer many opportunities like reducedcost for IT infrastructure. However, they also introducenew risks, for example losing control over data. While data canbe secured against external threats using standard techniques, theservice providers themselves have to be trusted to ensure privacy.Cryptographic methods combined with architectures adjustedto the client's protection requirements offer promising methodsto build services with a provable amount of security againstinternal adversaries without the need to fully trust the serviceprovider. We propose a reference architecture which separatesservices, restricts privilege of the parts and deploys them ondifferent servers. Assumptions about the servers' and adversary'scapabilities yield security guarantees which are weaker thanclassical cryptographic guarantees, yet can be sufficient.},
  address = {Karlsruhe, Germany},
  author = {Matthias Huber},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {B{\"{u}}hnov{\'{a}}, Barbora and Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  internal-note = {NOTE(review): pages identical to entry henss2010a from the same proceedings volume -- verify against the published table of contents},
  issn = {1432-7864},
  month = jun,
  pages = {39--46},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  title = {Towards Secure Services in an Untrusted Environment},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000018464},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{HuBeRaScRe2010-ICSE-PerfMod,
  abstract = {In software engineering, performance and the integration of performance analysis methodologies gain increasing importance, especially for complex systems. Well-developed methods and tools can predict non-functional performance properties like response time or resource utilization in early design stages, thus promising time and cost savings. However, as performance modeling and performance prediction is still a young research area, the methods are not yet well-established and in wide-spread industrial use. This work is a case study of the applicability of the Palladio Component Model as a performance prediction method in an industrial environment. We model and analyze different design alternatives for storage virtualization on an IBM (Trademark of IBM in USA and/or other countries) system. The model calibration, validation and evaluation is based on data measured on a System z9 (Trademark of IBM in USA and/or other countries) as a proof of concept. The results show that performance predictions can identify performance bottlenecks and evaluate design alternatives in early stages of system development. The experiences gained were that performance modeling helps to understand and analyze a system. Hence, this case study substantiates that performance modeling is applicable in industry and a valuable method for evaluating design decisions.},
  address = {New York, NY, USA},
  author = {Nikolaus Huber and Steffen Becker and Christoph Rathfelder and Jochen Schweflinghaus and Ralf Reussner},
  booktitle = {ACM/IEEE 32nd International Conference on Software Engineering (ICSE 2010), Software Engineering in Practice Track},
  day = {2--8},
  doi = {10.1145/1810295.1810297},
  isbn = {978-1-60558-719-6},
  location = {Cape Town, South Africa},
  month = may,
  note = {Acceptance Rate (Full Paper): 23\% (16/71)},
  pages = {1--10},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/hubern2010.pdf},
  publisher = {ACM},
  slides = {http://sdqweb.ipd.uka.de/publications/pdfs/hubern2010_slides.pdf},
  title = {{Performance Modeling in Industry: A Case Study on Storage Virtualization}},
  year = {2010}
}
@inproceedings{HuQuBrKo2010-DOA-AnalysisVirt,
  abstract = {Nowadays, virtualization solutions are gaining increasing importance. By enabling the sharing of physical resources, thus making resource usage more efficient, they promise energy and cost savings. Additionally, virtualization is the key enabling technology for Cloud Computing and server consolidation. However, the effects of sharing resources on system performance are not yet well-understood. This makes performance prediction and performance management of services deployed in such dynamic systems very challenging. Because of the large variety of virtualization solutions, a generic approach to predict the performance influences of virtualization platforms is highly desirable. In this paper, we present a hierarchical model capturing the major performance-relevant factors of virtualization platforms. We then propose a general methodology to quantify the influence of the identified factors based on an empirical approach using benchmarks. Finally, we present a case study of Citrix XenServer 5.5, a state-of-the-art virtualization platform.},
  address = {Crete, Greece},
  author = {Nikolaus Huber and Marcel von Quast and Fabian Brosig and Samuel Kounev},
  booktitle = {The 12th International Symposium on Distributed Objects, Middleware, and Applications (DOA 2010)},
  day = {26},
  location = {Crete, Greece},
  month = oct,
  note = {Acceptance Rate (Full Paper): 33\%},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/HuQuBrKo2010-DOA-AnalysisVirt.pdf},
  publisher = {Springer-Verlag},
  title = {{Analysis of the Performance-Influencing Factors of Virtualization Platforms}},
  year = {2010}
}
@inproceedings{Koellner2010,
  address = {Los Alamitos, CA, USA},
  author = {Christian K{\"{o}}llner and Georg Dummer and Andreas Rentschler and K. D. M{\"{u}}ller-Glaser},
  booktitle = {13th IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing Workshops (ISORCW '10)},
  doi = {10.1109/ISORCW.2010.33},
  isbn = {978-0-7695-4038-2},
  pages = {152--157},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/koellner2010a.pdf},
  publisher = {IEEE Computer Society},
  title = {{Designing a Graphical Domain-Specific Modelling Language Targeting a Filter-Based Data Analysis Framework}},
  url = {http://doi.ieeecomputersociety.org/10.1109/ISORCW.2010.33},
  year = {2010}
}
@inproceedings{kapa1010a,
  abstract = {Increasing of the user requirements on video quality is essential to consider and have in mind while designing any video-providing services. The methods in the user-centered design of services are fairly labor intensive and have to consider resulting user experience. User experience is a term that is very hard to be defined. There are different approaches to user experience assessment. However, they lack a methods to predict expected user experience based on user's subjective point of view. We propose a method of User Experience Sensitivity Analysis to find dependency of user experience on quality attributes of the service and define initial prediction model. Validation of our approach is provided by comparison between the observed values of real user experience and prediction results.},
  address = {Budapest, Hungary},
  author = {Martin Kapa and Lucia Kapova},
  booktitle = {Third Joint IFIP Wireless and Mobile Networking Conference (WMNC'2010)},
  day = {13},
  keywords = {video quality; subjective quality assessment; bayesian networks},
  title = {User Experience Sensitivity Analysis guided by Videostreaming Quality Attributes},
  year = {2010}
}
@inproceedings{kapova2010a,
  abstract = {Model-driven performance prediction methods require detailed design models to evaluate the performance of software systems during early development stages. However, the complexity of detailed prediction models and the semantic gap between modelled performance concerns and functional concerns prevents many developers to address performance. As a solution to this problem, systematic model refinements, called completions, hide low-level details from developers. Completions automatically integrate performance-relevant details into component-based architectures using model-to-model transformations. In such scenarios, conflicts between different completions are likely. Therefore, the application order of completions must be determined unambiguously in order to reduce such conflicts. Many existing approaches employ the concept of performance completions to include performance-relevant details to the prediction model. So far researcher only address the application of a single completion on an architectural model. The reduction of conflicting completions have not yet been considered. In this paper, we present a systematic approach to reduce and avoid conflicts between completions that are applied to the same model. The method presented in this paper is essential for the automated integration of completions in software performance engineering. Furthermore, we apply our approach to reduce conflicts of a set of completions based on design patterns for concurrent software systems.},
  author = {Kapova, Lucia and Becker, Steffen},
  booktitle = {7th International Workshop on Formal Engineering approaches to Software Components and Architectures (FESCA)},
  editor = {Barbora Zimmerova and Jens Happe},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Systematic Refinement of Performance Models for Concurrent Component-based Systems},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kapova2010a.pdf},
  year = {2010}
}
@inproceedings{kapova2010e,
  address = {New York, NY, USA},
  author = {Kapova, Lucia and Buhnova, Barbora},
  booktitle = {QUASOSS '10: Proceedings of the 2nd International Workshop on the Quality of Service-Oriented Software Systems},
  doi = {10.1145/1858263.1858269},
  isbn = {978-1-4503-0239-5},
  location = {Oslo, Norway},
  pages = {1--7},
  publisher = {ACM},
  title = {Performance-driven stepwise refinement of component-based architectures},
  year = {2010}
}
@inproceedings{kapova2010b,
  abstract = {Using model-to-model transformations to generate analysis models or code from architecture models is sought to promote compliance and reuse of components. The maintainability of transformations is influenced by various characteristics - as with every programming language artifact. Code metrics are often used to estimate code maintainability. However, most of the established metrics do not apply to declarative transformation languages (such as QVT Relations) since they focus on imperative (e.g. object-oriented) coding styles. One way to characterize the maintainability of programs are code metrics. However, the vast majority of these metrics focus on imperative (e.g., object-oriented) coding styles and thus cannot be reused as-is for transformations written in declarative languages. In this paper we propose an initial set of quality metrics to evaluate transformations written in the declarative QVT Relations language.We apply the presented set of metrics to several reference transformations to demonstrate how to judge transformation maintainability based on our metrics.},
  author = {Lucia Kapova and Thomas Goldschmidt and Steffen Becker and J{\"o}rg Henss},
  booktitle = {{Research into Practice - Reality and Gaps (Proceeding of QoSA 2010)}},
  editor = {George Heineman and Jan Kofron and Frantisek Plasil},
  pages = {151--166},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/kapova2010b.pdf},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {LNCS},
  title = {{Evaluating Maintainability with Code Metrics for Model-to-Model Transformations}},
  volume = {6093},
  year = {2010}
}
@inproceedings{kapova2010d,
  address = {New York, NY, USA},
  author = {Kapova, Lucia and Goldschmidt, Thomas and Happe, Jens and Reussner, Ralf H.},
  booktitle = {MDI '10: Proceedings of the First International Workshop on Model-Driven Interoperability},
  doi = {10.1145/1866272.1866282},
  isbn = {978-1-4503-0292-0},
  location = {Oslo, Norway},
  pages = {69--78},
  publisher = {ACM},
  title = {Domain-specific templates for refinement transformations},
  year = {2010}
}
@inproceedings{kapova2010c,
  affiliation = {Software Design and Quality Group, Karlsruhe Institute of Technology (KIT), Germany},
  author = {Kapova, Lucia and Reussner, Ralf},
  booktitle = {Computer Performance Engineering},
  doi = {10.1007/978-3-642-15784-4_2},
  editor = {Aldini, Alessandro and Bernardo, Marco and Bononi, Luciano and Cortellessa, Vittorio},
  pages = {17--36},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {Application of Advanced Model-Driven Techniques in Performance Engineering},
  url = {http://dx.doi.org/10.1007/978-3-642-15784-4\_2},
  volume = {6342},
  year = {2010}
}
@inproceedings{kapova2009b,
  author = {Lucia Kapova and Barbora Zimmerova and Anne Martens and Jens Happe and Ralf H. Reussner},
  title = {State Dependence in Performance Evaluation of Component-Based Software Systems},
  booktitle = {Proceedings of the 1st Joint WOSP/SIPEW International Conference on Performance Engineering (WOSP/SIPEW '10)},
  location = {San Jose, California, USA},
  pages = {37--48},
  publisher = {ACM},
  address = {New York, NY, USA},
  doi = {10.1145/1712605.1712613},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kapova2009b.pdf},
  abstract = {Performance prediction and measurement approaches for component-based software systems help software architects to evaluate their systems based on component performance specifications created by component developers. Integrating classical performance models such as queueing networks, stochastic Petri nets, or stochastic process algebras, these approaches additionally exploit the benefits of component-based software engineering, such as reuse and division of work. Although researchers have proposed many approaches in this direction during the last decade, none of them has attained widespread industrial use. On this basis, we have conducted a comprehensive state-of-the-art survey of more than 20 of these approaches assessing their applicability. We classified the approaches according to the expressiveness of their component performance modelling languages. Our survey helps practitioners to select an appropriate approach and scientists to identify interesting topics for future research.},
  year = {2010}
}
@mastersthesis{klatt2010a,
  abstract = {With the increasing demand of large-scale systems and the corresponding high load of data or users, event-based communication has gained increasing attention. Originating from embedded systems and graphical user interfaces, the asynchronous type of communication also provides advantages to business applications by decoupling individual components and their processes respectively. However, the possible scalability gained from the event-based communication can result in performance problems in the overall system which are hard to predict by the software architect. Model-based performance prediction is a reasonable approach to predict system characteristics in general. Today's solutions are however limited in handling the complexity of the additional infrastructure. Especially the impact of many-to-many and asynchronous connections on the overall system is not considered even by advanced projects such as the Palladio Component Model. This thesis presents an approach to introduce event-based communication in the Palladio Component Model and a transformation to reuse existing prediction techniques. The approach includes an additional automatic integration of an auxiliary repository model. This model encapsulates the characteristics of the underlying middleware infrastructure which is distributed to all event-based connections in the system architecture. An implementation of the approach has been provided as part of this thesis. It was evaluated in a case study based on a traffic information and monitoring system installed in the city of Cambridge. Compared to an existing case study of the same system, the new approach reduced the modelling effort for event-based connections by about 80 percentage and provided more flexibility to test different setups. In addition, the approach reduced the prediction error to less than 5 percentage in most cases.},
  author = {Benjamin Klatt},
  note = {ObjektForum Thesis Award},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2010a.pdf},
  school = {Karlsruhe Institute of Technology, Germany},
  title = {Modelling and Prediction of Event-Based Communication in Component-Based Architectures},
  year = {2010}
}
@incollection{Ko2010-KIT-Roadmap,
  address = {Karlsruhe, Germany},
  author = {Samuel Kounev},
  booktitle = {{Emerging Research Directions in Computer Science. Contributions from the Young Informatics Faculty in Karlsruhe}},
  isbn = {978-3-86644-508-6},
  month = jul,
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/Ko2010-KIT-Roadmap.pdf},
  publisher = {KIT Scientific Publishing},
  title = {{Engineering of Next Generation Self-Aware Software Systems: A Research Roadmap}},
  url = {http://uvka.ubka.uni-karlsruhe.de/shop/isbn/978-3-86644-508-6},
  year = {2010}
}
@inproceedings{KoBrHuRe2010-SCC-Towards,
  abstract = {Modern service-oriented systems have increasingly complex loosely-coupled architectures that often exhibit poor performance and resource efficiency and have high operating costs. This is due to the inability to predict at run-time the effect of dynamic changes in the system environment (e.g., varying service workloads) and adapt the system configuration accordingly. In this paper, we describe a long-term vision and approach for designing systems with built-in self-aware performance and resource management capabilities. We advocate the use of architecture-level performance models extracted dynamically from the evolving system configuration and maintained automatically during operation. The models will be exploited at run-time to adapt the system to changes in the environment ensuring that resources are utilized efficiently and performance requirements are continuously satisfied.},
  author = {Samuel Kounev and Fabian Brosig and Nikolaus Huber and Ralf Reussner},
  booktitle = {Proceedings of the 7th IEEE International Conference on Services Computing (SCC 2010), July 5-10, Miami, Florida, USA},
  day = {5--10},
  location = {Miami, Florida, USA},
  month = jul,
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/KoBrHuRe2010-SCC-Towards.pdf},
  publisher = {IEEE Computer Society},
  title = {{Towards self-aware performance and resource management in modern service-oriented systems}},
  year = {2010}
}
@incollection{KoSpMe2010-Festschrift-QPME2,
  abstract = {Queueing Petri nets are a powerful formalism that can be exploited for modeling distributed systems and analyzing their performance and scalability. By combining the modeling power and expressiveness of queueing networks and stochastic Petri nets, queueing Petri nets provide a number of advantages. In this paper, we present Version 2.0 of our tool QPME (Queueing Petri net Modeling Environment) for modeling and analysis of systems using queueing Petri nets. The development of the tool was initiated by Samuel Kounev in 2003 at the Technische Universit{\"a}t Darmstadt in the group of Prof. Alejandro Buchmann. Since then the tool has been distributed to more than 100 organizations worldwide. QPME provides an Eclipse-based editor for building queueing Petri net models and a powerful simulation engine for analyzing the models. After presenting the tool, we discuss ongoing work on the QPME project and the planned future enhancements of the tool.},
  address = {Berlin, Heidelberg},
  affiliation = {Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany},
  author = {Kounev, Samuel and Spinner, Simon and Meier, Philipp},
  booktitle = {From Active Data Management to Event-Based Systems and More},
  doi = {10.1007/978-3-642-17226-7_18},
  editor = {Sachs, Kai and Petrov, Ilia and Guerrero, Pablo},
  isbn = {978-3-642-17225-0},
  keywords = {Computer Science},
  pages = {293--311},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KoSpMe2010-Festschrift-QPME2.pdf},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{QPME 2.0 - A Tool for Stochastic Modeling and Analysis Using Queueing Petri Nets}},
  url = {http://dx.doi.org/10.1007/978-3-642-17226-7_18},
  volume = {6462},
  year = {2010}
}
@mastersthesis{kramer2010a,
  author = {Max E. Kramer},
  title = {Mapping Reusable Aspect Models to Aspect-Oriented Code},
  type = {Study Thesis},
  school = {Karlsruhe Institute of Technology (KIT), Germany},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/kramer2010a.pdf},
  year = {2010}
}
@misc{Kr2010-uspatent-adaptive-elearning,
  author = {Krebs, Rouven},
  howpublished = {Patent US 70443},
  month = mar,
  title = {Method and System for an Adaptive Learning Strategy},
  year = {2010}
}
@mastersthesis{Krebs2010a,
  address = {Moltkestr. 30, 76133 Karlsruhe, Germany},
  author = {Rouven Krebs},
  month = oct,
  school = {University of Applied Sciences Karlsruhe},
  title = {Combination of measurement and model based approaches for performance prediction in service oriented systems},
  year = {2010}
}
@misc{Kr2010-uspatent-offline-content,
  author = {Krebs, Rouven and Hochwarth, Christian},
  howpublished = {Patent US 94886},
  month = apr,
  title = {Method and System for Managing Learning Materials Presented Offline},
  year = {2010}
}
@phdthesis{krogmann2010a,
  author = {Klaus Krogmann},
  title = {Reconstruction of Software Component Architectures and Behaviour Models using Static and Dynamic Analysis},
  school = {Karlsruhe Institute of Technology (KIT), Karlsruhe, Germany},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000022278},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2010a.pdf},
  abstract = {Model-based performance prediction systematically deals with the evaluation of software performance to avoid for example bottlenecks, estimate execution environment sizing, or identify scalability limitations for new usage scenarios. Such performance predictions require up-to-date software performance models. Still, no automated reverse engineering approach for software performance models at an architectural level exist. This book describes a new integrated reverse engineering approach for the reconstruction of software component architectures and software component behaviour models which are parameterised over hardware, component assembly, and control and data flow and as such can serve as software performance models due to the execution semantics of the target meta-model.},
  year = {2010}
}
@article{krogmann2009c,
  abstract = {In component-based software engineering, existing components are often re-used in new applications. Correspondingly, the response time of an entire component-based application can be predicted from the execution durations of individual component services. These execution durations depend on the runtime behaviour of a component, which itself is influenced by three factors: the execution platform, the usage profile, and the component wiring. To cover all relevant combinations of these influencing factors, conventional prediction of response times requires repeated deployment and measurements of component services for all such combinations, incurring a substantial effort. This paper presents a novel comprehensive approach for reverse engineering and performance prediction of components. In it, genetic programming is utilised for reconstructing a behaviour model from monitoring data, runtime bytecode counts and static bytecode analysis. The resulting behaviour model is parametrised over all three performance-influencing factors, which are specified separately. This results in significantly fewer measurements: the behaviour model is reconstructed only once per component service, and one application-independent bytecode benchmark run is sufficient to characterise an execution platform. To predict the execution durations for a concrete platform, our approach combines the behaviour model with platform-specific benchmarking results. We validate our approach by predicting the performance of a file sharing application.},
  author = {Klaus Krogmann and Michael Kuperberg and Ralf Reussner},
  doi = {10.1109/TSE.2010.69},
  editor = {Mark Harman and Afshin Mansouri},
  issn = {0098-5589},
  journal = {IEEE Transactions on Software Engineering},
  number = {6},
  pages = {865--877},
  publisher = {{IEEE}},
  title = {{Using Genetic Search for Reverse Engineering of Parametric Behaviour Models for Performance Prediction}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/krogmann2009c.pdf},
  volume = {36},
  year = {2010}
}
@inproceedings{kuperberg2010a,
  abstract = {Performance is an extra-functional property of software systems which is often critical for achieving sufficient scalability or efficient resource utilisation. As many applications are built using application programmer interfaces (APIs) of execution platforms and external components, the performance of the used API implementations has a strong impact on the performance of the application itself. Yet the sheer size and complexity of today's APIs make it hard to manually benchmark them, while many semantical constraints and requirements (on method parameters, etc.) make it complicated to automate the creation of API benchmarks. Benchmarking the whole API is necessary since it is in the majority of the cases hard to exactly specify which parts of the API would be used by a given application. Additionally, modern execution platforms such as the Java Virtual Machine perform extensive nondeterministic runtime optimisations, which need to be considered and quantified for realistic benchmarking. In this paper, we present an automated solution for benchmarking any large APIs that are written in the Java programming language, not just the Java Platform API. Our implementation induces the optimisations of the Just-In-Time compiler to obtain realistic benchmarking results. We evaluate the approach on a large subset of the Java Platform API exposed by the base libraries of the Java Virtual Machine.},
  author = {Michael Kuperberg and Fouad Omri},
  booktitle = {{Proceedings of Software Engineering 2010 (SE2010)}},
  month = feb,
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2010a.pdf},
  title = {{Automated Benchmarking of Java APIs}},
  year = {2010}
}
@inproceedings{martens2010b,
  abstract = {Multiple, often conflicting quality of service (QoS) requirements arise when evaluating design decisions and selecting design alternatives of complex component-based software systems. In this scenario, selecting a good solution with respect to a single quality attribute can lead to unacceptable results with respect to the other quality attributes. A promising way to deal with this problem is to exploit multi-objective optimization where the objectives represent different quality attributes. The aim of these techniques is to devise a set of solutions, each of which assures a trade-off between the conflicting qualities. To automate this task, this paper proposes a combined use of analytical optimization techniques and evolutionary algorithms to efficiently identify a significant set of design alternatives, from which an architecture that best fits the different quality objectives can be selected. The proposed approach can lead both to a reduction of development costs and to an improvement of the quality of the final system. We demonstrate the use of this approach on a simple case study.},
  author = {Anne Martens and Danilo Ardagna and Heiko Koziolek and Raffaela Mirandola and Ralf Reussner},
  booktitle = {{Research into Practice - Reality and Gaps (Proceedings of the 6th International Conference on the Quality of Software Architectures, QoSA 2010)}},
  doi = {10.1007/978-3-642-13821-8_8},
  editor = {George Heineman and Jan Kofron and Frantisek Plasil},
  pages = {84--101},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010b.pdf},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {A Hybrid Approach for Multi-Attribute {QoS} Optimisation in Component Based Software Systems},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010b.pdf},
  volume = {6093},
  year = {2010}
}
@inproceedings{martens2010a,
  abstract = {Quantitative prediction of quality properties (i.e. extra-functional properties such as performance, reliability, and cost) of software architectures during design supports a systematic software engineering approach. Designing architectures that exhibit a good trade-off between multiple quality criteria is hard, because even after a functional design has been created, many remaining degrees of freedom in the software architecture span a large, discontinuous design space. In current practice, software architects try to find solutions manually, which is time-consuming, can be error-prone and can lead to suboptimal designs. We propose an automated approach to search the design space for good solutions. Starting with a given initial architectural model, the approach iteratively modifies and evaluates architectural models. Our approach applies a multi-criteria genetic algorithm to software architectures modelled with the Palladio Component Model. It supports quantitative performance, reliability, and cost prediction and can be extended to other quantitative quality criteria of software architectures. We validate the applicability of our approach by applying it to an architecture model of a component-based business information system and analyse its quality criteria trade-offs by automatically investigating more than 1200 alternative design candidates.},
  address = {New York, NY, USA},
  author = {Anne Martens and Heiko Koziolek and Steffen Becker and Ralf H. Reussner},
  booktitle = {Proceedings of the first joint WOSP/SIPEW international conference on Performance engineering},
  doi = {10.1145/1712605.1712624},
  editor = {Alan Adamson and Andre B. Bondi and Carlos Juiz and Mark S. Squillante},
  isbn = {978-1-60558-563-5},
  location = {San Jose, California, USA},
  pages = {105--116},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010a.pdf},
  publisher = {ACM},
  series = {WOSP/SIPEW '10},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010a_slides.ppsx},
  title = {Automatically Improve Software Models for Performance, Reliability and Cost Using Genetic Algorithms},
  url = {http://www.inf.pucrs.br/wosp},
  year = {2010}
}
@mastersthesis{Meier2010Tra,
  address = {Karlsruhe, Germany},
  author = {Philipp Meier},
  note = {FZI Prize "Best Diploma Thesis"},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/Meier2010Tra.pdf},
  school = {Karlsruhe Institute of Technology (KIT)},
  title = {{Automated Transformation of Palladio Component Models to Queueing Petri Nets}},
  year = {2010}
}
@mastersthesis{noorshams2010b,
  author = {Qais Noorshams},
  title = {Focusing the Optimization of Software Architecture Models Using Non-Functional Requirements},
  school = {Karlsruhe Institute of Technology},
  address = {Karlsruhe, Germany},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/noorshams2010b.pdf},
  year = {2010}
}
@inproceedings{noorshams2010a,
  address = {New York, NY, USA},
  author = {Qais Noorshams and Anne Martens and Ralf Reussner},
  booktitle = {Proceedings of the 2nd International Workshop on the Quality of Service-Oriented Software Systems (QUASOSS '10), Oslo, Norway, October 4, 2010},
  doi = {10.1145/1858263.1858265},
  isbn = {978-1-4503-0239-5},
  pages = {1:1--1:6},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/noorshams2010a.pdf},
  publisher = {ACM},
  title = {Using Quality of Service Bounds for Effective Multi-objective Software Architecture Optimization},
  url = {http://sdq.ipd.kit.edu/conferences_and_events/quasoss2010/},
  year = {2010}
}
@mastersthesis{vQuast2010,
  author = {Marcel von Quast},
  title = {{Automatisierte Performance-Analyse von Virtualisierungsplattformen}},
  school = {Karlsruhe Institute of Technology (KIT)},
  address = {Karlsruhe, Germany},
  year = {2010}
}
@inproceedings{rathfelder2010a,
  abstract = {The event-driven communication paradigm is used increasingly often to build loosely-coupled distributed systems in many industry domains including telecommunications, transportation, and supply chain management. However, the loose coupling of components in such systems makes it hard for developers to estimate their behaviour and performance under load. Most general purpose performance meta-models for component-based systems provide limited support for modelling event-driven communication. In this paper, we present a case study of a real-life road traffic monitoring system that shows how event-driven communication can be modelled for performance prediction and capacity planning. Our approach is based on the Palladio Component Model (PCM) which we have extended to support event-driven communication. We evaluate the accuracy of our modelling approach in a number of different workload and configuration scenarios. The results demonstrate the practicality and effectiveness of the proposed approach.},
  address = {Berlin, Heidelberg},
  author = {Christoph Rathfelder and David Evans and Samuel Kounev},
  booktitle = {Proceedings of the 7th European Performance Engineering Workshop (EPEW 2010)},
  day = {23--24},
  editor = {Alessandro Aldini and Marco Bernardo and Luciano Bononi and Vittorio Cortellessa},
  location = {Bertinoro, Italy},
  month = {September},
  pages = {219--235},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/RaEvKo2010-EPEW-p2pCBSE.pdf},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science (LNCS)},
  title = {{Predictive Modelling of Peer-to-Peer Event-driven Communication in Component-based Systems}},
  volume = {6342},
  year = {2010}
}
@inproceedings{rathfelder2010b,
  abstract = {The event-based communication paradigm is becoming increasingly ubiquitous as an enabling technology for building loosely-coupled distributed systems. However, the loose coupling of components in such systems makes it hard for developers to predict their performance under load. Most general purpose performance meta-models for component-based systems provide limited support for modelling event-based communication and neglect middleware-specific influence factors. In this poster, we present an extension of our approach to modelling event-based communication in the context of the Palladio Component Model (PCM), allowing to take into account middleware-specific influence factors. The latter are captured in a separate model automatically woven into the PCM instance by means of a model-to-model transformation. As a second contribution, we present a short case study of a real-life road traffic monitoring system showing how event-based communication can be modelled for performance prediction and capacity planning.},
  address = {New York, NY, USA},
  author = {Rathfelder, Christoph and Klatt, Benjamin and Kounev, Samuel and Evans, David},
  booktitle = {Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems (DEBS 2010)},
  day = {12--15},
  doi = {10.1145/1827418.1827437},
  isbn = {978-1-60558-927-5},
  location = {Cambridge, United Kingdom},
  month = {July},
  pages = {97--98},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rathfelder2010b.pdf},
  publisher = {ACM},
  title = {Towards middleware-aware integration of event-based communication into the {Palladio} component model},
  url = {http://doi.acm.org/10.1145/1827418.1827437},
  year = {2010}
}
@mastersthesis{rentschler2010a,
  address = {Karlsruhe, Germany},
  author = {Andreas Rentschler},
  month = {March},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rentschler2010a.pdf},
  school = {Karlsruhe Institute of Technology, Germany},
  title = {Entwurf einer grafischen, dom{\"a}nenspezifischen {M}odellierungssprache f{\"u}r ein filterbasiertes {D}atenanalyseframework},
  type = {Diploma Thesis},
  year = {2010}
}
@inproceedings{SaApKoBu2010-BenchmarX-PubSub,
  author = {Kai Sachs and Stefan Appel and Samuel Kounev and Alejandro Buchmann},
  booktitle = {Proceedings of the 2nd International Workshop on Benchmarking of Database Management Systems and Data-Oriented Web Technologies (BenchmarX'10)},
  editor = {Martin Necasky and Eric Pardede},
  month = {April},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/jms2009PS.pdf},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science (LNCS)},
  title = {{Benchmarking Publish/Subscribe-based Messaging Systems}},
  volume = {6193},
  year = {2010}
}
@inproceedings{SchroterMuhlRichling2010Stochastic,
  address = {New York, USA},
  author = {Schr{\"{o}}ter, Arnd and M{\"{u}}hl, Gero and Kounev, Samuel and Parzyjegla, Helge and Richling, Jan},
  booktitle = {4th ACM International Conference on Distributed Event-Based Systems (DEBS 2010), July 12-15, Cambridge, United Kingdom},
  note = {Acceptance Rate: 25\%},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/ScMuKoPaRi2010-DEBS-Stochastic.pdf},
  publisher = {ACM},
  title = {{Stochastic Performance Analysis and Capacity Planning of Publish/Subscribe Systems}},
  year = {2010}
}
@inproceedings{schuster2010a,
  abstract = {In recent years continuously changing market situations required IT systems that are flexible and highly responsive to changes of the underlying business processes. The transformation to service-oriented architecture (SOA) concepts, mainly services and loose coupling, promises to meet these demands. However, elevated complexity in management and evolution processes is required for the migration of existing systems towards SOA. Studies in this area of research have revealed a gap between in continuous and actual tool support of development teams throughout the process phases of evolution processes. Thus, in this article we introduce a method that fosters evolution by an iterative approach and illustrate how each phase of this method can be tool-supported.},
  author = {Thomas Schuster and Christoph Rathfelder and Nelly Schuster and Jens Nimis},
  booktitle = {Proceedings of the International Workshop on SOA Migration and Evolution 2010 (SOAME 2010) as part of the 14th European Conference on Software Maintenance and Reengineering (CSMR 2010)},
  day = {15},
  month = {March},
  pages = {1--10},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/schuster2010a.pdf},
  title = {Comprehensive tool support for iterative {SOA} evolution},
  year = {2010}
}
@phdthesis{trifu10a,
  address = {Karlsruhe, Germany},
  author = {Mircea Trifu},
  school = {Karlsruhe Institute of Technology},
  title = {Tool-Supported Identification of Functional Concerns in Object-Oriented Code},
  year = {2010}
}
@misc{vaupelpatent4,
  author = {Vaupel, Robert},
  title = {{Method for Controlling the Capacity Usage of a Logically Partitioned Data Processing System}},
  howpublished = {Patent No. 7752415, United States},
  month = {July},
  year = {2010}
}
@inproceedings{Weiss2010a,
  abstract = {We identified a set of open issues in the context of software aging and long-living systems with respect to the application domain of industrial automation systems, e.g. process control [7] and SCADA (supervisory control and data acquisition) systems. Existing systems in the automation domain suffer from expensive evolution and maintenance as well as from long release cycles. One of the root causes for this is that longevity was not considered during their construction. Most of the solutions that can be found today are not domain-specific, and tend to focus rather on symptoms than on causes. Therefore, we initiated a research project which has the target to define more clearly what domain-specific longevity means, to survey existing approaches, and to derive methods and techniques for addressing the mentioned problem in the industrial automation domain. In this contribution we present the objectives of this project and outline our state of progress.},
  author = {Roland Weiss and Heiko Koziolek and Johannes Stammel and Zoya Durdik},
  booktitle = {Proceedings of 2nd Workshop of GI Working Group ``Long-living Software Systems'' (L2S2)},
  title = {Evolution problems in the context of sustainable industrial software systems},
  year = {2010}
}
@inproceedings{westermann2010b,
  abstract = {Understanding the performance characteristics of enterprise applications, such as response time, throughput, and resource utilization, is crucial for satisfying customer expectations and minimizing costs of application hosting. Enterprise applications are usually based on a large set of existing software (e.g. middleware, legacy applications, and third party services). Furthermore, they continuously evolve due to changing market requirements and short innovation cycles. Software performance engineering in its essence is not directly applicable to such scenarios. Many approaches focus on early lifecycle phases assuming that a software system is built from scratch and all its details are known. These approaches neglect influences of already existing middleware, legacy applications, and third party services. For performance prediction, detailed information about the internal structure of the systems is necessary. However, such information may not be available or accessible due to the complexity of existing software. In this paper, we propose a combined approach of model based and measurement based performance evaluation techniques to handle the complexity of large enterprise applications. We outline open research questions that have to be answered in order to put performance engineering in industrial practice. For validation, we plan to apply our approach to different real-world scenarios that involve current SAP enterprise solutions such as SAP Business ByDesign and the SAP Business Suite.},
  address = {Karlsruhe, Germany},
  author = {Dennis Westermann and Jens Happe},
  booktitle = {Proceedings of the Fifteenth International Workshop on Component-Oriented Programming (WCOP) 2010},
  editor = {B{\"{u}}hnov{\'{a}}, Barbora and Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  issn = {1432-7864},
  month = {June},
  pages = {71--78},
  publisher = {Karlsruhe Institute of Technology, Faculty of Informatics},
  series = {Interne Berichte},
  title = {Towards Performance Prediction of Large Enterprise Applications Based on Systematic Measurements},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000018464},
  volume = {2010-14},
  year = {2010}
}
@inproceedings{westermann2010a,
  abstract = {Evaluating the performance (timing behavior, throughput, and resource utilization) of a software system becomes more and more challenging as today's enterprise applications are built on a large basis of existing software (e.g. middleware, legacy applications, and third party services). As the performance of a system is affected by multiple factors on each layer of the system, performance analysts require detailed knowledge about the system under test and have to deal with a huge number of tools for benchmarking, monitoring, and analyzing. In practice, performance analysts try to handle the complexity by focusing on certain aspects, tools, or technologies. However, these isolated solutions are inefficient due to the small reuse and knowledge sharing. The Performance Cockpit presented in this paper is a framework that encapsulates knowledge about performance engineering, the system under test, and analyses in a single application by providing a flexible, plug-in based architecture. We demonstrate the value of the framework by means of two different case studies.},
  author = {Dennis Westermann and Jens Happe and Michael Hauck and Christian Heupel},
  booktitle = {Proceedings of the 36th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA 2010)},
  pages = {31--38},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/westermann2010a.pdf},
  publisher = {IEEE Computer Society},
  title = {The Performance Cockpit Approach: A Framework for Systematic Performance Evaluations},
  year = {2010}
}
@inproceedings{westermann2010c,
  abstract = {The upcoming business model of providing software as a service (SaaS) bears a lot of challenges to a service provider. On the one hand, service providers have to guarantee a certain quality of service (QoS) and ensure that they adhere to these guarantees at runtime. On the other hand, they have to minimize the total cost of ownership (TCO) of their IT landscape in order to offer competitive prices. The performance of a system is a critical attribute that affects QoS as well as TCO. However, the evaluation of performance characteristics is a complex task. Many existing solutions do not provide the accuracy required for offering dependable guarantees. One major reason for this is that the dependencies between the usage profile (provided by the service consumers) and the performance of the actual system is barely described sufficiently. Software Performance Curves are performance models that are derived by goal-oriented systematic measurements of the actual software service. In this paper, we describe how Software Performance Curves can be derived by a service provider that hosts a multi-tenant system. Moreover, we illustrate how Software Performance Curves can be used to derive feasible performance guarantees, develop pricing functions, and minimize hardware resources.},
  acmid = {1858267},
  address = {New York, NY, USA},
  articleno = {3},
  author = {Dennis Westermann and Christof Momm},
  booktitle = {Proceedings of the 2nd International Workshop on the Quality of Service-Oriented Software Systems},
  doi = {10.1145/1858263.1858267},
  isbn = {978-1-4503-0239-5},
  location = {Oslo, Norway},
  numpages = {6},
  pages = {3:1--3:6},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/westermann2010c.pdf},
  publisher = {ACM},
  series = {QUASOSS '10},
  title = {Using software performance curves for dependable and cost-efficient service hosting},
  url = {http://doi.acm.org/10.1145/1858263.1858267},
  year = {2010}
}
@proceedings{PaKo2010-KIT-YoungInformatics,
  address = {Karlsruhe, Germany},
  editor = {Victor Pankratius and Samuel Kounev},
  isbn = {978-3-86644-508-6},
  month = {July},
  publisher = {KIT Scientific Publishing},
  title = {{Emerging Research Directions in Computer Science. Contributions from the Young Informatics Faculty in Karlsruhe}},
  url = {http://uvka.ubka.uni-karlsruhe.de/shop/isbn/978-3-86644-508-6},
  year = {2010}
}
@inproceedings{simko2010snpd,
  abstract = {A common practice to capture functional requirements of a software system is to utilize use-cases, which are textual descriptions of system usage scenarios written in a natural language. Since the substantial information about the system is captured by the use-cases, it comes as a natural idea to generate from these descriptions the implementation of the system (at least partially). However, the fact that the use-cases are in a natural language makes this task extremely difficult. In this paper, we describe a model-driven tool allowing code of a system to be generated from use-cases in plain English. The tool is based on the model-driven development paradigm, which makes it modular and extensible, so as to allow for use-cases in multiple language styles and generation for different component frameworks.},
  author = {Viliam Simko and Petr Hnetynka and Tomas Bures},
  booktitle = {Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing 2010},
  doi = {10.1007/978-3-642-13265-0_3},
  editor = {Lee, Roger and Ma, Jixin and Bacon, Liz and Du, Wencai and Petridis, Miltos},
  isbn = {978-3-642-13264-3},
  pages = {23--37},
  pdf = {http://d3s.mff.cuni.cz/~simko/wiki/uploads/Main/Publications/ViliamSimko-SNPD2010.pdf},
  publisher = {Springer Berlin Heidelberg},
  series = {Studies in Computational Intelligence},
  title = {From textual use-cases to component-based applications},
  url = {http://dx.doi.org/10.1007/978-3-642-13265-0_3},
  year = {2010}
}
@inproceedings{heinrich2010defining,
  author = {Heinrich, Robert and Paech, Barbara},
  title = {Defining the Quality of Business Processes},
  booktitle = {Modellierung},
  pages = {133--148},
  year = {2010},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/heinrich2010defining.pdf}
}