2009.bib

@article{becker2008a,
  abstract = {One aim of component-based software engineering (CBSE) is to enable the prediction of extra-functional properties, such as performance and reliability, utilising a well-defined composition theory. Nowadays, such theories and their accompanying prediction methods are still in a maturation stage. Several factors influencing extra-functional properties need additional research to be understood. A special problem in CBSE stems from its specific development process: Software components should be specified and implemented independently from their later context to enable reuse. Thus, extra-functional properties of components need to be specified in a parametric way to take different influencing factors like the hardware platform or the usage profile into account. Our approach uses the Palladio component model (PCM) to specify component-based software architectures in a parametric way. This model offers direct support of the CBSE development process by dividing the model creation among the developer roles. This paper presents our model and a simulation tool based on it, which is capable of making performance predictions. Within a case study, we show that the resulting prediction accuracy is sufficient to support the evaluation of architectural design decisions.},
  author = {Steffen Becker and Heiko Koziolek and Ralf Reussner},
  doi = {10.1016/j.jss.2008.03.066},
  journal = {Journal of Systems and Software},
  pages = {3--22},
  publisher = {Elsevier Science Inc.},
  title = {{T}he {P}alladio component model for model-driven performance prediction},
  volume = {82},
  year = {2009}
}
@misc{vaupelpatent2,
  author = {Bosch, G. and Vaupel, Robert and Wirag, S.},
  howpublished = {Patent No. 7487506, United States},
  month = {February},
  title = {{Autonomous Management of System Troughput}},
  year = {2009}
}
@techreport{indust2009,
  abstract = {The industrialization of software development is currently a much-discussed topic. It is chiefly concerned with increasing efficiency by raising the degree of standardization and automation as well as through an increased division of labour. This affects both the architectures underlying software systems and the development processes; service-oriented architectures, for instance, are an example of increased standardization within software systems. It has to be taken into account that the software industry differs from the classical manufacturing industries in that software is an immaterial product that can be duplicated arbitrarily often without high production costs. Nevertheless, many insights from the classical industries can be transferred to software engineering. The contents of this report stem mainly from the seminar ``Software-Industrialisierung'', which dealt with the professionalization of software development and software design. While classical software development is weakly structured and satisfies no elevated requirements regarding reproducibility or quality assurance, software development is undergoing a transformation in the course of industrialization. This includes division of labour, the introduction of development processes with predictable properties (cost, time required, ...), and, as a consequence, the creation of products with guaranteeable properties. The topics of the seminar included, among others: component-based software architectures; model-driven software development: concepts and technologies; and industrial software development processes and their assessment. The seminar was organized like a scientific conference: the submissions were evaluated in a two-stage peer-review process. In the first stage, the student papers were reviewed by fellow students; in the second stage, by the supervisors. The articles were presented in several sessions, as at a conference. The best contributions were honoured with two Best Paper Awards, which went to Tom Beyer for his paper ``Realoptionen f{\"u}r Entscheidungen in der Software-Entwicklung'' and to Philipp Meier for his paper ``Assessment Methods for Software Product Lines''. The participants' talks were complemented by two invited talks: Collin Rogowski of 1\&1 Internet AG presented the agile software development process used for the mail product GMX.COM, and Heiko Koziolek, Wolfgang Mahnke, and Michaela Saeftel of ABB spoke about software product line engineering in the context of the robotics applications developed at ABB.},
  address = {Karlsruhe},
  author = {Brosch, Franz and Groenda, Henning and Kapova, Lucia and Krogmann, Klaus and Kuperberg, Michael and Martens, Anne and Parrend, Pierre and Reussner, Ralf and Stammel, Johannes and Taspolatoglu, Emre},
  issn = {1432-7864},
  institution = {Fakult{\"a}t f{\"u}r Informatik, Universit{\"a}t Karlsruhe},
  note = {Interner Bericht},
  publisher = {Fakult{\"a}t f{\"u}r Informatik, Institut f{\"u}r Programmstrukturen und Datenorganisation},
  timestamp = {2009.06.29},
  title = {Software-Industrialisierung},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000010899},
  volume = {4},
  year = {2009}
}
@inproceedings{brosch2009a,
  abstract = {Reliability is one of the most critical extra-functional properties of a software system, which needs to be evaluated early in the development process when formal methods and tools can be applied. Though many approaches for reliability prediction exist, not much work has been done in combining different types of failures and system views that influence the reliability. This paper presents an integrated approach to reliability prediction, reflecting failures triggered by both software faults and physical-resource breakdowns, and incorporating detailed information about system control flow governed by user inputs.},
  author = {Franz Brosch and Barbora Zimmerova},
  booktitle = {International Workshop on Software Quality and Maintainability (SQM)},
  month = {March},
  pages = {70--74},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/brosch2009.pdf},
  title = {{Design-Time Reliability Prediction for Software Systems}},
  year = {2009}
}
@mastersthesis{brosig2009a,
  address = {Karlsruhe, Germany},
  author = {Fabian Brosig},
  month = {June},
  note = {FZI Prize ``Best Diploma Thesis''},
  school = {Universit{\"{a}}t Karlsruhe (TH)},
  title = {{Automated Extraction of Palladio Component Models from Running Enterprise Java Applications}},
  year = {2009}
}
@inproceedings{BrKoKr2009-ROSSA-Extracting_PCM_JavaEE,
  abstract = {Nowadays, software systems have to fulfill increasingly stringent requirements for performance and scalability. To ensure that a system meets its performance requirements during operation, the ability to predict its performance under different configurations and workloads is essential. Most performance analysis tools currently used in industry focus on monitoring the current system state. They provide low-level monitoring data without any performance prediction capabilities. For performance prediction, performance models are normally required. However, building predictive performance models manually requires a lot of time and effort. In this paper, we present a method for automated extraction of performance models of Java EE applications, based on monitoring data collected during operation. We extract instances of the Palladio Component Model (PCM) - a performance meta-model targeted at component-based systems. We evaluate the model extraction method in the context of a case study with a real-world enterprise application. Even though the extraction requires some manual intervention, the case study demonstrates that the existing gap between low-level monitoring data and high-level performance models can be closed.},
  author = {Fabian Brosig and Samuel Kounev and Klaus Krogmann},
  booktitle = {Proceedings of the 1st International Workshop on Run-time mOdels for Self-managing Systems and Applications (ROSSA 2009). In conjunction with the Fourth International Conference on Performance Evaluation Methodologies and Tools (VALUETOOLS 2009)},
  month = {October},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/BrKoKr2009-ROSSA-Extracting_PCM_JavaEE.pdf},
  publisher = {ACM, New York, NY, USA},
  title = {{Automated Extraction of Palladio Component Models from Running Enterprise Java Applications}},
  year = {2009},
  isbn = {978-963-9799-70-7},
  location = {Pisa, Italy},
  pages = {10:1--10:10},
  articleno = {10}
}
@misc{BrKoPa2009-OTN-WLDF2PCM,
  abstract = {Throughout the system life cycle, the ability to predict a software system's performance under different configurations and workloads is highly valuable to ensure that the system meets its performance requirements. During the design phase, performance prediction helps to evaluate different design alternatives. At deployment time, it facilitates system sizing and capacity planning. During operation, predicting the effect of changes in the workload or in the system configuration is beneficial for run-time performance management. The alternative to performance prediction is to deploy the system in an environment reflecting the configuration of interest and conduct experiments measuring the system performance under the respective workloads. Such experiments, however, are normally very expensive and time-consuming and therefore often considered not to be economically viable. To enable performance prediction we need an abstraction of the real system that incorporates performance-relevant data, i.e., a performance model. Based on such a model, performance analysis can be carried out. Unfortunately, building predictive performance models manually requires a lot of time and effort. The model must be designed to reflect the abstract system structure and capture its performance-relevant aspects. In addition, model parameters like resource demands or system configuration parameters have to be determined. Given the costs of building performance models, techniques for automatic extraction of models based on observation of the system at run-time are highly desirable. During system development, such models can be exploited to evaluate the performance of system prototypes. During operation, an automatically extracted performance model can be applied for efficient and performance-aware resource management. For example, if one observes an increased user workload and assumes a steady workload growth rate, performance predictions help to determine when the system would reach its saturation point. This way, system operators can react to the changing workload before the system has failed to meet its performance objectives thus avoiding a violation of service level agreements (SLAs). Current performance analysis tools used in industry mostly focus on profiling and monitoring transaction response times and resource consumption. The tools often provide large amounts of low level data while important information needed for building performance models is missing, e.g., the resource demands of individual components. In this article, we present a method for automated extraction of performance models for Java EE applications during operation. We implemented the method in a tool prototype and evaluated its effectiveness in the context of a case study with an early prototype of the SPECjEnterprise2009 benchmark application which in the following we will refer to as SPECjEnterprise2009_pre. (SPECjEnterprise2009 is the successor benchmark of the SPECjAppServer2004 benchmark developed by the Standard Performance Evaluation Corp. [SPEC]; SPECjEnterprise is a trademark of SPEC. The SPECjEnterprise2009 results or findings in this publication have not been reviewed or accepted by SPEC, therefore no comparison nor performance inference can be made against any published SPEC result.) The target Java EE platform we consider is Oracle WebLogic Server (WLS). The extraction is based on monitoring data that is collected during operation using the WebLogic Diagnostics Framework (WLDF). 
As a performance model, we selected the Palladio Component Model (PCM). PCM is a sophisticated performance modeling framework with mature tool support. In contrast to low-level mathematical models such as queueing networks, PCM is a high-level, UML-like, design-oriented model that captures the performance-relevant aspects of the system architecture. This makes PCM models easy to understand and use by software developers. We begin by providing some background on the technologies we use, focusing on the WLDF monitoring framework and the PCM models. We then describe the model extraction method in more detail. Finally, we present the case study we conducted and conclude with a summary.},
  author = {Fabian Brosig and Samuel Kounev and Charles Paclat},
  howpublished = {Oracle Technology Network (OTN) Article},
  month = {September},
  title = {{Using WebLogic Diagnostics Framework to Enable Performance Prediction for Java EE Applications}},
  url = {http://www.oracle.com/technetwork/articles/brosig-wldf-086367.html},
  year = {2009}
}
@inproceedings{Geoffray2009cfse,
  address = {Toulouse, France},
  author = {N. Geoffray and G. Thomas and G. Muller and P. Parrend and S. Frenot and B. Folliot},
  booktitle = {Conf{\'e}rence Fran{\c{c}}aise sur les Syst{\`e}mes d'Exploitation},
  month = {September},
  timestamp = {2010.01.29},
  title = {I-JVM: une machine virtuelle Java pour l'isolation de composants dans OSGi},
  year = {2009}
}
@inproceedings{NicolasGeoffray2009,
  address = {Lisbon, Portugal},
  author = {Nicolas Geoffray and Gael Thomas and Gilles Muller and Pierre Parrend and Stephane Frenot and Bertil Folliot},
  booktitle = {39th Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN 2009)},
  timestamp = {2010.01.29},
  title = {I-JVM: a Java Virtual Machine for Component Isolation in OSGi},
  year = {2009}
}
@inproceedings{goldschmidt2009a,
  abstract = {Textual concrete syntaxes for models are beneficial for many reasons. They foster usability and productivity because of their fast editing style, their usage of error markers, autocompletion, and quick fixes. Several frameworks and tools for creating concrete textual syntaxes for models have emerged from different communities in recent years. However, these approaches have failed to provide a general solution. Open issues are incremental parsing and model updating as well as partial and federated views. Building views on abstract models is one of the key concepts of model-driven engineering. Different views help to present the concepts behind a model in a way that they can be understood and edited by different stakeholders or developers in different roles. Within graphical modelling, several approaches exist that allow the definition of explicit holistic, partial, or combined graphical views for models. On the other hand, several frameworks that provide textual editing support for models have been presented in recent years. However, the combination of both principles, i.e., textual, editable, and decorating views, is lacking in all of these approaches. In this presentation, we show FURCAS (Framework for UUID Retaining Concrete to Abstract Syntax Mappings), a textual decorator approach that allows the textual concrete syntax to be stored and managed separately from the actual abstract model elements. Thereby, we allow the definition of textual views on models that may be partial and/or overlapping with respect to other (graphical and/or textual) views.},
  author = {Goldschmidt, Thomas and Becker, Steffen and Uhl, Axel},
  booktitle = {Proceedings of the 5th European Conference on Model Driven Architecture - Foundations and Applications (ECMDA 2009) - Tools and Consultancy Track},
  publisher = {CTIT},
  timestamp = {2008.04.28},
  title = {{FURCAS: Framework for UUID-Retaining Concrete to Abstract Syntax Mappings}},
  year = {2009}
}
@inproceedings{goldschmidt2009b,
  abstract = {Building views on abstract models is one of the key concepts of model-driven engineering. Different views help to present the concepts behind a model in a way that they can be understood and edited by different stakeholders or developers in different roles. Within graphical modelling, several approaches exist that allow the definition of explicit holistic, partial, or combined graphical views for models. On the other hand, several frameworks that provide textual editing support for models have been presented in recent years. However, the combination of both principles, i.e., textual, editable, and decorating views, is lacking in all of these approaches. In this paper, we introduce a textual decorator approach that allows the textual concrete syntax to be stored and managed separately from the actual abstract model elements. Thereby, we allow the definition of textual views on models that may be partial and/or overlapping with respect to other (graphical and/or textual) views.},
  author = {Goldschmidt, Thomas and Becker, Steffen and Uhl, Axel},
  booktitle = {Proceedings of the 35th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA)},
  publisher = {IEEE},
  timestamp = {2009.04.28},
  title = {Textual Views in Model Driven Engineering},
  year = {2009}
}
@article{engels2009a,
  author = {Engels, Gregor and Goedicke, Michael and Goltz, Ursula and Rausch, Andreas and Reussner, Ralf},
  journal = {Informatik-Spektrum},
  month = {October},
  number = {5},
  pages = {393--397},
  timestamp = {2009.09.28},
  title = {Design for Future - Legacy-Probleme von morgen vermeidbar?},
  volume = {32},
  year = {2009}
}
@inproceedings{groenda2009a,
  abstract = {In software engineering, performance specifications of components support the successful evolution of complex software systems. Having trustworthy specifications is important to reliably detect unwanted effects of modifications on the performance using prediction techniques before they are experienced in live systems. This is especially important if there is no test system available and a system can't be taken down or replaced in its entirety. Existing approaches neglect stating the quality of specifications at all and hence the quality of the prediction is lowered if the assumption that all used specifications are suitable does not hold. In this paper, we propose a test-based approach to validate performance specifications against deployed component implementations. The validation is used to certify specifications which in turn allow assessing the suitability of specifications for predicting the performance of a software system. A small example shows that the certification approach is applicable and creates trustworthy performance specifications.},
  author = {Henning Groenda},
  booktitle = {Proceedings of the Fourteenth International Workshop on Component-Oriented Programming (WCOP) 2009},
  day = {25},
  location = {East Stroudsburg, PA, USA},
  month = {June},
  pages = {13--21},
  title = {Certification of Software Component Performance Specifications},
  year = {2009}
}
@article{groenda2009,
  author = {Henning Groenda and Christoph Rathfelder and Ralph Mueller},
  journal = {Eclipse Magazine},
  month = {March},
  pages = {8--10},
  timestamp = {2009-04-02},
  title = {{Best of Eclipse DemoCamps - Ein Erfahrungsbericht vom dritten Karlsruher Eclipse DemoCamp}},
  volume = {3},
  year = {2009}
}
@inproceedings{happe2009b,
  abstract = {The shift of hardware architecture towards parallel execution led to a broad usage of multi-core processors in desktop systems and in server systems. The benefit of additional processor cores for software performance depends on the software's parallelism as well as the operating system scheduler's capabilities. Especially, the load on the available processors (or cores) strongly influences response times and throughput of software applications. Hence, a sophisticated understanding of the mutual influence of software behaviour and operating system schedulers is essential for accurate performance evaluations. Multi-core systems pose new challenges for performance analysis and developers of operating systems. For example, an optimal scheduling policy for multi-server systems, such as shortest remaining processing time (SRPT) for single-server systems, is not yet known in queueing theory. In this paper, we present a detailed experimental evaluation of general purpose operating system (GPOS) schedulers in symmetric multiprocessing (SMP) environments. In particular, we are interested in the influence of multiprocessor load balancing on software performance. Additionally, the evaluation includes effects of GPOS schedulers that can also occur in single-processor environments, such as I/O-boundedness of tasks and different prioritisation strategies. The results presented in this paper provide the basis for the future development of more accurate performance models of today's software systems.},
  author = {Jens Happe and Henning Groenda and Ralf H. Reussner},
  booktitle = {Proceedings of the 17th IEEE International Symposium on Modelling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS'09)},
  title = {{P}erformance {E}valuation of {S}cheduling {P}olicies in {S}ymmetric {M}ultiprocessing {E}nvironments},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/happe2009b.pdf},
  year = {2009}
}
@inproceedings{happe2009c,
  abstract = {Software performance engineering enables software architects to find potential performance problems, such as bottlenecks and long delays, prior to implementation and testing. Such early feedback on the system's performance is essential to develop and maintain efficient and scalable applications. However, the unavailability of data necessary to design performance models often hinders its application in practice. During system maintenance, the existing system has to be included into the performance model. For large, heterogeneous, and complex systems that have grown over time, modelling becomes infeasible due to the sheer size and complexity of the systems. Re-engineering approaches also fail due to the large and heterogeneous technology stack. Especially for such systems, performance prediction is essential. In this position statement, we propose goal-oriented abstractions of large parts of a software system based on systematic measurements. The measurements provide the information necessary to determine Black-box Performance Models that directly capture the influence of a system's usage and workload on performance (response time, throughput, and resource utilisation). We outline the research challenges that need to be addressed in order to apply Black-box Performance Models.},
  author = {Jens Happe and Hui Li and Wolfgang Theilmann},
  booktitle = {Proceedings of the 1st International Workshop on the Quality of Service-Oriented Software Systems (QUASOSS)},
  doi = {10.1145/1596473.1596479},
  pages = {19--24},
  publisher = {ACM, New York, NY, USA},
  title = {{B}lack-box {P}erformance {M}odels: {P}rediction based on {O}bservation},
  year = {2009}
}
@book{happe2009d,
  abstract = {With today's rise of multi-core processors, concurrency becomes a ubiquitous challenge in software development. Performance prediction methods have to reflect the influence of multiprocessing environments on software performance in order to help software architects to find potential performance problems during early development phases. In this thesis, we address the influence of the operating system scheduler on software performance in symmetric multiprocessing environments.},
  author = {Jens Happe},
  doi = {10.5445/KSP/1000011806},
  editor = {Ralf Reussner},
  isbn = {978-3-86644-381-5},
  month = {July},
  publisher = {Universit{\"a}tsverlag Karlsruhe},
  series = {The Karlsruhe Series on Software Design and Quality},
  title = {{P}redicting {S}oftware {P}erformance in {S}ymmetric {M}ulti-core and {M}ultiprocessor {E}nvironments},
  volume = {3},
  year = {2009}
}
@mastersthesis{hauck2009a,
  abstract = {The performance of a software system is strongly influenced by the execution environment the software runs in. In the Palladio Component Model (PCM), a domain-specific language for modelling component-based software systems, the execution environment must be modelled explicitly as it is needed for performance predictions. However, the current version of the PCM offers only rudimentary support for hardware resource modelling: For instance, it is not possible to distinguish between read and write accesses to a hard disk resource. This thesis develops an enhancement of the PCM meta-model that allows for better predictions based on more sophisticated resource models. The enhancement includes the support for accessing resources through explicit interfaces with distinct services and the integration of resource controllers in the meta-model. To support modelling of infrastructure components such as application servers, this thesis introduces the separation of business interfaces and interfaces for accessing resources or the execution environment. Existing PCM tools have been adapted to support the simulation of PCM instances based on the enhanced meta-model. Additionally, the adapted meta-model has been successfully evaluated in two case studies to show that the extended meta-model has no side effects on preexisting predictions and also enables scenarios not supported before, such as the modelling of a Java Virtual Machine which processes higher-level resource demands.},
  address = {Germany},
  author = {Hauck, Michael},
  month = {February},
  school = {University of Karlsruhe (TH)},
  title = {{Extending Performance-Oriented Resource Modelling in the Palladio Component Model}},
  type = {Diploma Thesis},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/hauck2009a.pdf},
  year = {2009}
}
@inproceedings{hauck2009b,
  abstract = {Software architects often use model-based techniques to analyse performance (e.g. response times), reliability and other extra-functional properties of software systems. These techniques operate on models of software architecture and execution environment, and are applied at design time for early evaluation of design alternatives, especially to avoid implementing systems with insufficient quality. Virtualisation (such as operating system hypervisors or virtual machines) and multiple layers in execution environments (e.g. RAID disk array controllers on top of hard disks) are becoming increasingly popular in reality and need to be reflected in the models of execution environments. However, current component meta-models do not support virtualisation and cannot model individual layers of execution environments. This means that the entire monolithic model must be recreated when different implementations of a layer must be compared to make a design decision, e.g. when comparing different Java Virtual Machines. In this paper, we present an extension of an established model-based performance prediction approach and associated tools which allow one to model and predict state-of-the-art layered execution environments, such as disk arrays, virtual machines, and application servers. The evaluation of the presented approach shows its applicability and the resulting accuracy of the performance prediction while respecting the structure of the modelled resource environment.},
  author = {Michael Hauck and Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 12th International Symposium on Component Based Software Engineering (CBSE 2009)}},
  doi = {10.1007/978-3-642-02414-6_12},
  ee = {http://dx.doi.org/10.1007/978-3-642-02414-6_12},
  isbn = {978-3-642-02413-9},
  volume = {5582},
  pages = {191--208},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/hauck2009b.pdf},
  publisher = {Springer},
  series = {LNCS},
  title = {{Modelling Layered Component Execution Environments for Performance Prediction}},
  url = {http://www.comparch-events.org/pages/present.html},
  year = {2009}
}
@inproceedings{DBLP:conf/sss/HenrichHKMS09,
  author = {Christian Henrich and Matthias Huber and Carmen Kempka and J{\"o}rn M{\"u}ller-Quade and Mario Strefler},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  booktitle = {Stabilization, Safety, and Security of Distributed Systems, 11th International Symposium, SSS 2009, Lyon, France, November 3-6, 2009. Proceedings},
  ee = {http://dx.doi.org/10.1007/978-3-642-05118-0_60},
  pages = {785--786},
  title = {Brief Announcement: Towards Secure Cloud Computing},
  year = {2009}
}
@inproceedings{henss2009a,
  author = {J{\"o}rg Henss and Joachim Kleb},
  booktitle = {11th {I}ntl. {P}rot{\'e}g{\'e} {C}onference - June 23-26, 2009 - Amsterdam, Netherlands},
  keywords = {database owl},
  title = {Prot{\'e}g{\'e} 4 {B}ackend for {N}ative {OWL} {P}ersistence},
  url = {http://protege.stanford.edu/conference/2009/abstracts/S9P2Kleb.pdf},
  year = {2009}
}
@inproceedings{henss2009b-OWLED09,
  abstract = {Most Semantic Web applications are built on top of technology based on the Semantic Web layer cake and the W3C ontology languages RDF(S) and OWL. However, RDF(S) embodies a graph abstraction model and is thus represented by triple-based artifacts. When OWL is used as a language for Semantic Web knowledge bases, this abstraction no longer holds: OWL is built upon an axiomatic model representation. Consequently, storage systems focusing on the triple-based representation of ontologies no longer seem adequate as a persistence layer for OWL ontologies. Our proposed system allows for a native mapping of OWL constructs to a database schema without an unnecessarily complex transformation into triples. Our evaluation shows that our system performs comparably to current OWL storage systems.},
  author = {J{\"o}rg Henss and Joachim Kleb and Stephan Grimm and J{\"u}rgen Bock},
  booktitle = {Proceedings of the 5th {I}nternational {W}orkshop on {OWL}: {E}xperiences and {D}irections ({OWLED 2009}), Chantilly, VA, United States, October 23-24, 2009},
  editor = {Rinke Hoekstra and Peter F. Patel-Schneider},
  keywords = {database owl},
  publisher = {CEUR-WS},
  series = {CEUR Workshop Proceedings},
  title = {A {D}atabase {B}ackend for {OWL}},
  url = {http://CEUR-WS.org/Vol-529/owled2009_submission_3.pdf},
  volume = {529},
  year = {2009}
}
@mastersthesis{huber09approx,
  abstract = {Edge-weighted graphs occur in many application domains, for example image processing, transport logistics, or software engineering. Analysing such graphs by means of graph-mining techniques is a worthwhile task, yet there is no graph-mining algorithm capable of analysing edge-weighted graphs. So far, edge weights have either been discretized so that weighted graphs could be analysed, or they have only been considered in a post-processing step. This thesis presents an extension of the graph-mining algorithms gSpan and CloseGraph based on constraints over edge weights, which makes it possible to consider and evaluate edge weights directly during mining. This opens up new pruning opportunities that can lead to runtime gains. Several methods for evaluating edge weights are presented. Furthermore, these options are evaluated and compared with respect to runtime and result quality on real data from the domains of transport logistics and software engineering. It is shown that the extensions presented in this thesis improve the runtime of the graph-mining algorithm while achieving comparable result quality.},
  author = {Huber, Matthias},
  month = {March},
  school = {Universit{\"a}t Karlsruhe (TH)},
  title = {{Approximatives und diskriminatives Mining von gewichteten Graphen}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/huber09approx.pdf},
  year = {2009}
}
@mastersthesis{Hu2009-UKA-PerfMod,
  address = {Karlsruhe, Germany},
  author = {Nikolaus Huber},
  month = {April},
  note = {GFFT Prize},
  school = {Universit{\"{a}}t Karlsruhe (TH)},
  title = {{Performance Modeling of Storage Virtualization}},
  year = {2009}
}
@inproceedings{kuebler2009a,
  abstract = {Model Driven Software Development (MDSD) has matured over the last few years and is now becoming an established technology. Models are used in various contexts, where the possibility to perform different kinds of analyses based on the modelled applications is one of these potentials. In different use cases during these analyses it is necessary to detect patterns within large models. A general analysis technique that deals with lots of data is pattern mining. Different algorithms for different purposes have been developed over time. However, current approaches were not designed to operate on models. By employing QVT for matching and transforming patterns, we present an approach that deals with this problem. Furthermore, we present an idea to use our pattern mining approach to estimate the maintainability of modelled artifacts.},
  author = {K{\"u}bler, Jens and Goldschmidt, Thomas},
  booktitle = {Proceedings of the 5th European Conference on Model Driven Architecture - Foundations and Applications (ECMDA)},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{A} {P}attern {M}ining {A}pproach {U}sing {QVT}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuebler2009a.pdf},
  year = {2009}
}
@mastersthesis{kuester2009a,
  author = {Martin K{\"u}ster},
  month = {November},
  school = {Karlsruhe Institute of Technology},
  title = {Modularization of Text-to-Model Mapping Specifications - A Feasibility Study Using Scannerless Parsing},
  year = {2009}
}
@inproceedings{kapova2009a,
  author = {Kapova, Lucia and Goldschmidt, Thomas},
  booktitle = {Proceedings of the 35th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA)},
  publisher = {IEEE},
  title = {Automated Feature Model-based Generation of Refinement Transformations},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kapova2009a.pdf},
  year = {2009}
}
@inbook{Ko2008-WILEY-SoftwarePerfEval,
  abstract = {Modern software systems are expected to satisfy increasingly stringent requirements for performance and scalability. To avoid the pitfalls of inadequate quality of service, it is important to evaluate the expected performance and scalability characteristics of systems during all phases of their life cycle. At every stage, performance evaluation is carried out with a specific set of goals and constraints. In this article, we present an overview of the major methods and techniques for software performance evaluation. We start by considering the different types of workload models that are typically used in performance evaluation studies. We then discuss performance measurement techniques including platform benchmarking, application profiling and system load testing. Following this, we survey the most common methods and techniques for performance modeling of software systems. We consider the major types of performance models used in practice and discuss their advantages and disadvantages. Finally, we briefly discuss operational analysis as an alternative to queueing theoretic methods.},
  author = {Samuel Kounev},
  chapter = {{Software Performance Evaluation}},
  editor = {Benjamin W. Wah},
  isbn = {0471383937},
  isbn-13 = {978-0471383932},
  month = {January},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/Ko2008-WILEY-SoftwarePerfEval.pdf},
  publisher = {Wiley-Interscience, John Wiley \& Sons Inc.},
  title = {{Wiley Encyclopedia of Computer Science and Engineering}},
  url = {http://www.amazon.com/Wiley-Encyclopedia-Computer-Science-Engineering/dp/0471383937},
  year = {2009}
}
@article{KoDu2009-SIGMETRICS_PER-QPME,
  abstract = {Queueing Petri nets are a powerful formalism that can be exploited for modeling distributed systems and analyzing their performance and scalability. By combining the modeling power and expressiveness of queueing networks and stochastic Petri nets, queueing Petri nets provide a number of advantages. In this paper, we present QPME (Queueing Petri net Modeling Environment) - a tool that supports the modeling and analysis of systems using queueing Petri nets. QPME provides an Eclipse-based editor for designing queueing Petri net models and a powerful simulation engine for analyzing the models. After presenting the tool, we discuss the ongoing work on the QPME project and the planned future enhancements of the tool.},
  author = {Samuel Kounev and Christofer Dutz},
  journal = {ACM SIGMETRICS Performance Evaluation Review (PER), Special Issue on Tools for Computer Performance Modeling and Reliability Analysis},
  month = {March},
  number = {4},
  pages = {46--51},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/KoDu2009-SIGMETRICS_PER-QPME.pdf},
  publisher = {ACM, New York, NY, USA},
  title = {{QPME - A Performance Modeling Tool Based on Queueing Petri Nets}},
  volume = {36},
  year = {2009}
}
@article{KoSa2009-it-EventBasedSystems,
  abstract = {Event-based systems are used increasingly often to build loosely-coupled distributed applications. With their growing popularity and gradual adoption in mission critical areas, the need for novel techniques for benchmarking and performance modeling of event-based systems is increasing. In this article, we provide an overview of the state-of-the-art in this area considering both centralized systems based on message-oriented middleware as well as large-scale distributed publish/subscribe systems. We consider a number of specific techniques for benchmarking and performance modeling, discuss their advantages and disadvantages, and provide references for further information. The techniques we review help to ensure that systems are designed and sized to meet their quality-of-service requirements.},
  address = {Munich, Germany},
  author = {Samuel Kounev and Kai Sachs},
  journal = {it - Information Technology},
  month = {September},
  number = {5},
  publisher = {Oldenbourg Wissenschaftsverlag},
  title = {{Benchmarking and Performance Modeling of Event-Based Systems}},
  volume = {51},
  year = {2009}
}
@inproceedings{koziolek2009a,
  abstract = {Predicting the reliability of a software system at an architectural level during early design stages can help to make systems more dependable and avoid costs for fixing the implementation. Existing reliability prediction methods for component-based systems use Markov models and assume that the software architect can provide the transition probabilities between individual components. This is, however, not possible if the components are black boxes, exist only at the design stage, or are not available for testing. We propose a new modelling formalism that includes parameter dependencies into software component reliability specifications. It allows the software architect to only model a system-level usage profile, which a tool then propagates to individual components to determine the transition probabilities of the Markov model. We demonstrate the applicability of our approach by modelling the reliability of a retail management system and conduct reliability predictions.},
  author = {Koziolek, Heiko and Brosch, Franz},
  booktitle = {Proceedings of the 6th International Workshop on Formal Engineering approaches to Software Components and Architectures (FESCA)},
  doi = {10.1016/j.entcs.2009.09.026},
  editor = {Barbora Zimmerova and Jens Happe},
  issn = {1571-0661},
  pages = {23--38},
  publisher = {Elsevier},
  series = {ENTCS},
  title = {Parameter Dependencies for Component Reliability Specifications},
  url = {http://www.koziolek.de/docs/Koziolek2009.pdf},
  number = {1},
  volume = {253},
  year = {2009}
}
@article{krogmann2009b,
  abstract = {The use of components is an accepted principle in software development, where software components are usually regarded as black boxes whose internals are hidden from the component user. Architecture analysis methods for predicting non-functional properties make it possible, at the architecture level, to answer sizing questions for hardware/software environments and to carry out scalability analyses and what-if scenarios for the extension of legacy systems. To do so, however, they require information about component internals (e.g., the number of executed loop iterations or the calls of external services). To obtain such information, existing software components have to be analysed, and the required information about the inside of the components has to be reconstructed in such a way that it can be used by subsequent analysis methods for non-functional properties. A manual reconstruction of such models often fails due to the size of the systems and is highly error-prone, since consistent abstractions over potentially thousands of lines of code have to be found. Existing approaches do not deliver the data-flow and control-flow abstractions required for analyses and simulations. The contribution of this paper is a reverse-engineering method for component behaviour. The resulting models (Palladio Component Model) are suitable for predicting performance properties (response time, throughput) and thus for addressing the questions raised above. The models reconstructed from source code comprise parameterized control and data flow for software components and constitute an abstraction of the actual relationships in the source code. The reverse-engineering method combines static and dynamic analysis by means of genetic programming (a form of machine learning).},
  author = {Klaus Krogmann and Ralf Reussner},
  issn = {0720-8928},
  journal = {Softwaretechnik-Trends},
  month = {May},
  number = {2},
  pages = {22--24},
  title = {{Reverse Engineering von Software-Komponentenverhalten mittels Genetischer Programmierung}},
  url = {http://pi.informatik.uni-siegen.de/stt/29_2/01_Fachgruppenberichte/SRE/10-krogmann.pdf},
  volume = {29},
  year = {2009}
}
@inproceedings{krogmann2009a,
  abstract = {Software performance engineering provides techniques to analyze and predict the performance (e.g., response time or resource utilization) of software systems to avoid implementations with insufficient performance. These techniques operate on models of software, often at an architectural level, to enable early, design-time predictions for evaluating design alternatives. Current software performance engineering approaches allow the prediction of performance at design time, but often provide cryptic results (e.g., lengths of queues). These prediction results can hardly be mapped back to the software architecture by humans, making it hard to derive the right design decisions. In this paper, we integrate software cartography (a map technique) with software performance engineering to overcome the limited interpretability of raw performance prediction results. Our approach is based on model transformations and a general software visualization approach. It provides an intuitive mapping of prediction results to the software architecture which simplifies design decisions. We successfully evaluated our approach in a quasi-experiment involving 41 participants by comparing the correctness of performance-improving design decisions and participants' time effort using our novel approach to an existing software performance visualization.},
  author = {Klaus Krogmann and Christian M. Schweda and Sabine Buckl and Michael Kuperberg and Anne Martens and Florian Matthes},
  booktitle = {{Architectures for Adaptive Systems (Proceedings of QoSA 2009)}},
  doi = {10.1007/978-3-642-02351-4_4},
  editor = {Raffaela Mirandola and Ian Gorton and Christine Hofmeister},
  note = {Best Paper Award},
  pages = {52--69},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {{Improved Feedback for Architectural Performance Prediction using Software Cartography Visualizations}},
  url = {http://www.springerlink.com/content/m0325512hl4857v1},
  volume = {5581},
  year = {2009},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/krogmann2009a.pdf}
}
@inproceedings{kuperberg2009b,
  abstract = {ICS include supervisory control and data acquisition (SCADA) systems, distributed control systems (DCS), and other control system configurations such as skid-mounted Programmable Logic Controllers (PLC) as are often found in the industrial control sector. In contrast to traditional information processing systems, logic executing in ICS has a direct effect on the physical world. These control systems are critical for the operation of complex infrastructures that are often highly interconnected and thus mutually dependent systems. Numerous methodical approaches aim at modeling, analysis and simulation of single systems' behavior. However, modeling the interdependencies between different systems and describing their complex behavior by simulation is still an open issue. Although different modeling approaches from classic network theory to bio-inspired methods can be found in scientific literature, a comprehensive method for modeling and simulation of interdependencies among complex systems has still not been established. An overall model is needed to provide security and reliability assessment taking into account various kinds of threats and failures. These metrics are essential for a vulnerability analysis. Vulnerability of a critical infrastructure is defined as the presence of flaws or weaknesses in its design, implementation, operation and/or management that render it susceptible to destruction or incapacitation by a threat, in spite of its capacity to absorb and recover (``resilience''). A significant challenge associated with this model may be to create ``what-if'' scenarios for the analysis of interdependencies. Interdependencies affect the consequences of single or multiple failures or disruptions in interconnected systems. The different types of interdependencies can induce feedback loops which have accelerating or retarding effects on a system's response, as observed in system dynamics. Threats to control systems can come from numerous sources, including hostile governments, terrorist groups, disgruntled employees, malicious intruders, complexities, accidents, natural disasters and malicious or accidental actions by insiders. The threats and failures can impact ICS themselves as well as underlying (controlled) systems. In previous work, seven evaluation criteria have been defined and eight good-practice methods have been selected and are briefly described. Analysis of these techniques is undertaken and their suitability for modeling and simulation of interdependent critical infrastructures in general is hypothesized.},
  author = {Michael Kuperberg},
  booktitle = {Proceedings of the 2008 Dependability Metrics Research Workshop, Technical Report TR-2009-002},
  editor = {Felix C. Freiling and Irene Eusgeld and Ralf Reussner},
  location = {November 10, 2008, Mannheim, Germany},
  month = {May},
  organization = {Department of Computer Science, University of Mannheim},
  pages = {7--11},
  title = {{FOBIC: A Platform-Independent Performance Metric based on Dynamic Java Bytecode Counts}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009b.pdf},
  year = {2009}
}
@inproceedings{kuperberg2009c,
  author = {Michael Kuperberg and Martin Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 6th International Conference on Quantitative Evaluation of SysTems (QEST) 2009}},
  location = {September 13-16, 2009, Budapest, Hungary},
  title = {{TimerMeter: Quantifying Properties of Software Timers for System Analysis}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009c.pdf},
  year = {2009}
}
@inproceedings{kuperberg2009a,
  abstract = {Automated generation of method parameters is needed in benchmarking scenarios where manual or random generation of parameters is not suitable, does not scale, or is too costly. However, for a method to execute correctly, the generated input parameters must not violate implicit semantical constraints, such as ranges of numeric parameters or the maximum length of a collection. For most methods, such constraints have no formal documentation, and human-readable documentation of them is usually incomplete and ambiguous. Random search of appropriate parameter values is possible but extremely ineffective and does not respect such implicit constraints. Also, the role of polymorphism and of the method invocation targets is often not taken into account. Most existing approaches that claim automation focus on a single method and ignore the structure of the surrounding APIs where those exist. In this paper, we present HEURIGENJ, a novel heuristics-based approach for automatically finding legal and appropriate method input parameters and invocation targets, by approximating the implicit constraints imposed on them. Our approach is designed to support systematic benchmarking of API methods written in the Java language. We evaluate the presented approach by applying it to two frequently-used packages of the Java platform API, and demonstrating its coverage and effectiveness.},
  author = {Michael Kuperberg and Fouad Omri and Ralf Reussner},
  booktitle = {Proceedings of the 6th International Workshop on Formal Engineering approaches to Software Components and Architectures, York, UK, 28th March 2009 (ETAPS 2009, 12th European Joint Conferences on Theory and Practice of Software)},
  keywords = {Heuristics, parameter generation, exception handling, Java, benchmarking},
  title = {{Using Heuristics to Automate Parameter Generation for Benchmarking of Java Methods}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2009a.pdf},
  year = {2009}
}
@techreport{li2009a,
  abstract = {Service-Oriented Architecture (SOA) represents an architectural shift for building business applications based on loosely-coupled services. In a multi-layered SOA environment the exact conditions under which services are to be delivered can be formally specified by Service Level Agreements (SLAs). However, typical SLAs are just specified at the top-level and do not allow service providers to manage their IT stack accordingly as they have no insight on how top-level SLAs translate to metrics or parameters at the various layers of the IT stack. This paper addresses the research problems in the area of SLA translation, namely, the correlation and mapping of SLA-related metrics and parameters within and across IT layers. We introduce a conceptual framework for precise definition and classification of SLA translations in SOA. With such a framework, an in-depth review and analysis of the state of the art is carried out by the category, maturity, and applicability of approaches and methodologies. Furthermore, we discuss the fundamental research challenges to be addressed for turning the vision of holistic and transparent SLA translation into reality.},
  author = {Hui Li and Wolfgang Theilmann and Jens Happe},
  institution = {Universit{\"a}t Karlsruhe (TH)},
  month = {April},
  number = {2009-8},
  title = {SLA Translation in Multi-Layered Service Oriented Architectures: Status and Challenges},
  year = {2009}
}
@inproceedings{MuScPaKoRi2009-EuroPar-StoAnalPubSub,
  abstract = {With the gradual adoption of publish/subscribe systems in mission critical areas, it is essential that systems are subjected to rigorous performance analysis before they are put into production. However, existing approaches to performance modeling and analysis of publish/subscribe systems suffer from many limitations that seriously constrain their practical applicability. In this paper, we present a generalized method for stochastic analysis of publish/subscribe systems employing identity-based hierarchical routing. The method is based on an analytical model that addresses the major limitations underlying existing work in this area. In particular, it supports arbitrary broker overlay topologies and allows workload parameters, e.g., publication rates and subscription lifetimes, to be set individually for each broker. The analysis is illustrated by a running example that helps to gain a better understanding of the derived mathematical relationships.},
  author = {Gero M{\"u}hl and Arnd Schr{\"o}ter and Helge Parzyjegla and Samuel Kounev and Jan Richling},
  booktitle = {Proceedings of the 15th International European Conference on Parallel and Distributed Computing (Euro-Par 2009), Delft, The Netherlands, August 25-28, 2009},
  note = {Acceptance Rate (Full Paper): 33\%},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/MuScPaKoRi2009-EuroPar-StoAnalPubSub.pdf},
  publisher = {Springer Verlag},
  title = {{Stochastic Analysis of Hierarchical Publish/Subscribe Systems}},
  url = {http://europar2009.ewi.tudelft.nl/},
  year = {2009}
}
@inproceedings{martens2009b,
  abstract = {Quantitative prediction of quality criteria (i.e. extra-functional properties such as performance, reliability, and cost) of service-oriented architectures supports a systematic software engineering approach. However, various degrees of freedom in building a software architecture span a large, discontinuous design space. Currently, solutions with a good trade-off between multiple quality criteria have to be found manually. We propose an automated approach to search the design space by modifying the architectural models, to improve the architecture with respect to multiple quality criteria, and to find optimal architectural models. The optimal architectural models found can be used as input for trade-off analyses and thus allow systematic engineering of high-quality software architectures. Using this approach, the design of a high-quality component-based software system is eased for the software architect and thus saves cost and effort. Our approach applies a multi-criteria genetic algorithm to software architectures modelled with the Palladio Component Model (PCM). Currently, the method supports quantitative performance and reliability prediction, but it can be extended to other quality properties such as cost as well.},
  author = {Anne Martens and Franz Brosch and Ralf Reussner},
  booktitle = {Proceedings of the 1st International Workshop on the Quality of Service-Oriented Software Systems (QUASOSS)},
  doi = {10.1145/1596473.1596481},
  editor = {Paul Pettersson and Heiko Koziolek and Petr Hnetynka},
  pages = {25--32},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/martens2009b.pdf},
  publisher = {ACM, New York, NY, USA},
  title = {Optimising multiple quality criteria of service-oriented software architectures},
  year = {2009}
}
@inproceedings{martens2009a,
  abstract = {Formal performance prediction methods, based on queueing network models, allow evaluating software architectural designs for performance. Existing methods provide prediction results such as response times and throughputs, but do not guide the software architect on how to improve the design. We propose a novel approach to optimise the expected performance of component-based software designs by automatically generating and evaluating design alternatives. The design space spanned by different design options (e.g. available components and configuration options) is systematically explored using metaheuristic search techniques and performance-domain heuristics. The gap between applying formal performance predictions and actually improving the design of a system can thus be closed. This paper presents a formal description and a prototypical implementation of our approach with a proof-of-concept case study.},
  author = {Anne Martens and Heiko Koziolek},
  booktitle = {Proceedings of the Sixth International Workshop on Formal Engineering approaches to Software Components and Architectures (FESCA 2009)},
  doi = {10.1016/j.entcs.2009.09.029},
  editor = {Barbora Zimmerova and Jens Happe},
  issn = {1571-0661},
  pages = {77--93},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/martens2009a.pdf},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Automatic, Model-Based Software Performance Improvement for Component-based Software Designs},
  number = {1},
  volume = {253},
  year = {2009}
}
@inproceedings{MG+09,
  address = {Venice, Italy},
  author = {Momm, Christof and Gebhart, Michael and Abeck, Sebastian},
  booktitle = {Fourth International Conference on Internet and Web Applications and Services (ICIW 2009)},
  publisher = {IEEE Computer Society Press},
  title = {A Model-Driven Approach for Monitoring Business Performance in Web Service Compositions},
  year = {2009}
}
@article{NoKoJuTo2008-JSS-GridAutoQoS,
  abstract = {As Grid Computing increasingly enters the commercial domain, performance and Quality of Service (QoS) issues are becoming a major concern. The inherent complexity, heterogeneity and dynamics of Grid computing environments pose some challenges in managing their capacity to ensure that QoS requirements are continuously met. In this paper, a comprehensive framework for autonomic QoS control in enterprise Grid environments using online simulation is proposed. The paper presents a novel methodology for designing autonomic QoS-aware resource managers that have the capability to predict the performance of the Grid components they manage and allocate resources in such a way that service level agreements are honored. Support for advanced features such as autonomic workload characterization on-the-fly, dynamic deployment of Grid servers on demand, as well as dynamic system reconfiguration after a server failure is provided. The goal is to make the Grid middleware self-configurable and adaptable to changes in the system environment and workload. The approach is subjected to an extensive experimental evaluation in the context of a real-world Grid environment and its effectiveness, practicality and performance are demonstrated.},
  address = {Amsterdam, The Netherlands},
  author = {Ramon Nou and Samuel Kounev and Ferran Julia and Jordi Torres},
  doi = {10.1016/j.jss.2008.07.048},
  journal = {Journal of Systems and Software},
  month = {March},
  number = {3},
  pages = {486--502},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/NoKoJuTo2008-JSS-GridAutoQoS.pdf},
  publisher = {Elsevier Science Publishers B. V.},
  title = {{Autonomic QoS control in enterprise Grid environments using online simulation}},
  url = {http://www.sciencedirect.com/science/journal/01641212},
  volume = {82},
  year = {2009}
}
@inproceedings{paech2009a,
  author = {Barbara Paech and Andreas Oberweis and Ralf Reussner},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  booktitle = {Software Engineering (Workshops)},
  editor = {J{\"u}rgen M{\"u}nch and Peter Liggesmeyer},
  ee = {http://subs.emis.de/LNI/Proceedings/Proceedings150/article5319.html},
  isbn = {978-3-88579-244-4},
  pages = {223--228},
  publisher = {GI},
  series = {LNI},
  title = {{Qualit{\"a}t von Gesch{\"a}ftsprozessen und Unternehmenssoftware - Eine Thesensammlung}},
  volume = {150},
  year = {2009}
}
@inproceedings{Parrend2009ares,
  abstract = {Java-based systems are built from components from various providers that are integrated together. Generic coding best practices are gaining momentum, but no tool is available so far that guarantees that the interactions between these components are performed in a secure manner. We propose the 'Weak Component Analysis' (WCA) tool, which performs static analysis of the component code to identify exploitable vulnerabilities. Three types of classes can be identified in Java components, each of which can be exploited through specific vulnerabilities. Internal classes, which are not available to other components, can be abused in an indirect manner. Shared classes, which are provided by libraries, can be abused through class-level vulnerabilities. Shared objects, i.e. instantiated classes, which are made available as local services in Service-oriented Programming platforms such as OSGi, Spring and Guice, can be abused through object-level vulnerabilities in addition to class-level vulnerabilities.},
  address = {Fukuoka, Japan},
  author = {Pierre Parrend},
  booktitle = {Fourth International Conference on Availability, Reliability and Security (AReS 2009)},
  month = {March},
  title = {Enhancing Automated Detection of Vulnerabilities in Java Components},
  year = {2009}
}
@inbook{parrend2009javasec,
  abstract = {The Java environment is composed of two main parts: the Java language and the Java virtual machine. It is designed with the assumption that no software entity is to be trusted and that each therefore needs to be checked. The first success of Java was the inception of Java Applets, which enabled fully untrusted code provided by unknown web sites to be executed in a browser. This feature demonstrated the isolation the Java Virtual Machine (JVM) provides between the applications and the underlying operating system. However, the evolution of Java systems from mono-application to multi-component systems induces new vulnerabilities that developers are not aware of. This requires that additional security mechanisms be used to support secure Java environments. This survey presents an overview of the security issues for the Java language and Virtual Machine. The default security model is defined and explained. Its three main components are the Java language itself, Bytecode validation at load time and modularity support such as class loaders and permission domains. Known vulnerabilities are presented. They originate either in the JVM or in the code of applications. Two approaches exist for describing code vulnerabilities: source code and Bytecode. This duality makes it possible to identify them both during development, through manual code review and tools, and in an automated manner during code deployment or installation. Security extensions for the Java Execution Environment and tools for writing secure Java code are presented. They are of three types: platform extensions, static analysis approaches and behavior injection. Platform extensions consist of strengthening the isolation between components (beans, etc.) and providing support for resource consumption accounting and control. Static analysis is often performed through generic tools that improve the code quality and thus reduce the number of exploitable bugs in the Java code. Some of these tools, such as FindBugs, encompass security-specific bugs, and some, such as JSLint, are dedicated to security analysis. Bytecode injection makes it possible to introduce security checks into the core of the code. It can be performed with the developers involved, for instance through aspect-oriented programming, or transparently, through Bytecode injection or meta-programming. An overview of the existing protection mechanisms for Java systems, according to the life-cycle moment at which they are enforced and the development overhead they imply, concludes this work.},
  address = {New York},
  author = {Pierre Parrend},
  chapter = {Security for Java Platforms},
  keywords = {Java, Security, Survey},
  publisher = {Nova Publishers},
  timestamp = {2008.09.21},
  title = {Java Software},
  url = {http://www.rzo.free.fr/parrend09javasec.php},
  year = {2009}
}
@article{parrend2009hardenedOSGi,
  abstract = {OSGi Platforms are Extensible Component Platforms, i.e., they support the dynamic and transparent installation of components that are provided by third-party providers at runtime. This feature makes systems built using OSGi extensible and adaptable but opens a dangerous attack vector that has not been considered as such until recently. Performing a security benchmark of the OSGi platform is therefore necessary to gather knowledge related to the weaknesses it introduces as well as to propose enhancements. A suitable Vulnerability Pattern is defined. The attacks that can be performed through malicious OSGi components are identified. Quantitative analysis is then performed so as to characterize the origin of the vulnerabilities and the target and consequences of attacks. The assessment of the security status of the various implementations of the OSGi Platform and of existing security mechanisms is done through a metric we introduce, the Protection Rate. Based on these benchmarks, OSGi-specific security enhancements are identified and evaluated. First, recommendations are given. Then, evaluation is performed through the Protection Rate metric and performance analysis. Lastly, further requirements for building secure OSGi Platforms are identified.},
  author = {Pierre Parrend and St\'{e}phane Fr\'{e}not},
  journal = {Software: Practice \& Experience},
  keywords = {Software Security Assurance, Software Vulnerabilities, Security Benchmark, OSGi Component Framework, Component Platform, Dependability},
  timestamp = {2008.09.17},
  title = {Security Benchmarks of OSGi Platforms: Toward Hardened OSGi},
  url = {http://www.rzo.free.fr/parrend08spe.php},
  year = {2009}
}
@inproceedings{rathfelder2009c,
  abstract = {Today, the architectures of software systems are not stable for their whole lifetime but are often adapted, driven by business needs. Preserving their quality characteristics beyond each of these changes requires deep knowledge of the requirements and the systems themselves. Proper documentation reduces the risk that knowledge is lost and hence is a basis for the system's maintenance in the long run. However, the influence of architectural documentation on the maintainability of software systems is neglected in current quality assessment methods. They are limited to documentation for anticipated change scenarios and do not provide a general assessment approach. In this paper, we propose a maturity model for architecture documentation. It is shaped relative to growing quality preservation maturity and independent of specific technologies or products. It supports weighing the necessary effort against the reduction of long-term risks in the maintenance phase. This makes it possible to take product maintainability requirements into account when selecting an appropriate documentation maturity level.},
  address = {Berlin, Germany},
  author = {Rathfelder, Christoph and Groenda, Henning},
  booktitle = {Proceedings of the 3rd Workshop MDD, SOA und IT-Management (MSI 2009)},
  day = {6--7},
  location = {Oldenburg, Germany},
  month = {October},
  pages = {65--80},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/rathfelder2009c.pdf},
  publisher = {GiTO-Verlag},
  title = {{T}he {A}rchitecture {D}ocumentation {M}aturity {M}odel {ADM2}},
  year = {2009}
}
@inproceedings{rathfelder2009,
  abstract = {The event-driven communication paradigm provides a number of advantages for building loosely coupled distributed systems. However, the loose coupling of components in such systems makes it hard for developers to estimate their behavior and performance under load. Most existing performance prediction techniques for systems using event-driven communication require specialized knowledge to build the necessary prediction models. In this paper, we propose an extension of the Palladio Component Model (PCM) that provides natural support for modeling event-based communication and supports different performance prediction techniques.},
  address = {New York, NY, USA},
  author = {Rathfelder, Christoph and Kounev, Samuel},
  booktitle = {Proceedings of the Third ACM International Conference on Distributed Event-Based Systems (DEBS 2009)},
  day = {6--9},
  doi = {10.1145/1619258.1619300},
  isbn = {978-1-60558-665-6},
  location = {Nashville, Tennessee},
  month = {July},
  pages = {33:1--33:2},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rathfelder2009.pdf},
  publisher = {ACM},
  title = {Model-based performance prediction for event-driven systems},
  url = {http://doi.acm.org/10.1145/1619258.1619300},
  year = {2009}
}
@inproceedings{rathfelder2009b,
  abstract = {The use of event-based communication within a Service-Oriented Architecture promises several benefits including more loosely-coupled services and better scalability. However, the loose coupling of services makes it difficult for system developers to estimate the behavior and performance of systems composed of multiple services. Most existing performance prediction techniques for systems using event-based communication require specialized knowledge to build the necessary prediction models. Furthermore, general purpose design-oriented performance models for component-based systems provide limited support for modeling event-based communication. In this paper, we propose an extension of the Palladio Component Model (PCM) that provides natural support for modeling event-based communication. We show how this extension can be exploited to model event-driven service-oriented systems with the aim to evaluate their performance and scalability.},
  address = {New York, NY, USA},
  author = {Christoph Rathfelder and Samuel Kounev},
  booktitle = {Proceedings of the 1st International Workshop on the Quality of Service-Oriented Software Systems (QUASOSS 2009)},
  day = {24--28},
  doi = {10.1145/1596473.1596482},
  location = {Amsterdam, The Netherlands},
  month = {August},
  pages = {33--38},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/quas04g-rathfelder.pdf},
  publisher = {ACM},
  title = {{M}odeling {E}vent-{D}riven {S}ervice-{O}riented {S}ystems using the {P}alladio {C}omponent {M}odel},
  year = {2009}
}
@inproceedings{SaKoApBu2009-DEBS-MOM_Benchmarking,
  abstract = {In this poster, we provide an overview of our past and current research in the area of Message-Oriented Middleware (MOM) performance benchmarks. Our main research motivation is a) to gain a better understanding of the performance of MOM, b) to show how to use benchmarks for the evaluation of performance aspects and c) to establish performance modeling techniques. For a better understanding, we first introduce the Java Message Service (JMS) standard. Afterwards, we provide an overview of our work on MOM benchmark development, i.e., we present the SPECjms2007 benchmark and the new jms2009-PS, a test harness designed specifically for JMS-based pub/sub. We outline a new case study with jms2009-PS and present first results of our work-in-progress.},
  author = {Sachs, Kai and Kounev, Samuel and Appel, Stefan and Buchmann, Alejandro},
  booktitle = {Proceedings of the 3rd ACM International Conference on Distributed Event-Based Systems (DEBS 2009), Nashville, TN, USA, July 6--9, 2009},
  month = {July},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/SaKoApBu2009-DEBS-MOM_Benchmarking.pdf},
  publisher = {ACM, New York, NY, USA},
  title = {{Benchmarking of Message-Oriented Middleware (Poster Paper)}},
  url = {http://www.debs.org/2009},
  year = {2009}
}
@inproceedings{SaKoApBu2009-SIGMETRICS-jms2009_PS,
  abstract = {Publish/subscribe is becoming increasingly popular as communication paradigm for loosely-coupled message exchange. It is used as a building block in major new software architectures and technology domains such as Enterprise Service Bus, Enterprise Application Integration, Service-Oriented Architecture and Event-Driven Architecture. The growing adoption of these technologies leads to a strong need for benchmarks and performance evaluation tools in this area. In this demonstration, we present jms2009-PS, a benchmark for publish/subscribe middleware based on the Java Message Service standard interface.},
  author = {Sachs, Kai and Kounev, Samuel and Appel, Stefan and Buchmann, Alejandro},
  booktitle = {SIGMETRICS/Performance 2009 International Conference, Seattle, WA, USA, June 15--19, 2009},
  month = {June},
  note = {(Demo Paper)},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/SaKoApBu2009-SIGMETRICS-jms2009_PS.pdf},
  title = {{A Performance Test Harness For Publish/Subscribe Middleware}},
  url = {http://www.sigmetrics.org/conferences/sigmetrics/2009/program_sigmetrics-demo.shtml},
  year = {2009}
}
@article{SaKoBaBu2008-PERFEVAL-SPECjms2007,
  abstract = {Message-oriented middleware (MOM) is at the core of a vast number of financial services and telco applications, and is gaining increasing traction in other industries, such as manufacturing, transportation, health-care and supply chain management. Novel messaging applications, however, pose some serious performance and scalability challenges. In this paper, we present a methodology for performance evaluation of MOM platforms using the SPECjms2007 benchmark which is the world's first industry-standard benchmark specialized for MOM. SPECjms2007 is based on a novel application in the supply chain management domain designed to stress MOM infrastructures in a manner representative of real-world applications. In addition to providing a standard workload and metrics for MOM performance, the benchmark provides a flexible performance analysis framework that allows users to tailor the workload to their requirements. The contributions of this paper are: i) we present a detailed workload characterization of SPECjms2007 with the goal to help users understand the internal components of the workload and the way they are scaled, ii) we show how the workload can be customized to exercise and evaluate selected aspects of MOM performance, iii) we present a case study of a leading JMS platform, the BEA WebLogic server, conducting an in-depth performance analysis of the platform under a number of different workload and configuration scenarios. The methodology we propose is the first one that uses an industry-standard benchmark providing both a representative workload as well as the ability to customize it to evaluate the features of MOM platforms selectively.},
  address = {Amsterdam, The Netherlands},
  author = {Kai Sachs and Samuel Kounev and Jean Bacon and Alejandro Buchmann},
  doi = {10.1016/j.peva.2009.01.003},
  journal = {Performance Evaluation},
  month = {August},
  number = {8},
  pages = {410--434},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/08-PerfEval-SPECjms2007.pdf},
  publisher = {Elsevier Science Publishers B. V.},
  title = {{Benchmarking message-oriented middleware using the SPECjms2007 benchmark}},
  url = {http://www.elsevier.com/wps/find/journaldescription.cws_home/505618/description},
  volume = {66},
  year = {2009}
}
@inproceedings{stammel09a,
  abstract = {In their lifetime, software systems usually need to be adapted in order to fit into a changing environment or to cover new required functionality. The effort necessary for implementing changes is related to the maintainability of the software system. Therefore, maintainability is an important quality aspect of software systems. Today, software architecture plays an important role in achieving software quality goals. Therefore, it is useful to evaluate software architectures regarding their impact on the quality of the program. However, unlike for other quality attributes, such as performance or reliability, there is relatively little work that addresses the impact of the software architecture on maintainability in a quantitative manner. In particular, the cost of software evolution stems not only from software-development activities, such as reimplementation, but also from software management activities, such as re-deployment, upgrade installation, etc. Most metrics for software maintainability are based on the code of object-oriented designs, not on architectures, and do not consider costs from software management activities. Likewise, existing architectural maintainability evaluation techniques yield only qualitative (and often subjective) results and concentrate on software (re-)development costs. In this paper, we present KAMP, the Karlsruhe Architectural Maintainability Prediction Method, a quantitative approach to evaluate the maintainability of software architectures. Our approach estimates the costs of change requests for a given architecture and takes into account re-implementation costs as well as re-deployment and upgrade activities. We combine several strengths of existing approaches. First, our method evaluates maintainability for concrete change requests and makes use of explicit architecture models. Second, it estimates change efforts using semi-automatic derivation of work plans, bottom-up effort estimation, and guidance in the investigation of estimation supports (e.g. design and code properties, team organization, development environment, and other influence factors).},
  author = {Stammel, Johannes and Reussner, Ralf},
  booktitle = {Proceedings of the 1. Workshop des GI-Arbeitskreises Langlebige Softwaresysteme (L2S2): ``Design for Future -- Langlebige Softwaresysteme''},
  editor = {Engels, Gregor and Reussner, Ralf and Momm, Christof and Sauer, Stefan},
  pages = {87--98},
  title = {KAMP: Karlsruhe Architectural Maintainability Prediction},
  url = {http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-537/},
  year = {2009}
}
@inproceedings{trifu09a,
  author = {Mircea Trifu},
  booktitle = {Proceedings of the 13th European Conference on Software Maintenance and Reengineering},
  month = {March},
  publisher = {IEEE},
  title = {Improving the Dataflow-Based Concern Identification Approach},
  year = {2009}
}
@mastersthesis{busch2009a,
  address = {Amerikastrasse 1, 66482 Zweibruecken, Germany},
  author = {Axel Busch},
  month = {October},
  school = {Fachhochschule Kaiserslautern, Campus Zweibruecken},
  title = {{Performance Assessment of State-of-the-Art Computing Servers for Scientific Applications}},
  type = {Bachelor's Thesis},
  year = {2009}
}
@techreport{busch2009b,
  author = {Axel Busch and Julien Leduc},
  institution = {CERN},
  month = {October},
  pdf = {http://openlab.web.cern.ch/sites/openlab.web.cern.ch/files/technical_documents/CERN_openlab_report-Eval-of-energy-consumption-and-perf-of-Intel's-Nehalem-achitecture.pdf},
  title = {{Evaluation of energy consumption and performance of Intel's Nehalem architecture}},
  type = {openlab Report},
  year = {2009}
}