@comment{inproceedings_krogmann.bib}

@inproceedings{becker2010a,
  abstract = {Legacy applications are still widely spread. If a need to change deployment or update its functionality arises, it becomes difficult to estimate the performance impact of such modifications due to absence of corresponding models. In this paper, we present an extendable integrated environment based on Eclipse developed in the scope of the Q-ImPrESS project for reverse engineering of legacy applications (in C/C++/Java). The Q-ImPrESS project aims at modeling quality attributes at an architectural level and allows for choosing the most suitable variant for implementation of a desired modification. The main contributions of the project include i) a high integration of all steps of the entire process into a single tool, a beta version of which has been already successfully tested on a case study, ii) integration of multiple research approaches to performance modeling, and iii) an extendable underlying meta-model for different quality dimensions.},
  author = {Steffen Becker and Michael Hauck and Mircea Trifu and Klaus Krogmann and Jan Kofron},
  booktitle = {Proceedings of the 14th European Conference on Software Maintenance and Reengineering, European Projects Track},
  keywords = {Q-ImPrESS},
  pages = {199--202},
  publisher = {IEEE},
  title = {{Reverse Engineering Component Models for Quality Predictions}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/becker2010a.pdf},
  year = {2010}
}
@inproceedings{BrKoKr2009-ROSSA-Extracting_PCM_JavaEE,
  abstract = {Nowadays, software systems have to fulfill increasingly stringent requirements for performance and scalability. To ensure that a system meets its performance requirements during operation, the ability to predict its performance under different configurations and workloads is essential. Most performance analysis tools currently used in industry focus on monitoring the current system state. They provide low-level monitoring data without any performance prediction capabilities. For performance prediction, performance models are normally required. However, building predictive performance models manually requires a lot of time and effort. In this paper, we present a method for automated extraction of performance models of Java EE applications, based on monitoring data collected during operation. We extract instances of the Palladio Component Model (PCM) - a performance meta-model targeted at component-based systems. We evaluate the model extraction method in the context of a case study with a real-world enterprise application. Even though the extraction requires some manual intervention, the case study demonstrates that the existing gap between low-level monitoring data and high-level performance models can be closed.},
  author = {Fabian Brosig and Samuel Kounev and Klaus Krogmann},
  booktitle = {Proceedings of the 1st International Workshop on Run-time mOdels for Self-managing Systems and Applications (ROSSA 2009). In conjunction with the Fourth International Conference on Performance Evaluation Methodologies and Tools (VALUETOOLS 2009)},
  month = oct,
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/BrKoKr2009-ROSSA-Extracting_PCM_JavaEE.pdf},
  publisher = {ACM, New York, NY, USA},
  title = {{Automated Extraction of Palladio Component Models from Running Enterprise Java Applications}},
  year = {2009},
  isbn = {978-963-9799-70-7},
  location = {Pisa, Italy},
  pages = {10:1--10:10},
  articleno = {10}
}
@inproceedings{chouambe2008a,
  abstract = {An increasing number of software systems is developed using component technologies such as COM, CORBA, or EJB. Still, there is a lack of support to reverse engineer such systems. Existing approaches claim reverse engineering of components, but do not support composite components. Also, external dependencies such as required interfaces are not made explicit. Furthermore, relaxed component definitions are used, and obtained components are thus indistinguishable from modules or classes. We present an iterative reverse engineering approach that follows the widely used definition of components by Szyperski. It enables third-party reuse of components by explicitly stating their interfaces and supports composition of components. Additionally, components that are reverse engineered with the approach allow reasoning on properties of software architectures at the model level. For the approach, source code metrics are combined to recognize components. We discuss the selection of source code metrics and their interdependencies, which were explicitly taken into account. An implementation of the approach was successfully validated within four case studies. Additionally, a fifth case study shows the scalability of the approach for an industrial-size system.},
  address = {Athens, Greece},
  author = {Chouambe, Landry and Klatt, Benjamin and Krogmann, Klaus},
  booktitle = {12th European Conference on Software Maintenance and Reengineering},
  editor = {Kontogiannis, Kostas and Tjortjis, Christos and Winter, Andreas},
  isbn = {978-1-4244-2157-2},
  keywords = {ArchiRec},
  month = apr,
  day = {1--4},
  pages = {93--102},
  publisher = {IEEE Computer Society},
  title = {{R}everse {E}ngineering {S}oftware-{M}odels of {C}omponent-{B}ased {S}ystems},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/chouambe2008a.pdf},
  year = {2008}
}
@inproceedings{durdik2012c,
  address = {Trento, Italy},
  author = {Zoya Durdik and Benjamin Klatt and Heiko Koziolek and Klaus Krogmann and Johannes Stammel and Roland Weiss},
  booktitle = {Proceedings of the 28th IEEE International Conference on Software Maintenance (ICSM)},
  title = {Sustainability Guidelines for Long-Living Software Systems},
  url = {http://selab.fbk.eu/icsm2012/},
  year = {2012}
}
@inproceedings{eichinger2010a,
  abstract = {Defect localisation is essential in software engineering and is an important task in domain-specific data mining. Existing techniques building on call-graph mining can localise different kinds of defects. However, these techniques focus on defects that affect the control flow and are agnostic regarding the data flow. In this paper, we introduce data flow enabled call graphs that incorporate abstractions of the data flow. Building on these graphs, we present an approach for defect localisation. The creation of the graphs and the defect localisation are essentially data mining problems, making use of discretisation, frequent subgraph mining and feature selection. We demonstrate the defect-localisation qualities of our approach with a study on defects introduced into Weka. As a result, defect localisation now works much better, and a developer has to investigate on average only 1.5 out of 30 methods to fix a defect.},
  address = {Barcelona, Spain},
  author = {Frank Eichinger and Klaus Krogmann and Roland Klug and Klemens B{\"o}hm},
  booktitle = {Proceedings of the 10th European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML PKDD)},
  title = {{Software-Defect Localisation by Mining Dataflow-Enabled Call Graphs}},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000019636},
  year = {2010}
}
@inproceedings{hauck2009b,
  abstract = {Software architects often use model-based techniques to analyse performance (e.g. response times), reliability and other extra-functional properties of software systems. These techniques operate on models of software architecture and execution environment, and are applied at design time for early evaluation of design alternatives, especially to avoid implementing systems with insufficient quality. Virtualisation (such as operating system hypervisors or virtual machines) and multiple layers in execution environments (e.g. RAID disk array controllers on top of hard disks) are becoming increasingly popular in reality and need to be reflected in the models of execution environments. However, current component meta-models do not support virtualisation and cannot model individual layers of execution environments. This means that the entire monolithic model must be recreated when different implementations of a layer must be compared to make a design decision, e.g. when comparing different Java Virtual Machines. In this paper, we present an extension of an established model-based performance prediction approach and associated tools which allow to model and predict state-of-the-art layered execution environments, such as disk arrays, virtual machines, and application servers. The evaluation of the presented approach shows its applicability and the resulting accuracy of the performance prediction while respecting the structure of the modelled resource environment.},
  author = {Michael Hauck and Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 12th International Symposium on Component Based Software Engineering (CBSE 2009)}},
  doi = {10.1007/978-3-642-02414-6_12},
  ee = {http://dx.doi.org/10.1007/978-3-642-02414-6_12},
  isbn = {978-3-642-02413-9},
  volume = {5582},
  pages = {191--208},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/hauck2009b.pdf},
  publisher = {Springer},
  series = {LNCS},
  title = {{Modelling Layered Component Execution Environments for Performance Prediction}},
  url = {http://www.comparch-events.org/pages/present.html},
  year = {2009}
}
@inproceedings{kappler2008a,
  abstract = {Performance predictions for software architectures can reveal performance bottlenecks and quantitatively support design decisions for different architectural alternatives. As software architects aim at reusing existing software components, their performance properties should be included into performance predictions without the need for manual modelling. However, most prediction approaches do not include automated support for modelling implemented components. Therefore, we propose a new reverse engineering approach, which generates Palladio performance models from Java code. In this paper, we focus on the static analysis of Java code, which we have implemented as an Eclipse plugin called Java2PCM. We evaluated our approach on a larger component-based software architecture, and show that a similar prediction accuracy can be achieved with generated models compared to completely manually specified ones.},
  address = {Munich, Germany},
  author = {Kappler, Thomas and Koziolek, Heiko and Krogmann, Klaus and Reussner, Ralf H.},
  booktitle = {Software Engineering 2008},
  keywords = {Java2PCM},
  month = feb,
  day = {18--22},
  pages = {140--154},
  publisher = {Bonner K{\"o}llen Verlag},
  series = {Lecture Notes in Informatics},
  title = {{T}owards {A}utomatic {C}onstruction of {R}eusable {P}rediction {M}odels for {C}omponent-{B}ased {P}erformance {E}ngineering},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kappler2008a.pdf},
  volume = {121},
  year = {2008}
}
@inproceedings{klatt2012a,
  abstract = {Integrating 3rd party components in software systems provides promising advantages but also risks due to disconnected evolution cycles. Deciding whether to migrate to a newer version of a 3rd party component integrated into self-implemented code or to switch to a different one is challenging. Dedicated evolution support for 3rd party component scenarios is hence required. Existing approaches do not account for open source components which allow accessing and analyzing their source code and project information. The approach presented in this paper combines analyses for code dependency, code quality, and bug tracker information for a holistic view on the evolution with 3rd party components. We applied the approach in a case study on a communication middleware component for industrial devices used at ABB. We identified 7 methods potentially impacted by changes of 3rd party components despite the absence of interface changes. We further identified self-implemented code that does not need any manual investigation after the 3rd party component evolution as well as a positive trend of code and bug tracker issues.},
  address = {Szeged, Hungary},
  author = {Benjamin Klatt and Zoya Durdik and Klaus Krogmann and Heiko Koziolek and Johannes Stammel and Roland Weiss},
  booktitle = {Proceedings of the 16th Conference on Software Maintenance and Reengineering (CSMR'12)},
  doi = {10.1109/CSMR.2012.59},
  issn = {1534-5351},
  keywords = {Benchmark testing;Computer bugs;Databases;Manuals;Reliability;Software systems;middleware;program debugging;bug tracker information;code dependency;code quality;communication middleware component;disconnected evolution cycles;industrial devices;long-living software systems;open source components;project information;self-implemented code;third party components;},
  month = mar,
  pages = {461--464},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2012a.pdf},
  title = {{Identify Impacts of Evolving Third Party Components on Long-Living Software Systems}},
  year = {2012}
}
@inproceedings{klatt2013a,
  address = {Genoa, Italy},
  author = {Klatt, Benjamin and K{\"u}ster, Martin and Krogmann, Klaus},
  booktitle = {Proceedings of the 1st International workshop on Reverse Variability Engineering (REVE'13)},
  month = mar,
  pages = {1--8},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2013a.pdf},
  title = {{A Graph-Based Analysis Concept to Derive a Variation Point Design from Product Copies}},
  year = {2013}
}
@inproceedings{klatt2013b,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Martin K{\"u}ster and Klaus Krogmann and Oliver Burkhardt},
  booktitle = {15th Workshop Software-Reengineering (WSR'13)},
  month = may,
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2013b.pdf},
  title = {{A Change Impact Analysis Case Study: Replacing the Input Data Model of SoMoX}},
  year = {2013}
}
@inproceedings{klatt2014a,
  address = {Kiel, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Michael Langhammer},
  booktitle = {{Proceedings of Software Engineering 2014 (SE2014)}},
  editor = {Wilhelm Hasselbring and Nils Christian Ehmke},
  isbn = {978-3-88579-621-3},
  issn = {1617-5468},
  month = jan,
  pages = {165--170},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014a.pdf},
  series = {Lecture Notes in Informatics (LNI)},
  title = {{Individual Code-Analyzes in Practice}},
  volume = {P-227},
  year = {2014}
}
@inproceedings{klatt2014b,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Christian Wende},
  booktitle = {16th Workshop Software-Reengineering (WSRE'14)},
  month = apr,
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014b.pdf},
  title = {{Consolidating Customized Product Copies to Software Product Lines}},
  year = {2014}
}
@inproceedings{klatt2014c,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Volker Kuttruff},
  booktitle = {16th Workshop Software-Reengineering (WSRE'14)},
  month = apr,
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014c.pdf},
  title = {{Developing Stop Word Lists for Natural Language Program Analysis}},
  year = {2014}
}
@inproceedings{klatt2014d,
  abstract = {To cope with project constraints, copying and customizing existing software products is a typical practice to flexibly serve customer-specific needs. In the long term, this practice becomes a limitation for growth due to redundant maintenance efforts or wasted synergy and cross selling potentials. To mitigate this limitation, customized copies need to be consolidated into a single, variable code base of a software product line (SPL). However, consolidation is tedious as one must identify and correlate differences between the copies to design future variability. For one, existing consolidation approaches lack support of the implementation level. In addition, approaches in the fields of difference analysis and feature detection are not sufficiently integrated for finding relationships between code modifications. In this paper, we present remedy to this problem by integrating a difference analysis with a program dependency analysis based on Program Dependency Graphs (PDG) to reduce the effort of consolidating developers when identifying dependent differences and deriving clusters to consider in their variability design. We successfully evaluated our approach on variants of the open source ArgoUML modeling tool, reducing the manual review effort about 72\% with a precision of 99\% and a recall of 80\%. We further proved its industrial applicability in a case study on a commercial relationship management application.},
  address = {Victoria, Canada},
  author = {Benjamin Klatt and Klaus Krogmann and Christoph Seidl},
  booktitle = {IEEE 30th International Conference on Software Maintenance and Evolution (ICSME'14)},
  doi = {10.1109/ICSME.2014.81},
  issn = {1063-6773},
  keywords = {Unified Modeling Language;customer relationship management;feature extraction;product customisation;program diagnostics;public domain software;software product lines;PDG;code modifications;commercial relationship management application;cross selling potentials;customer-specific needs;customized product copies;difference analysis;feature detection;industrial applicability;open source ArgoUML modeling tool;program dependency analysis;program dependency graphs;project constraints;redundant maintenance efforts;software product line;wasted synergy;Algorithm design and analysis;Context;Merging;Object oriented modeling;Software;Software algorithms;Unified modeling language;program dependencies;reengineering;software engineering;software product lines;variability},
  month = sep,
  pages = {496--500},
  title = {{Program Dependency Analysis for Consolidating Customized Product Copies}},
  year = {2014}
}
@inproceedings{klatt2008a,
  abstract = {Industrial software projects not only have to deal with the number of features in the software system. Also issues like quality, flexibility, reusability, extensibility, developer and user acceptance are key factors in these days. An architecture paradigm targeting those issues are extension mechanisms which are for example used by component frameworks. The main contribution of this paper is to identify software extension mechanism characteristics derived from state-of-the-art software frameworks. These identified characteristics will benefit developers with selecting and creating extension mechanisms.},
  author = {Benjamin Klatt and Klaus Krogmann},
  booktitle = {Proceedings of the Thirteenth International Workshop on Component-Oriented Programming (WCOP'08), Karlsruhe, Germany},
  editor = {Ralf Reussner and Clemens Szyperski and Wolfgang Weck},
  issn = {1432-7864},
  number = {2008-12},
  pages = {11--18},
  series = {Interner Bereich Universit{\"a}t Karlsruhe (TH)},
  title = {{Software Extension Mechanisms}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/klatt2008a.pdf},
  year = {2008}
}
@inproceedings{KlKr2011-WSR-SPLToolSupport,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Klaus Krogmann},
  booktitle = {13th Workshop Software-Reengineering (WSR 2011)},
  month = may,
  day = {2--4},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/KlKr2011-WSR-SPLToolSupport.pdf},
  title = {{Towards Tool-Support for Evolutionary Software Product Line Development}},
  year = {2011}
}
@inproceedings{koziolek2011a,
  abstract = {Systematic decision support for architectural design decisions is a major concern for software architects of evolving service-oriented systems. In practice, architects often analyse the expected performance and reliability of design alternatives based on prototypes or former experience. Modeldriven prediction methods claim to uncover the tradeoffs between different alternatives quantitatively while being more cost-effective and less error-prone. However, they often suffer from weak tool support and focus on single quality attributes. Furthermore, there is limited evidence on their effectiveness based on documented industrial case studies. Thus, we have applied a novel, model-driven prediction method called Q-ImPrESS on a large-scale process control system consisting of several million lines of code from the automation domain to evaluate its evolution scenarios. This paper reports our experiences with the method and lessons learned. Benefits of Q-ImPrESS are the good architectural decision support and comprehensive tool framework, while one drawback is the time-consuming data collection.},
  acmid = {1985902},
  author = {Heiko Koziolek and Bastian Schlich and Carlos Bilich and Roland Weiss and Steffen Becker and Klaus Krogmann and Mircea Trifu and Raffaela Mirandola and Anne Koziolek},
  booktitle = {Proceedings of the 33rd International Conference on Software Engineering (ICSE 2011), Software Engineering in Practice Track},
  doi = {10.1145/1985793.1985902},
  editor = {Richard N. Taylor and Harald Gall and Nenad Medvidovic},
  isbn = {978-1-4503-0445-0},
  keywords = {case study, dtmc, industrial software, lqn, palladio, performance prediction, reliability prediction, reverse engineering, service-oriented software, trade-off analysis},
  location = {Waikiki, Honolulu, HI, USA},
  note = {Acceptance Rate: 18\% (18/100)},
  numpages = {10},
  pages = {776--785},
  publisher = {ACM, New York, NY, USA},
  title = {An Industrial Case Study on Quality Impact Prediction for Evolving Service-Oriented Software},
  url = {http://doi.acm.org/10.1145/1985793.1985902},
  year = {2011},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/koziolek2011a.pdf}
}
@inproceedings{koziolek2011c,
  abstract = {Long-living software systems are sustainable if they can be cost-effectively maintained and evolved over their complete life-cycle. Software-intensive systems in the industrial automation domain are typically long-living and cause high evolution costs, because of new customer requirements, technology changes, and failure reports. Many methods for sustainable software development have been proposed in the scientific literature, but most of them are not applied in industrial practice. We identified typical evolution scenarios in the industrial automation domain and conducted an extensive literature search to extract a number of guidelines for sustainable software development based on the methods found in literature. For validation purposes, we map one evolution scenario to these guidelines in this paper.},
  author = {Heiko Koziolek and Roland Weiss and Zoya Durdik and Johannes Stammel and Klaus Krogmann},
  booktitle = {{Proceedings of Software Engineering (Workshops), 3rd Workshop of GI Working Group Long-living Software Systems (L2S2), Design for Future}},
  isbn = {978-3-88579-278-9},
  pages = {47--58},
  pdf = {http://www.koziolek.de/docs/Koziolek2011-DFF-preprint.pdf},
  publisher = {GI},
  series = {LNI},
  title = {{Towards Software Sustainability Guidelines for Long-living Industrial Systems}},
  volume = {184},
  year = {2011}
}
@inproceedings{krogmann2007d,
  abstract = {Die Verwendung von Komponenten ist ein anerkanntes Prinzip in der Software-Entwicklung. Dabei werden Software-Komponenten zumeist als \textit{Black-Boxes} aufgefasst , deren Interna vor einem Komponenten-Verwender verborgen sind. Zahlreiche Architektur-Analyse-Verfahren, insbesondere solche zur Vorhersage von nicht-funktionalen Eigenschaften, ben{\"o}tigen jedoch Informationen {\"u}ber Interna (bspw. die Anzahl abgearbeiteter Schleifen oder Aufrufe externer Dienste), die von den vielen Komponentenmodellen nicht angeboten werden. F{\"u}r Forscher, die aktuell mit der Analyse nicht-funktionaler Eigenschaften von komponentenbasierten Software-Architekturen besch{\"a}ftigt sind, stellt sich die Frage, wie sie an dieses Wissen {\"u}ber Komponenten-Interna gelangen. Dabei m{\"u}ssen existierende Software-Komponenten analysiert werden, um die ben{\"o}tigten Informationen {\"u}ber das Innere der Komponenten derart zu rekonstruieren, dass sie f{\"u}r anschlie"sende Analyse-Verfahren nicht-funktionaler Eigenschaften genutzt werden k{\"o}nnen. Bestehende Verfahren konzentrieren sich auf die Erkennung von Komponenten oder bspw. das Reengineering von Sequenzdiagrammen gegebener Komponenten, fokussieren aber nicht auf die Informationen, die von Vorhersageverfahren f{\"u}r nicht-funktionale Eigenschaften ben{\"o}tigt werden. Der Beitrag dieses Papiers ist eine genaue Betrachtung der Informationen, die das Reengineering von Komponenten-Interna liefern muss, um f{\"u}r die Vorhersage der nicht-funktionalen Eigenschaft Performanz (im Sinne von Antwortzeit) nutzbringend zu sein. Dazu wird das Palladio Komponentenmodell \cite{Becker} vorgestellt, das genau f{\"u}r diese Informationen vorbereitet ist. Schlie{\ss}lich wird ein Reengineering-Ansatz vorgestellt, der dazu geeignet ist, die ben{\"o}tigten Informationen zu gewinnen.},
  address = {Bad Honnef},
  author = {Krogmann, Klaus},
  booktitle = {WSR2007},
  editor = {Gimnich, Rainer and Winter, Andreas},
  month = may,
  number = {1/2007},
  series = {Mainzer Informatik-Berichte},
  title = {{R}eengineering von {S}oftware-{K}omponenten zur {V}orhersage von {D}ienstg{\"u}te-{E}igenschaften},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2007d.pdf},
  year = {2007}
}
@inproceedings{krogmann2007c,
  abstract = {In this paper, we propose to relate model-based adaptation approaches with the Windows Workflow Foundation (WF) implementation platform, through a simple case study. We successively introduce a client/server system with mismatching components implemented in WF, our formal approach to work mismatch cases out, and the resulting WF adaptor. We end with some conclusions and a list of open issues.},
  address = {Karlsruhe, Germany},
  author = {Krogmann, Klaus},
  booktitle = {Proceedings of the 12th International Workshop on Component Oriented Programming (WCOP 2007)},
  editor = {Reussner, Ralf H. and Szyperski, Clemens and Weck, Wolfgang},
  issn = {1432-7864},
  month = jul,
  day = {31},
  pages = {23--29},
  publisher = {Universit{\"a}t Karlsruhe (TH)},
  series = {Interne Berichte},
  title = {{R}eengineering of {S}oftware {C}omponent {M}odels to {E}nable {A}rchitectural {Q}uality of {S}ervice {P}redictions},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000007172},
  volume = {2007-13},
  year = {2007}
}
@inproceedings{krogmann2007b,
  abstract = {The actual benefits of model-driven approaches compared to code-centric development have not been systematically investigated. This paper presents a case study in which functional identical software was once developed in a code-centric, conventional style and once using Eclipse-based model-driven development tools. In our specific case, the model-driven approach could be carried in 11% of the time of the conventional approach, while simultaneously improving code quality.},
  author = {Krogmann, Klaus and Becker, Steffen},
  booktitle = {Software Engineering 2007 - Beitr{\"a}ge zu den Workshops},
  editor = {Bleek, Wolf-Gideon and Schwentner, Henning and Z{\"u}llighoven, Heinz},
  month = mar,
  day = {27},
  pages = {169--176},
  publisher = {Series of the Gesellschaft f{\"u}r Informatik (GI)},
  series = {Lecture Notes in Informatics},
  title = {{A} {C}ase {S}tudy on {M}odel-{D}riven and {C}onventional {S}oftware {D}evelopment: {T}he {P}alladio {E}ditor},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2007b.pdf},
  volume = {106},
  year = {2007}
}
@inproceedings{krogmann2008b,
  abstract = {Integrating heterogeneous software systems becomes increasingly important. It requires combining existing components to form new applications. Such new applications are required to satisfy non-functional properties, such as performance. Design-time performance prediction of new applications built from existing components helps to compare design decisions before actually implementing them to the full, avoiding costly prototype and glue code creation. But design-time performance prediction requires understanding and modeling of data flow and control flow accross component boundaries, which is not given for most black-box components. If, for example one component processes and forwards files to other components, this effect should be an explicit model parameter to correctly capture its performance impact. This impact should also be parameterised over data, but no reverse engineering approach exists to recover such dependencies. In this paper, we present an approach that allows reverse engineering of such behavioural models, which is applicable for blackbox components. By runtime monitoring and application of genetic programming, we recover functional dependencies in code, which then are expressed as parameterisation in the output model. We successfully validated our approach in a case study on a file sharing application, showing that all dependencies could correctly be reverse engineered from black-box components.},
  address = {Oldenburg},
  author = {Klaus Krogmann and Michael Kuperberg and Ralf Reussner},
  booktitle = {MDD, SOA und IT-Management (MSI 2008)},
  editor = {Steffens, Ulrike and Addicks, Jan Stefan and Streekmann, Niels},
  month = sep,
  pages = {57--71},
  publisher = {GITO Verlag},
  title = {{Reverse Engineering of Parametric Behavioural Service Performance Models from Black-Box Components}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2008b.pdf},
  year = {2008}
}
@inproceedings{krogmann2009a,
  abstract = {Software performance engineering provides techniques to analyze and predict the performance (e.g., response time or resource utilization) of software systems to avoid implementations with insufficient performance. These techniques operate on models of software, often at an architectural level, to enable early, design-time predictions for evaluating design alternatives. Current software performance engineering approaches allow the prediction of performance at design time, but often provide cryptic results (e.g., lengths of queues). These prediction results can be hardly mapped back to the software architecture by humans, making it hard to derive the right design decisions. In this paper, we integrate software cartography (a map technique) with software performance engineering to overcome the limited interpretability of raw performance prediction results. Our approach is based on model transformations and a general software visualization approach. It provides an intuitive mapping of prediction results to the software architecture which simplifies design decisions. We successfully evaluated our approach in a quasi experiment involving 41 participants by comparing the correctness of performance-improving design decisions and participants' time effort using our novel approach to an existing software performance visualization.},
  author = {Klaus Krogmann and Christian M. Schweda and Sabine Buckl and Michael Kuperberg and Anne Martens and Florian Matthes},
  booktitle = {{Architectures for Adaptive Systems (Proceedings of QoSA 2009)}},
  doi = {10.1007/978-3-642-02351-4_4},
  editor = {Raffaela Mirandola and Ian Gorton and Christine Hofmeister},
  note = {Best Paper Award},
  pages = {52--69},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {{Improved Feedback for Architectural Performance Prediction using Software Cartography Visualizations}},
  url = {http://www.springerlink.com/content/m0325512hl4857v1},
  volume = {5581},
  year = {2009},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/krogmann2009a.pdf}
}
@inproceedings{kuperberg2008c,
  abstract = {In component-based software engineering, the response time of an entire application is often predicted from the execution durations of individual component services. However, these execution durations are specific for an execution platform (i.e. its resources such as CPU) and for a usage profile. Reusing an existing component on different execution platforms up to now required repeated measurements of the concerned components for each relevant combination of execution platform and usage profile, leading to high effort. This paper presents a novel integrated approach that overcomes these limitations by reconstructing behaviour models with platform-independent resource demands of bytecode components. The reconstructed models are parameterised over input parameter values. Using platform-specific results of bytecode benchmarking, our approach is able to translate the platform-independent resource demands into predictions for execution durations on a certain platform. We validate our approach by predicting the performance of a file sharing application.},
  author = {Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {Proceedings of the 11th International Symposium on Component Based Software Engineering (CBSE 2008), Karlsruhe, Germany, 14th-17th October 2008},
  month = oct,
  pages = {48--63},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{P}erformance {P}rediction for {B}lack-{B}ox {C}omponents using {R}eengineered {P}arametric {B}ehaviour {M}odels},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008c.pdf},
  volume = {5282},
  year = {2008}
}
@inproceedings{rathfelder2012a,
  author = {Christoph Rathfelder and Stefan Becker and Klaus Krogmann and Ralf Reussner},
  booktitle = {Proceedings of the Joint 10th Working IEEE/IFIP Conference on Software Architecture (WICSA) \& 6th European Conference on Software Architecture (ECSA)},
  doi = {10.1109/WICSA-ECSA.212.11},
  location = {Helsinki, Finland},
  month = aug,
  note = {Acceptance Rate (Full Paper): 19.8\%},
  pages = {31--40},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rathfelder2012a.pdf},
  title = {Workload-aware System Monitoring Using Performance Predictions Applied to a Large-scale E-Mail System},
  url = {http://dx.doi.org/10.1109/WICSA-ECSA.212.11},
  year = {2012},
  publisher = {IEEE Computer Society},
  address = {Washington, DC, USA}
}
@inproceedings{ostberg2014a,
  address = {Singapore},
  author = {P-O {\"O}stberg and Henning Groenda and Stefan Wesner and James Byrne and Dimitrios~S. Nikolopoulos and Craig Sheridan and Jakub Krzywda and Ahmed Ali-Eldin and Johan Tordsson and Erik Elmroth and Christian Stier and Klaus Krogmann and J{\"o}rg Domaschka and Christopher Hauser and PJ Byrne and Sergej Svorobej and Barry McCollum and Zafeirios Papazachos and Loke Johannessen and Stephan R{\"u}th and Dragana Paurevic},
  booktitle = {Proceedings of the Sixth IEEE International Conference on Cloud Computing Technology and Science (CloudCom)},
  doi = {10.1109/CloudCom.2014.62},
  month = dec,
  pages = {26--31},
  publisher = {IEEE Computer Society},
  title = {{The CACTOS Vision of Context-Aware Cloud Topology Optimization and Simulation}},
  year = {2014}
}
@inproceedings{langhammer2015a,
  author = {Langhammer, Michael and Krogmann, Klaus},
  booktitle = {17. Workshop Software-Reengineering und Evolution},
  title = {A Co-evolution Approach for Source Code and Component-based Architecture Models},
  url = {http://fg-sre.gi.de/fileadmin/gliederungen/fg-sre/wsre2015/WSRE2015-Proceeedings-preliminary.pdf#page=40},
  volume = {4},
  year = {2015}
}