@comment{article_reussner.bib}

@article{becker2008a,
  abstract = {One aim of component-based software engineering (CBSE) is to enable the prediction of extra-functional properties, such as performance and reliability, utilising a well-defined composition theory. Nowadays, such theories and their accompanying prediction methods are still in a maturation stage. Several factors influencing extra-functional properties need additional research to be understood. A special problem in CBSE stems from its specific development process: Software components should be specified and implemented independently from their later context to enable reuse. Thus, extra-functional properties of components need to be specified in a parametric way to take different influencing factors like the hardware platform or the usage profile into account. Our approach uses the Palladio component model (PCM) to specify component-based software architectures in a parametric way. This model offers direct support of the CBSE development process by dividing the model creation among the developer roles. This paper presents our model and a simulation tool based on it, which is capable of making performance predictions. Within a case study, we show that the resulting prediction accuracy is sufficient to support the evaluation of architectural design decisions.},
  author = {Steffen Becker and Heiko Koziolek and Ralf Reussner},
  doi = {10.1016/j.jss.2008.03.066},
  journal = {Journal of Systems and Software},
  number = {1},
  pages = {3--22},
  publisher = {Elsevier Science Inc.},
  title = {The {Palladio} component model for model-driven performance prediction},
  volume = {82},
  year = {2009}
}
@article{becker2006d,
  abstract = {Component adapters are used to bridge interoperability problems between the required interface of a component and the provided interface of another component. As bridging functional mismatches is frequently required, the use of adapters is unavoidable. In these cases an impact on the Quality of Service resulting from the adaptation is often undesired. Nevertheless, some adapters are deployed to change the Quality of Service on purpose when the interoperability problem results from mismatching Quality of Service. This emphasises the need of adequate prediction models for the impact of component adaptation on the Quality of Service characteristics. We present research on the impact of adaptation on the Quality of Service and focus on unresolved issues hindering effective predictions nowadays.},
  author = {Steffen Becker and Ralf Reussner},
  journal = {L'objet},
  number = {1},
  pages = {105--125},
  publisher = {RSTI},
  title = {The Impact of Software Component Adaptation on {Quality of Service} Properties},
  volume = {12},
  year = {2006}
}
@article{brosch2011b,
  abstract = {With the increasing importance of reliability in business and industrial software systems, new techniques of architecture-based reliability engineering are becoming an integral part of the development process. These techniques can assist system architects in evaluating the reliability impact of their design decisions. Architecture-based reliability engineering is only effective if the involved reliability models reflect the interaction and usage of software components and their deployment to potentially unreliable hardware. However, existing approaches either neglect individual impact factors on reliability or hard-code them into formal models, which limits their applicability in component-based development processes. This paper introduces a reliability modelling and prediction technique that considers the relevant architectural factors of software systems by explicitly modelling the system usage profile and execution environment and automatically deriving component usage profiles. The technique offers a UML-like modelling notation, whose models are automatically transformed into a formal analytical model. Our work builds upon the Palladio Component Model, employing novel techniques of information propagation and reliability assessment. We validate our technique with sensitivity analyses and simulation in two case studies. The case studies demonstrate effective support of usage profile analysis and architectural configuration ranking, together with the employment of reliability-improving architecture tactics.},
  author = {Franz Brosch and Heiko Koziolek and Barbora Buhnova and Ralf Reussner},
  doi = {10.1109/TSE.2011.94},
  journal = {IEEE Transactions on Software Engineering},
  publisher = {IEEE Computer Society},
  title = {Architecture-based Reliability Prediction with the {Palladio Component Model}},
  volume = {38},
  number = {6},
  pages = {1319--1339},
  year = {2012},
  issn = {0098-5589},
  month = nov,
  keywords = {Unified Modeling Language;object-oriented programming;software architecture;software reliability;UML like modeling notation;architectural configuration ranking;architecture based reliability engineering;architecture based reliability prediction;architecture tactics;assist system architects;component based development process;component usage profiles;execution environment;formal analytical model;industrial software system;information propagation;palladio component model;reliability assessment;reliability impact;reliability modeling;sensitivity analysis;software component;system usage profile;usage profile analysis;Design methodology;Markov processes;Phase change materials;Software architecture;Software quality;Software reliability;Unified modeling language;Software architectures;design tools and techniques;quality analysis and evaluation;reliability}
}
@article{broy2010a,
  address = {Los Alamitos, CA, USA},
  author = {Manfred Broy and Ralf Reussner},
  doi = {10.1109/MC.2010.277},
  issn = {0018-9162},
  journal = {Computer},
  pages = {88--91},
  publisher = {IEEE Computer Society},
  title = {Architectural Concepts in Programming Languages},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/broy2010a.pdf},
  volume = {43},
  year = {2010}
}
@article{combemale2020a,
  title = {A {Hitchhiker's} Guide to Model-Driven Engineering for Data-Centric Systems},
  author = {Combemale, Benoit and Kienzle, J{\"o}rg and Mussbacher, Gunter and Ali, Hyacinth and Amyot, Daniel and Bagherzadeh, Mojtaba and Batot, Edouard and Bencomo, Nelly and Benni, Benjamin and Bruel, Jean-Michel and Cabot, Jordi and Cheng, Betty H. C. and Collet, Philippe and Engels, Gregor and Heinrich, Robert and J{\'e}z{\'e}quel, Jean-Marc and Koziolek, Anne and Mosser, S{\'e}bastien and Reussner, Ralf and Sahraoui, Houari and Saini, Rijul and Sallou, June and Stinckwich, Serge and Syriani, Eugene and Wimmer, Manuel},
  url = {https://hal.inria.fr/hal-02612087},
  journal = {IEEE Software},
  publisher = {Institute of Electrical and Electronics Engineers},
  year = {2020},
  pdf = {https://hal.inria.fr/hal-02612087/file/ieeesw-moda-preprint.pdf},
  hal_id = {hal-02612087},
  hal_version = {v1},
  doi = {10.1109/MS.2020.2995125},
  tags = {peer-reviewed}
}
@article{engels2009a,
  author = {Engels, Gregor and Goedicke, Michael and Goltz, Ursula and Rausch, Andreas and Reussner, Ralf},
  journal = {Informatik-Spektrum},
  month = oct,
  number = {5},
  pages = {393--397},
  timestamp = {2009.09.28},
  title = {Design for Future - Legacy-Probleme von morgen vermeidbar?},
  volume = {32},
  year = {2009}
}
@article{happe2009a,
  abstract = {Performance prediction methods can help software architects to identify potential performance problems, such as bottlenecks, in their software systems during the design phase. In such early stages of the software life-cycle, only a little information is available about the system's implementation and execution environment. However, these details are crucial for accurate performance predictions. Performance completions close the gap between available high-level models and required low-level details. Using model-driven technologies, transformations can include details of the implementation and execution environment into abstract performance models. However, existing approaches do not consider the relation of actual implementations and performance models used for prediction. Furthermore, they neglect the broad variety of possible implementations and middleware platforms, possible configurations, and possible usage scenarios. In this paper, we (i) establish a formal relation between generated performance models and generated code, (ii) introduce a design and application process for parametric performance completions, and (iii) develop a parametric performance completion for Message-oriented Middleware according to our method. Parametric performance completions are independent of a specific platform, reflect performance-relevant software configurations, and capture the influence of different usage scenarios. To evaluate the prediction accuracy of the completion for Message-oriented Middleware, we conducted a real-world case study with the SPECjms2007 Benchmark [http://www.spec.org/jms2007/]. The observed deviation of measurements and predictions was below 10\% to 15\%},
  author = {Jens Happe and Steffen Becker and Christoph Rathfelder and Holger Friedrich and Ralf H. Reussner},
  doi = {10.1016/j.peva.2009.07.006},
  journal = {Performance Evaluation},
  month = aug,
  number = {8},
  pages = {694--716},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/happe2009a.pdf},
  publisher = {Elsevier},
  title = {Parametric Performance Completions for Model-Driven Performance Prediction},
  volume = {67},
  year = {2010}
}
@article{happe2011a,
  author = {Happe, Jens and Koziolek, Heiko and Reussner, Ralf},
  doi = {10.1109/MS.2011.25},
  issn = {0740-7459},
  journal = {IEEE Software},
  keywords = {MediaStore system;component-based software engineering;compositional reasoning;software components;software performance;software engineering;},
  month = jun,
  number = {3},
  pages = {27--33},
  title = {Facilitating Performance Predictions Using Software Components},
  volume = {28},
  year = {2011}
}
@article{happe_lucia2013a,
  author = {Happe, Lucia and Buhnova, Barbora and Reussner, Ralf},
  doi = {10.1007/s10270-013-0336-6},
  issn = {1619-1366},
  journal = {Software \& Systems Modeling},
  keywords = {Stateful components; Performance prediction; Prediction accuracy},
  publisher = {Springer-Verlag},
  title = {Stateful component-based performance models},
  year = {2013},
  volume = {13},
  number = {4},
  pages = {1319--1343},
  abstract = {The accuracy of performance-prediction models is crucial for widespread adoption of performance prediction in industry. One of the essential accuracy-influencing aspects of software systems is the dependence of system behaviour on a configuration, context or history related state of the system, typically reflected with a (persistent) system attribute. Even in the domain of component-based software engineering, the presence of state-reflecting attributes (the so-called internal states) is a natural ingredient of the systems, implying the existence of stateful services, stateful components and stateful systems as such. Currently, there is no consensus on the definition or method to include state-related information in component-based prediction models. Besides the task to identify and localise different types of stateful information across component-based software architecture, the issue is to balance the expressiveness and complexity of prediction models via an effective abstraction of state modelling. In this paper, we identify and classify stateful information in component-based software systems, study the performance impact of the individual state categories, and discuss the costs of their modelling in terms of the increased model size. The observations are formulated into a set of heuristics-guiding software engineers in state modelling. Finally, practical effect of state modelling on software performance is evaluated on a real-world case study, the SPECjms2007 Benchmark. The observed deviation of measurements and predictions was significantly decreased by more precise models of stateful dependencies.}
}
@article{hasselbring2006a,
  author = {Hasselbring, Wilhelm and Reussner, Ralf H.},
  journal = {IEEE Computer},
  number = {4},
  pages = {91--92},
  title = {Toward Trustworthy Software Systems},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/hasselbring2006a.pdf},
  volume = {39},
  year = {2006},
  internal-note = {NOTE(review): volume corrected from 30 to 39 -- IEEE Computer vol. 39 corresponds to 2006; please verify against the publisher record}
}
@article{hauck2013a,
  abstract = {To predict the performance of an application, it is crucial to consider the performance of the underlying infrastructure. Thus, to yield accurate prediction results, performance-relevant properties and behaviour of the infrastructure have to be integrated into performance models. However, capturing these properties is a cumbersome and error-prone task, as it requires carefully engineered measurements and experiments. Existing approaches for creating infrastructure performance models require manual coding of these experiments, or ignore the detailed properties in the models. The contribution of this paper is the Ginpex approach, which introduces goal-oriented and model-based specification and generation of executable performance experiments for automatically detecting and quantifying performance-relevant infrastructure properties. Ginpex provides a metamodel for experiment specification and comes with predefined experiment templates that provide automated experiment execution on the target platform and also automate the evaluation of the experiment results. We evaluate Ginpex using three case studies, where experiments are executed to quantify various infrastructure properties.},
  author = {Michael Hauck and Michael Kuperberg and Nikolaus Huber and Ralf Reussner},
  doi = {10.1007/s10270-013-0335-7},
  issn = {1619-1366},
  journal = {Software \& Systems Modeling},
  pages = {1--21},
  publisher = {Springer-Verlag},
  title = {Deriving performance-relevant infrastructure properties through model-based experiments with {Ginpex}},
  year = {2013}
}
@article{hinkel2016f,
  abstract = {Bio-inspired robots still rely on classic robot control although advances in neurophysiology allow adaptation to control as well. However, the connection of a robot to spiking neuronal networks needs adjustments for each purpose and requires frequent adaptation during an iterative development. Existing approaches cannot bridge the gap between robotics and neuroscience or do not account for frequent adaptations. The contribution of this paper is an architecture and domain-specific language (DSL) for connecting robots to spiking neuronal networks for iterative testing in simulations, allowing neuroscientists to abstract from implementation details. The framework is implemented in a web-based platform. We validate the applicability of our approach with a case study based on image processing for controlling a four-wheeled robot in an experiment setting inspired by Braitenberg vehicles.},
  author = {Hinkel, Georg and Groenda, Henning and Krach, Sebastian and Vannucci, Lorenzo and Denninger, Oliver and Cauli, Nino and Ulbrich, Stefan and Roennau, Arne and Falotico, Egidio and Gewaltig, Marc-Oliver and Knoll, Alois and Dillmann, R{\"u}diger and Laschi, Cecilia and Reussner, Ralf},
  issn = {0921-0296},
  journal = {Journal of Intelligent \& Robotic Systems},
  title = {A Framework for Coupled Simulations of Robots and Spiking Neuronal Networks},
  publisher = {Springer},
  year = {2016},
  tags = {refereed},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/hinkel2016f.pdf}
}
@article{hinkel2017b,
  author = {Georg Hinkel and Thomas Goldschmidt and Erik Burger and Ralf Reussner},
  issn = {1619-1374},
  journal = {Software \& Systems Modeling},
  publisher = {Springer Berlin / Heidelberg},
  title = {Using Internal Domain-Specific Languages to Inherit Tool Support and Modularity for Model Transformations},
  year = {2019},
  volume = {18},
  number = {1},
  pages = {129--155},
  doi = {10.1007/s10270-017-0578-9},
  tags = {refereed,nmf},
  url = {http://rdcu.be/oTED},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/hinkel2017b.pdf}
}
@article{knethen1999a,
  author = {von Knethen, Antje and Kamsties, Erik and Reussner, Ralf H. and Bunse, Christian and Shen, Bin},
  journal = {G{\'e}nie Logiciel},
  pages = {8--15},
  title = {Une {\'e}tude comparative de m{\'e}thodes industrielles d'ing{\'e}nierie des exigences},
  volume = {50},
  year = {1999}
}
@article{kratz2004a,
  abstract = {The notions of design and process cut across many disciplines. Applications of abstract notions of design and process to engineering problem solving would certainly redefine and expand the notion of engineering itself in the 21st century. This Journal of SDPS strives to be the repository of human knowledge covering interdisciplinary notions of design and process in a rigorous fashion. We expect and encourage papers crossing the boundaries back and forth in mathematical landscape as well as among mathematics, physics, economics, management science, and engineering. Journal of Integrated Design and Process Science is an archival, peer-reviewed technical journal publishing the following types of papers: a) Research papers, b) Reports on case studies, c) Reports on major design and process projects, d) Design and process standards and proposals, and e) Insightful tutorials on design and process. It has been observed that most of the work related to design and process is interdisciplinary and until recently has been scattered in journals of many diverse disciplines. The objective on this journal is to publish state-of-the-art papers in this expanding field, providing an international and interdisciplinary forum for best work in design and process related areas. The audience of this journal will have a single source to stay current on new and quality work as academic research papers and synthesis on best-practices. Consistent with SDPS philosophy, the Journal strives to maintain an international and interdisciplinary balance by relying on experts from various corners of the world. Authors whose work are in the domain of interdisciplinary no-man's land with a flavor of design and process are encouraged to submit their papers to this Journal. The readership of this journal includes participants from academia and industry.},
  address = {Amsterdam, The Netherlands, The Netherlands},
  author = {Kratz, Benedikt and Reussner, Ralf and van den Heuvel, Willem-Jan},
  issn = {1092-0617},
  journal = {Journal of Integrated Design and Process Science},
  number = {4},
  pages = {1--17},
  publisher = {IOS Press},
  title = {Empirical Research on Similarity Metrics for Software Component Interfaces},
  volume = {8},
  year = {2004},
  internal-note = {NOTE(review): the abstract above appears to be the journal's scope statement, not this paper's abstract -- verify and replace with the paper's own abstract}
}
@article{krogmann2009c,
  abstract = {In component-based software engineering, existing components are often re-used in new applications. Correspondingly, the response time of an entire component-based application can be predicted from the execution durations of individual component services. These execution durations depend on the runtime behaviour of a component, which itself is influenced by three factors: the execution platform, the usage profile, and the component wiring. To cover all relevant combinations of these influencing factors, conventional prediction of response times requires repeated deployment and measurements of component services for all such combinations, incurring a substantial effort. This paper presents a novel comprehensive approach for reverse engineering and performance prediction of components. In it, genetic programming is utilised for reconstructing a behaviour model from monitoring data, runtime bytecode counts and static bytecode analysis. The resulting behaviour model is parametrised over all three performance-influencing factors, which are specified separately. This results in significantly fewer measurements: the behaviour model is reconstructed only once per component service, and one application-independent bytecode benchmark run is sufficient to characterise an execution platform. To predict the execution durations for a concrete platform, our approach combines the behaviour model with platform-specific benchmarking results. We validate our approach by predicting the performance of a file sharing application.},
  author = {Klaus Krogmann and Michael Kuperberg and Ralf Reussner},
  doi = {10.1109/TSE.2010.69},
  editor = {Mark Harman and Afshin Mansouri},
  issn = {0098-5589},
  journal = {IEEE Transactions on Software Engineering},
  number = {6},
  pages = {865--877},
  publisher = {IEEE},
  title = {Using Genetic Search for Reverse Engineering of Parametric Behaviour Models for Performance Prediction},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/krogmann2009c.pdf},
  volume = {36},
  year = {2010}
}
@article{krogmann2009b,
  abstract = {Die Verwendung von Komponenten ist ein anerkanntes Prinzip in der Software-Entwicklung. Dabei werden Software-Komponenten zumeist als Black-Boxes aufgefasst, deren Interna vor einem Komponenten-Verwender verborgen sind. Architektur-Analyse- Verfahren zur Vorhersage nicht-funktionaler Eigenschaften erlauben bspw. auf der Architekturebene Dimensionierungsfragestellungen fuer Hardware- / Software-Umgebungen zu beantworten, sowie Skalierbarkeitsanalysen und Was-Waere-Wenn-Szenarien fuer die Erweiterung von Altsystemen durchzufuehren. Dazu benoetigen sie jedoch Informationen ueber Interna (bspw. die Anzahl abgearbeiteter Schleifen oder Aufrufe externer Dienste) von Komponenten. Um an solche Informationen zu gelangen muessen existierende Software-Komponenten analysiert werden. Die benoetigten Informationen ueber das Innere der Komponenten muessen dabei derart rekonstruiert werden, dass sie fuer anschlie{\ss}ende Analyseverfahren nicht-funktionaler Eigenschaften genutzt werden koennen. Eine haendische Rekonstruktion solcher Modelle scheitert haeufig an der Groe{\ss}e der Systeme und ist sehr fehleranfaellig, da konsistente Abstraktionen ueber potentiell tausende Zeilen von Code gefunden werden muessen. Bestehende Verfahren liefern dabei nicht die notwendigen Daten- und Kontrollflussabstraktionen die fuer Analysen und Simulationen benoetigt werden. Der Beitrag dieses Papiers ist ein Reverse Engineering Verfahren fuer Komponentenverhalten. Die daraus resultierenden Modelle (Palladio Komponentenmodell) eignen sich zur Vorhersage von Performanz-Eigenschaften (Antwortzeit, Durchsatz) und damit fuer die oben angefuehrten Fragestellungen. Die aus Quellcode rekonstruierten Modelle umfassen parametrisierten Kontroll- und Datenfluss fuer Software-Komponenten und stellen eine Abstraktion realer Zusammenh{\"a}nge im Quellcode dar. Das Reverse Engineering Verfahren kombiniert dabei ueber Genetische Programmierung (einer Form von Maschinen Lernen) statische und dynamische Analyseverfahren.},
  author = {Klaus Krogmann and Ralf Reussner},
  issn = {0720-8928},
  journal = {Softwaretechnik-Trends},
  month = may,
  number = {2},
  pages = {22--24},
  title = {Reverse Engineering von Software-Komponentenverhalten mittels Genetischer Programmierung},
  url = {http://pi.informatik.uni-siegen.de/stt/29_2/01_Fachgruppenberichte/SRE/10-krogmann.pdf},
  volume = {29},
  year = {2009}
}
@proceedings{becker2014c,
  editor = {Becker, Steffen and Hasselbring, Wilhelm and van Hoorn, Andre and Kounev, Samuel and Reussner, Ralf and others},
  address = {Stuttgart, Germany},
  publisher = {Universit{\"a}t Stuttgart},
  title = {Proceedings of the 2014 Symposium on Software Performance (SOSP'14): Joint Descartes/Kieker/Palladio Days},
  year = {2014}
}
@article{martens2010c,
  abstract = {Background: Model-based performance evaluation methods for software architectures can help architects to assess design alternatives and save costs for late life-cycle performance fixes. A recent trend is component-based performance modelling, which aims at creating reusable performance models; a number of such methods have been proposed during the last decade. Their accuracy and the needed effort for modelling are heavily influenced by human factors, which are so far hardly understood empirically.

Objective: Do component-based methods allow to make performance predictions with a comparable accuracy while saving effort in a reuse scenario? We examined three monolithic methods (SPE, umlPSI, Capacity Planning (CP)) and one component-based performance evaluation method (PCM) with regard to their accuracy and effort from the viewpoint of method users.

Methods: We conducted a series of three experiments (with different levels of control) involving 47 computer science students. In the first experiment, we compared the applicability of the monolithic methods in order to choose one of them for comparison. In the second experiment, we compared the accuracy and effort of this monolithic and the component-based method for the model creation case. In the third, we studied the effort reduction from reusing component-based models. Data were collected based on the resulting artefacts, questionnaires and screen recording. They were analysed using hypothesis testing, linear models, and analysis of variance.

Results: For the monolithic methods, we found that using SPE and CP resulted in accurate predictions, while umlPSI produced over-estimates. Comparing the component-based method PCM with SPE, we found that creating reusable models using PCM takes more (but not drastically more) time than using SPE and that participants can create accurate models with both techniques. Finally, we found that reusing PCM models can save time, because effort to reuse can be explained by a model that is independent of the inner complexity of a component.

Limitations: The tasks performed in our experiments reflect only a subset of the actual activities when applying model-based performance evaluation methods in a software development process.

Conclusions: Our results indicate that sufficient prediction accuracy can be achieved with both monolithic and component-based methods, and that the higher effort for component-based performance modelling will indeed pay off when the component models incorporate and hide a sufficient amount of complexity.},
  author = {Anne Martens and Heiko Koziolek and Lutz Prechelt and Ralf Reussner},
  doi = {10.1007/s10664-010-9142-8},
  issn = {1382-3256},
  journal = {Empirical Software Engineering},
  keywords = {Computer Science},
  number = {5},
  pages = {587--622},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010c.pdf},
  publisher = {Springer Netherlands},
  title = {From monolithic to component-based performance evaluation of software architectures},
  volume = {16},
  year = {2011},
  tags = {peer-reviewed}
}

@article{reussner2003b,
  abstract = {The purpose of this tutorial is to provide concepts and historical background of the ``network integration testing'' (NIT) methodology. NIT is a ``grey box'' testing technique that is aimed at verifying the correct behaviour of interconnected networks (operated by different operators) in provisioning services to end users, or the behaviour of a complex network operated by a unique operator. The main technical concepts behind this technique are presented along with the history of some International projects that have contributed to its early definition and application. European Institute for Research and Strategic Studies in Telecommunication (EURESCOM) has actually been very active, with many projects, in defining the NIT basic methodology and providing actual NIT specifications (for narrow-band and broad-band services, covering both voice and data). EURESCOM has also been acting as a focal point in the area, e.g., encouraging the Industry in developing commercial tools supporting NIT. In particular, the EURESCOM P412 project (1994--1996) first explicitly defined the NIT methodology (the methodological aspects include test notation, test implementation, test processes, distributed testing and related co-ordination aspects). P412 applied the methodology to ISDN whilst another project, P410, applied NIT to data services. The P613 project (1997--1999) extended the basic NIT methodology to the broad band and GSM. More into details, the areas covered currently by NIT test specifications developed by EURESCOM projects include N-ISDN, N-ISUP, POTS, B-ISDN, B-ISUP, IP over ATM, ATM/FR, GSM, focusing also on their ``inter-working'' cases (e.g., ISDN/ISDN, ISDN/GSM, etc.). ETSI, the European Telecommunication Standards Institute, also contributed to NIT development (e.g., the definition of the TSP1+ protocol, used for the functional co-ordination and timing synchronisation of all tools involved in a distributed testing session). The paper also discusses NIT in relation to the recent major changes (processes) within the telecommunication (TLC) community. Beyond the new needs coming from the pure technical aspects (integration of voice and data, fixed mobile convergence, etc.) the full deregulation of the TLC sector has already generated new processes and new testing needs (e.g., Interconnection Testing) that had a significant influence on the methodology. NIT is likely to continue to develop further in the future according to the needs of telecom operators, authorities, user's associations and suppliers.},
  author = {Reussner, Ralf H.},
  journal = {Future Generation Computer Systems},
  number = {5},
  pages = {627--639},
  title = {Automatic Component Protocol Adaptation with the {CoCoNut} Tool Suite},
  volume = {19},
  year = {2003},
  internal-note = {NOTE(review): abstract discusses network integration testing and does not match the title about component protocol adaptation -- looks copied from a different FGCS article; verify}
}
@article{reussner2003e,
  author = {Reussner, Ralf H.},
  issn = {0167-739X},
  journal = {Future Generation Computer Systems},
  number = {5},
  pages = {749--759},
  title = {Using {SKaMPI} for Developing High-Performance {MPI} Programs with Performance Portability},
  volume = {19},
  year = {2003}
}
@article{reussner2001f,
  author = {Reussner, Ralf H. and Sanders, Peter and Tr{\"a}ff, Jesper Larsson},
  journal = {Scientific Computing},
  title = {{SKaMPI}: a comprehensive benchmark for public benchmarking of {MPI}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/reussner2001f.pdf},
  year = {2001},
  internal-note = {NOTE(review): journal name looks suspect -- the SKaMPI benchmark article is commonly cited in "Scientific Programming"; volume/number/pages are also missing; verify against the publisher record}
}
@article{reussner2003d,
  abstract = {Due to the increasing size and complexity of software systems, software architectures have become a crucial part in development projects. A lot of effort has been put into defining formal ways for describing architecture specifications using Architecture Description Languages (ADLs). Since no common ADL today offers tools for evaluating the performance, an attempt to develop such a tool based on an event-based simulation engine has been made. Common ADLs were investigated and the work was based on the fundamentals within the field of software architectures. The tool was evaluated both in terms of correctness in predictions as well as usability to show that it actually is possible to evaluate the performance using high-level architectures as models.},
  author = {Reussner, Ralf H. and Schmidt, Heinz W. and Poernomo, Iman},
  journal = {Journal of Systems and Software},
  note = {Special Issue on Software Architecture -- Engineering Quality Attributes},
  number = {3},
  pages = {241--252},
  title = {Reliability Prediction for Component-Based Software Architectures},
  volume = {66},
  year = {2003},
  internal-note = {NOTE(review): abstract discusses performance evaluation of ADLs and does not obviously match the reliability-prediction title -- verify it belongs to this paper}
}
@article{schmidt2001a,
  abstract = {The increasing pressure for enterprises to join into agile business networks is changing the requirements on the enterprise computing systems. The supporting infrastructure is increasingly required to provide common facilities and societal infrastructure services to support the lifecycle of loosely-coupled, eContract-governed business networks. The required facilities include selection of those autonomously administered business services that the enterprises are prepared to provide and use, contract negotiations, and furthermore, monitoring of the contracted behaviour with potential for breach management. The essential change is in the requirement of a clear mapping between business-level concepts and the automation support for them. Our work has focused on developing B2B middleware to address the above challenges; however, the architecture is not feasible without management facilities for trust-aware decisions for entering business networks and interacting within them. This paper discusses how trust-based decisions are supported and positioned in the B2B middleware.},
  author = {Schmidt, Heinz W. and Poernomo, Iman and Reussner, Ralf H.},
  journal = {Journal of Integrated Design and Process Science},
  month = sep,
  number = {3},
  pages = {25--51},
  title = {{T}rust-{B}y-{C}ontract: {M}odelling, {A}nalysing and {P}redicting {B}ehaviour in {S}oftware {A}rchitectures},
  volume = {5},
  year = {2001},
  internal-note = {NOTE(review): the B2B-middleware/eContract abstract reads like a later paper on business-network trust management -- verify it matches this 2001 article}
}
@article{trubiani2014a,
  abstract = {Antipatterns are conceptually similar to patterns in that they document recurring solutions to common design problems. Software Performance Antipatterns document common performance problems in the design as well as their solutions. The definition of performance antipatterns concerns software properties that can include static, dynamic, and deployment aspects. To make use of such knowledge, we propose an approach that helps software architects to identify and solve performance antipatterns. Our approach provides software performance feedback to architects, since it suggests the design alternatives that allow overcoming the detected performance problems. The feedback process may be quite complex since architects may have to assess several design options before achieving the architectural model that best fits the end-user expectations. In order to optimise such process we introduce a ranking methodology that identifies, among a set of detected antipatterns, the ``guilty'' ones, i.e. the antipatterns that more likely contribute to the violation of specific performance requirements. The introduction of our ranking process leads the system to converge towards the desired performance improvement by discarding a consistent part of design alternatives. Four case studies in different application domains have been used to assess the validity of the approach.},
  author = {Catia Trubiani and Anne Koziolek and Vittorio Cortellessa and Ralf Reussner},
  doi = {10.1016/j.jss.2014.03.081},
  issn = {0164-1212},
  journal = {Journal of Systems and Software},
  keywords = {Palladio Architectural Models},
  pages = {141--165},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/trubiani2014a.pdf},
  title = {Guilt-based Handling of Software Performance Antipatterns in {P}alladio Architectural Models},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121214001010},
  volume = {95},
  year = {2014},
  tags = {peer-reviewed}
}
@article{vogel-heuser2017a,
  title = {Maintenance effort estimation with {KAMP4aPS} for cross-disciplinary automated {PLC}-based Production Systems - a collaborative approach},
  journal = {IFAC-PapersOnLine},
  volume = {50},
  number = {1},
  pages = {4360--4367},
  year = {2017},
  note = {20th IFAC World Congress},
  issn = {2405-8963},
  doi = {10.1016/j.ifacol.2017.08.877},
  author = {Birgit Vogel-Heuser and Robert Heinrich and Suhyun Cha and Kiana Rostami and Felix Ocker and Sandro Koch and Ralf Reussner and Simon Ziegltrum},
  abstract = {Automated production systems (aPSs) are often in operation for several decades. Due to a multiplicity of reasons, these assets have to be maintained and modified over the time multiple times and with respect to multiple engineering domains. An increased economic pressure demands to perform these tasks in an optimized way. Therefore, it is necessary to estimate change effects with respect to multidisciplinary interdependences, required surrounding non-functional tasks and the effort and costs included in each step. This paper outlines available cost estimation methods for PLC-based automation and Information Systems (ISs). We introduce Karlsruhe Architectural Maintainability Prediction for aPS (KAMP4aPS), an approach to estimate the necessary maintenance tasks to be performed and their related costs for the domain of aPSs by extending KAMP, which is limited to change propagation analysis on ISs. KAMP requires a metamodel to derive these tasks automatically. Unfortunately, a domain spanning metamodel is missing for aPSs. Hence, we need to develop a part of the metamodel derived from an AutomationML description for the chosen demonstrator at first. Finally, we apply and compare different estimation methods and KAMP4aPS to analyze the exchange of a fieldbus system as exemplary change scenario on a lab size plant to demonstrate the benefits of our discipline-spanning approach.},
  tags = {refereed},
  pdf = {https://sdqweb.ipd.kit.edu/publications/pdfs/vogel-heuser2017a.pdf}
}
@article{heinrich2016a,
  author = {Heinrich, Robert and G{\"a}rtner, Stefan and Hesse, Tom-Michael and Ruhroth, Thomas and Reussner, Ralf and Schneider, Kurt and Paech, Barbara and J{\"u}rjens, Jan},
  journal = {International Journal of Software Engineering and Knowledge Engineering},
  title = {The {CoCoME} Platform: A Research Note on Empirical Studies in Information System Evolution},
  volume = {25},
  number = {09\&10},
  pages = {1715--1720},
  year = {2015},
  doi = {10.1142/S0218194015710059},
  url = {http://www.worldscientific.com/doi/abs/10.1142/S0218194015710059},
  eprint = {http://www.worldscientific.com/doi/pdf/10.1142/S0218194015710059},
  internal-note = {NOTE(review): citation key suggests 2016 but the article year is 2015; key kept unchanged so existing \cite commands keep working}
}
@article{budde2016a,
  title = {{Praxis der Forschung -- Eine Lehrveranstaltung des forschungsnahen Lehrens und Lernens in der Informatik am KIT}},
  author = {Budde, Matthias and Grebing, Sarah and Burger, Erik and Kramer, Max E. and Beckert, Bernhard and Beigl, Michael and Reussner, Ralf},
  editor = {Berendt, Brigitte and Fleischmann, Andreas and Schaper, Niclas and Szczyrba, Birgit and Wildt, Johannes},
  publisher = {DUZ Verlags- und Medienhaus GmbH},
  journal = {Neues Handbuch Hochschullehre},
  number = {A 3.19},
  volume = {74},
  abstract = {Der neue Lehrveranstaltungstyp Praxis der Forschung wurde 2012 im Master-Studiengang Informatik des Karlsruher Instituts f{\"u}r Technologie (KIT) eingef{\"u}hrt. Zentrales Konzept dieser Veranstaltung ist das forschungsnahe Lehren und Lernen: Studierende erwerben im Rahmen eines eigenen Forschungsprojekts sowohl Fachwissen als auch methodische Kompetenz zu wissenschaftlicher Arbeit. Die konkrete Ausgestaltung folgt den Grunds{\"a}tzen der Forschungsn{\"a}he und der integrierten Vermittlung methodischer Kompetenzen. Die Studierenden sollen insbesondere auch erfahren, dass es ein wesentlicher Aspekt der wissenschaftlichen Arbeit ist, Forschungsergebnisse sicht- und wahrnehmbar zu machen.},
  year = {2016},
  tags = {education}
}
@article{hasselbring20167th,
  title = {{7th Symposium on Software Performance (SSP)}},
  author = {Hasselbring, Wilhelm and Becker, Steffen and van Hoorn, Andre and Kounev, Samuel and Reussner, Ralf},
  journal = {Softwaretechnik-Trends},
  volume = {36},
  number = {4},
  pages = {1},
  year = {2016}
}
@article{heinrich2018c,
  title = {Architecture-based change impact analysis in cross-disciplinary automated production systems},
  journal = {Journal of Systems and Software},
  volume = {146},
  pages = {167--185},
  year = {2018},
  issn = {0164-1212},
  doi = {10.1016/j.jss.2018.08.058},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121218301717},
  author = {Robert Heinrich and Sandro Koch and Suhyun Cha and Kiana Busch and Ralf Reussner and Birgit Vogel-Heuser},
  keywords = {Change impact analysis, Maintenance cost estimation, Metamodeling, Manufacturing system, Production automation, Programmable logic controller},
  abstract = {Maintaining an automated production system is a challenging task as it comprises artifacts from multiple disciplines -- namely mechanical, electrical, and software engineering. As the artifacts mutually affect each other, even small modifications may cause extensive side effects. Consequently, estimating the maintenance effort for modifications in an automated production system precisely is time consuming and often nearly as complicated as implementing the modifications. In this paper, we present the KAMP4aPS approach for architecture-based change impact analysis in production automation. We propose metamodels to specify the various artifacts of the system and modifications to them, as well as algorithms and rules for change propagation analysis based on the models. We evaluate KAMP4aPS for three different change scenarios based on the established xPPU community case study on production automation. In the case study, we investigate different configurations of metamodels and change propagation rules. Evaluation results indicate the accuracy of change propagation for applying KAMP4aPS to the specific metamodel and rules.}
}
@incollection{alpers2019a,
  author = {Alpers, Sascha and Pilipchuk, Roman and Oberweis, Andreas and Reussner, Ralf},
  title = {The Current State of the Holistic Privacy and Security Modelling Approach in Business Process and Software Architecture Modelling},
  booktitle = {Information Systems Security and Privacy},
  pages = {109--124},
  year = {2019},
  publisher = {Springer International Publishing},
  editor = {Mori, Paolo and Furnell, Steven and Camp, Olivier},
  doi = {10.1007/978-3-030-25109-3},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/alpers2019a.pdf},
  internal-note = {NOTE(review): was @article, but publisher/editor fields and a book-like venue indicate a chapter in an edited volume; verify the series/volume and whether a chapter-level DOI exists (this one resolves to the whole book)}
}
@article{hinkel2019,
  author = {Hinkel, Georg and Heinrich, Robert and Reussner, Ralf},
  title = {An extensible approach to implicit incremental model analyses},
  journal = {Software {\&} Systems Modeling},
  year = {2019},
  month = oct,
  day = {01},
  volume = {18},
  number = {5},
  pages = {3151--3187},
  issn = {1619-1374},
  doi = {10.1007/s10270-019-00719-y},
  url = {https://doi.org/10.1007/s10270-019-00719-y}
}
@article{klare2019icmt,
  author = {Klare, Heiko and Syma, Torsten and Burger, Erik and Reussner, Ralf},
  title = {A Categorization of Interoperability Issues in Networks of Transformations},
  journal = {Journal of Object Technology},
  volume = {18},
  number = {3},
  issn = {1660-1769},
  year = {2019},
  month = jul,
  editor = {Anthony Anjorin and Regina Hebig},
  note = {The 12th International Conference on Model Transformations (ICMT 2019)},
  pages = {4:1--20},
  doi = {10.5381/jot.2019.18.3.a4},
  url = {http://www.jot.fm/contents/issue_2019_03/article4.html},
  pdf = {http://www.jot.fm/issues/issue_2019_03/article4.pdf},
  tags = {Vitruv}
}
@article{klare2021jss,
  title = {{Enabling consistency in view-based system development -- The Vitruvius approach}},
  journal = {Journal of Systems and Software},
  year = {2021},
  volume = {171},
  issn = {0164-1212},
  doi = {10.1016/j.jss.2020.110815},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121220302144},
  author = {Heiko Klare and Max E. Kramer and Michael Langhammer and Dominik Werle and Erik Burger and Ralf Reussner},
  keywords = {Consistency, Model-driven software development, Model transformations, Model views},
  abstract = {During the development of large software-intensive systems, developers use several modeling languages and tools to describe a system from different viewpoints. Model-driven and view-based technologies have made it easier to define domain-specific languages and transformations. Nevertheless, using several languages leads to fragmentation of information, to redundancies in the system description, and eventually to inconsistencies. Inconsistencies have negative impacts on the system's quality and are costly to fix. Often, there is no support for consistency management across multiple languages. Using a single language is no practicable solution either, as it is overly complex to define, use, and evolve such a language. View-based development is a suitable approach to deal with complex systems, and is widely used in other engineering disciplines. Still, we need to cope with the problems of fragmentation and consistency. In this paper, we present the Vitruvius approach for consistency in view-based modeling. We describe the approach by formalizing the notion of consistency, presenting languages for consistency preservation, and defining a model-driven development process. Furthermore, we show how existing models can be integrated. We have evaluated our approach at two case studies from component-based and embedded automotive software development, using our prototypical implementation based on the Eclipse Modeling Framework.},
  tags = {Vitruv}
}
@article{seifermann2022a,
  author = {Seifermann, Stephan and Heinrich, Robert and Werle, Dominik and Reussner, Ralf},
  doi = {10.1016/j.jss.2021.111138},
  issn = {0164-1212, 1873-1228},
  journal = {The Journal of Systems and Software},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/seifermann2022a.pdf},
  publisher = {Elsevier},
  title = {Detecting Violations of Access Control and Information Flow Policies in Data Flow Diagrams},
  volume = {184},
  year = {2022}
}