peer-reviewed-KoziolekAnne.bib

@article{aleti2013a,
  abstract = {Due to significant industrial demands toward software systems with increasing complexity and challenging quality requirements, software architecture design has become an important development activity and the research domain is rapidly evolving. In the last decades, software architecture optimization methods, which aim to automate the search for an optimal architecture design with respect to a (set of) quality attribute(s), have proliferated. However, the reported results are fragmented over different research communities, multiple system domains, and multiple quality attributes. To integrate the existing research results, we have performed a systematic literature review and analyzed the results of 188 research papers from the different research communities. Based on this survey, a taxonomy has been created which is used to classify the existing research. Furthermore, the systematic analysis of the research literature provided in this review aims to help the research community in consolidating the existing research efforts and deriving a research agenda for future developments.},
  author = {Aldeida Aleti and Barbora Buhnova and Lars Grunske and Anne Koziolek and Indika Meedeniya},
  doi = {10.1109/TSE.2012.64},
  issn = {0098-5589},
  journal = {IEEE Transactions on Software Engineering},
  keywords = {Computer architecture;Optimization methods;Software;Software architecture;Systematics;Taxonomy;Software architecture optimization;optimization methods;problem overview;systematic literature review},
  number = {5},
  pages = {658--683},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/aleti2013a.pdf},
  publisher = {IEEE},
  title = {Software Architecture Optimization Methods: A Systematic Literature Review},
  volume = {39},
  year = {2013},
  tags = {peer-reviewed},
  groups = {ARE; SDQ; KoziolekAnne}
}
@article{charrada2015a,
  abstract = {Updating the requirements specification when software systems evolve is a manual task that is expensive and time consuming. Therefore, maintainers usually apply the changes to the code directly and leave the requirements unchanged. This results in the requirements rapidly becoming obsolete and useless. In this paper, we propose an approach that supports the maintainer in keeping the requirements specification consistent with the implementation, by identifying the requirements that are impacted whenever the code is changed. Our approach works as follows. First, we analyse the changes that have been applied to the source code and detect if they are likely to impact the requirements or not. Second, we trace the requirements impacting changes back to the requirements specification to identify the parts that might need to be modified. The output of the tracing is a list of requirements that are sorted according to their likelihood of being impacted. Automatically identifying the parts of the requirements specification that are likely to need maintenance reduces the effort needed for keeping the requirements up-to-date and thus makes the task of the maintainer easier. When applying our approach in three case studies, 70% to 100% of the impacted requirements were identified within a list that includes less than 20% of the total number of requirements in the specification.},
  author = {Ben Charrada, Eya and Koziolek, Anne and Glinz, Martin},
  doi = {10.1002/smr.1705},
  issn = {2047-7481},
  journal = {Journal of Software: Evolution and Process},
  keywords = {requirements evolution, requirements update, impact analysis, traceability, artifact synchronization},
  number = {3},
  pages = {166--194},
  pdf = {https://files.ifi.uzh.ch/rerg/amadeus/staff/charrada/publications/JSEP2015.pdf},
  title = {Supporting requirements update during software evolution},
  url = {http://dx.doi.org/10.1002/smr.1705},
  volume = {27},
  year = {2015},
  tags = {peer-reviewed},
  groups = {ARE; SDQ; KoziolekAnne}
}
@article{brosig2015a,
  abstract = {During the last decade, researchers have proposed a number of model transformations enabling performance predictions. These transformations map performance-annotated software architecture models into stochastic models solved by analytical means or by simulation. However, so far, a detailed quantitative evaluation of the accuracy and efficiency of different transformations is missing, making it hard to select an adequate transformation for a given context. This paper provides an in-depth comparison and quantitative evaluation of representative model transformations to, e.g., Queueing Petri Nets and Layered Queueing Networks. The semantic gaps between typical source model abstractions and the different analysis techniques are revealed. The accuracy and efficiency of each transformation are evaluated by considering four case studies representing systems of different size and complexity. The presented results and insights gained from the evaluation help software architects and performance engineers to select the appropriate transformation for a given context, thus significantly improving the usability of model transformations for performance prediction.},
  author = {Fabian Brosig and Philipp Meier and Steffen Becker and Anne Koziolek and Heiko Koziolek and Samuel Kounev},
  doi = {10.1109/TSE.2014.2362755},
  issn = {0098-5589},
  journal = {IEEE Transactions on Software Engineering},
  keywords = {Accuracy;Analytical models;Phase change materials;Predictive models;Software architecture;Stochastic processes;Unified modeling language;D.2.10.h Quality analysis and evaluation;D.2.11 Software architectures;D.2.2 Design tools and techniques},
  month = {February},
  number = {2},
  pages = {157--175},
  title = {Quantitative Evaluation of Model-Driven Performance Analysis and Simulation of Component-based Architectures},
  volume = {41},
  year = {2015},
  pdf = {http://www.koziolek.de/docs/Brosig2015-IEEE-TSE-preprint.pdf},
  tags = {peer-reviewed},
  groups = {SDQ; DESCARTES; ARE; BrosigFabian; MeierPhilipp; BeckerSteffen; KoziolekAnne; KoziolekHeiko; KounevSamuel}
}
@article{combemale2020a,
  title = {{A Hitchhiker's Guide to Model-Driven Engineering for Data-Centric Systems}},
  author = {Combemale, Benoit and Kienzle, J{\"o}rg and Mussbacher, Gunter and Ali, Hyacinth and Amyot, Daniel and Bagherzadeh, Mojtaba and Batot, Edouard and Bencomo, Nelly and Benni, Benjamin and Bruel, Jean-Michel and Cabot, Jordi and Cheng, Betty H C and Collet, Philippe and Engels, Gregor and Heinrich, Robert and J{\'e}z{\'e}quel, Jean-Marc and Koziolek, Anne and Mosser, S{\'e}bastien and Reussner, Ralf and Sahraoui, Houari and Saini, Rijul and Sallou, June and Stinckwich, Serge and Syriani, Eugene and Wimmer, Manuel},
  url = {https://hal.inria.fr/hal-02612087},
  journal = {IEEE Software},
  publisher = {Institute of Electrical and Electronics Engineers},
  year = {2020},
  pdf = {https://hal.inria.fr/hal-02612087/file/ieeesw-moda-preprint.pdf},
  hal_id = {hal-02612087},
  hal_version = {v1},
  doi = {10.1109/MS.2020.2995125},
  tags = {peer-reviewed},
  groups = {SDQ; ARE; HeinrichRobert; KoziolekAnne; ReussnerRalf}
}
@article{gouvea2012a,
  abstract = {In this paper, we report on our experience with the application of validated models to assess performance, reliability, and adaptability of a complex mission critical system that is being developed to dynamically monitor and control the position of an oil-drilling platform. We present real-time modeling results that show that all tasks are schedulable. We performed stochastic analysis of the distribution of task execution time as a function of the number of system interfaces. We report on the variability of task execution times for the expected system configurations. In addition, we have executed a system library for an important task inside the performance model simulator. We report on the measured algorithm convergence as a function of the number of vessel thrusters. We have also studied the system architecture adaptability by comparing the documented system architecture and the implemented source code. We report on the adaptability findings and the recommendations we were able to provide to the system's architect. Finally, we have developed models of hardware and software reliability. We report on hardware and software reliability results based on the evaluation of the system architecture.},
  author = {Daniel Dominguez Gouv\^ea and Cyro Muniz and Gilson Pinto and Alberto Avritzer and Rosa Maria Meri {Le\~{a}o} and Edmundo de Souza e Silva and Morganna Carmem Diniz and Luca Berardinelli and Julius C. B. Leite and Daniel {Moss\'e} and Yuanfang Cai and Michael Dalton and Lucia Happe and Anne Koziolek},
  doi = {10.1007/s10270-012-0264-x},
  issn = {1619-1366},
  journal = {Journal of Software and Systems Modeling},
  keywords = {Performance; Reliability; Adaptability},
  note = {Special Issue on Performance Modeling},
  pages = {1--23},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/gouvea2012a.pdf},
  publisher = {Springer-Verlag},
  title = {Experience with Model-based Performance, Reliability and Adaptability Assessment of a Complex Industrial Architecture},
  year = {2012},
  tags = {peer-reviewed},
  groups = {SDQ; ARE; HappeLucia; KoziolekAnne}
}
@article{koziolek2013a,
  abstract = {Design decisions for complex, component-based systems impact multiple quality of service (QoS) properties. Often, means to improve one quality property deteriorate another one. In this scenario, selecting a good solution with respect to a single quality attribute can lead to unacceptable results with respect to the other quality attributes. A promising way to deal with this problem is to exploit multi-objective optimization where the objectives represent different quality attributes. The aim of these techniques is to devise a set of solutions, each of which assures an optimal trade-off between the conflicting qualities. Our previous work proposed a combined use of analytical optimization techniques and evolutionary algorithms to efficiently identify an optimal set of design alternatives with respect to performance and costs. This paper extends this approach to more QoS properties by providing analytical algorithms for availability-cost optimization and three-dimensional availability-performance-cost optimization. We demonstrate the use of this approach on a case study, showing that the analytical step provides a better-than-random starting population for the evolutionary optimization, which led to a speed-up of 28\% in the availability-cost case.},
  author = {Anne Koziolek and Danilo Ardagna and Raffaela Mirandola},
  doi = {10.1016/j.jss.2013.03.081},
  issn = {0164-1212},
  journal = {Journal of Systems and Software},
  note = {Special Issue on Quality Optimization of Software Architecture and Design Specifications},
  number = {10},
  pages = {2542--2558},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/koziolek2013a.pdf},
  publisher = {Elsevier},
  title = {Hybrid Multi-Attribute {QoS} Optimization in Component Based Software Systems},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121213000800},
  volume = {86},
  year = {2013},
  tags = {peer-reviewed},
  groups = {ARE; SDQ; KoziolekAnne}
}
@article{koziolek2016a,
  title = {Assessing survivability to support power grid investment decisions},
  journal = {Reliability Engineering \& System Safety},
  volume = {155},
  pages = {30 - 43},
  year = {2016},
  issn = {0951-8320},
  doi = {10.1016/j.ress.2016.05.015},
  url = {http://www.sciencedirect.com/science/article/pii/S095183201630076X},
  author = {Anne Koziolek and Alberto Avritzer and Sindhu Suresh and Daniel S. Menasché and Morganna Diniz and Edmundo de Souza e Silva and Rosa M. Leão and Kishor Trivedi and Lucia Happe},
  keywords = {Survivability},
  abstract = {The reliability of power grids has been the subject of study for the past few decades. Traditionally, detailed models are used to assess how the system behaves after failures. Such models, based on power flow analysis and detailed simulations, yield accurate characterizations of the system under study. However, they fall short on scalability. In this paper, we propose an efficient and scalable approach to assess the survivability of power systems. Our approach takes into account the phased-recovery of the system after a failure occurs. The proposed phased-recovery model yields metrics such as the expected accumulated energy not supplied between failure and full recovery. Leveraging the predictive power of the model, we use it as part of an optimization framework to assist in investment decisions. Given a budget and an initial circuit to be upgraded, we propose heuristics to sample the solution space in a principled way accounting for survivability-related metrics. We have evaluated the feasibility of this approach by applying it to the design of a benchmark distribution automation circuit. Our empirical results indicate that the combination of survivability and power flow analysis can provide meaningful investment decision support for power systems engineers.},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/koziolek2016a.pdf},
  tags = {peer-reviewed},
  groups = {ARE; SDQ; KoziolekAnne; HappeLucia}
}
@article{martens2010c,
  abstract = {Background: Model-based performance evaluation methods for software architectures can help architects to assess design alternatives and save costs for late life-cycle performance fixes. A recent trend is component-based performance modelling, which aims at creating reusable performance models; a number of such methods have been proposed during the last decade. Their accuracy and the needed effort for modelling are heavily influenced by human factors, which are so far hardly understood empirically. 

Objective: Do component-based methods allow to make performance predictions with a comparable accuracy while saving effort in a reuse scenario? We examined three monolithic methods (SPE, umlPSI, Capacity Planning (CP)) and one component-based performance evaluation method (PCM) with regard to their accuracy and effort from the viewpoint of method users.

Methods: We conducted a series of three experiments (with different levels of control) involving 47 computer science students. In the first experiment, we compared the applicability of the monolithic methods in order to choose one of them for comparison. In the second experiment, we compared the accuracy and effort of this monolithic and the component-based method for the model creation case. In the third, we studied the effort reduction from reusing component-based models. Data were collected based on the resulting artefacts, questionnaires and screen recording. They were analysed using hypothesis testing, linear models, and analysis of variance.

Results: For the monolithic methods, we found that using SPE and CP resulted in accurate predictions, while umlPSI produced over-estimates. Comparing the component-based method PCM with SPE, we found that creating reusable models using PCM takes more (but not drastically more) time than using SPE and that participants can create accurate models with both techniques. Finally, we found that reusing PCM models can save time, because effort to reuse can be explained by a model that is independent of the inner complexity of a component.

Limitations: The tasks performed in our experiments reflect only a subset of the actual activities when applying model-based performance evaluation methods in a software development process.

Conclusions: Our results indicate that sufficient prediction accuracy can be achieved with both monolithic and component-based methods, and that the higher effort for component-based performance modelling will indeed pay off when the component models incorporate and hide a sufficient amount of complexity.},
  author = {Anne Martens and Heiko Koziolek and Lutz Prechelt and Ralf Reussner},
  doi = {10.1007/s10664-010-9142-8},
  issn = {1382-3256},
  journal = {Empirical Software Engineering},
  keywords = {Computer Science},
  number = {5},
  pages = {587--622},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/martens2010c.pdf},
  publisher = {Springer Netherlands},
  title = {From monolithic to component-based performance evaluation of software architectures},
  url = {http://dx.doi.org/10.1007/s10664-010-9142-8},
  volume = {16},
  year = {2011},
  tags = {peer-reviewed},
  groups = {ARE; SEQ; SDQ; KoziolekAnne; KoziolekHeiko; ReussnerRalf}
}

@article{menasche2014a,
  abstract = {Smart grids are fostering a paradigm shift in the realm of power distribution systems. Whereas traditionally different components of the power distribution system have been provided and analyzed by different teams through different lenses, smart grids require a unified and holistic approach that takes into consideration the interplay of communication reliability, energy backup, distribution automation topology, energy storage, and intelligent features such as automated fault detection, isolation, and restoration (FDIR) and demand response. In this paper, we present an analytical model and metrics for the survivability assessment of the distribution power grid network. The proposed metrics extend the system average interruption duration index, accounting for the fact that after a failure, the energy demand and supply will vary over time during a multi-step recovery process. The analytical model used to compute the proposed metrics is built on top of three design principles: state space factorization, state aggregation, and initial state conditioning. Using these principles, we reduce a Markov chain model with large state space cardinality to a set of much simpler models that are amenable to analytical treatment and efficient numerical solution. In case demand response is not integrated with FDIR, we provide closed form solutions to the metrics of interest, such as the mean time to repair a given set of sections. Under specific independence assumptions, we show how the proposed methodology can be adapted to account for multiple failures. We have evaluated the presented model using data from a real power distribution grid, and we have found that survivability of distribution power grids can be improved by the integration of the demand response feature with automated FDIR approaches. Our empirical results indicate the importance of quantifying survivability to support investment decisions at different parts of the power grid distribution network.},
  author = {Menasch\'{e}, Daniel Sadoc and Avritzer, Alberto and Suresh, Sindhu and Le\~{a}o, Rosa M. and de Souza e Silva, Edmundo and Diniz, Morganna and Trivedi, Kishor and Happe, Lucia and Koziolek, Anne},
  doi = {10.1002/cpe.3241},
  issn = {1532-0634},
  journal = {Concurrency and Computation: Practice and Experience},
  keywords = {survivability, transient analysis, smart grid, fault tolerance, demand response, reliability metrics, FDIR},
  number = {12},
  pages = {1949--1974},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/menasche2014a.pdf},
  title = {Assessing survivability of smart grid distribution network designs accounting for multiple failures},
  url = {http://dx.doi.org/10.1002/cpe.3241},
  volume = {26},
  year = {2014},
  tags = {peer-reviewed},
  groups = {SDQ; ARE; HappeLucia; KoziolekAnne}
}
@article{trubiani2014a,
  abstract = {Antipatterns are conceptually similar to patterns in that they document recurring solutions to common design problems. Software Performance Antipatterns document common performance problems in the design as well as their solutions. The definition of performance antipatterns concerns software properties that can include static, dynamic, and deployment aspects. To make use of such knowledge, we propose an approach that helps software architects to identify and solve performance antipatterns. Our approach provides software performance feedback to architects, since it suggests the design alternatives that allow overcoming the detected performance problems. The feedback process may be quite complex since architects may have to assess several design options before achieving the architectural model that best fits the end-user expectations. In order to optimise such process we introduce a ranking methodology that identifies, among a set of detected antipatterns, the ``guilty'' ones, i.e. the antipatterns that more likely contribute to the violation of specific performance requirements. The introduction of our ranking process leads the system to converge towards the desired performance improvement by discarding a consistent part of design alternatives. Four case studies in different application domains have been used to assess the validity of the approach.},
  author = {Catia Trubiani and Anne Koziolek and Vittorio Cortellessa and Ralf Reussner},
  doi = {10.1016/j.jss.2014.03.081},
  issn = {0164-1212},
  journal = {Journal of Systems and Software},
  keywords = {Palladio Architectural Models},
  pages = {141--165},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/trubiani2014a.pdf},
  title = {Guilt-based Handling of Software Performance Antipatterns in {P}alladio Architectural Models},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121214001010},
  volume = {95},
  year = {2014},
  tags = {peer-reviewed},
  groups = {ARE; SDQ; KoziolekAnne; ReussnerRalf}
}
@article{HuHoKoBrKo2014-SOCA-ModelingRuntimeAdaptation,
  author = {Nikolaus Huber and Andr\'{e} van Hoorn and Anne Koziolek and Fabian Brosig and Samuel Kounev},
  doi = {10.1007/s11761-013-0144-4},
  journal = {Service Oriented Computing and Applications (SOCA)},
  number = {1},
  pages = {73--89},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/HuHoKoBrKo2013-SOCA-ModelingRuntimeAdaptation.pdf},
  publisher = {Springer London},
  title = {{Modeling Run-Time Adaptation at the System Architecture Level in Dynamic Service-Oriented Environments}},
  volume = {8},
  year = {2014},
  tags = {peer-reviewed},
  groups = {SDQ; DESCARTES; ARE; HuberNikolaus; KoziolekAnne; BrosigFabian; KounevSamuel}
}
@article{happe2020a,
  title = {Effective measures to foster girls' interest in secondary computer science education},
  author = {Happe, Lucia and Buhnova, Barbora and Koziolek, Anne and Wagner, Ingo},
  journal = {Education and Information Technologies},
  pages = {1--19},
  issn = {1573-7608},
  year = {2020},
  month = {November},
  volume = {1},
  number = {1},
  url = {https://rdcu.be/ceUOT},
  doi = {10.1007/s10639-020-10379-x},
  publisher = {Springer},
  abstract = {The interest of girls in computing drops early during primary and secondary education, with minimal recovery in later education stages. In combination with the growing shortage of qualified computer science personnel, this is becoming a major issue, and also a target of numerous studies that examine measures, interventions, and strategies to boost girls' commitment to computing. Yet, the results of existing studies are difficult to navigate, and hence are being very rarely employed in classrooms. In this paper, we summarize the existing body of knowledge on the effective interventions to recruit and retain girls in computer science education, intending to equip educators with a comprehensive and easy-to-navigate map of interventions recommended in the existing literature. To this end, we perform an aggregated umbrella literature review of 11 existing reviews on the topic, together accumulating joined knowledge from over 800 publications, and formulate the findings in a map of 22 concrete interventions structured in six groups according to their phase and purpose.},
  tags = {peer-reviewed},
  groups = {SDQ; ARE; HappeLucia; KoziolekAnne}
}