2014.bib

@inproceedings{wesner2014,
  author = {Stefan Wesner and Henning Groenda and James Byrne and Sergej Svorobej and Christopher Hauser and J\"{o}rg Domaschka},
  booktitle = {eChallenges e-2014 Conference Proceedings},
  editor = {Paul Cunningham and Miriam Cunningham},
  isbn = {978-1-905824-45-8},
  organization = {IIMC International Information Management Corporation},
  title = {Optimised Cloud Data Centre Operation Supported by Simulation},
  volume = {2014},
  year = {2014}
}
@inproceedings{avritzer2014a,
  abstract = {We present models and metrics for the survivability assessment of distribution power grid networks accounting for the impact of multiple failures due to large storms. The analytical models used to compute the proposed metrics are built on top of three design principles: state space factorization, state aggregation, and initial state conditioning. Using these principles, we build scalable models that are amenable to analytical treatment and efficient numerical solution. Our models capture the impact of using reclosers and tie switches to enable faster service restoration after large storms. We have evaluated the presented models using data from a real power distribution grid impacted by a large storm: Hurricane Sandy. Our empirical results demonstrate that our models are able to efficiently evaluate the impact of storm hardening investment alternatives on customer affecting metrics such as the expected energy not supplied until complete system recovery.},
  author = {Alberto Avritzer and Laura Carnevali and Lucia Happe and Anne Koziolek and Daniel Sadoc Menasche and Marco Paolieri and Sindhu Suresh},
  booktitle = {Quantitative Evaluation of Systems, 11th International Conference, QEST 2014, Florence, Italy, September 8-10, 2014, Proceedings},
  editor = {Norman, Gethin and Sanders, William},
  isbn = {978-3-319-10695-3},
  pages = {345--367},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  subseries = {Theoretical Computer Science and General Issues},
  title = {A Scalable Approach to the Assessment of Storm Impact in Distributed Automation Power Grids},
  doi = {10.1007/978-3-319-10696-0},
  url = {http://www.springer.com/computer/theoretical+computer+science/book/978-3-319-10695-3},
  volume = {8657},
  year = {2014},
  pdf = {https://stlab.dinfo.unifi.it/carnevali/papers/14_ACHKMPS_QEST.pdf}
}
@article{krogmann2014a,
  abstract = {With the increasing adoption of agile software development, the number of problematic projects is also rising. Goals such as the ability to react quickly to change requests are not met, even though agile principles are (ostensibly) being followed. In this article, we summarize recurring experiences from practice in the form of anti-patterns and describe how agile development has repeatedly been practiced too dogmatically or misused as an excuse for poor project organization. These anti-patterns enable readers to check their own projects for similar shortcomings and, where necessary, take action against them.},
  author = {Klaus Krogmann and Matthias Naab and Oliver Hummel},
  journal = {{JAXenter Business Technology}},
  month = {June},
  pages = {29--34},
  title = {{Agile Anti-Patterns -- Warum viele Organisationen weniger agil sind, als sie denken}},
  url = {http://jaxenter.de/Business-Technology/Business-Technology-214-173734},
  volume = {2.14},
  year = {2014}
}
@inproceedings{burger2014a,
  author = {Erik Burger and Aleksandar Toshovski},
  booktitle = {Proceedings of Modellierung 2014},
  day = {21},
  location = {Vienna, Austria},
  month = {March},
  series = {GI-LNI},
  title = {{Difference-based Conformance Checking for Ecore Metamodels}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2014a.pdf},
  volume = {225},
  year = {2014},
  pages = {97--104},
  tags = {Vitruv}
}
@article{burger2014b,
  affiliation = {Karlsruhe Institute of Technology},
  author = {Erik Burger and J{\"o}rg Hen{\ss} and Martin K{\"u}ster and Steffen Kruse and Lucia Happe},
  doi = {10.1007/s10270-014-0413-5},
  editor = {Robert France and Bernhard Rumpe},
  issn = {1619-1374},
  journal = {Software \& Systems Modeling},
  pages = {472--496},
  pdf = {https://sdqweb.ipd.kit.edu/publications/pdfs/burger2014b.pdf},
  publisher = {Springer Berlin / Heidelberg},
  title = {{View-Based Model-Driven Software Development with ModelJoin}},
  volume = {15},
  number = {2},
  year = {2014},
  tags = {Vitruv}
}
@techreport{modeljointechreport2014,
  author = {Erik Burger and J{\"o}rg Hen{\ss} and Steffen Kruse and Martin K{\"u}ster and Andreas Rentschler and Lucia Happe},
  institution = {Karlsruhe Institute of Technology, Faculty of Informatics},
  issn = {2190-4782},
  number = {1},
  series = {Karlsruhe Reports in Informatics},
  title = {{ModelJoin. A Textual Domain-Specific Language for the Combination of Heterogeneous Models}},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000037908},
  urn = {urn:nbn:de:swb:90-379089},
  year = {2014},
  tags = {Vitruv}
}
@phdthesis{burger2014diss,
  address = {Karlsruhe, Germany},
  author = {Erik Burger},
  doi = {10.5445/KSP/1000043437},
  editor = {Ralf Reussner},
  isbn = {978-3-7315-0276-0},
  issn = {1867-0067},
  month = {July},
  publisher = {KIT Scientific Publishing},
  school = {Karlsruhe Institute of Technology},
  series = {The Karlsruhe Series on Software Design and Quality},
  title = {{Flexible Views for View-based Model-driven Development}},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000043437},
  year = {2014},
  tags = {Vitruv}
}
@incollection{happe2014a,
  author = {Lucia Happe and Erik Burger and Max Kramer and Andreas Rentschler and Ralf Reussner},
  booktitle = {Future Business Software -- Current Trends in Business Software Development},
  doi = {10.1007/978-3-319-04144-5},
  editor = {Gino Brunetti and Thomas Feld and Joachim Schnitter and Lutz Heuser and Christian Webel},
  isbn = {978-3-319-04143-8},
  issn = {2196-8705},
  location = {New York, Heidelberg},
  pages = {117--131},
  publisher = {Springer International Publishing},
  series = {Progress in IS},
  tags = {invited},
  title = {{Completion and Extension Techniques for Enterprise Software Performance Engineering}},
  year = {2014}
}
@incollection{happe2014b,
  address = {Dagstuhl, Germany},
  annote = {Keywords: Critical Infrastructures, Smart Grids, Modeling, Randomized Timed and Hybrid Models, Analysis},
  author = {Lucia Happe and Anne Koziolek},
  booktitle = {{Randomized Timed and Hybrid Models for Critical Infrastructures (Dagstuhl Seminar 14031), Dagstuhl Reports}},
  doi = {10.4230/DagRep.4.1.36},
  editor = {Erika {\'A}brah{\'a}m and Alberto Avritzer and Anne Remke and William H. Sanders},
  issn = {2192-5283},
  note = {Issue 1},
  pages = {45--46},
  publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
  title = {A Common Analysis Framework for Smart Distribution Networks Applied to Security and Survivability Analysis (Talk Abstract)},
  url = {http://drops.dagstuhl.de/opus/volltexte/2014/4535},
  urn = {urn:nbn:de:0030-drops-45355},
  volume = {4},
  year = {2014}
}
@inproceedings{heinrich2014a,
  abstract = {iObserve is an approach to integrate model-driven monitoring with design time models of software systems and reuse those models at run time to realize analyses based on the design time model. It is assumed that this reduces the effort to be made to interpret analysis results of a software system.},
  author = {Robert Heinrich and Reiner Jung and Eric Schmieders and Andreas Metzger and Wilhelm Hasselbring and Klaus Pohl and Ralf Reussner},
  booktitle = {DFG Priority Program SPP1593, 4th Workshop},
  month = {November},
  title = {Integrated Observation and Modeling Techniques to Support Adaptation and Evolution of Software Systems},
  url = {http://eprints.uni-kiel.de/27380/},
  year = {2014}
}
@incollection{heger2014a,
  author = {Heger, Christoph and Heinrich, Robert},
  booktitle = {Computer Performance Engineering},
  doi = {10.1007/978-3-319-10885-8_8},
  editor = {Horv\'{a}th, Andr\'{a}s and Wolter, Katinka},
  isbn = {978-3-319-10884-1},
  keywords = {Software Performance Engineering; Solution Implementation Support; Rules; Impact Propagation},
  pages = {104--118},
  publisher = {Springer International Publishing},
  series = {Lecture Notes in Computer Science},
  title = {Deriving Work Plans for Solving Performance and Scalability Problems},
  url = {http://dx.doi.org/10.1007/978-3-319-10885-8_8},
  volume = {8721},
  year = {2014}
}
@incollection{heger2014b,
  author = {Heger, Christoph and Wert, Alexander and Farahbod, Roozbeh},
  booktitle = {The Ninth International Conference on Software Engineering Advances (ICSEA)},
  editor = {Mannaert, Herwig and Lavazza, Luigi and Oberhauser, Roy and Kajko-Mattsson, Mira and Gebhart, Michael},
  isbn = {978-1-61208-367-4},
  keywords = {Performance; Software Engineering; Software Measurement; Performance Evaluation},
  pages = {598--608},
  pdf = {http://www.thinkmind.org/download.php?articleid=icsea_2014_22_20_10210},
  publisher = {IARIA},
  title = {Vergil: Guiding Developers Through Performance and Scalability Inferno},
  year = {2014}
}
@inproceedings{hinkel2014,
  abstract = {{To increase the development productivity, possibilities for reuse, maintainability and quality of complex model transformations, modularization techniques are indispensable. Component-Based Software Engineering targets the challenge of modularity and is well-established in languages like Java or C\# with component models like .NET, EJB or OSGi. There are still many challenging barriers to overcome in current model transformation languages to provide comparable support for component-based development of model transformations. Therefore, this paper provides a pragmatic solution based on NMF Transformations, a model transformation language realized as an internal DSL embedded in C\#. An internal DSL can take advantage of the whole expressiveness and tooling build for the well established and known host language. In this work, we use the component model of the .NET platform to represent reusable components of model transformations to support internal and external model transformation composition. The transformation components are hidden behind transformation rule interfaces that can be exchanged dynamically through configuration. Using this approach we illustrate the possibilities to tackle typical issues of integrity and versioning, such as detecting versioning conflicts for model transformations.}},
  author = {Hinkel, Georg and Happe, Lucia},
  booktitle = {{Proceedings of the 1st International Workshop on Model-Driven Engineering for Component-Based Software Systems co-located with ACM/IEEE 17th International Conference on Model Driven Engineering Languages \& Systems (MoDELS 2014)}},
  editor = {Federico Ciccozzi and Massimo Tivoli and Jan Carlson},
  pdf = {http://ceur-ws.org/Vol-1281/1.pdf},
  publisher = {CEUR-WS.org},
  series = {CEUR Workshop Proceedings},
  tags = {refereed,workshop,nmf},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/hinkel2014_slides.pdf},
  title = {{Using component frameworks for model transformations by an internal DSL}},
  issn = {1613-0073},
  pages = {6--15},
  volume = {1281},
  year = {2014}
}
@article{HuKoAm2013-CCPE-WorkloadClassificationAndForecasting,
  abstract = {As modern enterprise software systems become increasingly dynamic, workload forecasting techniques are gaining in importance as a foundation for online capacity planning and resource management. Time series analysis covers a broad spectrum of methods to calculate workload forecasts based on history monitoring data. Related work in the field of workload forecasting mostly concentrates on evaluating specific methods and their individual optimisation potential or on predicting Quality-of-Service (QoS) metrics directly. As a basis, we present a survey on established forecasting methods of the time series analysis concerning their benefits and drawbacks and group them according to their computational overheads. In this paper, we propose a novel self-adaptive approach that selects suitable forecasting methods for a given context based on a decision tree and direct feedback cycles together with a corresponding implementation. The user needs to provide only his general forecasting objectives. In several experiments and case studies based on real world workload traces, we show that our implementation of the approach provides continuous and reliable forecast results at run-time. The results of this extensive evaluation show that the relative error of the individual forecast points is significantly reduced compared to statically applied forecasting methods, e.g. in an exemplary scenario on average by 37%. In a case study, between 55% and 75% of the violations of a given service level agreement can be prevented by applying proactive resource provisioning based on the forecast results of our implementation.},
  author = {Nikolas Roman Herbst and Nikolaus Huber and Samuel Kounev and Erich Amrehn},
  doi = {10.1002/cpe.3224},
  issn = {1532-0634},
  journal = {Concurrency and Computation: Practice and Experience},
  note = {Special Issue with extended versions of the best papers from ICPE 2013},
  publisher = {John Wiley and Sons, Ltd.},
  keywords = {workload forecasting, arrival rate, time series analysis, proactive resource provisioning, assurance of service level objectives},
  title = {{Self-Adaptive Workload Classification and Forecasting for Proactive Resource Provisioning}},
  url = {http://dx.doi.org/10.1002/cpe.3224},
  year = {2014}
}
@inproceedings{KiHeKo2014-LT-DLIM,
  abstract = {{Today's software systems are expected to deliver reliable performance under highly variable load intensities while at the same time making efficient use of dynamically allocated resources. Conventional benchmarking frameworks provide limited support for emulating such highly variable and dynamic load profiles and workload scenarios. Industrial benchmarks typically use workloads with constant or stepwise increasing load intensity, or they simply replay recorded workload traces. Based on this observation, we identify the need for means allowing flexible definition of load profiles and address this by introducing two meta-models at different abstraction levels. At the lower abstraction level, the Descartes Load Intensity Meta-Model (DLIM) offers a structured and accessible way of describing the load intensity over time by editing and combining mathematical functions. The High-Level Descartes Load Intensity Meta-Model (HLDLIM) allows the description of load variations using few defined parameters that characterize the seasonal patterns, trends, bursts and noise parts. We demonstrate that both meta-models are capable of capturing real-world load profiles with acceptable accuracy through comparison with a real life trace.}},
  acmid = {2577037},
  address = {New York, NY, USA},
  author = {J\'{o}akim Gunnarson von Kistowski and Nikolas Roman Herbst and Samuel Kounev},
  booktitle = {Proceedings of the 3rd International Workshop on Large-Scale Testing (LT 2014), co-located with the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  day = {22},
  doi = {10.1145/2577036.2577037},
  isbn = {978-1-4503-2762-6},
  keywords = {benchmarking, modeling, workload},
  location = {Dublin, Ireland},
  month = {March},
  numpages = {4},
  pages = {1--4},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KiHeKo2014-LT-DLIM.pdf},
  publisher = {ACM},
  slides = {http://lt2014.eecs.yorku.ca/talks/Joakim_LTslides.pdf},
  title = {{Modeling Variations in Load Intensity over Time}},
  url = {http://doi.acm.org/10.1145/2577036.2577037},
  year = {2014}
}
@inproceedings{KiHeKo2014-ICPEDemo-LIMBO,
  abstract = {{Modern software systems are expected to deliver reliable performance under highly variable load 	intensities while at the same time making efficient use of dynamically allocated resources. Conventional benchmarking frameworks provide limited support for emulating such highly variable and dynamic load profiles and workload scenarios. Industrial benchmarks typically use workloads with constant or stepwise increasing load intensity, or they simply replay recorded workload traces. In this paper, we present LIMBO - an Eclipse-based tool for modeling variable load intensity profiles based on the Descartes Load Intensity Model as an underlying modeling formalism.}},
  acmid = {2576092},
  address = {New York, NY, USA},
  author = {J\'{o}akim Gunnarson von Kistowski and Nikolas Roman Herbst and Samuel Kounev},
  booktitle = {Proceedings of the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  day = {22--26},
  doi = {10.1145/2568088.2576092},
  isbn = {978-1-4503-2733-6},
  keywords = {load intensity variation, load profile, meta-modeling, model extraction, open workloads, transformation},
  location = {Dublin, Ireland},
  month = {March},
  numpages = {2},
  pages = {225--226},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KiHeKo2014-ICPEDemo-LIMBO.pdf},
  publisher = {ACM},
  series = {ICPE '14},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/KiHeKo2014-ICPEDemo-LIMBO-Poster.pdf},
  title = {{LIMBO: A Tool For Modeling Variable Load Intensities}},
  titleaddon = {{(Demonstration Paper)}},
  url = {http://doi.acm.org/10.1145/2568088.2576092},
  year = {2014}
}
@inproceedings{WeHeGrKo2014-HotTopicsWS-ElaBench,
  abstract = {{Auto-scaling features offered by today's cloud infrastructures provide increased flexibility especially for customers that experience high variations in the load intensity over time. However, auto-scaling features introduce new system quality attributes when considering their accuracy, timing, and boundaries. Therefore, distinguishing between different offerings has become a complex task, as it is not yet supported by reliable metrics and measurement approaches. In this paper, we discuss shortcomings of existing approaches for measuring and evaluating elastic behavior and propose a novel benchmark methodology specifically designed for evaluating the elasticity aspects of modern cloud platforms. The benchmark is based on open workloads with realistic load variation profiles that are calibrated to induce identical resource demand variations independent of the underlying hardware performance. Furthermore, we propose new metrics that capture the accuracy of resource allocations and de-allocations, as well as the timing aspects of an auto-scaling mechanism explicitly.}},
  author = {Andreas Weber and Nikolas Roman Herbst and Henning Groenda and Samuel Kounev},
  booktitle = {Proceedings of the 2nd International Workshop on Hot Topics in Cloud Service Scalability (HotTopiCS 2014), co-located with the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  day = {22},
  keywords = {benchmarking, metrics, cloud computing, resource elasticity, load profile},
  location = {Dublin, Ireland},
  month = {March},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/WeHeGrKo2014-HotTopicsWS-ElaBench.pdf},
  publisher = {ACM},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/WeHeGrKo2014-HotTopicsWS-ElaBench-Slides.pdf},
  title = {{Towards a Resource Elasticity Benchmark for Cloud Environments}},
  year = {2014}
}
@article{kuester2014a,
  abstract = {For the evolution of software, understanding of the context, i.e. history and rationale of the existing artifacts, is crucial to avoid ``ignorant surgery'', i.e. modifications to the software without understanding its design intent. Existing works on recording architecture decisions have mostly focused on architectural models. We extend this to code models, and introduce a catalog of code decisions that can be found in object-oriented systems. With the presented approach, we make it possible to record design decisions that are concerned with the decomposition of the system into interfaces, classes, and references between them, or how exceptions are handled. Furthermore, we indicate how decisions on the usage of Java frameworks (e.g. for dependency injection) can be recorded. All decision types presented are supplied with OCL-constraints to check the validity of the decision based on the linked code model.},
  author = {Martin K{\"u}ster and Klaus Krogmann},
  journal = {Softwaretechnik-Trends},
  month = {May},
  number = {2},
  pages = {58--59},
  title = {{Checkable Code Decisions to Support Software Evolution}},
  url = {http://pi.informatik.uni-siegen.de/stt/34_2/01_Fachgruppenberichte/WSRDFF/wsre_dff_2014-05_submission_d3.pdf},
  volume = {34},
  year = {2014}
}
@inproceedings{klatt2014a,
  address = {Kiel, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Michael Langhammer},
  booktitle = {{Proceedings of Software Engineering 2014 (SE2014)}},
  editor = {Wilhelm Hasselbring and Nils Christian Ehmke},
  isbn = {978-388579-621-3},
  issn = {1617-5468},
  month = {January},
  pages = {165--170},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014a.pdf},
  series = {Lecture Notes in Informatics (LNI)},
  title = {{Individual Code-Analyzes in Practice}},
  volume = {P-227},
  year = {2014}
}
@inproceedings{klatt2014b,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Christian Wende},
  booktitle = {16th Workshop Software-Reengineering (WSRE'14)},
  month = {April},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014b.pdf},
  title = {{Consolidating Customized Product Copies to Software Product Lines}},
  year = {2014}
}
@inproceedings{klatt2014c,
  address = {Bad Honnef, Germany},
  author = {Benjamin Klatt and Klaus Krogmann and Volker Kuttruff},
  booktitle = {16th Workshop Software-Reengineering (WSRE'14)},
  month = {April},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014c.pdf},
  title = {{Developing Stop Word Lists for Natural Language Program Analysis}},
  year = {2014}
}
@inproceedings{klatt2014d,
  abstract = {To cope with project constraints, copying and customizing existing software products is a typical practice to flexibly serve customer-specific needs. In the long term, this practice becomes a limitation for growth due to redundant maintenance efforts or wasted synergy and cross selling potentials. To mitigate this limitation, customized copies need to be consolidated into a single, variable code base of a software product line (SPL). However, consolidation is tedious as one must identify and correlate differences between the copies to design future variability. For one, existing consolidation approaches lack support of the implementation level. In addition, approaches in the fields of difference analysis and feature detection are not sufficiently integrated for finding relationships between code modifications. In this paper, we present remedy to this problem by integrating a difference analysis with a program dependency analysis based on Program Dependency Graphs (PDG) to reduce the effort of consolidating developers when identifying dependent differences and deriving clusters to consider in their variability design. We successfully evaluated our approach on variants of the open source ArgoUML modeling tool, reducing the manual review effort about 72\% with a precision of 99\% and a recall of 80\%. We further proved its industrial applicability in a case study on a commercial relationship management application.},
  address = {Victoria, Canada},
  author = {Benjamin Klatt and Klaus Krogmann and Christoph Seidl},
  booktitle = {IEEE 30th International Conference on Software Maintenance and Evolution (ICSME'14)},
  doi = {10.1109/ICSME.2014.81},
  issn = {1063-6773},
  keywords = {Unified Modeling Language;customer relationship management;feature extraction;product customisation;program diagnostics;public domain software;software product lines;PDG;code modifications;commercial relationship management application;cross selling potentials;customer-specific needs;customized product copies;difference analysis;feature detection;industrial applicability;open source ArgoUML modeling tool;program dependency analysis;program dependency graphs;project constraints;redundant maintenance efforts;software product line;wasted synergy;Algorithm design and analysis;Context;Merging;Object oriented modeling;Software;Software algorithms;Unified modeling language;program dependencies;reengineering;software engineering;software product lines;variability},
  month = {September},
  pages = {496--500},
  title = {{Program Dependency Analysis for Consolidating Customized Product Copies}},
  year = {2014}
}
@article{klatt2014e,
  abstract = {When implementing a software, developers express conceptual knowledge (e.g. about a specific feature) not only in program language syntax and semantics but also in linguistic information stored in identifiers (e.g. method or class names). Based on this habit, Natural Language Program Analysis (NLPA) is used to improve many different areas in software engineering such as code recommendations or program analysis. Simplified, NLPA algorithms collect identifier names and apply term processing such as camel case splitting (i.e. "MyIdentifier" to "My" and "Identifier") or stemming (i.e. "records" to "record") to subsequently perform further analyzes. In our research context, we search for code locations sharing similar terms to link them with each other. In such types of analysis, filtering stop words is essential to reduce the number of useless links.},
  author = {Benjamin Klatt and Klaus Krogmann and Volker Kuttruff},
  journal = {Softwaretechnik-Trends},
  month = {May},
  number = {2},
  pages = {85--86},
  title = {{Developing Stop Word Lists for Natural Language Program Analysis}},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/klatt2014c.pdf},
  volume = {34},
  year = {2014}
}
@article{klatt2014f,
  abstract = {Reusing existing software solutions as initial point for new projects is a frequent approach in software business. Copying existing code and adapting it to customer-specific needs allows for flexible and efficient software customization in the short term. But in the long term, a Software Product Line (SPL) approach with a single code base and explicitly managed variability reduces maintenance effort and eases instantiation of new products.},
  author = {Benjamin Klatt and Klaus Krogmann and Christian Wende},
  journal = {Softwaretechnik-Trends},
  month = {May},
  number = {2},
  pages = {64--65},
  title = {{Consolidating Customized Product Copies to Software Product Lines}},
  url = {http://pi.informatik.uni-siegen.de/stt/34_2/01_Fachgruppenberichte/WSRDFF/wsre_dff_2014-08_submission_w8.pdf},
  volume = {34},
  year = {2014}
}
@techreport{KoBrHu2014-TechReport-DMM,
  abstract = {{This technical report introduces the Descartes Modeling Language (DML), a new architecture-level modeling language for modeling Quality-of-Service (QoS) and resource management related aspects of modern dynamic IT systems, infrastructures and services. DML is designed to serve as a basis for self-aware resource management during operation ensuring that system QoS requirements are continuously satisfied while infrastructure resources are utilized as efficiently as possible.}},
  author = {Samuel Kounev and Fabian Brosig and Nikolaus Huber},
  http = {http://opus.bibliothek.uni-wuerzburg.de/frontdoor/index/index/docId/10488},
  institution = {{Department of Computer Science, University of Wuerzburg}},
  month = {October},
  pages = {91},
  pdf = {http://opus.bibliothek.uni-wuerzburg.de/files/10488/DML-TechReport-1.0.pdf},
  title = {{The Descartes Modeling Language}},
  url = {http://www.descartes-research.net/dml/},
  year = {2014}
}
@article{koziolek2014a,
  acmid = {2567531},
  address = {New York, NY, USA},
  author = {Koziolek, Heiko and Becker, Steffen and Happe, Jens and Tuma, Petr and de Gooijer, Thijmen},
  doi = {10.1145/2567529.2567531},
  issn = {0163-5999},
  issue_date = {December 2013},
  journal = {SIGMETRICS Perform. Eval. Rev.},
  month = {January},
  number = {3},
  numpages = {10},
  pages = {2--11},
  publisher = {ACM},
  title = {Towards Software Performance Engineering for Multicore and Manycore Systems},
  url = {http://doi.acm.org/10.1145/2567529.2567531},
  volume = {41},
  year = {2014}
}
@inproceedings{kramer2014a,
  author = {Max E. Kramer},
  booktitle = {Software Engineering 2014 -- Fachtagung des GI-Fachbereichs Softwaretechnik},
  editor = {Wilhelm Hasselbring and Nils Christian Ehmke},
  isbn = {978-388579-621-3},
  issn = {1617-5468},
  location = {Kiel, Germany},
  note = {Doctoral Symposium},
  pages = {233--236},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/kramer2014a.pdf},
  publisher = {Gesellschaft f\"{u}r Informatik e.V.\ (GI)},
  series = {GI Lecture Notes in Informatics},
  tags = {doctoral-symposium, Vitruv},
  title = {Synchronizing Heterogeneous Models in a View-Centric Engineering Approach},
  url = {http://subs.emis.de/LNI/Proceedings/Proceedings227/P-227.pdf},
  volume = {227},
  year = {2014}
}
@inproceedings{kramer2014b,
  acmid = {2631676},
  address = {New York, NY, USA},
  articleno = {7},
  author = {Kramer, Max E. and Langhammer, Michael},
  booktitle = {Proceedings of the 2nd Workshop on View-Based, Aspect-Oriented and Orthographic Software Modelling},
  doi = {10.1145/2631675.2631676},
  isbn = {978-1-4503-2900-2},
  keywords = {Model-Driven Software Engineering, Software Architectures},
  location = {York, United Kingdom},
  numpages = {4},
  pages = {7:7--7:10},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/kramer2014b.pdf},
  publisher = {ACM},
  series = {VAO '14},
  tags = {workshop, Vitruv},
  title = {Proposal for a Multi-View Modelling Case Study: Component-Based Software Engineering with UML, Plug-ins, and Java},
  url = {http://doi.acm.org/10.1145/2631675.2631676},
  year = {2014}
}
@misc{kramer2014c,
  author = {Max E. Kramer and Anton Hergenr\"{o}der and Martin Hecker and Simon Greiner and Kaibin Bao},
  howpublished = {Poster at the 35th IEEE Symposium on Security and Privacy},
  location = {San Jose, California, USA},
  pdf = {http://www.ieee-security.org/TC/SP2014/posters/KRAME.pdf},
  tags = {poster, Vitruv},
  title = {Specification and Verification of Confidentiality in Component-Based Systems},
  url = {http://www.ieee-security.org/TC/SP2014/posters/KRAME.pdf},
  year = {2014}
}
@incollection{langhammer2014a,
  author = {Langhammer, Michael and Kramer, Max E.},
  booktitle = {Fachgruppenbericht des 2. Workshops ``Modellbasierte und Modellgetriebene Softwaremodernisierung''},
  issn = {0720-8928},
  pdf = {http://pi.informatik.uni-siegen.de/stt/34_2/01_Fachgruppenberichte/MMSM2014/MMSM2014_Paper8.pdf},
  publisher = {Gesellschaft f{\"u}r Informatik e.V.\ (GI)},
  series = {Softwaretechnik-Trends},
  tags = {Vitruv},
  title = {{Determining the Intent of Code Changes to Sustain Attached Model Information During Code Evolution}},
  url = {http://pi.informatik.uni-siegen.de/stt/34_2},
  volume = {34},
  number = {2},
  year = {2014}
}
@article{becker2014c,
  author = {Becker, Steffen and Hasselbring, Wilhelm and van Hoorn, Andre and Kounev, Samuel and Reussner, Ralf and others},
  address = {Stuttgart, Germany},
  publisher = {Universit{\"a}t Stuttgart},
  title = {Proceedings of the 2014 Symposium on Software Performance (SOSP'14): Joint Descartes/Kieker/Palladio Days},
  year = {2014}
}
@inproceedings{martinec2014a,
  acmid = {2568096},
  address = {New York, NY, USA},
  author = {Martinec, Tom\'{a}\c{s} and Marek, Luk\'{a}\c{s} and Steinhauser, Anton\'{\i}n and T\r{u}ma, Petr and Noorshams, Qais and Rentschler, Andreas and Reussner, Ralf},
  booktitle = {Proceedings of the 5th ACM/SPEC International Conference on Performance Engineering},
  doi = {10.1145/2568088.2568096},
  isbn = {978-1-4503-2733-6},
  keywords = {JMS, measurement, modeling, performance analysis, software performance},
  location = {Dublin, Ireland},
  numpages = {12},
  pages = {123--134},
  publisher = {ACM},
  series = {ICPE '14},
  title = {Constructing Performance Model of JMS Middleware Platform},
  url = {http://doi.acm.org/10.1145/2568088.2568096},
  year = {2014}
}
@article{menasche2014a,
  abstract = {Smart grids are fostering a paradigm shift in the realm of power distribution systems. Whereas traditionally different components of the power distribution system have been provided and analyzed by different teams through different lenses, smart grids require a unified and holistic approach that takes into consideration the interplay of communication reliability, energy backup, distribution automation topology, energy storage, and intelligent features such as automated fault detection, isolation, and restoration (FDIR) and demand response. In this paper, we present an analytical model and metrics for the survivability assessment of the distribution power grid network. The proposed metrics extend the system average interruption duration index, accounting for the fact that after a failure, the energy demand and supply will vary over time during a multi-step recovery process. The analytical model used to compute the proposed metrics is built on top of three design principles: state space factorization, state aggregation, and initial state conditioning. Using these principles, we reduce a Markov chain model with large state space cardinality to a set of much simpler models that are amenable to analytical treatment and efficient numerical solution. In case demand response is not integrated with FDIR, we provide closed form solutions to the metrics of interest, such as the mean time to repair a given set of sections. Under specific independence assumptions, we show how the proposed methodology can be adapted to account for multiple failures. We have evaluated the presented model using data from a real power distribution grid, and we have found that survivability of distribution power grids can be improved by the integration of the demand response feature with automated FDIR approaches. Our empirical results indicate the importance of quantifying survivability to support investment decisions at different parts of the power grid distribution network.},
  author = {Menasch\'{e}, Daniel Sadoc and Avritzer, Alberto and Suresh, Sindhu and Le\~{a}o, Rosa M. and de Souza e Silva, Edmundo and Diniz, Morganna and Trivedi, Kishor and Happe, Lucia and Koziolek, Anne},
  doi = {10.1002/cpe.3241},
  issn = {1532-0634},
  journal = {Concurrency and Computation: Practice and Experience},
  keywords = {survivability, transient analysis, smart grid, fault tolerance, demand response, reliability metrics, FDIR},
  number = {12},
  pages = {1949--1974},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/menasche2014a.pdf},
  title = {Assessing survivability of smart grid distribution network designs accounting for multiple failures},
  url = {http://dx.doi.org/10.1002/cpe.3241},
  volume = {26},
  year = {2014},
  tags = {peer-reviewed}
}
@inproceedings{noorshams2014c,
  author = {Qais Noorshams and Kiana Rostami and Samuel Kounev and Ralf Reussner},
  booktitle = {Proceedings of the IEEE 22nd International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems},
  day = {9--11},
  month = {September},
  location = {Paris, France},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/noorshams2014c.pdf},
  series = {MASCOTS '14},
  title = {{Modeling of I/O Performance Interference in Virtualized Environments with Queueing Petri Nets}},
  tags = {refereed},
  year = {2014}
}
@inproceedings{noorshams2014b,
  acmid = {2602475},
  address = {New York, NY, USA},
  author = {Noorshams, Qais and Reeb, Roland and Rentschler, Andreas and Kounev, Samuel and Reussner, Ralf},
  booktitle = {Proceedings of the 17th International ACM Sigsoft Symposium on Component-based Software Engineering},
  doi = {10.1145/2602458.2602475},
  isbn = {978-1-4503-2577-6},
  keywords = {i/o, performance, prediction, software architecture, statistical model, storage},
  location = {Marcq-en-Bareul, France},
  note = {Acceptance Rate (Full Paper): 14/62 = 23\%.},
  numpages = {10},
  pages = {45--54},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/noorshams2014b.pdf},
  publisher = {ACM},
  series = {CBSE '14},
  title = {Enriching Software Architecture Models with Statistical Models for Performance Prediction in Modern Storage Environments},
  url = {http://doi.acm.org/10.1145/2602458.2602475},
  year = {2014}
}
@inproceedings{noorshams2014a,
  author = {Qais Noorshams and Axel Busch and Andreas Rentschler and Dominik Bruhn and Samuel Kounev and Petr T\r{u}ma and Ralf Reussner},
  booktitle = {34th IEEE International Conference on Distributed Computing Systems Workshops (ICDCS 2014 Workshops). 4th International Workshop on Data Center Performance, DCPerf '14},
  doi = {10.1109/ICDCSW.2014.26},
  location = {Madrid, Spain},
  pages = {88--93},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/noorshams2014a.pdf},
  title = {{Automated Modeling of I/O Performance and Interference Effects in Virtualized Storage Systems}},
  url = {http://dx.doi.org/10.1109/ICDCSW.2014.26},
  year = {2014}
}
@inproceedings{rentschler2013b,
  author = {Andreas Rentschler and Per Sterner},
  booktitle = {{Joint Proceedings of MODELS'13 Invited Talks, Demonstration Session, Poster Session, and ACM Student Research Competition co-located with the 16th International Conference on Model Driven Engineering Languages and Systems (MODELS '13), Miami, USA, September 29 - October 4, 2013}},
  editor = {Yan Liu and Steffen Zschaler},
  issn = {1613-0073},
  month = {January},
  pages = {36--40},
  pdf = {http://ceur-ws.org/Vol-1115/demo7.pdf},
  publisher = {CEUR-WS.org},
  series = {CEUR Workshop Proceedings},
  title = {{Interactive Dependency Graphs for Model Transformation Analysis}},
  url = {http://nbn-resolving.de/urn:nbn:de:0074-1115-4},
  volume = {1115},
  year = {2014}
}
@inproceedings{rentschler2014a,
  acmid = {2577094},
  address = {New York, NY, USA},
  author = {Andreas Rentschler and Dominik Werle and Qais Noorshams and Lucia Happe and Ralf Reussner},
  booktitle = {Proceedings of the 13th International Conference on Modularity (AOSD '14), Lugano, Switzerland, April 22 - 26, 2014},
  doi = {10.1145/2577080.2577094},
  isbn = {978-1-450-32772-5},
  month = {April},
  note = {Acceptance Rate: 35.0\%},
  numpages = {12},
  pages = {217--228},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rentschler2014a.pdf},
  publisher = {ACM},
  title = {{Designing Information Hiding Modularity for Model Transformation Languages}},
  url = {http://doi.acm.org/10.1145/2577080.2577094},
  year = {2014}
}
@inproceedings{rentschler2014b,
  author = {Andreas Rentschler and Dominik Werle and Qais Noorshams and Lucia Happe and Ralf Reussner},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  booktitle = {Proceedings of the 3rd Workshop on the Analysis of Model Transformations co-located with the 17th International Conference on Model Driven Engineering Languages and Systems (AMT@MODELS '14), Valencia, Spain, September 29, 2014},
  editor = {Benoit Baudry and J{\"u}rgen Dingel and Levi Lucio and Hans Vangheluwe},
  issn = {1613-0073},
  month = {October},
  pages = {4--13},
  pdf = {http://ceur-ws.org/Vol-1277/1.pdf},
  publisher = {CEUR-WS.org},
  series = {CEUR Workshop Proceedings},
  title = {{Remodularizing Legacy Model Transformations with Automatic Clustering Techniques}},
  url = {http://nbn-resolving.de/urn:nbn:de:0074-1277-5},
  volume = {1277},
  year = {2014}
}
@mastersthesis{seifermann2014a,
  author = {Stephan Seifermann},
  school = {Karlsruhe Institute of Technology (KIT), Germany},
  title = {Model-Driven Co-Evolution of Contracts, Unit-Tests and Source-Code},
  year = {2014},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/seifermann2014a.pdf}
}
@article{trubiani2014a,
  abstract = {Antipatterns are conceptually similar to patterns in that they document recurring solutions to common design problems. Software Performance Antipatterns document common performance problems in the design as well as their solutions. The definition of performance antipatterns concerns software properties that can include static, dynamic, and deployment aspects. To make use of such knowledge, we propose an approach that helps software architects to identify and solve performance antipatterns. Our approach provides software performance feedback to architects, since it suggests the design alternatives that allow overcoming the detected performance problems. The feedback process may be quite complex since architects may have to assess several design options before achieving the architectural model that best fits the end-user expectations. In order to optimise such process we introduce a ranking methodology that identifies, among a set of detected antipatterns, the ``guilty'' ones, i.e. the antipatterns that more likely contribute to the violation of specific performance requirements. The introduction of our ranking process leads the system to converge towards the desired performance improvement by discarding a consistent part of design alternatives. Four case studies in different application domains have been used to assess the validity of the approach.},
  author = {Catia Trubiani and Anne Koziolek and Vittorio Cortellessa and Ralf Reussner},
  doi = {10.1016/j.jss.2014.03.081},
  issn = {0164-1212},
  journal = {Journal of Systems and Software},
  keywords = {Palladio Architectural Models},
  pages = {141--165},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/trubiani2014a.pdf},
  title = {Guilt-based Handling of Software Performance Antipatterns in {P}alladio Architectural Models},
  url = {http://www.sciencedirect.com/science/article/pii/S0164121214001010},
  volume = {95},
  year = {2014},
  tags = {peer-reviewed}
}
@inproceedings{wert2014automatic,
  author = {Alexander Wert and Marius Oehler and Christoph Heger and Roozbeh Farahbod},
  booktitle = {Proceedings of the 10th International Conference on Quality of Software Architectures},
  location = {Lille, France},
  note = {Acceptance Rate: 27\%},
  series = {QoSA '14},
  title = {{Automatic Detection of Performance Anti-patterns in Inter-component Communications}},
  url = {http://dl.acm.org/citation.cfm?id=2602579},
  year = {2014}
}
@inproceedings{wohlrab2014a,
  abstract = {To meet end-user performance expectations, precise performance requirements are needed during development and testing, e.g., to conduct detailed performance and load tests. However, in practice, several factors complicate performance requirements elicitation: lacking skills in performance requirements engineering, outdated or unavailable functional specifications and architecture models, the specification of the system's context, lack of experience to collect good performance requirements in an industrial setting with very limited time, etc. From the small set of available non-functional requirements engineering methods, no method exists that alone leads to precise and complete performance requirements with feasible effort and which has been reported to work in an industrial setting. In this paper, we present our experiences in combining existing requirements engineering methods into a performance requirements method called PROPRE. It has been designed to require no up-to-date system documentation and to be applicable with limited time and effort. We have successfully applied PROPRE in an industrial case study from the process automation domain. Our lessons learned show that the stakeholders gathered good performance requirements which now improve performance testing.},
  author = {Rebekka Wohlrab and Thijmen de Gooijer and Anne Koziolek and Steffen Becker},
  booktitle = {Proceedings of the 22nd IEEE International Requirements Engineering Conference (RE)},
  doi = {10.1109/RE.2014.6912285},
  keywords = {formal specification;program testing;software architecture;PROPRE;RE methods;architecture models;end-user performance expectations;functional specifications;industrial setting;nonfunctional requirements engineering methods;performance requirements elicitation;performance requirements engineering;performance testing;process automation domain;software development;system context specification;Adaptation models;Context;Documentation;Measurement;Testing;Throughput;Time factors},
  month = {August},
  pages = {344--353},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/wohlrab2014a.pdf},
  title = {Experience of Pragmatically Combining {RE} Methods for Performance Requirements in Industry},
  year = {2014}
}
@article{KrMoKo2013-SciCo-MetricsAndTechniquesForPerformanceIsolation,
  author = {Rouven Krebs and Christof Momm and Samuel Kounev},
  journal = {Science of Computer Programming},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KrMoKo2013-SciCo-MetricsAndTechniquesForPerformanceIsolation.pdf},
  publisher = {Elsevier B.V.},
  title = {{Metrics and Techniques for Quantifying Performance Isolation in Cloud Environments}},
  volume = {90, Part B},
  pages = {116--134},
  year = {2014},
  note = {Special Issue on Component-Based Software Engineering and Software Architecture},
  issn = {0167-6423},
  doi = {10.1016/j.scico.2013.08.003},
  url = {http://www.sciencedirect.com/science/article/pii/S0167642313001962},
  abstract = {The cloud computing paradigm enables the provision of cost efficient IT-services by leveraging economies of scale and sharing data center resources efficiently among multiple independent applications and customers. However, the sharing of resources leads to possible interference between users and performance problems are one of the major obstacles for potential cloud customers. Consequently, it is one of the primary goals of cloud service providers to have different customers and their hosted applications isolated as much as possible in terms of the performance they observe. To make different offerings, comparable with regards to their performance isolation capabilities, a representative metric is needed to quantify the level of performance isolation in cloud environments. Such a metric should allow to measure externally by running benchmarks from the outside treating the cloud as a black box. In this article, we propose three different types of novel metrics for quantifying the performance isolation of cloud-based systems. We consider four new approaches to achieve performance isolation in Software-as-a-Service (SaaS) offerings and evaluate them based on the proposed metrics as part of a simulation-based case study. To demonstrate the effectiveness and practical applicability of the proposed metrics for quantifying the performance isolation in various scenarios, we present a second case study evaluating performance isolation of the hypervisor Xen.}
}
@article{BrHuKo2013-SciCo-SoftwarePerformanceAbstractions,
  author = {Fabian Brosig and Nikolaus Huber and Samuel Kounev},
  doi = {10.1016/j.scico.2013.06.004},
  journal = {Science of Computer Programming},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/BrHuKo2013-SciCo-SoftwarePerformanceAbstractions.pdf},
  publisher = {Elsevier},
  title = {{Architecture-Level Software Performance Abstractions for Online Performance Prediction}},
  url = {http://authors.elsevier.com/sd/article/S0167642313001421},
  year = {2014},
  volume = {90, Part B},
  pages = {71--92},
  issn = {0167-6423}
}
@article{HuHoKoBrKo2014-SOCA-ModelingRuntimeAdaptation,
  author = {Nikolaus Huber and Andr\'{e} van Hoorn and Anne Koziolek and Fabian Brosig and Samuel Kounev},
  doi = {10.1007/s11761-013-0144-4},
  journal = {Service Oriented Computing and Applications Journal (SOCA)},
  number = {1},
  pages = {73--89},
  pdf = {http://sdqweb.ipd.kit.edu/publications/descartes-pdfs/HuHoKoBrKo2013-SOCA-ModelingRuntimeAdaptation.pdf},
  publisher = {Springer London},
  title = {{Modeling Run-Time Adaptation at the System Architecture Level in Dynamic Service-Oriented Environments}},
  volume = {8},
  year = {2014},
  tags = {peer-reviewed}
}
@inproceedings{Durdik2014a,
  author = {Zoya Durdik and Ralf Reussner},
  booktitle = {{Proceedings of Software Engineering (SE2014)}},
  title = {{On the Appropriate Rationale for Using Design Patterns and Pattern Documentation}},
  year = {2014}
}
@mastersthesis{stier2014a,
  address = {Am Fasanengarten 5, 76131 Karlsruhe, Germany},
  author = {Christian Stier},
  month = {January},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/Stier2014a.pdf},
  school = {Karlsruhe Institute of Technology (KIT)},
  title = {{Transaction-Aware Software Performance Prediction}},
  year = {2014}
}
@inproceedings{GoBrKo2014-PerformanceQueries,
  address = {New York, NY, USA},
  author = {Gorsler, Fabian and Brosig, Fabian and Kounev, Samuel},
  booktitle = {Proceedings of the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  location = {Dublin, Ireland},
  note = {Acceptance Rate (Full Paper): 29\%.},
  publisher = {ACM},
  title = {Performance Queries for Architecture-Level Performance Models},
  year = {2014}
}
@inproceedings{SpCaZhKo2014-ICPEDemo-LibReDE,
  abstract = {When creating a performance model, it is necessary to quantify the amount of resources consumed by an application serving individual requests. In distributed enterprise systems, these resource demands usually cannot be observed directly, their estimation is a major challenge. Different statistical approaches to resource demand estimation based on monitoring data have been proposed, e.g., using linear regression or Kalman filtering techniques. In this paper, we present LibReDE, a library of ready-to-use implementations of approaches to resource demand estimation that can be used for online and offline analysis. It is the first publicly available tool for this task and aims at supporting performance engineers during performance model construction. The library enables the quick comparison of the estimation accuracy of different approaches in a given context and thus helps to select an optimal one.},
  author = {Simon Spinner and Giuliano Casale and Xiaoyun Zhu and Samuel Kounev},
  booktitle = {Proceedings of the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  day = {22--26},
  location = {Dublin, Ireland},
  month = {March},
  publisher = {ACM},
  title = {{LibReDE: A Library for Resource Demand Estimation}},
  titleaddon = {{(Demonstration Paper)}},
  year = {2014}
}
@inproceedings{merkle2014a,
  address = {New York, NY, USA},
  author = {Philipp Merkle and Christian Stier},
  booktitle = {Proceedings of the 5th ACM/SPEC International Conference on Performance Engineering},
  location = {Dublin, Ireland},
  note = {Work-In-Progress Paper},
  publisher = {ACM},
  series = {ICPE '14},
  title = {{Modelling Database Lock-Contention in Architecture-level Performance Simulation}},
  year = {2014}
}
@inproceedings{RyKo2014-DCPerf-DNI2QPN,
  author = {Piotr Rygielski and Samuel Kounev},
  booktitle = {34th IEEE International Conference on Distributed Computing Systems Workshops (ICDCS 2014 Workshops). 4th International Workshop on Data Center Performance (DCPerf 2014)},
  location = {Madrid, Spain},
  title = {{Data Center Network Throughput Analysis using Queueing Petri Nets}},
  year = {2014}
}
@inproceedings{KrScHe2014-HotTopiCS-OptimizationApproach,
  abstract = {{Software-as-a-Service (SaaS) often shares one single application instance among different tenants to reduce costs. However, sharing potentially leads to undesired influence from one tenant onto the performance observed by the others. Furthermore, providing one tenant additional resources to support its increasing demands without increasing the performance of tenants who do not pay for it is a major challenge. The application intentionally does not manage hardware resources, and the OS is not aware of application level entities like tenants. Thus, it is difficult to control the performance of different tenants to keep them isolated. These problems gain importance as performance is one of the major obstacles for cloud customers. Existing work applies request based admission control mechanisms like a weighted round robin with an individual queue for each tenant to control the share guaranteed for a tenant. However, the computation of the concrete weights for such an admission control is still challenging. In this paper, we present a fitness function and optimization approach reflecting various requirements from this field to compute proper weights with the goal to ensure an isolated performance as foundation to scale on a tenants basis.}},
  author = {Rouven Krebs and Philipp Schneider and Nikolas Herbst},
  booktitle = {Proceedings of the 2nd International Workshop on Hot Topics in Cloud Service Scalability (HotTopiCS 2014), co-located with the 5th ACM/SPEC International Conference on Performance Engineering (ICPE 2014)},
  day = {22},
  keywords = {SaaS, Multi-Tenancy, Performance, Isolation, Scalability},
  location = {Dublin, Ireland},
  month = {March},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KrScHe2014-HotTopiCS-OptimizationApproach.pdf},
  publisher = {ACM},
  slides = {http://sdqweb.ipd.kit.edu/publications/pdfs/KrScHe2014-HotTopiCS-OptimizationApproach-Slides.pdf},
  title = {{Optimization Method for Request Admission Control to Guarantee Performance Isolation}},
  year = {2014}
}
@inproceedings{KrSpAhKo2014_CCGrid_ResourceIsolation,
  abstract = {{Multi-tenancy is an approach to share one application instance among multiple customers by providing each of them a dedicated view. This approach is commonly used by SaaS providers to reduce the costs for service provisioning. Tenants also expect to be isolated in terms of the performance they observe and the providers inability to offer performance guarantees is a major obstacle for potential cloud customers. To guarantee an isolated performance it is essential to control the resources used by a tenant. This is a challenge, because the layers of the execution environment, responsible for controlling resource usage (e.g., operating system), normally do not have knowledge about entities defined at the application level and thus they cannot distinguish between different tenants. Furthermore, it is hard to predict how tenant requests propagate through the multiple layers of the execution environment down to the physical resource layer. The intended abstraction of the application from the resource controlling layers does not allow to solely solving this problem in the application. In this paper, we propose an approach which applies resource demand estimation techniques in combination with a request based admission control. The resource demand estimation is used to determine resource consumption information for individual requests. The admission control mechanism uses this knowledge to delay requests originating from tenants that exceed their allocated resource share. The proposed method is validated by a widely accepted benchmark showing its applicability in a setup motivated by today's platform environments.}},
  author = {Rouven Krebs and Simon Spinner and Nadia Ahmed and Samuel Kounev},
  booktitle = {Proceedings of the 14th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid 2014)},
  day = {26},
  location = {Chicago, IL, USA},
  month = {May},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KrSpAhKo2014_CCGrid_ResourceIsolation.pdf},
  publisher = {IEEE/ACM},
  title = {{Resource Usage Control In Multi-Tenant Applications}},
  year = {2014}
}
@inproceedings{KrLo2014_Closer_IsolationTypes,
  author = {Rouven Krebs and Manuel Loesch},
  booktitle = {Proceedings of the 4th International Conference on Cloud Computing and Services Science (CLOSER 2014)},
  day = {3},
  location = {Barcelona, Spain},
  month = {April},
  note = {Short Paper},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/KrLo2014_Closer_IsolationTypes.pdf},
  publisher = {SciTePress},
  title = {{Comparison of Request Admission Based Performance Isolation Approaches in Multi-Tenant SaaS Applications}},
  year = {2014}
}
@inproceedings{jung2014a,
  abstract = {Long-living systems face many modifications and extensions over time due to changing technology and requirements. This causes changes in the models reflecting the systems, and subsequently in the underlying meta-models, as their structure and semantics are adapted to accommodate these changes. Modifying meta-models requires adaptations in all tools realizing their semantics. This is a costly endeavor, especially for complex meta-models. To solve this problem, we propose a method to construct and refactor meta-models to be concise and focused on a small set of concerns. The method results in simpler meta-model modification scenarios and fewer modifications, as new concerns and aspects are encapsulated in separate meta-models. Furthermore, we define design patterns based on the different roles meta-models play in software. Thus, we keep large and complex modeling projects manageable due to the improved adaptability of their meta-model basis.},
  acmid = {2631681},
  address = {New York, NY, USA},
  articleno = {19},
  author = {Jung, Reiner and Heinrich, Robert and Schmieders, Eric and Strittmatter, Misha and Hasselbring, Wilhelm},
  booktitle = {Proceedings of the 2nd Workshop on View-Based, Aspect-Oriented and Orthographic Software Modelling},
  doi = {10.1145/2631675.2631681},
  isbn = {978-1-4503-2900-2},
  keywords = {Aspect Modeling, Design Pattern, Evolution, Meta-Model Extension},
  location = {York, United Kingdom},
  month = {July},
  numpages = {4},
  pages = {19:19--19:22},
  pdf = {http://eprints.uni-kiel.de/25295/1/a-method-for-aspect-oriented-meta-model-evolution.pdf},
  publisher = {ACM},
  series = {VAO '14},
  slides = {http://eprints.uni-kiel.de/25295/8/vao-jung.pdf},
  tags = {refereed},
  title = {A Method for Aspect-oriented Meta-Model Evolution},
  year = {2014}
}
@inproceedings{anaya2014adapt,
  abstract = {{ Engineering self-adaptive software in unpredictable environments such as pervasive systems, where network availability, remaining battery power, and environmental conditions may vary over the lifetime of the system, is a very challenging task. Many current software engineering approaches leverage run-time architectural models to ease the design of the autonomic control loop of these self-adaptive systems. While these approaches perform well in reacting to various evolutions of the runtime environment, implementations based on reactive paradigms have a limited ability to anticipate problems, leading to transient unavailability of the system, useless costly adaptations, or wasted resources. In this paper, we follow a proactive self-adaptation approach that aims at overcoming the limitations of reactive approaches. Based on predictive analysis of internal and external context information, our approach regulates new architecture reconfigurations and deploys them using models at runtime. We have evaluated our approach on a case study where we combined hourly temperature readings provided by the National Climatic Data Center (NCDC) with fire reports from the Moderate Resolution Imaging Spectroradiometer (MODIS) and simulated the behavior of multiple systems. The results confirm that our proactive approach outperforms a typical reactive system in scenarios with seasonal behavior. }},
  author = {Ivan Dario Paez Anaya and Viliam Simko and Johann Bourcier and Noel Plouzeau},
  booktitle = {Proceedings of the 9th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS'14)},
  day = {31},
  location = {Hyderabad, India},
  month = {May},
  pdf = {http://hal.archives-ouvertes.fr/docs/00/98/30/46/PDF/SEAMS14-main.pdf},
  title = {A Prediction-Driven Adaptation Approach for Self-Adaptive Sensor Networks},
  year = {2014}
}
@inproceedings{vinarek2014eomas,
  abstract = {{ Requirements traceability is an extremely important aspect of software development and especially of maintenance. Efficient maintenance of traceability links between the high-level requirements specification and the low-level implementation is hindered by many problems. In this paper, we propose a method for automated recovery of links between parts of the textual requirements specification and the source code of the implementation. The described method builds on a technique for extracting a prototype domain model from a plain-text requirements specification. The proposed method is evaluated on two non-trivial examples. The performed experiments show that our method is able to link requirements with source code with an accuracy of $F_1=58-61\%$. }},
  author = {Jiri Vinarek and Petr Hnetynka and Viliam Simko and Petr Kroha},
  booktitle = {Proceedings of the 10th International Workshop on Enterprise and Organizational Modeling and Simulation (EOMAS 2014), Held at CAiSE 2014},
  day = {16--17},
  location = {Thessaloniki, Greece},
  month = {June},
  series = {LNBIP},
  title = {Recovering Traceability Links between Code and Specification through Domain Model Extraction},
  url = {http://www.springer.com/business+%26+management/business+information+systems/book/978-3-662-44859-5},
  volume = {191},
  year = {2014}
}
@article{simko2014foam,
  abstract = {{ Textual use-cases have been traditionally used in the initial stages of the software development process to describe software functionality from the user's perspective. Their advantage is that they can be easily understood by stakeholders and domain experts. However, since use-cases typically rely on natural language, they cannot be directly subjected to formal verification. In this article, we present a method (called FOAM) for formal verification of use-cases. This method features simple user-definable annotations, which are inserted into a use-case to make its semantics more suitable for verification. Subsequently, a model-checking tool is employed to verify temporal invariants associated with the annotations. This way, FOAM allows harnessing the benefits of model-checking while still keeping the use-cases understandable for non-experts. }},
  author = {Viliam Simko and David Hauzar and Petr Hnetynka and Tomas Bures and Frantisek Plasil},
  doi = {10.1093/comjnl/bxu068},
  journal = {The Computer Journal},
  note = {accepted for publication},
  tags = {Formal Methods, Verification, Requirements Engineering},
  title = {Formal Verification of Annotated Textual Use-Cases},
  year = {2014}
}
@inproceedings{strittmatter2014a,
  author = {Strittmatter, Misha and Langhammer, Michael},
  booktitle = {Proceedings of the Symposium on Software Performance: Joint Descartes/Kieker/Palladio Days},
  day = {26--28},
  editor = {Steffen Becker and Wilhelm Hasselbring and Andr{\'e} van Hoorn and Samuel Kounev and Ralf Reussner},
  location = {Stuttgart, Germany},
  month = {November},
  pages = {160--176},
  pdf = {http://www.performance-symposium.org/fileadmin/user_upload/palladio-conference/2014/papers/paper13.pdf},
  publisher = {Universit{\"a}tsbibliothek Stuttgart},
  slides = {http://www.performance-symposium.org/fileadmin/user_upload/palladio-conference/2014/slides/13_pcm_mm_concerns.pdf},
  tags = {refereed},
  title = {Identifying Semantically Cohesive Modules within the Palladio Meta-Model},
  year = {2014}
}
@inproceedings{strittmatter2014b,
  author = {Strittmatter, Misha},
  booktitle = {Proceedings of the Symposium on Software Performance: Joint Descartes/Kieker/Palladio Days},
  day = {26--28},
  editor = {Steffen Becker and Wilhelm Hasselbring and Andr{\'e} van Hoorn and Samuel Kounev and Ralf Reussner},
  location = {Stuttgart, Germany},
  month = {November},
  note = {Talk Abstract},
  pages = {9},
  pdf = {http://www.kieker-palladio-days.org/fileadmin/user_upload/palladio-conference/2014/papers/abstracts.pdf},
  publisher = {Universit{\"a}tsbibliothek Stuttgart},
  slides = {http://www.kieker-palladio-days.org/fileadmin/user_upload/palladio-conference/2014/slides/14_pcm_assembly.pdf},
  tags = {talk},
  title = {Enabling Assembly of Systems and its Implications within the Palladio Component Model},
  year = {2014}
}
@inproceedings{jung2014b,
  author = {Jung, Reiner and Strittmatter, Misha and Merkle, Philipp and Heinrich, Robert},
  booktitle = {Proceedings of the Symposium on Software Performance: Joint Descartes/Kieker/Palladio Days},
  day = {26--28},
  editor = {Steffen Becker and Wilhelm Hasselbring and Andr{\'e} van Hoorn and Samuel Kounev and Ralf Reussner},
  location = {Stuttgart, Germany},
  month = {November},
  note = {Talk Abstract},
  pages = {13},
  pdf = {http://www.kieker-palladio-days.org/fileadmin/user_upload/palladio-conference/2014/papers/abstracts.pdf},
  publisher = {Universit{\"a}tsbibliothek Stuttgart},
  slides = {http://eprints.uni-kiel.de/26081/1/sosp-evolution-pcm.pdf},
  tags = {talk},
  title = {Evolution of the Palladio Component Model: Process and Modeling Methods},
  year = {2014}
}
@inproceedings{stier2014b,
  author = {Stier, Christian and Groenda, Henning and Koziolek, Anne},
  booktitle = {Proceedings of the Symposium on Software Performance: Joint Descartes/Kieker/Palladio Days},
  day = {26--28},
  editor = {Steffen Becker and Wilhelm Hasselbring and Andr{\'e} van Hoorn and Samuel Kounev and Ralf Reussner},
  location = {Stuttgart, Germany},
  month = {November},
  pages = {18},
  pdf = {http://www.performance-symposium.org/fileadmin/user_upload/palladio-conference/2014/papers/paper12.pdf},
  publisher = {Universit{\"a}tsbibliothek Stuttgart},
  slides = {http://www.performance-symposium.org/fileadmin/user_upload/palladio-conference/2014/slides/12_Stier_Energy.pdf},
  title = {{Towards Modeling and Analysis of Power Consumption of Self-Adaptive Software Systems in Palladio}},
  year = {2014}
}
@inproceedings{ostberg2014a,
  address = {Singapore},
  author = {P-O \"{O}stberg and Henning Groenda and Stefan Wesner and James Byrne and Dimitrios~S. Nikolopoulos and Craig Sheridan and Jakub Krzywda and Ahmed Ali-Eldin and Johan Tordsson and Erik Elmroth and Christian Stier and Klaus Krogmann and J\"{o}rg Domaschka and Christopher Hauser and PJ Byrne and Sergej Svorobej and Barry McCollum and Zafeirios Papazachos and Loke Johannessen and Stephan R\"{u}th and Dragana Paurevic},
  booktitle = {Proceedings of the Sixth IEEE International Conference on Cloud Computing Technology and Science (CloudCom)},
  doi = {10.1109/CloudCom.2014.62},
  month = {December},
  pages = {26--31},
  publisher = {IEEE Computer Society},
  title = {{The CACTOS Vision of Context-Aware Cloud Topology Optimization and Simulation}},
  year = {2014}
}
@phdthesis{klatt2014phd,
  abstract = {Copy-based customization is a widespread technique to serve individual customer needs with existing software solutions. To cope with the long-term disadvantages resulting from this practice, this dissertation develops an approach to support the consolidation of such copies into a Software Product Line with a future-compliant product base providing managed variability.},
  address = {Karlsruhe, Germany},
  author = {Klatt, Benjamin},
  month = {October},
  publisher = {KIT Scientific Publishing},
  school = {Karlsruhe Institute of Technology (KIT)},
  title = {Consolidation of Customized Product Copies into Software Product Lines},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000043687},
  year = {2014}
}
@inproceedings{heinrich2014,
  author = {Robert Heinrich and Eric Schmieders and Reiner Jung and Kiana Rostami and Andreas Metzger and Wilhelm Hasselbring and Ralf H. Reussner and Klaus Pohl},
  booktitle = {Proceedings of the 9th Workshop on Models@run.time co-located with the 17th International Conference on Model Driven Engineering Languages and Systems (MODELS 2014), Valencia, Spain, September 30, 2014},
  pages = {41--46},
  tags = {refereed},
  title = {Integrating Run-time Observations and Design Component Models for Cloud System Analysis},
  url = {http://ceur-ws.org/Vol-1270/mrt14_submission_8.pdf},
  year = {2014}
}
@book{heinrich2014aligning,
  author = {Heinrich, Robert},
  doi = {10.1007/978-3-658-06518-8},
  isbn = {978-3-658-06517-1},
  publisher = {Springer},
  title = {Aligning Business Processes and Information Systems: New Approaches to Continuous Quality Engineering},
  year = {2014}
}
@proceedings{DBLP:conf/se/2014w,
  editor = {Klaus Schmid and Wolfgang B{\"{o}}hm and Robert Heinrich and Andrea Herrmann and Anne Hoffmann and Dieter Landes and Marco Konersmann and Thomas Ruhroth and Oliver Sander and Volker Stolz and Baltasar Tranc{\'{o}}n y Widemann and R{\"{u}}diger Wei{\ss}bach},
  publisher = {CEUR-WS.org},
  series = {{CEUR} Workshop Proceedings},
  title = {Gemeinsamer Tagungsband der Workshops der Tagung Software Engineering 2014, 25.-26. Februar 2014 in Kiel, Deutschland},
  url = {http://ceur-ws.org/Vol-1129},
  volume = {1129},
  year = {2014}
}
@proceedings{DBLP:conf/re/2014rebmp,
  editor = {Robert Heinrich and Kathrin Kirchner and R{\"{u}}diger Wei{\ss}bach},
  isbn = {978-1-4799-6346-1},
  publisher = {{IEEE}},
  title = {1st {IEEE} International Workshop on the Interrelations between Requirements Engineering and Business Process Management, {REBPM} 2014, Karlskrona, Sweden, August 25, 2014},
  url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6883271},
  year = {2014}
}
@mastersthesis{ananieva2014a,
  author = {Ananieva, Sofia},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/ananieva2014a.pdf},
  school = {Braunschweig University of Technology},
  title = {{Evolutionsanalyse von variablen Softwaresystemen}},
  type = {Bachelor's Thesis},
  year = {2014}
}
@mastersthesis{koch2014,
  author = {Koch, Sandro},
  school = {Hochschule Ravensburg-Weingarten},
  title = {{Selektion und Entwicklung eines {K}ommunikationsbus-/protokoll zur {I}ntegration in eine elektrische {A}ntriebsregelung}},
  type = {Bachelor's Thesis},
  year = {2014}
}