2008.bib

@incollection{boehme2008a,
  abstract = {This chapter discusses ways to validate metrics and raises awareness of possible caveats when metrics are used in a social environment.},
  author = {Rainer B{\"o}hme and Ralf Reussner},
  booktitle = {Dependability Metrics},
  chapter = {3},
  pages = {14--18},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{V}alidation of {P}redictions with {M}easurements},
  url = {http://www.springerlink.com/content/662rn13014r46269/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@incollection{boehme2008b,
  abstract = {The following chapter attempts to define the notions of metric and measurement which underlie this book. It further elaborates on general properties of metrics and introduces useful terms and concepts from measurement theory, without being overly formal.},
  author = {Rainer B{\"o}hme and Ralf Reussner},
  booktitle = {Dependability Metrics},
  chapter = {2},
  pages = {7--13},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {On Metrics and Measurements},
  url = {http://www.springerlink.com/content/w11w657204878756/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inbook{baier2008a,
  author = {Achim Baier and Steffen Becker and Martin Jung and Klaus Krogmann and Carsten R{\"{o}}ttgers and Niels Streekmann and Karsten Thoms and Steffen Zschaler},
  chapter = {Modellgetriebene Software-Entwicklung},
  edition = {2},
  isbn = {3898645592},
  month = {December},
  pages = {93--122},
  publisher = {dpunkt.verlag, Heidelberg},
  title = {{H}andbuch der {S}oftware-{A}rchitektur},
  year = {2008}
}
@inproceedings{becker2008b,
  abstract = {Model-driven performance prediction methods use abstract design models to predict the performance of the modelled system during early development stages. However, performance is an attribute of the running system and not its model. The system contains many implementation details not part of its model but still affecting the performance at run-time. Existing approaches neglect details of the implementation due to the abstraction underlying the design model. Completion components [26] deal with this problem, however, they have to be added manually to the prediction model. In this work, we assume that the system's implementation is generated by a chain of model transformations. In this scenario, the transformation rules determine the transformation result. By analysing these transformation rules, a second transformation can be derived which automatically adds details to the prediction model according to the encoded rules. We call this transformation a coupled transformation as it is coupled to a corresponding model-to-code transformation. It uses the knowledge of the output of the model-to-code transformation to increase performance prediction accuracy. The introduced coupled transformations method is validated in a case study in which a parametrised transformation maps abstract component connectors to realisations in different RPC calls. In this study, the corresponding coupled transformation captures the RPC's details with a prediction error of less than 5\%.},
  address = {New York, NY, USA},
  author = {Steffen Becker},
  booktitle = {WOSP '08: Proceedings of the 7th International Workshop on Software and Performance},
  doi = {10.1145/1383559.1383573},
  isbn = {978-1-59593-873-2},
  keywords = {Software Architecture,Component-Based Software Engineering,Performance Prediction,MDA},
  location = {Princeton, NJ, USA},
  pages = {103--114},
  publisher = {ACM},
  title = {{C}oupled {M}odel {T}ransformations},
  year = {2008}
}
@inbook{becker2008d,
  abstract = {This chapter gives an overview of the Quality of Service Modeling Language (QML), a language which can be used to describe QoS offerings or needs of specified services.},
  author = {Steffen Becker},
  booktitle = {Dependability Metrics},
  chapter = {{Q}uality of {S}ervice {M}odeling {L}anguage},
  pages = {43--47},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/754u689927655237/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inbook{becker2008e,
  abstract = {ISO 9126 [243, 244, 245] is a standard which can be used to describe the quality of software systems. It is based on a quality model that is illustrated in part one of the standard [243]. This model distinguishes between internal and external quality attributes. Internal metrics depend on knowledge of the internal details of the respective software. External metrics can be measured without knowing internal details. Furthermore, the quality model introduces characteristics and sub-characteristics which are abstractions of the actual attributes. For example, Usability is an abstraction of Learnability, Understandability, and Operability, each of which in turn abstracts from different attributes. The ISO 9126 standard has no characteristic Performance. The closest characteristic to our definition of performance is Efficiency. It is divided into two sub-characteristics: time behaviour and resource behaviour. Some people say this distinction is artificial as time is also a (rare) resource. Nevertheless, timing behaviour is treated separately in ISO 9126. The important attributes of Efficiency in ISO 9126 are described in the external metrics specification. Hence, the following},
  author = {Steffen Becker},
  booktitle = {Dependability Metrics},
  chapter = {{P}erformance-{R}elated {M}etrics in the {ISO} 9126 {S}tandard},
  pages = {204--206},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/2l1q7p523215302l/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@phdthesis{becker2008f,
  author = {Steffen Becker},
  month = {March},
  school = {University of Oldenburg, Germany},
  title = {{C}oupled {M}odel {T}ransformations for {Q}o{S} {E}nabled {C}omponent-{B}ased {S}oftware {D}esign},
  year = {2008}
}
@book{becker2008g,
  abstract = {This thesis presents the Palladio Component Model and its accompanying transformations for component-based software design with predictable performance attributes. The use of transformations results in a deterministic relationship between the model and its implementation. The introduced Coupled Transformations method uses this relationship to include implementation details into predictions and thus improve prediction accuracy. The approach is validated in several case studies showing the increased accuracy.},
  author = {Steffen Becker},
  publisher = {Universit{\"a}tsverlag Karlsruhe},
  series = {The Karlsruhe Series on Software Design and Quality},
  title = {{C}oupled {M}odel {T}ransformations for {Q}o{S} {E}nabled {C}omponent-{B}ased {S}oftware {D}esign},
  volume = {1},
  year = {2008}
}
@inproceedings{becker2008c,
  abstract = {Early, model-based performance predictions help to understand the consequences of design decisions on the performance of the resulting system before the system's implementation becomes available. While this helps reduce the costs of redesigning systems that do not meet their extra-functional requirements, performance prediction models have to abstract from the full complexity of modern hard- and software environments, potentially leading to imprecise predictions. As a solution, the construction and execution of prototypes on the target execution environment gives early insights into the behaviour of the system under realistic conditions. In the literature, several approaches exist to generate prototypes from models which either generate code skeletons or require detailed models for the prototype. In this paper, we present an approach which aims at the automated generation of a performance prototype based solely on a design model with performance annotations. For the concrete realisation, we used the Palladio Component Model (PCM), which is a component-based architecture modelling language supporting early performance analyses. For a typical three-tier business application, the resulting Java EE code shows how the prototype can be used to evaluate the influence of complex parts of the execution environment like memory interactions or the operating system's scheduler.},
  author = {Steffen Becker and Tobias Dencker and Jens Happe},
  booktitle = {Performance Evaluation: Metrics, Models and Benchmarks (SIPEW 2008)},
  doi = {10.1007/978-3-540-69814-2_7},
  isbn = {978-3-540-69813-5},
  pages = {79--98},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{Model-Driven Generation of Performance Prototypes}},
  url = {http://www.springerlink.com/content/62t1277642tt8676/fulltext.pdf},
  volume = {5119},
  year = {2008}
}
@inproceedings{becker2008i,
  author = {Steffen Becker and Mircea Trifu and Ralf Reussner},
  booktitle = {1st International Workshop on Automated engineeRing of Autonomous and run-time evolving Systems (ARAMIS 2008)},
  keywords = {Q-ImPrESS},
  location = {L'Aquila, Italy},
  month = {September},
  title = {{Towards Supporting Evolution of Service Oriented Architectures through Quality Impact Prediction}},
  year = {2008}
}
@techreport{groenda2008c,
  abstract = {Systematic quality assurance is becoming increasingly important in the software development industry in the face of global competition. Especially on the way towards software industrialisation and engineering-style software development, continuous quality assurance is indispensable. Certification offers the possibility of having compliance with particular standards and criteria checked and attested by independent third parties in order to demonstrate the quality of a product or of a development process. Certifications can refer to products and processes as well as to the training and knowledge of individuals. Since certificates are issued by independent auditing bodies, certificates and their verifiable statements are generally trusted considerably more than quality promises made by software vendors themselves. Companies that have their processes certified, for example according to CMMI, can thereby demonstrate their ability to complete projects successfully and with predictable quality. Besides serving as a differentiator from competitors, certificates attesting compliance with standards can also be mandated by legislation; certificates from high-security domains such as nuclear power plants are one example. The seminar was organised like a scientific conference: the submissions were reviewed in a two-stage peer-review process, first by fellow students and then by the supervisors, and the articles were presented in several sessions on two conference days. The best contributions were honoured with best paper awards, which went to Fabian Brosig for his work ``Cost Benefit Analysis Method (CBAM)'', to Jakob Blomer for ``Zertifizierung von Softwarebenchmarks'', and to Grischa Liebel for ``SWT -- Das Standard Widget Toolkit''; we warmly congratulate them once again on this outstanding achievement. Complementing the participants' presentations, an invited talk was given: Dr. Dirk Feuerhelm of 1\&1 Internet AG kindly provided insights into his work as head of software development in his talk ``Softskills -- Ist das objektorientiert oder modellgetrieben?''.},
  author = {Jakob Blomer and Fabian Brosig and Andreas Kreidler and Jens K{\"u}ttel and Achim Kuwertz and Grischa Liebel and Daniel Popovic and Michael St{\"u}bs and Alexander M. Turek and Christian Vogel and Thomas Weinstein and Thomas Wurth},
  editor = {Thomas Goldschmidt and Henning Groenda and Klaus Krogmann and Michael Kuperberg and Anne Martens and Christoph Rathfelder and Ralf Reussner and Johannes Stammel},
  institution = {Universit{\"a}t Karlsruhe, Fakult{\"a}t f{\"u}r Informatik},
  language = {ger},
  location = {Karlsruhe},
  number = {4/2008},
  title = {{S}oftware {Z}ertifizierung},
  year = {2008}
}
@techreport{indust2008,
  abstract = {The industrialisation of software development is currently a heavily discussed topic. It is mainly concerned with increasing efficiency by raising the degree of standardisation, the degree of automation, and the division of labour. This affects the architectures underlying software systems as well as the development processes; service-oriented architectures, for example, reflect increased standardisation within software systems. It has to be kept in mind that the software industry differs from classical manufacturing industries in that software is an immaterial product and can therefore be reproduced arbitrarily often without high production costs. Nevertheless, many insights from the classical industries can be transferred to software engineering. The contents of this report stem mainly from the seminar ``Software-Industrialisierung'', which dealt with the professionalisation of software development and software design. While classical software development is poorly structured and does not meet elevated requirements regarding reproducibility or quality assurance, software development is undergoing a transformation in the course of industrialisation. This includes division of labour, the introduction of development processes with predictable properties (cost, time required, ...), and, as a consequence, the creation of products with guaranteeable properties. The topics of the seminar included, among others: software architectures, component-based software development, model-driven development, and the consideration of quality attributes in development processes. The seminar was organised like a scientific conference: the submissions were reviewed in a two-stage peer-review process, first by fellow students and then by the supervisors, and the articles were presented in several sessions on two conference days. The best contribution was honoured with a best paper award, which went to Benjamin Klatt for his work ``Software Extension Mechanisms''; we warmly congratulate him once again on this outstanding achievement. Complementing the participants' presentations, an invited talk was given: Florian Kaltner and Tobias Pohl from the IBM development laboratory kindly provided insights into the development of plugins for Eclipse and into the build environment of the firmware for the zSeries mainframe servers.},
  address = {Karlsruhe},
  author = {Brosch, Franz and Goldschmidt, Thomas and Groenda, Henning and Kapova, Lucia and Krogmann, Klaus and Kuperberg, Michael and Martens, Anne and Rathfelder, Christoph and Reussner, Ralf and Stammel, Johannes},
  issn = {1432-7864},
  institution = {Universit{\"a}t Karlsruhe, Fakult{\"a}t f{\"u}r Informatik, Institut f{\"u}r Programmstrukturen und Datenorganisation},
  timestamp = {2009.06.29},
  title = {Software-Industrialisierung},
  type = {Interner Bericht},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000009113},
  number = {8},
  year = {2008}
}
@mastersthesis{burger2008a,
  abstract = {The evolution of software systems can produce incompatibilities with existing data and applications. For this reason, changes have to be well-planned, and developers should know the impact of changes on a software system. This also affects the branch of model-driven development, where changes occur as modifications of the metamodels that the system is based on. Models that are instantiated from an earlier metamodel version may not be valid instances of the new version of a metamodel. Also, changes in the interface definition may require adaptations to the modeling tools. In this thesis, the impact of metamodel changes is evaluated for the modeling standard Meta Object Facility (MOF) and the interface definition Java Metadata Interface (JMI), based on the Modeling Infrastructure (MOIN) project at SAP, which includes a MOF-based repository and implements the JMI standard. For the formalisation of changes to MOF-based metamodels, a Change Metamodel is introduced to describe the transformation of one version of a metamodel to another by means of modeling itself. The changes are then classified by their impact on the compatibility of existing model data and the generated JMI interfaces. The description techniques and change classifications presented in this thesis can be used to implement a mechanism that allows metamodel editors to estimate the impact of metamodel changes with the help of modeling tools that can adapt existing data semi-automatically.},
  author = {Erik Burger},
  month = {September},
  school = {Universit{\"a}t Karlsruhe (TH)},
  title = {{Metamodel Evolution in the Context of a MOF-Based Metamodeling Infrastructure}},
  type = {Diplomarbeit},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/burger2008a.pdf},
  year = {2008}
}
@inproceedings{chouambe2008a,
  abstract = {An increasing number of software systems is developed using component technologies such as COM, CORBA, or EJB. Still, there is a lack of support to reverse engineer such systems. Existing approaches claim reverse engineering of components, but do not support composite components. Also, external dependencies such as required interfaces are not made explicit. Furthermore, relaxed component definitions are used, and obtained components are thus indistinguishable from modules or classes. We present an iterative reverse engineering approach that follows the widely used definition of components by Szyperski. It enables third-party reuse of components by explicitly stating their interfaces and supports composition of components. Additionally, components that are reverse engineered with the approach allow reasoning on properties of software architectures at the model level. For the approach, source code metrics are combined to recognize components. We discuss the selection of source code metrics and their interdependencies, which were explicitly taken into account. An implementation of the approach was successfully validated within four case studies. Additionally, a fifth case study shows the scalability of the approach for an industrial-size system.},
  address = {Athens, Greece},
  author = {Chouambe, Landry and Klatt, Benjamin and Krogmann, Klaus},
  booktitle = {12th European Conference on Software Maintenance and Reengineering},
  editor = {Kontogiannis, Kostas and Tjortjis, Christos and Winter, Andreas},
  isbn = {978-1-4244-2157-2},
  keywords = {ArchiRec},
  month = {April},
  day = {1--4},
  pages = {93--102},
  publisher = {IEEE Computer Society},
  title = {{R}everse {E}ngineering {S}oftware-{M}odels of {C}omponent-{B}ased {S}ystems},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/chouambe2008a.pdf},
  year = {2008}
}
@inproceedings{huber08b,
  abstract = {An important problem in software engineering is the automated discovery of noncrashing occasional bugs. In this work we address this problem and show that mining of weighted call graphs of program executions is a promising technique. We mine weighted graphs with a combination of structural and numerical techniques. More specifically, we propose a novel reduction technique for call graphs which introduces edge weights. Then we present an analysis technique for such weighted call graphs based on graph mining and on traditional feature selection schemes. The technique generalises previous graph mining approaches as it allows for an analysis of weights. Our evaluation shows that our approach finds bugs which previous approaches cannot detect so far. Our technique also doubles the precision of finding bugs which existing techniques can already localise in principle.},
  author = {Frank Eichinger and Klemens B{\"{o}}hm and Matthias Huber},
  booktitle = {{P}roceedings of the 8th {E}uropean {C}onference on {M}achine {L}earning and {P}rinciples and {P}ractice of {K}nowledge {D}iscovery in {D}atabases ({ECML} {PKDD})},
  comment = {{\copyright} Springer-Verlag Berlin Heidelberg 2008},
  doi = {10.1007/978-3-540-87479-9_40},
  editor = {Walter Daelemans and Bart Goethals and Katharina Morik},
  ee = {http://dx.doi.org/10.1007/978-3-540-87479-9_40},
  isbn = {978-3-540-87478-2},
  month = {September},
  note = {Part I},
  pages = {333--348},
  publisher = {Springer-Verlag Berlin Heidelberg, Germany},
  series = {Lecture Notes in Computer Science},
  timestamp = {2009.06.18},
  title = {{M}ining {E}dge-{W}eighted {C}all {G}raphs to {L}ocalise {S}oftware {B}ugs},
  url = {http://www.ipd.uka.de/~eichi/papers/eichinger08mining.pdf},
  volume = {5211},
  year = {2008}
}
@inproceedings{huber08a,
  abstract = {This work addresses the problem of discovering bugs in software development. We investigate the utilisation of call graphs of program executions and graph mining algorithms to approach this problem. We propose a novel reduction technique for call graphs which introduces edge weights. Then, we present an analysis technique for such weighted call graphs based on graph mining and on traditional feature selection. Our new approach finds bugs which could not be detected so far. With regard to bugs which can already be localised, our technique also doubles the precision of finding them.},
  address = {Helsinki, Finland},
  author = {Frank Eichinger and Klemens B{\"{o}}hm and Matthias Huber},
  booktitle = {{P}roceedings of the 6th {I}nternational {W}orkshop on {M}ining and {L}earning with {G}raphs ({MLG}) at {ICML}},
  editor = {Samuel Kaski and S.V.N. Vishwanathan and Stefan Wrobel},
  month = {July},
  timestamp = {2009.06.18},
  title = {{I}mproved {S}oftware {F}ault {D}etection with {G}raph {M}ining},
  url = {http://www.ipd.uka.de/~eichi/papers/eichinger08improved.pdf},
  year = {2008}
}
@inbook{eusgeld2008a,
  abstract = {Performability combines performance and reliability analysis in order to estimate the quality of service characteristics of a system in the presence of faults. This chapter provides an introduction to performability, discusses its relation to reliability and performance metrics, and presents common models used in performability analysis, such as Markov reward models or Stochastic Petri Nets.},
  author = {Irene Eusgeld and Jens Happe and Philipp Limbourg and Matthias Rohr and Felix Salfner},
  booktitle = {Dependability Metrics},
  chapter = {{P}erformability},
  pages = {245--254},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/v349v11817117467/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inproceedings{Frenot2008monitoring,
  abstract = {In simple and monolithic systems such as our current home gateways, monitoring is often overlooked: the home user can only reboot the gateway when there is a problem. In next-generation home gateways, more services will be available (pay-per-view TV, games, ...) and different actors will provide them. When one service fails, it will be impossible to reboot the gateway without disturbing the other services. We propose a management framework that monitors remote gateways. The framework tests response times for various management activities on the gateway, and provides reference time/performance ratios. The values can be used to establish a management schedule that balances the rate at which queries can be performed with the resulting load that the query will induce locally on the gateway. This allows the manager to tune the ratio between the reactivity of monitoring and its intrusiveness on performance.},
  author = {St\'{e}phane Fr\'{e}not and Yvan Royon and Pierre Parrend and Denis Beras},
  booktitle = {IEEE/IFIP Network Operations and Management Symposium (NOMS), Salvador de Bahia, Brazil, 7-11 April 2008},
  keywords = {Monitoring, load calibration, home gateways},
  timestamp = {2007.12.24},
  title = {Monitoring Scheduling for Home Gateways},
  url = {http://www.rzo.free.fr/frenot08monitoring.php},
  year = {2008}
}
@inproceedings{goldschmidt2008e,
  abstract = {Textual concrete syntaxes for models are beneficial for many reasons. They foster usability and productivity because of their fast editing style, their usage of error markers, autocompletion and quick fixes. Several frameworks and tools from different communities for creating concrete textual syntaxes for models emerged during recent years. However, these approaches have failed to provide a general solution. Open issues are incremental parsing and model updating as well as partial and federated views. On the other hand, incremental parsing and the handling of abstract syntaxes as leading entities were solved within the compiler construction community many years ago. In this short paper we envision an approach for the mapping of concrete textual syntaxes that makes use of the incremental parsing techniques from the compiler construction world. Thus, we circumvent problems that occur when dealing with concrete textual syntaxes in a UUID-based environment.},
  author = {Goldschmidt, Thomas},
  booktitle = {Proceedings of the 1st International Conference on Software Language Engineering (SLE)},
  doi = {10.1007/978-3-642-00434-6\_11},
  editor = {Gasevic, Dragan and L{\"a}mmel, Ralf and Wyk, Eric van},
  pages = {168--177},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  timestamp = {2008.09.09},
  title = {{T}owards an incremental update approach for concrete textual syntaxes for {UUID}-based model repositories},
  volume = {5452},
  year = {2008}
}
@inproceedings{goldschmidt2008b,
  abstract = {Textual concrete syntaxes for models are beneficial for many reasons. They foster usability and productivity because of their fast editing style, their usage of error markers, autocompletion and quick fixes. Furthermore, they can easily be integrated into existing tools such as diff/merge or information interchange through e-mail, wikis or blogs. Several frameworks and tools from different communities for creating concrete textual syntaxes for models emerged during recent years. However, these approaches have failed to provide a general solution. Open issues are incremental parsing and model updating as well as partial and federated views. To determine the capabilities of existing approaches, we provide a classification schema, apply it to these approaches, and identify their deficiencies.},
  author = {Goldschmidt, Thomas and Becker, Steffen and Uhl, Axel},
  booktitle = {Proceedings of the 4th European Conference on Model Driven Architecture - Foundations and Applications},
  doi = {10.1007/978-3-540-69100-6\_12},
  pages = {169--184},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{C}lassification of {C}oncrete {T}extual {S}yntax {M}apping {A}pproaches},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/goldschmidt2008b.pdf},
  volume = {5059},
  year = {2008}
}
@inproceedings{goldschmidt2008a,
  abstract = {Model Driven Software Development (MDSD) has matured over the last few years and is now becoming an established technology. One advantage promoted by the MDSD community is improved maintainability during system evolution compared to conventional development approaches. Compared to code-based development, (meta-)models and transformations need to be handled differently when it comes to maintainability assessments. However, a comprehensive analysis of the impact of the model-driven development approach on the maintainability of a software system is still lacking. This paper presents work towards finding appropriate approaches and metrics for measuring the maintainability and evolution capabilities of artefacts within model-driven environments. We present our first steps and further ideas on how to tackle this problem.},
  author = {Goldschmidt, Thomas and Kuebler, Jens},
  booktitle = {Software Engineering 2008, Workshop Modellgetriebene Softwarearchitektur - Evolution, Integration und Migration},
  title = {{T}owards {E}valuating {M}aintainability {W}ithin {M}odel-{D}riven {E}nvironments},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/goldschmidt2008a.pdf},
  year = {2008}
}
@inproceedings{goldschmidt2008c,
  abstract = {Efforts for software evolution exceed those of any other part of the software life cycle. Technological decisions have a major impact on maintainability, but are not well reflected by existing code- or architecture-based metrics. The way persistency of object structures in relational databases is realised affects the maintainability of the overall system. Besides maintainability, other quality attributes of the software are of interest, in particular performance metrics. However, a systematic evaluation of the benefits and drawbacks of different persistency frameworks is lacking. In this paper we systematically evaluate the maintainability and performance of different technological approaches for this mapping. The paper presents a testbed and an evaluation process with specifically designed metrics to evaluate persistency techniques regarding their maintainability and performance. In the second part we present and discuss the results of the case study.},
  address = {New York, NY, USA},
  author = {Goldschmidt, Thomas and Reussner, Ralf and Winzen, Jochen},
  booktitle = {ICSE '08: Proceedings of the 30th International Conference on Software Engineering},
  isbn = {978-1-60558-079-1},
  location = {Leipzig, Germany},
  pages = {401--410},
  publisher = {ACM},
  title = {{A} {C}ase {S}tudy {E}valuation of {M}aintainability and {P}erformance of {P}ersistency {T}echniques},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/goldschmidt2008c.pdf},
  year = {2008}
}
@inproceedings{goldschmidt2008d,
  abstract = {The goal of the workshop was to exchange ideas and experiences related to Model (Co-)evolution and Consistency Management (MCCM) in the context of Model-Driven Engineering (MDE). Contemporary MDE practices typically include the manipulation and transformation of a large and heterogeneous set of models. This heterogeneity exhibits itself in different guises, ranging from notational differences to semantic, content-wise variations. These differences need to be carefully managed in order to arrive at a consistent specification that is adaptable to change. This requires a dedicated activity in the development process and a rigorous adoption of techniques such as model differencing, model comparison, model refactoring, model (in)consistency management, model versioning, and model merging. The workshop invited submissions from both academia and industry on these topics, as well as experience reports on the effective management of models, metamodels, and model transformations. We selected ten high-quality contributions, of which we included two as best papers in the workshop reader. Thanks to the high number of participants and the nice mix of backgrounds, we had lively debates over a number of pertinent questions that challenge our field.},
  author = {Goldschmidt, Thomas and Uhl, Axel},
  booktitle = {Proceedings of the 1st International Workshop on Model Co-Evolution and Consistency Management},
  timestamp = {2008.09.09},
  title = {{R}etainment {R}ules for {M}odel {T}ransformations},
  year = {2008}
}
@inproceedings{goldschmidt2008f,
  abstract = {Model transformations are a central concept in Model-Driven Engineering. Model transformations are defined in model transformation languages. This paper addresses QVT Relations, a high-level declarative model transformation language standardised by the Object Management Group. QVT Relations lacks support for default copy rules. Thus, transformation developers need to define copy rules explicitly. Particularly for refinement transformations, which copy large parts of a model, this is a tremendous task. In this paper, we propose generic patterns for copy rules in QVT Relations. Based on these patterns, we provide a higher-order transformation to generate copy rules for a given metamodel. Finally, we explore several ways to derive a refinement transformation from a generated copy transformation.},
  author = {Goldschmidt, Thomas and Wachsmuth, Guido},
  booktitle = {Proceedings of the 3rd Workshop on Model Driven Software Engineering (MDSE 2008)},
  timestamp = {2008.11.17},
  title = {{R}efinement transformation support for {QVT} {R}elational transformations},
  year = {2008}
}
@inproceedings{huerst2008a,
  acmid = {1453839},
  address = {New York, NY, USA},
  author = {H\"{u}rst, Wolfgang and Merkle, Philipp},
  booktitle = {Proceedings of the 1st international conference on Designing interactive user experiences for TV and video},
  doi = {10.1145/1453805.1453839},
  isbn = {978-1-60558-100-2},
  keywords = {interfaces, mobile video, pda, video, video browsing},
  location = {Silicon Valley, California, USA},
  numpages = {10},
  pages = {169--178},
  publisher = {ACM},
  series = {UXTV '08},
  title = {One-handed mobile video browsing},
  url = {http://doi.acm.org/10.1145/1453805.1453839},
  year = {2008}
}
@phdthesis{happe2008b,
  abstract = {With today's rise of multi-core processors, concurrency becomes a ubiquitous challenge in software development. Concurrency allows the improvement of software performance by exploiting available processor cores. Performance prediction methods have to reflect the influence of multiprocessing environments on software performance in order to help software architects to find potential performance problems during early development phases. In this thesis, we address the influence of the operating system scheduler on software performance in symmetric multiprocessing environments. We propose a performance modelling framework for operating system schedulers such as Windows and Linux. Furthermore, the influence of the middleware on software performance is addressed by a performance modelling approach to message-oriented middleware. A series of case studies demonstrates that both techniques reduce the prediction error to less than 5\% to 10\% in most cases.},
  author = {Jens Happe},
  month = {August},
  school = {University of Oldenburg, Germany},
  title = {{P}redicting {S}oftware {P}erformance in {S}ymmetric {M}ulti-core and {M}ultiprocessor {E}nvironments},
  type = {Dissertation},
  url = {http://oops.uni-oldenburg.de/827/1/happre08.pdf},
  year = {2008}
}
@inbook{happe2008c,
  author = {Jens Happe},
  booktitle = {Dependability Metrics},
  chapter = {{A}nalytical {P}erformance {M}etrics},
  pages = {207--218},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/hm6vrj22gq236651/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inproceedings{happe2008a,
  abstract = {Details about the underlying Message-oriented Middleware (MOM) are essential for accurate performance predictions of software systems using message-based communication. The MOM's configuration and usage strongly influence its throughput, resource utilisation and timing behaviour. Prediction models need to reflect these effects and allow software architects to evaluate the performance influence of MOM configured for their needs. Performance completions [31, 32] provide the general concept to include low-level details of execution environments in abstract performance models. In this paper, we extend the Palladio Component Model (PCM) [4] by a performance completion for Message-oriented Middleware. With our extension to the model, software architects can specify and configure message-based communication using a language based on messaging patterns. For performance evaluation, a model-to-model transformation integrates the low-level details of a MOM into the high-level software architecture model. A case study based on the SPECjms2007 Benchmark [1] predicts the performance of message-based communication with an error of less than 20 percent.},
  address = {New York, NY, USA},
  author = {Jens Happe and Holger Friedrich and Steffen Becker and Ralf H. Reussner},
  booktitle = {Proceedings of the 7th International Workshop on Software and Performance (WOSP '08)},
  isbn = {978-1-59593-873-2},
  location = {Princeton, NJ, USA},
  pages = {165--176},
  publisher = {ACM},
  title = {{A} {P}attern-{B}ased {P}erformance {C}ompletion for {M}essage-{O}riented {M}iddleware},
  year = {2008}
}
@inbook{krogmann2008c,
  abstract = {The example of use which was chosen as the Common Component Modeling Example (CoCoME) and to which the various methods presented in this book are to be applied was designed according to the example described by Larman in [1]. The description of this example and its use cases in the current chapter shall be considered under the assumption that this information was delivered by a business company as it could be in reality. Therefore, the specified requirements are potentially incomplete or imprecise.},
  author = {Sebastian Herold and Holger Klus and Yannick Welsch and Constanze Deiters and Andreas Rausch and Ralf Reussner and Klaus Krogmann and Heiko Koziolek and Raffaela Mirandola and Benjamin Hummel and Michael Meisinger and Christian Pfaller},
  chapter = {CoCoME -- The Common Component Modeling Example},
  keywords = {CoCoME},
  pages = {16--53},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{T}he {C}ommon {C}omponent {M}odeling {E}xample},
  url = {http://springerlink.com/content/a04pr72354648281/},
  volume = {5153},
  year = {2008}
}
@inbook{kapova2008a,
  address = {Prague},
  author = {Kapova, Lucia and Bures, Tomas and Hnetynka, Petr},
  chapter = {Preserving Intentions in SOA Business Process Development},
  month = {August},
  day = {20--22},
  pages = {59--72},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Studies in Computational Intelligence},
  title = {{S}oftware {E}ngineering {R}esearch, {M}anagement and {A}pplications},
  url = {http://www.springer.com/engineering/book/978-3-540-70774-5?detailsPage=toc},
  volume = {150},
  year = {2008}
}
@inproceedings{kappler2008a,
  abstract = {Performance predictions for software architectures can reveal performance bottlenecks and quantitatively support design decisions for different architectural alternatives. As software architects aim at reusing existing software components, their performance properties should be included into performance predictions without the need for manual modelling. However, most prediction approaches do not include automated support for modelling implemented components. Therefore, we propose a new reverse engineering approach, which generates Palladio performance models from Java code. In this paper, we focus on the static analysis of Java code, which we have implemented as an Eclipse plugin called Java2PCM. We evaluated our approach on a larger component-based software architecture, and show that a similar prediction accuracy can be achieved with generated models compared to completely manually specified ones.},
  address = {Munich, Germany},
  author = {Kappler, Thomas and Koziolek, Heiko and Krogmann, Klaus and Reussner, Ralf H.},
  booktitle = {Software Engineering 2008},
  keywords = {Java2PCM},
  month = {February},
  day = {18--22},
  pages = {140--154},
  publisher = {Bonner K{\"o}llen Verlag},
  series = {Lecture Notes in Informatics},
  title = {{T}owards {A}utomatic {C}onstruction of {R}eusable {P}rediction {M}odels for {C}omponent-{B}ased {P}erformance {E}ngineering},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kappler2008a.pdf},
  volume = {121},
  year = {2008}
}
@inproceedings{klatt2008a,
  abstract = {Industrial software projects not only have to deal with the number of features in the software system; issues like quality, flexibility, reusability, extensibility, and developer and user acceptance are key factors these days. One architectural paradigm targeting these issues is extension mechanisms, which are, for example, used by component frameworks. The main contribution of this paper is to identify characteristics of software extension mechanisms derived from state-of-the-art software frameworks. These identified characteristics will help developers select and create extension mechanisms.},
  author = {Benjamin Klatt and Klaus Krogmann},
  booktitle = {Proceedings of the Thirteenth International Workshop on Component-Oriented Programming (WCOP'08), Karlsruhe, Germany},
  editor = {Ralf Reussner and Clemens Szyperski and Wolfgang Weck},
  issn = {1432-7864},
  number = {2008-12},
  pages = {11--18},
  series = {Interner Bericht, Universit{\"a}t Karlsruhe (TH)},
  title = {{Software Extension Mechanisms}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/klatt2008a.pdf},
  year = {2008}
}
@inbook{koziolek2008b,
  abstract = {This chapter gives an overview of the Goal-Question-Metric (GQM) approach, a way to derive and select metrics for a particular task in a top-down and goal-oriented fashion.},
  author = {Heiko Koziolek},
  booktitle = {Dependability Metrics},
  chapter = {{G}oal, {Q}uestion, {M}etric},
  pages = {39--42},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/n737771751296q23/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inbook{koziolek2008c,
  abstract = {This chapter defines simple performance metrics and gives an outlook on the remaining chapters of Part IV.},
  author = {Heiko Koziolek},
  booktitle = {Dependability Metrics},
  chapter = {{I}ntroduction to {P}erformance {M}etrics},
  pages = {199--203},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/r6625lp264177m72/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@phdthesis{koziolek2008g,
  abstract = {The following introduction will motivate the need for a new modelling method for component-based performance engineering (Chapter 1.1) and then describe the specific problem tackled in this thesis in detail (Chapter 1.2). Afterwards, it will point out the deficits of existing solution approaches to this problem (Chapter 1.3), before it lists the scientific contributions of this thesis (Chapter 1.4). Finally, the introduction will sketch the experimental validation conducted for this thesis (Chapter 1.5).},
  author = {Heiko Koziolek},
  school = {University of Oldenburg},
  title = {{P}arameter {D}ependencies for {R}eusable {P}erformance {S}pecifications of {S}oftware {C}omponents},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/koziolek2008g.pdf},
  year = {2008}
}
@book{koziolek2008h,
  author = {Heiko Koziolek},
  editor = {Ralf Reussner},
  isbn = {978-3-86644-272-6},
  publisher = {Universit{\"a}tsverlag Karlsruhe},
  series = {The Karlsruhe Series on Software Design and Quality},
  title = {{Parameter Dependencies for Reusable Performance Specifications of Software Components}},
  volume = {2},
  year = {2008}
}
@incollection{koziolek2007a,
  author = {Heiko Koziolek and Steffen Becker and Jens Happe and Ralf Reussner},
  booktitle = {{M}odel-{D}riven {S}oftware {D}evelopment: {I}ntegrating {Q}uality {A}ssurance},
  editor = {J{\"o}rg Rech and Christian Bunse},
  month = {December},
  pages = {95--118},
  publisher = {IDEA Group Inc.},
  title = {Evaluating Performance of Software Architecture Models with the Palladio Component Model},
  year = {2008}
}
@inproceedings{koziolek2008d,
  abstract = {Current software component models insufficiently reflect the different stages of the component life-cycle, which involves design, implementation, deployment, and runtime. Therefore, reasoning techniques for component-based models (e.g., protocol checking, QoS predictions, etc.) are often limited to a particular life-cycle stage. We propose modelling software components in different design stages, after implementation, and during deployment. Abstract models for newly designed components can be combined with refined models for already implemented components. As a proof-of-concept, we have implemented the new modelling techniques as part of our Palladio Component Model (PCM).},
  address = {Universit{\"a}t Karlsruhe (TH), Karlsruhe, Germany},
  author = {Heiko Koziolek and Steffen Becker and Jens Happe and Ralf Reussner},
  booktitle = {Proceedings of the 11th International Symposium on Component-Based Software Engineering (CBSE)},
  month = {October},
  pages = {278--285},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{L}ife-{C}ycle {A}ware {M}odelling of {S}oftware {C}omponents},
  year = {2008}
}
@inbook{koziolek2008f,
  abstract = {Some performance metrics are specific for certain domains or are used differently under different circumstances. In the following, performance metrics for Internet-based systems and embedded systems will be described.},
  author = {Heiko Koziolek and Jens Happe},
  booktitle = {Dependability Metrics},
  chapter = {{P}erformance {M}etrics for {S}pecific {D}omains},
  pages = {233--240},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/t13718l56531335p/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inproceedings{koziolek2008a,
  abstract = {For component-based performance engineering, software component developers individually create performance specifications of their components. Software architects compose these specifications to architectural models. This enables assessing the possible fulfilment of performance requirements without the need to purchase and deploy the component implementations. Many existing performance models do not support component-based performance engineering but offer efficient solvers. On the other hand, component-based performance engineering approaches often lack tool support. We present a model transformation combining the advanced component concepts of the Palladio Component Model (PCM) with the efficient performance solvers of Layered Queueing Networks (LQN). Joining the tool-set for PCM specifications with the tool-set for LQN solution is an important step to carry component-based performance engineering into industrial practice. We validate the correctness of the transformation by mapping the PCM model of a component-based architecture to an LQN and conducting performance predictions.},
  author = {Heiko Koziolek and Ralf Reussner},
  booktitle = {Performance Evaluation: Metrics, Models and Benchmarks, SIPEW 2008},
  isbn = {978-3-540-69813-5},
  pages = {58--78},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{A} {M}odel {T}ransformation from the {P}alladio {C}omponent {M}odel to {L}ayered {Q}ueueing {N}etworks},
  url = {http://www.springerlink.com/content/w14m0g520u675x10/fulltext.pdf},
  volume = {5119},
  year = {2008}
}
@inproceedings{krogmann2008b,
  abstract = {Integrating heterogeneous software systems becomes increasingly important. It requires combining existing components to form new applications. Such new applications are required to satisfy non-functional properties, such as performance. Design-time performance prediction of new applications built from existing components helps to compare design decisions before actually implementing them in full, avoiding costly prototype and glue code creation. But design-time performance prediction requires understanding and modeling of data flow and control flow across component boundaries, which is not given for most black-box components. If, for example, one component processes and forwards files to other components, this effect should be an explicit model parameter to correctly capture its performance impact. This impact should also be parameterised over data, but no reverse engineering approach exists to recover such dependencies. In this paper, we present an approach that allows reverse engineering of such behavioural models, which is applicable to black-box components. By runtime monitoring and application of genetic programming, we recover functional dependencies in code, which are then expressed as parameterisation in the output model. We successfully validated our approach in a case study on a file sharing application, showing that all dependencies could correctly be reverse engineered from black-box components.},
  address = {Oldenburg},
  author = {Klaus Krogmann and Michael Kuperberg and Ralf Reussner},
  booktitle = {MDD, SOA und IT-Management (MSI 2008)},
  editor = {Steffens, Ulrike and Addicks, Jan Stefan and Streekmann, Niels},
  month = {September},
  pages = {57--71},
  publisher = {GITO Verlag},
  title = {{Reverse Engineering of Parametric Behavioural Service Performance Models from Black-Box Components}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/krogmann2008b.pdf},
  year = {2008}
}
@inbook{krogmann2008a,
  abstract = {Palladio is a component modelling approach with a focus on performance (i.e. response time, throughput, resource utilisation) analysis to enable early design-time evaluation of software architectures. It targets the modelling of business information systems. The Palladio approach includes a meta-model called the Palladio Component Model for structural views, component behaviour specifications, resource environment, component allocation, and the modelling of system usage, as well as multiple analysis techniques ranging from process algebra analysis to discrete event simulation. Additionally, the Palladio approach is aligned with a development process model tailored for component-based software systems. Early design-time predictions avoid costly redesigns and reimplementation. Palladio enables software architects to analyse different architectural design alternatives, supporting their design decisions with the quantitative performance predictions provided by the Palladio approach.},
  author = {Krogmann, Klaus and Reussner, Ralf H.},
  chapter = {{P}alladio: {P}rediction of {P}erformance {P}roperties},
  keywords = {CoCoME, PCM, Palladio},
  pages = {297--326},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{T}he {C}ommon {C}omponent {M}odeling {E}xample},
  url = {http://springerlink.com/content/63617n4j5688879h/},
  volume = {5153},
  year = {2008}
}
@inbook{kuperberg2008b,
  abstract = {This chapter gives a brief overview of Markov models, a useful formalism to analyse stochastic systems.},
  author = {Michael Kuperberg},
  booktitle = {Dependability Metrics},
  chapter = {Markov Models},
  pages = {48--55},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008b.pdf},
  volume = {4909},
  year = {2008}
}
@inproceedings{kuperberg2008c,
  abstract = {In component-based software engineering, the response time of an entire application is often predicted from the execution durations of individual component services. However, these execution durations are specific for an execution platform (i.e. its resources such as CPU) and for a usage profile. Reusing an existing component on different execution platforms up to now required repeated measurements of the concerned components for each relevant combination of execution platform and usage profile, leading to high effort. This paper presents a novel integrated approach that overcomes these limitations by reconstructing behaviour models with platform-independent resource demands of bytecode components. The reconstructed models are parameterised over input parameter values. Using platform-specific results of bytecode benchmarking, our approach is able to translate the platform-independent resource demands into predictions for execution durations on a certain platform. We validate our approach by predicting the performance of a file sharing application.},
  author = {Michael Kuperberg and Klaus Krogmann and Ralf Reussner},
  booktitle = {Proceedings of the 11th International Symposium on Component Based Software Engineering (CBSE 2008), Karlsruhe, Germany, 14th-17th October 2008},
  month = {October},
  pages = {48--63},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{P}erformance {P}rediction for {B}lack-{B}ox {C}omponents using {R}eengineered {P}arametric {B}ehaviour {M}odels},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008c.pdf},
  volume = {5282},
  year = {2008}
}
@inproceedings{kuperberg2008a,
  abstract = {For bytecode-based applications, runtime instruction counts can be used as a platform-independent application execution metric, and can also serve as the basis for bytecode-based performance prediction. However, different instruction types have different execution durations, so they must be counted separately, and method invocations should be identified and counted because of their substantial contribution to the total application performance. For Java bytecode, most JVMs and profilers do not provide such functionality at all, and existing bytecode analysis frameworks require expensive JVM instrumentation for instruction-level counting. In this paper, we present ByCounter, a lightweight approach for exact runtime counting of executed bytecode instructions and method invocations. ByCounter significantly reduces total counting costs by instrumenting only the application bytecode and not the JVM, and it can be used without modifications on any JVM. We evaluate the presented approach by successfully applying it to multiple Java applications on different JVMs, and discuss the runtime costs of applying ByCounter to these cases.},
  author = {Michael Kuperberg and Martin Krogmann and Ralf Reussner},
  booktitle = {{Proceedings of the 3rd International Workshop on Bytecode Semantics, Verification, Analysis and Transformation, Budapest, Hungary, 5th April 2008 (ETAPS 2008, 11th European Joint Conferences on Theory and Practice of Software)}},
  keywords = {Java, bytecode, counting, portable, runtime, instrumentation, fine-grained},
  title = {{ByCounter: Portable Runtime Counting of Bytecode Instructions and Method Invocations}},
  url = {http://sdqweb.ipd.uka.de/publications/pdfs/kuperberg2008a.pdf},
  year = {2008}
}
@inproceedings{kuttruff08a,
  address = {Bonn},
  author = {Volker Kuttruff and Mircea Trifu and Peter Szulman},
  booktitle = {GI Lecture Notes in Informatics},
  isbn = {978-3-88579-220-8},
  organization = {GI},
  pages = {35--50},
  publisher = {K{\"o}llen Verlag},
  title = {Von der Problemerkennung zur Problembehebung: 12 Jahre Softwaresanierung am FZI},
  volume = {126},
  year = {2008}
}
@inproceedings{martens2008b,
  abstract = {Component-based software performance engineering (CBSPE) methods shall enable software architects to assess the expected response times, throughputs, and resource utilization of their systems already during design. This avoids the violation of performance requirements. Existing approaches for CBSPE either lack tool support or rely on prototypical tools which have only been applied by their authors. Therefore, the industrial applicability of these methods is unknown. To this end, we have conducted a controlled experiment involving 19 computer science students, who analysed the performance of two component-based designs using our Palladio performance prediction approach, as an example for a CBSPE method. Our study is the first of its type in this area and shall help to mature CBSPE to industrial applicability. In this paper, we report on results concerning the prediction accuracy achieved by the students and list several lessons learned, which are also relevant for other methods than Palladio.},
  author = {Anne Martens and Steffen Becker and Heiko Koziolek and Ralf Reussner},
  booktitle = {Proceedings of the 5th European Performance Engineering Workshop (EPEW'08), Palma de Mallorca, Spain},
  doi = {10.1007/978-3-540-87891-9_2},
  editor = {N. Thomas and C. Juiz},
  pages = {17--31},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/martens2008b.pdf},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {An Empirical Investigation of the Applicability of a Component-Based Performance Prediction Method},
  volume = {5261},
  year = {2008}
}
@inproceedings{martens2008a,
  abstract = {Model-based performance prediction methods aim at evaluating the expected response time, throughput, and resource utilisation of a software system at design time, before implementation. Existing performance prediction methods use monolithic, throw-away prediction models or component-based, reusable prediction models. While it is intuitively clear that the development of reusable models requires more effort, the actual higher amount of effort has not been quantified or analysed systematically yet. To study the effort, we conducted a controlled experiment with 19 computer science students who predicted the performance of two example systems applying an established, monolithic method (Software Performance Engineering) as well as our own component-based method (Palladio). The results show that the effort of model creation with Palladio is approximately 1.25 times higher than with SPE in our experimental setting, with the resulting models having comparable prediction accuracy. Therefore, in some cases, the creation of reusable prediction models can already be justified, if they are reused at least once.},
  author = {Anne Martens and Steffen Becker and Heiko Koziolek and Ralf Reussner},
  booktitle = {Proceedings of the 11th International Symposium on Component-Based Software Engineering (CBSE'08), Karlsruhe, Germany},
  doi = {10.1007/978-3-540-87412-6_3},
  editor = {M.R.V. Chaudron and C. Szyperski},
  pages = {16--31},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/martens2008a.pdf},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {An Empirical Investigation of the Effort of Creating Reusable Models for Performance Prediction},
  volume = {5282},
  year = {2008}
}
@inproceedings{martens2008c,
  abstract = {Architectural models of component-based software systems are evaluated for functional properties and/or extra-functional properties (e.g. by doing performance predictions). However, after getting the results of the evaluations and recognising that requirements are not met, most existing approaches leave the software architect alone with finding new alternatives to her current design (e.g. by changing the selection of components, the configuration of components and containers, the sizing). We propose a novel approach to automatically generate and assess performance-improving design alternatives for component-based software systems based on performance analyses of the software architecture. First, the design space spanned by different design options (e.g. available components, configuration options) is systematically explored using metaheuristic search techniques. Second, new architecture candidates are generated based on detecting anti-patterns in the initial architecture. Using this approach, the design of a high-quality component-based software system is eased for the software architect. First, she needs less manual effort to find good design alternatives. Second, good design alternatives can be uncovered that the software architect herself would have overlooked.},
  author = {Anne Martens and Heiko Koziolek},
  booktitle = {Proceedings of the Thirteenth International Workshop on Component-Oriented Programming (WCOP'08), Karlsruhe, Germany},
  editor = {Ralf Reussner and Clemens Szyperski and Wolfgang Weck},
  issn = {1432-7864},
  pages = {25--32},
  pdf = {http://sdqweb.ipd.uka.de/publications/pdfs/martens2008c.pdf},
  series = {Interner Bericht / Universit{\"a}t Karlsruhe, Fakult{\"a}t f{\"u}r Informatik ; 2008,12},
  title = {Performance-oriented Design Space Exploration},
  year = {2008}
}
@mastersthesis{merkle2008a,
  author = {Merkle, Philipp},
  school = {Albert-Ludwigs-Universit{\"a}t Freiburg},
  title = {Einh{\"a}ndiges, {F}inger-basiertes {V}ideo-{B}rowsing auf mobilen {H}andcomputern},
  type = {Bachelor's Thesis},
  url = {http://sdqweb.ipd.kit.edu/publications/pdfs/merkle2008a.pdf},
  year = {2008}
}
@inproceedings{MD+08b,
  abstract = {Supporting business services through Web service compositions (WSC) as part of service-oriented architectures involves various runtime monitoring requirements. The implementation of these requirements results in additional development activities. Due to the lack of standards for treating such WSC monitoring concerns, a corresponding development approach has to deal with a variety of specific technologies. This paper therefore introduces a platform-independent approach to the instrumentation of WSC and the generation of an effective monitoring infrastructure based on the principles of model-driven software development (MDSD).},
  address = {Munich, Germany},
  author = {Momm, Christof and Detsch, Thomas and Abeck, Sebastian},
  booktitle = {EDOC 2008 Workshop on Advances in Quality of Service Management (AQuSerM 08)},
  publisher = {IEEE Computer Society Press},
  title = {Model-Driven Instrumentation for Monitoring the Quality of Web Service Compositions},
  year = {2008}
}
@inproceedings{MD+08a,
  address = {Marrakesh, Morocco},
  author = {Momm, Christof and Detsch, Thomas and Gebhart, Michael and Abeck, Sebastian},
  booktitle = {15th HP-SUA Workshop},
  title = {Model-driven Development of Monitored Web Service Compositions},
  year = {2008}
}
@inproceedings{momm2008b,
  abstract = {Web service compositions (WSC), as part of a service-oriented architecture (SOA), have to be managed to ensure compliance with guaranteed service levels. In this context, a high degree of automation is desired, which can be achieved by applying autonomic computing concepts. This paper particularly focuses on the autonomic management of semi-dynamic compositions. Here, for each included service several variants are available that differ with regard to the service level they offer. Given this scenario, we first show how to instrument WSC in order to allow control of the service level by switching the employed service variant. Second, we show how the desired self-manageability can be designed and implemented by means of a WSC manageability infrastructure. The presented approach is based on widely accepted methodologies and standards from the area of application and web service management, in particular the WBEM standards.},
  address = {Berlin, Germany},
  author = {Christof Momm and Christoph Rathfelder},
  booktitle = {MDD, SOA und IT-Management (MSI 2008)},
  day = {24},
  editor = {Ulrike Steffens and Jan Stefan Addicks and Niels Streekmann},
  location = {Oldenburg, Germany},
  month = {September},
  pages = {25--40},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/momm2008b.pdf},
  publisher = {GITO-Verlag},
  title = {{M}odel-based {M}anagement of {W}eb {S}ervice {C}ompositions in {S}ervice-{O}riented {A}rchitectures},
  year = {2008}
}
@inproceedings{momm2008a,
  abstract = {Web service compositions (WSC), as part of a service-oriented architecture (SOA), have to be managed to ensure compliance with guaranteed service levels. In this context, a high degree of automation is desired, which can be achieved by applying autonomic computing concepts. This paper particularly focuses on the autonomic management of semi-dynamic compositions. Here, for each included service several variants are available that differ with regard to the service level they offer. Given this scenario, we first show how to instrument WSC in order to allow control of the service level by switching the employed service variant. Second, we show how the desired self-manageability can be designed and implemented by means of a WSC manageability infrastructure. The presented approach is based on widely accepted methodologies and standards from the area of application and web service management, in particular the WBEM standards.},
  author = {Momm, Christof and Rathfelder, Christoph and Hallerbach, Ignacio P{\'e}rez and Abeck, Sebastian},
  booktitle = {Proceedings of the Network Operations and Management Symposium (NOMS 2008)},
  day = {7--11},
  doi = {10.1109/NOMS.2008.4575227},
  issn = {1542-1201},
  location = {Salvador, Bahia, Brazil},
  month = {April},
  pages = {839--842},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/momm2008a.pdf},
  publisher = {IEEE},
  title = {{M}anageability {D}esign for an {A}utonomic {M}anagement of {S}emi-{D}ynamic {W}eb {S}ervice {C}ompositions},
  year = {2008}
}
@phdthesis{Parrend2008phd,
  author = {Pierre Parrend},
  keywords = {Middleware, Software Engineering, Security, Components, Service-oriented Programming, Java Language, OSGi Platform, Code Static analysis, Code vulnerabilities.},
  month = {December},
  school = {Institut National des Sciences Appliqu\'{e}es de Lyon, France},
  title = {Software Security Models for Service-Oriented Programming (SOP) Platforms},
  year = {2008}
}
@inproceedings{parrend08cbac,
  abstract = {Extensible Component Platforms support the discovery, installation, starting, and uninstallation of components at runtime. Since they are often targeted at mobile, resource-constrained devices, they have both strong performance and security requirements. The current security model for Java systems, Permissions, is based on call stack analysis. It proves to be very time-consuming, which makes it difficult to use in production environments. We therefore define the Component-Based Access Control (CBAC) Security Model, which aims at emulating Java Permissions through static analysis at the installation phase of the components. CBAC is based on a fully declarative approach that makes it possible to tag arbitrary methods as sensitive. A formal model is defined to guarantee that a given component has sufficient access rights, and that dependencies between components are taken into account. A first implementation of the model is provided for the OSGi Platform, using the ASM library for code analysis. Performance tests show that the cost of CBAC at install time is negligible, because it is executed together with the digital signature check, which is much more costly. Moreover, contrary to Java Permissions, the CBAC security model does not imply any runtime overhead.},
  author = {Pierre Parrend and St\'{e}phane Fr\'{e}not},
  booktitle = {Software Composition (SC'2008)},
  keywords = {Software Security, Component Middleware, Static Analysis, OSGi Platform},
  month = {March},
  pages = {68--83},
  publisher = {Springer Berlin / Heidelberg},
  series = {LNCS},
  title = {Component-based Access Control: Secure Software Composition through Static Analysis},
  url = {http://www.rzo.free.fr/parrend08cbac.php},
  volume = {4954/2008},
  year = {2008}
}
@inproceedings{parrend08cbse,
  abstract = {Java-based systems have evolved from stand-alone applications to multi-component to Service Oriented Programming (SOP) platforms. Each step of this evolution makes a set of Java vulnerabilities directly exploitable by malicious code: access to classes in multi-component platforms, and access to objects in SOP, is often granted to them with no control. This paper defines two taxonomies that characterize vulnerabilities in Java components: the vulnerability categories, and the goals of the attacks that are based on these vulnerabilities. The `vulnerability category' taxonomy is based on three application types: stand-alone, class sharing, and SOP. Entries express the absence of proper security features at places where they are required to build secure component-based systems. The `goal' taxonomy is based on the distinction between undue access, which encompasses the traditional integrity and confidentiality security properties, and denial-of-service. It provides a matching between the vulnerability categories and their consequences. The exploitability of each vulnerability is validated through the development of a pair of malicious and vulnerable components. Experiments are conducted in the context of the OSGi Platform. Based on the vulnerability taxonomies, recommendations for writing hardened component code are issued.},
  address = {Karlsruhe, Germany},
  author = {Pierre Parrend and St\'{e}phane Fr\'{e}not},
  booktitle = {Conference on Component-based Software Engineering (CBSE'2008)},
  keywords = {Software Security, Service-oriented Programming, Component Middleware},
  month = {October},
  publisher = {Springer Berlin / Heidelberg},
  series = {LNCS},
  title = {Classification of Component Vulnerabilities in {J}ava Service Oriented Programming ({SOP}) Platforms},
  url = {http://www.rzo.free.fr/parrend08cbse.php},
  volume = {5282/2008},
  year = {2008}
}
@techreport{Parrend2008bundleVulnerabilities,
  abstract = {Extensible Component Platforms can discover and install code during runtime. Although this feature introduces flexibility, it also brings new security threats: malicious components can quite easily be installed and exploit the rich programming environment and interactions with other components to perform attacks against the system. One example of such environments is the Java/OSGi Platform, which is widespread in the industrial world. Attacks from one component against another cannot be prevented through conventional security mechanisms, since they exploit the lack of proper isolation between them: components often share classes and objects. This report intends to list the vulnerabilities that a component can contain, both from the literature and from our own experience. The Vulnerable Bundle catalog gathers this knowledge. It provides information related to the characteristics of the vulnerabilities, their consequences, the security mechanisms that would help prevent their exploitation, as well as the implementation state of the proof-of-concept bundles that are developed to prove that the vulnerability is actually exploitable. The objective of vulnerability classification is of course to provide tools for identifying and preventing them. A first assessment is performed with existing tools, such as Java Permissions and FindBugs, a specific prototype we developed, WBA (Weak Bundle Analysis), and manual code review.},
  author = {Pierre Parrend and St\'{e}phane Fr\'{e}not},
  institution = {INRIA},
  keywords = {Software Security, Vulnerability Benchmarking, Code Static Analysis, Java Language, Component Platforms, OSGi},
  month = {September},
  number = {RR-6649},
  title = {More Vulnerabilities in the {J}ava/{OSGi} Platform: A Focus on Bundle Interactions},
  url = {http://www.rzo.free.fr/parrend08rr6649.php},
  year = {2008}
}
@article{Parrend2008lobjet,
  author = {Pierre Parrend and St\'{e}phane Fr\'{e}not},
  journal = {Num\'{e}ro sp\'{e}cial de la revue L'Objet - Composants, Services et Aspects : techniques et outils pour la v\'{e}rification},
  title = {V\'{e}rification automatique pour l'ex\'{e}cution s\'{e}curis\'{e}e de composants Java},
  year = {2008}
}
@inproceedings{rathfelder2008a,
  abstract = {The implementation of an enterprise-wide Service Oriented Architecture (SOA) is a complex task. In most cases, evolutional approaches are used to handle this complexity. Maturity models are a possibility to plan and control such an evolution as they allow evaluating the current maturity and identifying current shortcomings. In order to support an SOA implementation, maturity models should also support the selection of the most adequate maturity level and the derivation of a roadmap to this level. Existing SOA maturity models provide only weak assistance with the selection of an adequate maturity level. Most of them are developed by vendors of SOA products and often used to promote their products. In this paper, we introduce our independent SOA Maturity Model (iSOAMM), which is independent of the used technologies and products. In addition to the impacts on IT systems, it reflects the implications on organizational structures and governance. Furthermore, the iSOAMM lists the challenges, benefits and risks associated with each maturity level. This enables enterprises to select the most adequate maturity level for them, which is not necessarily the highest one.},
  address = {Berlin, Heidelberg},
  author = {Rathfelder, Christoph and Groenda, Henning},
  booktitle = {Proceedings of the 8th IFIP International Conference on Distributed Applications and Interoperable Systems (DAIS 2008)},
  day = {4--6},
  location = {Oslo, Norway},
  month = {June},
  pages = {1--15},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rathfelder2008a.pdf},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science (LNCS)},
  title = {i{SOAMM}: {A}n independent {SOA} {M}aturity {M}odel},
  url = {http://www.springerlink.com/content/p38u564015713h55/},
  volume = {5053/2008},
  year = {2008}
}
@article{rathfelder2008c,
  address = {Bonn, Germany},
  author = {Christoph Rathfelder and Henning Groenda},
  journal = {Softwaretechnik-Trends},
  month = {November},
  number = {4},
  pages = {3--7},
  pdf = {http://sdqweb.ipd.kit.edu/publications/pdfs/rathfelder2008c.pdf},
  publisher = {GI (Gesellschaft fuer Informatik)},
  title = {{T}owards an {A}rchitecture {M}aintainability {M}aturity {M}odel ({AM}3)},
  volume = {28},
  year = {2008}
}
@incollection{rathfelder2008b,
  author = {Christoph Rathfelder and Henning Groenda and Ralf Reussner},
  booktitle = {Industrialisierung des Software-Managements: Fachtagung des GI-Fachausschusses Management der Anwendungsentwicklung und -Wartung im Fachbereich Wirtschaftsinformatik (WI-MAW)},
  day = {12--14},
  editor = {Georg Herzwurm and Martin Mikusz},
  isbn = {978-3-88579-233-8, 3-88579-383-0},
  location = {Stuttgart, Germany},
  month = {November},
  pages = {169--180},
  series = {Lecture Notes in Informatics (LNI)},
  title = {{S}oftware {I}ndustrialization and {A}rchitecture {C}ertification},
  volume = {139},
  year = {2008}
}
@inbook{reussner2008b,
  author = {Ralf Reussner and Viktoria Firus},
  booktitle = {Dependability Metrics},
  chapter = {{B}asic and {D}ependent {M}etrics},
  pages = {37--38},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/ex0866r4127562g8/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inbook{reussner2008c,
  author = {Ralf Reussner and Viktoria Firus},
  booktitle = {Dependability Metrics},
  chapter = {{I}ntroduction to {O}verlapping {A}ttributes},
  pages = {243--244},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/d863l5wp7u382227/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@misc{reussner2008a,
  author = {Reussner, Ralf H.},
  language = {ger},
  location = {[Karlsruhe]},
  publisher = {Universit{\"a}tsbibliothek},
  title = {{G}ef{\"o}rdert ab dem {E}rsten {S}emester: die {F}akult{\"a}t f{\"u}r {I}nformatik am {KIT} bietet {S}tipendien f{\"u}r {B}egabte aus einkommensschwachen {F}amilien},
  year = {2008}
}
@book{reussner2008d,
  author = {Reussner, Ralf H. and Hasselbring, Wilhelm},
  edition = {2},
  isbn = {3898645592},
  month = {December},
  publisher = {dPunkt.verlag Heidelberg},
  title = {{H}andbuch der {S}oftware-{A}rchitektur},
  year = {2008}
}
@inbook{sabetta2008a,
  author = {Antonino Sabetta and Heiko Koziolek},
  booktitle = {Dependability Metrics},
  chapter = {{P}erformance {M}etrics in {S}oftware {D}esign {M}odels},
  pages = {219--225},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/j673681x20225266/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inbook{sabetta2008b,
  abstract = {This chapter describes techniques for characterising workloads, which is a prerequisite for obtaining performance measurements in realistic settings, and presents an overview on performance measurement tools such as benchmarks, monitors, and load drivers.},
  author = {Antonino Sabetta and Heiko Koziolek},
  booktitle = {Dependability Metrics},
  chapter = {{M}easuring {P}erformance {M}etrics: {T}echniques and {T}ools},
  pages = {226--232},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/a36582v6512408p7/fulltext.pdf},
  volume = {4909},
  year = {2008}
}
@inproceedings{trifu08a,
  author = {Mircea Trifu},
  booktitle = {Proceedings of the 12th European Conference on Software Maintenance and Reengineering},
  month = {April},
  pages = {193--202},
  publisher = {IEEE},
  title = {Using Dataflow Information for Concern Identification in Object-Oriented Software Systems},
  year = {2008}
}
@techreport{weck2008a,
  abstract = {This report covers the twelfth Workshop on Component-Oriented Programming (WCOP). WCOP has been affiliated with ECOOP since its inception in 1996. The report summarizes the contributions made by authors of accepted position papers as well as those made by all attendees of the workshop sessions.},
  author = {Weck, Wolfgang and Reussner, Ralf H. and Szyperski, Clemens},
  doi = {10.1007/978-3-540-78195-0_12},
  institution = {University of Karlsruhe},
  issn = {0302-9743},
  volume = {4906/2008},
  series = {Object-Oriented Technology. ECOOP 2007 Workshop Reader},
  title = {{C}omponent-{O}riented {P}rogramming: {R}eport on the 12th {W}orkshop {WCOP} at {ECOOP} 2007},
  url = {http://www.springerlink.com/content/y078px241312v295/},
  year = {2008}
}
@proceedings{becker2008h,
  booktitle = {QoSA},
  editor = {Steffen Becker and Frantisek Plasil and Ralf Reussner},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{Q}uality of {S}oftware {A}rchitectures. {M}odels and {A}rchitectures, 4th {I}nternational {C}onference on the {Q}uality of {S}oftware-{A}rchitectures, {Q}o{SA} 2008, {K}arlsruhe, {G}ermany, {O}ctober 14-17, 2008. {P}roceedings},
  volume = {5281},
  year = {2008}
}
@book{eusgeld2008b,
  doi = {10.1007/978-3-540-68947-8},
  editor = {Irene Eusgeld and Felix C. Freiling and Ralf Reussner},
  isbn = {978-3-540-68946-1},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{D}ependability {M}etrics},
  url = {http://www.springerlink.com/content/l81n8832u08r/},
  volume = {4909},
  year = {2008}
}
@book{rausch2008a,
  editor = {Andreas Rausch and Ralf Reussner and Raffaela Mirandola and Frantisek Plasil},
  keywords = {CoCoME},
  publisher = {Springer-Verlag Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{T}he {C}ommon {C}omponent {M}odeling {E}xample: {C}omparing {S}oftware {C}omponent {M}odels},
  url = {http://springerlink.com/content/l8t37r41612l/},
  volume = {5153},
  year = {2008}
}
@mastersthesis{mazkatli2008a,
  author = {Mazkatli, Manar},
  school = {Aleppo University},
  title = {{E-learning system including test engine for the national test of foreign languages in Syria}},
  type = {Bachelor's Thesis},
  year = {2008}
}