# This BibTeX File has been generated by
# the Typo3 extension 'Sixpack-4-T3 by Sixten Boeck'
#
# URL:
# Date: 07/21/2017
# Non-Standard BibTex fields are included.
# state: 0 = published, 1 = accepted, 2 = submitted, 3 = to be published // if missing, published is assumed
# extern,deleted,hidden: 0 = false, 1 = true // if missing, false is assumed
# link format: Title Url // separated by a whitespace

@article{BTD2015A,
  author   = {Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title    = {Animated visualization of spatial-temporal trajectory data for air-traffic analysis},
  journal  = {The Visual Computer},
  year     = {2015},
  volume   = {32},
  number   = {3},
  pages    = {371--381},
  abstract = {With increasing numbers of flights worldwide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. To cope with these challenges, cyber worlds can be used for interactive visual analysis and analytical reasoning based on aircraft trajectory data. However, with growing data size and complexity, visualization requires high computational efficiency to process that data within real-time constraints. This paper presents a technique for real-time animated visualization of massive trajectory data. It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearance, and (3) real-time rendering within 3D virtual environments such as virtual 3D airport or 3D city models. Different visualization metaphors can be efficiently built upon this technique such as temporal focus+context, density maps, or overview+detail methods. As a general-purpose visualization technique, it can be applied to general 3D and 3+1D trajectory data, e.g., traffic movement data, geo-referenced networks, or spatio-temporal data, and it supports related visual analytics and data mining tasks within cyber worlds.},
  files    = {fileadmin/user_upload/fachgebiete/doellner/publications/2015/BTD2015/tvc2015_draft.pdf},
  sorting  = {256}
}

@article{SD2015,
  author   = {Semmo, Amir and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen},
  title    = {Cartography-Oriented Design of {3D} Geospatial Information Visualization - Overview and Techniques},
  journal  = {The Cartographic Journal},
  year     = {2015},
  volume   = {52},
  number   = {2},
  pages    = {95--106},
  abstract = {In economy, society and personal life map-based, interactive geospatial visualization becomes a natural element of a growing number of applications and systems. The visualization of 3D geospatial information, however, raises the question how to represent the information in an effective way. Considerable research has been done in technology-driven directions in the fields of cartography and computer graphics (e.g., design principles, visualization techniques). Here, non-photorealistic rendering represents a promising visualization category--situated between both fields--that offers a large number of degrees for the cartography-oriented visual design of complex 2D and 3D geospatial information for a given application context. Still today, however, specifications and techniques for mapping cartographic design principles to the state-of-the-art rendering pipeline of 3D computer graphics remain to be explored. This paper revisits cartographic design principles for 3D geospatial visualization and introduces an extended 3D semiotic model that complies with the general, interactive visualization pipeline. Based on this model, we propose non-photorealistic rendering techniques to interactively synthesize cartographic renditions of basic feature types, such as terrain, water, and buildings. In particular, it includes a novel iconification concept to seamlessly interpolate between photorealistic and cartographic representations of 3D landmarks. Our work concludes with a discussion of open challenges in this field of research, including topics such as user interaction and evaluation.},
  files    = {fileadmin/user_upload/fachgebiete/doellner/publications/2015/STJD2015/icc2015_semmo_authors_version.pdf},
  doi      = {10.1080/00087041.2015.1119462},
  sorting  = {2816}
}

@article{PSTD2014,
  author   = {Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title    = {Multi-Perspective {3D} Panoramas},
  journal  = {International Journal of Geographical Information Science (IJGIS)},
  year     = {2014},
  volume   = {28},
  number   = {10},
  pages    = {2030--2051},
  abstract = {This article presents multi-perspective 3D panoramas that focus on visualizing 3D geovirtual environments (3D GeoVEs) for navigation and exploration tasks. Their key element, a multi-perspective view, seamlessly combines what is seen from multiple viewpoints into a single image. This approach facilitates the presentation of information for virtual 3D city and landscape models, particularly by reducing occlusions, increasing screen-space utilization, and providing additional context within a single image. We complement multi-perspective views with cartographic visualization techniques to stylize features according to their semantics and highlight important or prioritized information. When combined, both techniques constitute the core implementation of interactive, multi-perspective 3D panoramas. They offer a large number of effective means for visual communication of 3D spatial information, a high degree of customization with respect to cartographic design, and manifold applications in different domains. We discuss design decisions of 3D panoramas for the exploration of and navigation in 3D GeoVEs. We also discuss a preliminary user study that indicates that 3D panoramas are a promising approach for navigation systems using 3D GeoVEs.},
  keywords = {multi-perspective visualization, panorama, focus+context visualization, 3D geovirtual environments, cartographic design},
  project  = {HPI;NFGII},
  doi      = {10.1080/13658816.2014.922686},
  link1    = {http://dx.doi.org/10.1080/13658816.2014.922686},
  sorting  = {1792}
}

@article{TD2013,
  author   = {Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title    = {{2.5D} Clip-Surfaces for Technical Visualization},
  journal  = {Journal of WSCG},
  year     = {2013},
  volume   = {21},
  number   = {1},
  pages    = {89--96},
  month    = {6},
  abstract = {The concept of clipping planes is well known in computer graphics and can be used to create cut-away views. But clipping against just analytical defined planes is not always suitable for communicating every aspect of such visualization. For example, in hand-drawn technical illustrations, artists tend to communicate the difference between a cut and a model feature by using non-regular, sketchy cut lines instead of straight ones. To enable this functionality in computer graphics, this paper presents a technique for applying 2.5D clip-surfaces in real-time. Therefore, the clip plane equation is extended with an additional offset map, which can be represented by a texture map that contains height values. Clipping is then performed by varying the clip plane equation with respect to such an offset map. Further, a capping technique is proposed that enables the rendering of caps onto the clipped area to convey the impression of solid material. It avoids a re-meshing of a solid polygonal mesh after clipping is performed. Our approach is pixel precise, applicable in real-time, and takes fully advantage of graphics accelerators.
},
  affiliation = {Hasso-Plattner-Institut, University of Potsdam, Germany},
  keywords  = {clipping planes, real-time rendering, technical 3D visualization},
  editor    = {V{\'a}clav Skala},
  publisher = {Union Agency},
  address   = {Na Mazinach 9, CZ 322 00 Plzen, Czech Republic},
  booktitle = {Proceedings of WSCG 2013: 21st International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision},
  project   = {NFGII},
  issn      = {1213-6972},
  link1     = {Video (Youtube) http://www.youtube.com/watch?v=mBasfz37VoY},
  link2     = {Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1861946-5d-clip-surfaces-technical-visualization/},
  link3     = {Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/TD2013/clipping.pdf},
  sorting   = {1792}
}

@article{GTD12,
  author    = {Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Concepts for Automatic Generalization of Virtual {3D} Landscape Models},
  journal   = {gis.SCIENCE},
  year      = {2012},
  volume    = {25},
  number    = {1},
  pages     = {18--23},
  month     = {3},
  abstract  = {This paper discusses concepts for the automatic generalization of virtual 3D landscape models. As complexity, heterogeneity, and diversity of geodata that constitute landscape models are constantly growing, the need for landscape models that generalize their contents to a consistent, coherent level-of-abstraction and information density becomes an essential requirement for applications such as in conceptual landscape design, simulation and analysis, and mobile mapping. We discuss concepts of generalization and working principles as well as the concept of level-of-abstraction. We furthermore present three exemplary automated techniques for generalizing 3D landscape models, including a geometric generalization technique that generates discrete iso-surfaces of 3D terrain models in real-time, a geometric generalization technique for site and building models, and a real-time generalization lens technique.},
  affiliation = {Hasso-Plattner-Institut, University of Potsdam},
  note      = {Cover Image},
  publisher = {Wichmann Verlag},
  issn      = {1869-9391},
  sorting   = {4096}
}

@article{TSPHDEH12,
  author    = {Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and D{\"o}llner, J{\"u}rgen and Eichhorn, Michael and Heinzelmann, Michael},
  title     = {Colonia {3D} - Communication of Virtual {3D} Reconstructions in Public Spaces},
  journal   = {International Journal of Heritage in the Digital Era (IJHDE)},
  year      = {2012},
  volume    = {1},
  number    = {1},
  pages     = {45--74},
  month     = {1},
  abstract  = {The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artifacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne.},
  affiliation = {Hasso-Plattner-Institut, University of Potsdam},
  note      = {Cover Image},
  editor    = {Marinos Ioannides},
  publisher = {Multi-Science Publishing},
  issn      = {2047-4970},
  doi       = {10.1260/2047-4970.1.1.45},
  link1     = {Paper (HQ) http://multi-science.metapress.com/content/b4wn417605744380/fulltext.pdf},
  sorting   = {4608}
}

@article{STKD12,
  author    = {Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen},
  title     = {Interactive Visualization of Generalized Virtual {3D} City Models using Level-of-Abstraction Transitions},
  journal   = {Computer Graphics Forum},
  year      = {2012},
  volume    = {31},
  number    = {3},
  pages     = {885--894},
  abstract  = {Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary. Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.},
  note      = {Proceedings EuroVis 2012},
  project   = {NFGII},
  files     = {fileadmin/user_upload/fachgebiete/doellner/publications/2012/STKD12/asemmo-eurovis2012.pdf},
  doi       = {10.1111/j.1467-8659.2012.03081.x},
  link1     = {Video (Youtube) http://www.youtube.com/watch?v=VXqtw44KxY4},
  sorting   = {2304}
}

@article{SHTD2012,
  author    = {Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Concepts for Cartography-Oriented Visualization of Virtual {3D} City Models},
  journal   = {Photogrammetrie - Fernerkundung - Geoinformation (PFG)},
  year      = {2012},
  number    = {4},
  pages     = {455--465},
  abstract  = {Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.},
  keywords  = {3D city models, cartography-oriented visualization, style description languages, real-time rendering},
  publisher = {E. Schweizerbart'sche Verlagsbuchhandlung},
  address   = {Johannesstrasse 3A, D-70176 Stuttgart, Germany},
  project   = {NFGII},
  files     = {fileadmin/user_upload/fachgebiete/doellner/publications/2012/SHTD2012/asemmo-PFG2012.pdf},
  issn      = {1432-8364},
  doi       = {10.1127/1432-8364/2012/0131},
  sorting   = {16}
}

@article{TSLHD11,
  author    = {Trapp, Matthias and Schneider, Lars and Lehmann, Christine and Holz, Norman and D{\"o}llner, J{\"u}rgen},
  title     = {Strategies for Visualizing {3D} Points-of-Interest on Mobile Devices},
  journal   = {Journal of Location Based Services (JLBS)},
  year      = {2011},
  volume    = {5},
  number    = {2},
  pages     = {79--99},
  month     = {6},
  abstract  = {3D virtual environments are increasingly used as general-purpose medium for communicating spatial information. In particular, virtual 3D city models have numerous applications such as car navigation, city marketing, tourism, and gaming. In these applications, points-of-interest (POI) play a major role since they typically represent features relevant for specific user tasks and facilitate effective user orientation and navigation through the 3D virtual environment. In this paper, we present strategies that aim at effectively visualizing points-of-interest in a 3D virtual environment used on mobile devices. Here, we additionally have to face the ``keyhole'' situation, i.e., the users can realize only a small part of the environment due to the limited view space and resolution. For the effective visualization of points-of-interest in 3D virtual environments we propose to combine specialized occlusion management for 3D scenes together with visual cues that handle out-of-frame points-of-interest.
We also discuss general aspects and definitions of points-of-interest in the scope of 3D models and outline a prototype implementation of the mobile 3D viewer application based on the presented concepts. In addition, we give a first performance evaluation with respect to rendering speed and power consumptions.},
  keywords  = {3D visualisation, mobile devices, points-of-interest, real-time rendering},
  publisher = {Taylor \& Francis, Inc.},
  address   = {Bristol, PA, USA},
  booktitle = {Journal of Location Based Services (JLBS)},
  project   = {NFG},
  issn      = {1748-9725},
  doi       = {10.1080/17489725.2011.579579},
  sorting   = {1536}
}

@article{PTD11,
  author    = {Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Multiscale Visualization of {3D} Geovirtual Environments Using View-Dependent Multi-Perspective Views},
  journal   = {Journal of WSCG},
  year      = {2011},
  volume    = {19},
  number    = {3},
  pages     = {111--118},
  month     = {2},
  abstract  = {3D geovirtual environments (GeoVEs), such as virtual 3D city models or landscape models, are essential visualization tools for effectively communicating complex spatial information. In this paper, we discuss how these environments can be visualized using multi-perspective projections based on view-dependent global deformations. Multi-perspective projections enable 3D visualization similar to panoramic maps, increasing overview and information density in depictions of 3D GeoVEs. To make multi-perspective views an effective medium, they must adjust to the orientation of the virtual camera controlled by the user and constrained by the environment. Thus, changing multi-perspective camera configurations typically require the user to manually adapt the global deformation---an error prone, non-intuitive, and often time-consuming task. Our main contribution comprises a concept for the automatic and view-dependent interpolation of different global deformation preset configurations. Applications and systems that implement such view-dependent global deformations, allow users to smoothly and steadily interact with and navigate through multi-perspective 3D GeoVEs.},
  keywords  = {multi-perspective views, view-dependence, global space deformation, realtime rendering, virtual 3D environments, geovisualization.},
  editor    = {V{\'a}clav Skala},
  publisher = {UNION Agency -- Science Press},
  project   = {NFG;HPI},
  files     = {fileadmin/user_upload/fachgebiete/doellner/publications/2011/PTD11/PTD11.pdf},
  isbn      = {978-80-86943-84-8},
  issn      = {1213-6072},
  internal-note = {NOTE(review): ISSN 1213-6072 differs from the Journal of WSCG ISSN 1213-6972 given in entry TD2013 -- verify which is correct},
  link2     = {Video [YouTube] http://www.youtube.com/watch?v=gzZXTXBwccY},
  sorting   = {2816}
}

@article{LTD09,
  author    = {Lorenz, Haik and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Interaktive, multiperspektivische Ansichten f{\"u}r geovirtuelle {3D}-Umgebungen},
  journal   = {Kartographische Nachrichten},
  year      = {2009},
  volume    = {04},
  pages     = {175--181},
  month     = {9},
  abstract  = {In diesem Beitrag werden Visualisierungstechniken vorgestellt, die auf den Gestaltungsprinzipien von Panoramakarten und Detail- und {\"U}berblicksdarstellungen beruhen. Die Techniken generieren multiperspektivische Ansichten f{\"u}r geovirtuelle 3D-Umgebungen, insbesondere f{\"u}r zwei h{\"a}ufig ben{\"o}tigte Ansichtsformen, die Vogelperspektive und die Fu{\ss}g{\"a}ngerperspektive. Die Techniken tragen dazu bei, die Bandbreite der computergest{\"u}tzten, interaktiven 3D-Darstellungen zur Visualisierung von virtuellen 3D-Raummodellen zu erweitern und die Effektivit{\"a}t raumbezogener Informationsdarstellungen im Hinblick auf Ortsbewusstsein und Informationsgehalt zu verbessern.},
  publisher = {Kirschbaum Verlag GmbH Bonn, Fachverlag f{\"u}r Verkehr und Technik},
  project   = {NFG},
  sorting   = {1280}
}

@inbook{SBTD2017,
  author    = {Scheibel, Willy and Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Attributed Vertex Clouds},
  year      = {2017},
  month     = {3},
  abstract  = {In today's computer graphics applications, large 3D scenes are rendered which consist of polygonal geometries such as triangle meshes. Using state-of-the-art techniques, this geometry is often represented on the GPU using vertex and index buffers, as well as additional auxiliary data such as textures or uniform buffers. For polygonal meshes of arbitrary complexity, the described approach is indispensable. However, there are several types of simpler geometries (e.g., cuboids, spheres, tubes, or splats) that can be generated procedurally. We present an efficient data representation and rendering concept for such geometries, denoted as attributed vertex clouds (AVCs). Using this approach, geometry is generated on the GPU during execution of the programmable rendering pipeline. Each vertex is used as the argument for a function that procedurally generates the target geometry. This function is called a transfer function, and it is implemented using shader programs and therefore executed as part of the rendering process. This approach allows for compact geometry representation and results in reduced memory footprints in comparison to traditional representations. By shifting geometry generation to the GPU, the resulting volatile geometry can be controlled flexibly, i.e., its position, parameterization, and even the type of geometry can be modified without requiring state changes or uploading new data to the GPU. Performance measurements suggest improved rendering times and reduced memory transmission through the rendering pipeline.},
  editor    = {Christopher Oat},
  publisher = {Wolfgang Engel},
  series    = {GPU Pro},
  edition   = {8},
  booktitle = {GPU Zen},
  project   = {HPI;NFGII;BIMAP},
  institution = {Hasso Plattner Institute, University of Potsdam},
  sorting   = {4},
  state     = {3}
}

@inproceedings{BTD2014,
  author    = {Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Real-Time Animated Visualization of Massive Air-Traffic Trajectories},
  year      = {2014},
  pages     = {172--181},
  abstract  = {With increasing numbers of flights world-wide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. Interactive and dynamic 3D visualization and visual analysis of massive aircraft trajectories, i.e., analytical reasoning enabled by interactive cyber worlds, can be used to approach these challenges. To facilitate this kind of analysis, especially in the context of real-time data, interactive tools for filtering, mapping, and rendering are required. In particular, the mapping process should be configurable at run-time and support both static mappings and animations to allow users to effectively explore and realize movement dynamics. However, with growing data size and complexity, these stages of the visualization pipeline require computational efficient implementations to be capable of processing within real-time constraints. This paper presents an approach for real-time animated visualization of massive air-traffic data, that implements all stages of the visualization pipeline based on GPU techniques for efficient processing. It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearances, as well as (3) real-time rendering within 3D virtual environments, such as virtual 3D airport and city models. Based on this pipeline, different visualization metaphors (e.g., temporal focus+context, density maps, and overview+detail visualization) are implemented and discussed. The presented concepts and implementation can be generally used as visual analytics and data mining techniques in cyber worlds, e.g., to visualize movement data, geo-referenced networks, or other spatio-temporal data.},
  keywords  = {spatio-temporal visualization, trajectory visualization, 3D visualization, visual analytics, real-time rendering},
  publisher = {IEEE Computer Society},
  booktitle = {Proceedings of CyberWorlds 2014},
  project   = {NFGII},
  files     = {fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/cw2014_draft.pdf},
  isbn      = {978-1-4799-4677-8/14},
  doi       = {10.1109/CW.2014.32},
  sorting   = {1024}
}

@incollection{TD08c,
  author    = {Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Generalization of Single-Center Projections Using Projection Tile Screens},
  year      = {2008},
  editor    = {Jos{\'e} Braz and Alpesh Kumar Ranchordas and Jo{\~a}o Madeiras Pereira and H{\'e}lder J. Ara{\'u}jo},
  publisher = {Springer},
  series    = {Communications in Computer and Information Science (CCIS)},
  booktitle = {Advances in Computer Graphics and Computer Vision (VISIGRAPP)},
  project   = {NFG},
  link1     = {Video (Youtube) http://www.youtube.com/watch?v=Y6SBylq5SFA},
  sorting   = {128}
}

@inproceedings{Buschmann2012a,
  author    = {Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen},
  title     = {Challenges and Approaches for the Visualization of Movement Trajectories in {3D} Geovirtual Environments},
  year      = {2012},
  abstract  = {The visualization of trajectories and their attributes represents an essential functionality for spatio-temporal data visualization and analysis. Many visualization methods, however, focus mainly on sparse 2D movements or consider only the 2D components of movements. This paper is concerned with true 3D movement data, i.e., movements that take place in the three-dimensional space and which characteristics significantly depend on all dimensions. In this case, spatio-temporal visualization approaches need to map all three spatial dimensions together with required mappings for associated attributes. We describe visualization approaches for true 3D movement data and evaluate their application within 3D geovirtual environments. We also identify challenges and propose approaches for the interactive visualization of 3D movement data using 3D geovirtual environments as scenery.},
  keywords  = {spatio-temporal data, trajectories, interactive 3D visualization, visual analytics},
  booktitle = {GIScience workshop on GeoVisual Analytics, Time to Focus on Time},
  project   = {NFGII},
  files     = {fileadmin/user_upload/fachgebiete/doellner/publications/2012/BTD2012/geovat2012_paper.pdf},
  sorting   = {1024}
}

@inproceedings{SDTKDP2016,
  author    = {Semmo, Amir and D{\"u}rschmid, Tobias and Trapp, Matthias and Klingbeil, Mandy and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian},
  title     = {Interactive Image Filtering with Multiple Levels-of-Control on Mobile Devices},
  year      = {2016},
  month     = {12},
  abstract  = {
With the continuous development of mobile graphics hardware, interactive high-quality image stylization based on nonlinear filtering is becoming feasible and increasingly used in casual creativity apps. However, these apps often only serve high-level controls to parameterize image filters and generally lack support for low-level (artistic) control, thus automating art creation rather than assisting it. This work presents a GPU-based framework that enables to parameterize image filters at three levels of control: (1) presets followed by (2) global parameter adjustments can be interactively refined by (3) complementary on-screen painting that operates within the filters' parameter spaces for local adjustments. The framework provides a modular XML-based effect scheme to effectively build complex image processing chains-using these interactive filters as building blocks-that can be efficiently processed on mobile devices. Thereby, global and local parameterizations are directed with higher-level algorithmic support to ease the interactive editing process, which is demonstrated by state-of-the-art stylization effects, such as oil paint filtering and watercolor rendering.
}, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDTKDP2016/asemmo-mgia2016-authors-version.pdf }, doi = { 10.1145/2999508.2999521 }, sorting = { 768 } } @inproceedings{STHD2016, author = { Schoedon, Alexander and Trapp, Matthias and Hollburg, Henning and Döllner, Jürgen }, title = { Interactive Web-based Visualization for Accessibility Mapping of Transportation Networks }, year = { 2016 }, month = { 6 }, abstract = { Accessibility is a fundamental aspect in transportation, routing, and spare-time activity planning concerning traveling in modern cities. In this context, interactive web-based accessibility-map visualization techniques and systems are important tools for provisioning, exploration, analysis, and assessment of multi-modal and location-based travel time data and routing information. To enable their effective application, such interactive visualization techniques demands for flexible mappings with respect to user-adjustable parameters such as maximum travel times, the types of transportation used, or used color schemes. However, traditional approaches for web-based visualization of accessibility-maps do not allow this degree of parametrization without significant latencies introduced by required data processing and transmission between the routing server and the visualization client. This paper presents a novel web-based visualization technique that allows for efficient client-side mapping and rendering of accessibility data onto transportation networks using WebGL and the OpenGL transmission format. A performance evaluation and comparison shows the superior performance of the approach over alternative implementations. 
}, booktitle = { Proceedings of EuroVis 2016 - Short Papers }, project = { NFG-II, MOBIE }, sorting = { 2304 } } @inproceedings{LFHTD, author = { Limberger, Daniel and Fiedler, Carolin and Hahn, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluation of Sketchiness as a Visual Variable for 2.5D Treemaps }, year = { 2016 }, month = { 5 }, abstract = {

Interactive 2.5D treemaps serve as an effective tool for the visualization of attributed hierarchies, enabling exploration of non-spatial, multi-variate, hierarchical data. In this paper the suitability of sketchiness as a visual variable, e.g., for uncertainty, is evaluated. Therefore, a design space for sketchy rendering in 2.5D and integration details for real-time applications are presented. The results of three user studies indicate, that sketchiness is a promising candidate for a visual variable that can be used independently and in addition to others, e.g., color and height.

© The Authors 2016. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 20th International Conference on Information Visualization (IV'16).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Analytics, 2.5D Treemaps, Sketchiness, Visual Variables, Uncertainty }, booktitle = { Proceedings of the 20th International Conference on Information Visualization (IV'16) }, project = { HPI;NFGII }, sorting = { 2048 } } @inproceedings{STDDP2016, author = { Semmo, Amir and Trapp, Matthias and D{\"u}rschmid, Tobias and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Multi-scale Oil Paint Filtering on Mobile Devices }, year = { 2016 }, abstract = {
This work presents an interactive mobile implementation of a filter that transforms images into an oil paint look. At this, a multi-scale approach that processes image pyramids is introduced that uses flow-based joint bilateral upsampling to achieve deliberate levels of abstraction at multiple scales and interactive frame rates. The approach facilitates the implementation of interactive tools that adjust the appearance of filtering effects at run-time, which is demonstrated by an on-screen painting interface for per-pixel parameterization that fosters the casual creativity of non-artists.
}, booktitle = { Proceedings ACM SIGGRAPH Posters }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STDDP2016/asemmo-siggraph2016-poster.pdf }, doi = { 10.1145/2945078.2945120 }, sorting = { 1536 } } @inproceedings{STPD2016, author = { Semmo, Amir and Trapp, Matthias and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Oil Paint Filtering On Mobile Devices }, year = { 2016 }, abstract = {
Image stylization enjoys a growing popularity on mobile devices to foster casual creativity. However, the implementation and provision of high-quality image filters for artistic rendering is still faced by the inherent limitations of mobile graphics hardware such as computing power and memory resources. This work presents a mobile implementation of a filter that transforms images into an oil paint look, thereby highlighting concepts and techniques on how to perform multi-stage nonlinear image filtering on mobile devices. The proposed implementation is based on OpenGL ES and the OpenGL ES shading language, and supports on-screen painting to interactively adjust the appearance in local image regions, e.g., to vary the level of abstraction, brush, and stroke direction. Evaluations of the implementation indicate interactive performance and results that are of similar aesthetic quality than its original desktop variant.
}, booktitle = { Expressive 2016 - Posters, Artworks, and Bridging Papers }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STPD2016/asemmo-exressive2016-poster.pdf }, doi = { 10.2312/exp.20161255 }, sorting = { 1280 } } @inproceedings{VTD2016, author = { Vollmer, Jan Ole and Trapp, Matthias and Döllner, Jürgen }, title = { Interactive GPU-based Image Deformation for Mobile Devices }, year = { 2016 }, abstract = { Interactive image deformation is an important feature of modern image processing pipelines. It is often used to create caricatures and animation for input images, especially photos. State-of-the-art image deformation techniques are based on transforming vertices of a mesh, which is textured by the input image, using affine transformations such as translation, and scaling. However, the resulting visual quality of the output image depends on the geometric resolution of the mesh. Performing these transformations on the CPU often further inhibits performance and quality. This is especially problematic on mobile devices where the limited computational power reduces the maximum achievable quality. To overcome these issue, we propose the concept of an intermediate deformation buffer that stores deformation information at a resolution independent of the mesh resolution. This allows the combination of a high-resolution buffer with a low-resolution mesh for interactive preview, as well as a high-resolution mesh to export the final image. Further, we present a fully GPU-based implementation of this concept, taking advantage of modern OpenGL ES features, such as compute shaders. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { image deformation, image warping }, publisher = { The Eurographics Association }, booktitle = { Computer Graphics and Visual Computing (CGVC) }, isbn = { 978-3-03868-022-2 }, doi = { 10.2312/cgvc.20161303 }, sorting = { 3072 } } @inproceedings{STD2016, author = { Scheibel, Willy and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interactive Revision Exploration using Small Multiples of Software Maps }, year = { 2016 }, pages = { 131-138 }, abstract = { To explore and to compare different revisions of complex software systems is a challenging task as it requires to constantly switch between different revisions and the corresponding information visualization. This paper proposes to combine the concept of small multiples and focus+context techniques for software maps to facilitate the comparison of multiple software map themes and revisions simultaneously on a single screen. This approach reduces the amount of switches and helps to preserve the mental map of the user. Given a software project the small multiples are based on a common dataset but are specialized by specific revisions and themes. The small multiples are arranged in a matrix where rows and columns represents different themes and revisions, respectively. To ensure scalability of the visualization technique we also discuss two rendering pipelines to ensure interactive frame-rates. The capabilities of the proposed visualization technique are demonstrated in a collaborative exploration setting using a high-resolution, multi-touch display. 
}, affiliation = { Hasso Plattner Institute, University of Potsdam }, keywords = { Software visualization, visual analytics, software maps, small multiples, interactive visualization techniques }, series = { IVAPP 2016 }, booktitle = { 7th International Conference on Information Visualization Theory and Applications }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples_ivapp2016-short.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples-poster-landscape.pdf }, sorting = { 2816 } } @inproceedings{HTWD15, author = { Hahn, Sebastian and Trapp, Matthias and Wuttke, Nikolai and D{\"o}llner, J{\"u}rgen }, title = { ThreadCity: Combined Visualization of Structure and Activity for the Exploration of Multi-threaded Software Systems }, year = { 2015 }, month = { 7 }, abstract = {

This paper presents a novel visualization technique for the interactive exploration of multi-threaded software systems. It combines the visualization of static system structure based on the EvoStreets approach with an additional traffic metaphor to communicate the runtime characteristics of multiple threads simultaneously. To improve visual scalability with respect to the visualization of complex software systems, we further present an effective level-of-detail visualization based on hierarchical aggregation of system components by taking viewing parameters into account. We demonstrate our technique by means of a prototypical implementation and compare our result with existing visualization techniques.

© The Authors 2015. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 19th International Conference on Information Visualization (IV'15).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Software Analytics, Trace-Visualization, Multi-threaded Software Systems }, booktitle = { Proceedings of the 19th International Conference on Information Visualization (IV'15) }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/HTWD2015/ThreadCity.pdf }, sorting = { 2048 } } @inproceedings{TSD2015, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering and Stylization of Transportation Networks Using Distance Fields }, year = { 2015 }, pages = { 207--219 }, abstract = {
Transportation networks, such as streets, railroads or metro systems, constitute primary elements in cartography for reckoning and navigation. In recent years, they have become an increasingly important part of 3D virtual environments for the interactive analysis and communication of complex hierarchical information, for example in routing, logistics optimization, and disaster management. A variety of rendering techniques have been proposed that deal with integrating transportation networks within these environments, but have so far neglected the many challenges of an interactive design process to adapt their spatial and thematic granularity (i.e., level-of-detail and level-of-abstraction) according to a user's context. This paper presents an efficient real-time rendering technique for the view-dependent rendering of geometrically complex transportation networks within 3D virtual environments. Our technique is based on distance fields using deferred texturing that shifts the design process to the shading stage for real-time stylization. We demonstrate and discuss our approach by means of street networks using cartographic design principles for context-aware stylization, including view-dependent scaling for clutter reduction, contour-lining to provide figure-ground, handling of street crossings via shading-based blending, and task-dependent colorization. Finally, we present potential usage scenarios and applications together with a performance evaluation of our implementation.
}, keywords = { transportation networks, 3D visualization, image-based rendering, distance fields, shading, map design }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TSD2015/streets.pdf }, sorting = { 3072 } } @inproceedings{TD2015, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Geometry Batching Using Texture-Arrays }, year = { 2015 }, pages = { 239--246 }, abstract = {
High-quality rendering of 3D virtual environments typically depends on high-quality 3D models with significant geometric complexity and texture data. One major bottleneck for real-time image-synthesis represents the number of state changes, which a specific rendering API has to perform. To improve performance, batching can be used to group and sort geometric primitives into batches to reduce the number of required state changes, whereas the size of the batches determines the number of required draw-calls, and therefore, is critical for rendering performance. For example, in the case of texture atlases, which provide an approach for efficient texture management, the batch size is limited by the efficiency of the texture-packing algorithm and the texture resolution itself. This paper presents a pre-processing approach and rendering technique that overcomes these limitations by further grouping textures or texture atlases and thus enables the creation of larger geometry batches. It is based on texture arrays in combination with an additional indexing schema that is evaluated at run-time using shader programs. This type of texture management is especially suitable for real-time rendering of large-scale texture-rich 3D virtual environments, such as virtual city and landscape models.
}, keywords = { Batching, Texture-array Processing, Real-time Rendering. }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TD2015/TextureStacks.pdf }, sorting = { 3328 } } @inproceedings{MTD2015, author = { Meier, Benjamin-Heinz and Trapp, Matthias and Döllner, Jürgen }, title = { VideoMR: A Map and Reduce Framework for Real-time Video Processing }, year = { 2015 }, abstract = { This paper presents VideoMR: a novel map and reduce framework for real-time video processing on graphic processing units (GPUs). Using the advantages of implicit parallelism and bounded memory allocation, our approach enables developers to focus on implementing video operations without taking care of GPU memory handling or the details of code parallelization. Therefore, a new concept for map and reduce is introduced, redefining both operations to fit to the specific requirements of video processing. A prototypical implementation using OpenGL facilitates various operating platforms, including mobile development, and will be widely interoperable with other state-of-the-art video processing frameworks. 
}, url = { file:195536 }, booktitle = { International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/MTD2015/wscgVideoMR.pdf }, sorting = { 1792 } } @inproceedings{BTD2015, author = { Buschmann, Stefan and Trapp, Matthias and Döllner, Jürgen }, title = { Real-Time Visualization of Massive Movement Data in Digital Landscapes }, year = { 2015 }, pages = { 213-220 }, abstract = { Due to continuing advances in sensor technology and increasing availability of digital infrastructure that allows for acquisition, transfer, and storage of big data sets, large amounts of movement data (e.g., road, naval, or air-traffic) become available. In the near future, movement data such as traffic data may even be available in real-time. In a growing number of application fields (e.g., landscape planning and design, urban development, and infrastructure planning), movement data enables new analysis and simulation applications. In this paper, we present an interactive technique for visualizing massive 3D movement trajectories. It is based on mapping massive movement data to graphics primitives and their visual variables in real-time, supporting a number of visualization schemes such as sphere, line, or tube-based trajectories, including animations of direction and speed. This generic technique enhances the functionality of VR and interactive 3D systems using virtual environments such as digital landscape models, city models, or virtual globes by adding support for this important category of spatio-temporal data. 
}, booktitle = { 16th Conference on Digital Landscape Architecture (DLA 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/BTD2015/dla2015-draft.pdf }, sorting = { 1536 } } @inproceedings{WTLD2015, author = { Würfel, Hannes and Trapp, Matthias and Limberger, Daniel and Döllner, Jürgen }, title = { Natural Phenomena as Metaphors for Visualization of Trend Data in Interactive Software Maps }, year = { 2015 }, abstract = { Software maps are a commonly used tool for code quality monitoring in software-development projects and decision making processes. While providing an important visualization technique for the hierarchical system structure of a single software revision, they lack capabilities with respect to the visualization of changes over multiple revisions. This paper presents a novel technique for visualizing the evolution of the software system structure based on software metric trends. These trend maps extend software maps by using real-time rendering techniques for natural phenomena yielding additional visual variables that can be effectively used for the communication of changes. Therefore, trend data is automatically computed by hierarchically aggregating software metrics. We demonstrate and discuss the presented technique using two real world data sets of complex software systems. 
}, url = { file:195534 }, booktitle = { Computer Graphics and Visual Computing (CGVC) }, organization = { The Eurographics Association }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/WTLD2015/natural-metaphors-cgvc2015-final.pdf }, doi = { 10.2312/cgvc.20151246 }, sorting = { 768 } } @inproceedings{DRST2014, author = { D{\"u}bel, Steve and R{\"o}hlig, Martin and Schumann, Heidrun and Trapp, Matthias }, title = { 2D and 3D Presentation of Spatial Data: A Systematic Review }, year = { 2014 }, month = { 11 }, abstract = { The question whether to use 2D or 3D for data visualization is generally difficult to decide. Two-dimensional and three-dimensional visualization techniques exhibit different advantages and disadvantages related to various perceptual and technical aspects such as occlusion, clutter, distortion, or scalability. To facilitate problem understanding and comparison of existing visualization techniques with regard to these aspects, this report introduces a systematization based on presentation characteristics. It enables a categorization with respect to combinations of static 2D and 3D presentations of attributes and their spatial reference. Further, it complements existing systematizations of data in an effort to formalize a common terminology and theoretical framework for this problem domain. We demonstrate our approach by reviewing different visualization techniques of spatial data according to the presented systematization. 
}, url = { file:195535 }, booktitle = { IEEE VIS International Workshop on 3DVis }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/DRST2014/survey2d3d.pdf }, sorting = { 64 } } @inproceedings{KHTD2014, author = { Klimke, Jan and Hagedorn, Benjamin and Trapp, Matthias and Döllner, Jürgen }, title = { Web-based and Mobile Provisioning of Virtual 3D Reconstructions }, year = { 2014 }, pages = { 17--28 }, month = { 5 }, abstract = { Communication of cultural heritage by means of digital information systems has been gaining more and more importance over recent years. Interactive virtual 3D applications enable users to explore 3D virtual reconstructions in real-time, to directly interact with the contained digital cultural heritage artifacts, and to obtain insights into this data. Nevertheless, these artifacts are usually very detailed and complex 3D models that are hard to handle for end-user systems. This paper presents the concept and a prototypical implementation of an image-based, web-based approach for the communication of digital cultural heritage and its provisioning for the Web and mobile devices by the example of the project Colonia3D – a high-detail, virtual reconstruction and high-detail 3D city model of Roman Cologne. Through this web-based and mobile provisioning, complex digital reconstructions can be used, e.g., on-site to match local findings and reconstructions. }, editor = { R. Franken-Wendelstorf and E. Lindinger and J. Sieck }, publisher = { Werner Hülsbusch Verlag }, chapter = { 2 }, booktitle = { Tagungsband der 12. 
Konferenz Kultur und Informatik: Reality and Virtuality }, files = { fileadmin/user_upload/fachgebiete/doellner/People/jklimke/Web-based-and-Mobile-Provisioning-of-Virtual-3D-Reconstructions.pdf }, isbn = { 978-3-86488-064-3 }, sorting = { 2048 } } @inproceedings{BTLD2014, author = { Buschmann, Stefan and Trapp, Matthias and L{\"u}hne, Patrick and D{\"o}llner, J{\"u}rgen }, title = { Hardware-Accelerated Attribute Mapping for Interactive Visualization of Complex 3D Trajectories }, year = { 2014 }, pages = { 355--363 }, month = { 1 }, abstract = { The visualization of 3D trajectories of moving objects and related attributes in 3D virtual environments represents a fundamental functionality in various visualization domains. Interactive rendering and visual analytics of such attributed trajectories involves both conceptual questions as well as technical challenges. Specifically, the mapping of trajectory attributes to rendering primitives and appearance represents a challenging task in the case of large data sets of high geometric complexity. There are various visualization approaches and rendering techniques considering specific aspects of these mappings to facilitate visualization and analysis of this kind of data. To solve the underlying general mapping problem efficiently, we developed an approach that uses and combines diverse types of visualizations, rather than being tailored to a specific use case. This paper describes an interactive rendering system for the visualization of 3D trajectories that enables the combinations of different mappings as well as their dynamic configuration at runtime. A fully hardware-accelerated implementation enables the processing of large sets of attributed 3D trajectories in real-time. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D Attributed Trajectories, Real-time Rendering, Attribute Mapping }, publisher = { SCITEPRESS – Science and Technology Publications }, booktitle = { Proceedings of the 5th International Conference on Information Visualization Theory and Applications (IVAPP 2014) }, project = { NFGII }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/appearance-mapping.pdf }, sorting = { 2560 } } @inproceedings{LTSD13, author = { Lux, Roland and Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Projective Texturing for Non-Photorealistic Shading of Technical 3D Models }, year = { 2013 }, pages = { 101--108 }, month = { 9 }, abstract = { This paper presents a novel interactive rendering technique for creating and editing shadings for man-made objects in technical 3D visualizations. In contrast to shading approaches that use intensities computed based on surface normals (e.g., Phong, Gooch, Toon shading), the presented approach uses one-dimensional gradient textures, which can be parametrized and interactively manipulated based on per-object bounding volume approximations. The fully hardware-accelerated rendering technique is based on projective texture mapping and customizable intensity transfer functions. A provided performance evaluation shows comparable results to traditional normal-based shading approaches. The work also introduce simple direct-manipulation metaphors that enables interactive user control of the gradient texture alignment and intensity transfer functions. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Czanner, Silvester and Tang, Wen }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { NFGII }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=PmBTK8TbpPA }, sorting = { 768 } } @inproceedings{PTD2013, author = { Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective Detail+Overview Visualization for 3D Building Exploration }, year = { 2013 }, pages = { 57--64 }, month = { 9 }, abstract = { This paper presents a multi-perspective rendering technique that enables detail+overview visualization and interactive exploration of virtual 3D building model. Virtual 3D building models, as main elements of virtual 3D city models, are used in a growing number of application domains, such as geoanalysis, disaster management and architectural planning. Visualization systems for such building models often rely on perspective or orthogonal projections using a single viewpoint. Therefore, the exploration of a complete model requires a user to change the viewpoint multiple times and to memorize the content of each view to obtain a comprehensive mental model. Since this is usually a time-consuming task, which implies context switching, current visualization systems use multiple viewports to simultaneously depict an object from different perspectives. Our approach extends the idea of multiple viewports by combining two linked views for the interactive exploration of virtual 3D buildings model and their facades. In contrast to traditional approaches, we automatically generate a multi-perspective view that simultaneously depicts all facades of the building in one overview image. This facilitates the process of obtaining overviews and supports fast and direct navigation to various points-of-interest. 
We describe the concept and implementations of our Multiple-Center-of-Projection camera model for real-time multi-perspective image synthesis. Further, we provide insights into different interaction techniques for linked multi-perspective views and outline approaches of future work. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Czanner, Silvester and Tang, Wen }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { HPI; NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/PTD2013/PTD2013.pdf }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=Ywo4gpx0rE8&feature=share&list=UURf7yK_n8IfSBtpWh8uP0mA }, sorting = { 512 } } @inproceedings{ESTD2013, author = { Engel, Juri and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluating the Perceptual Impact of Rendering Techniques on Thematic Color Mappings in 3D Virtual Environments }, year = { 2013 }, pages = { 25--32 }, month = { 9 }, abstract = {

Using colors for thematic mapping is a fundamental approach in visualization, and has become essential for 3D virtual environments to effectively communicate multidimensional, thematic information. Preserving depth cues within these environments to emphasize spatial relations between geospatial features remains an important issue. A variety of rendering techniques have been developed to preserve depth cues in 3D information visualization, including shading, global illumination, and image stylization. However, these techniques alter color values, which may lead to ambiguity in a color mapping and loss of information. Depending on the applied rendering techniques and color mapping, this loss should be reduced while still preserving depth cues when communicating thematic information. This paper presents the results of a quantitative and qualitative user study that evaluates the impact of rendering techniques on information and spatial perception when using visualization of thematic data in 3D virtual environments. We report the results of this study with respect to four perception-related tasks, showing significant differences in error rate and task completion time for different rendering techniques and color mappings.
}, editor = { Michael Bronstein, Jean Favre, and Kai Hormann }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 18th International Workshop on Vision, Modeling and Visualization (VMV 2013) }, project = { NFGII }, doi = { 10.2312/PE.VMV.VMV13.025-032 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/jengel-vmv2013-authors-version-hq.pdf }, link2 = { User Study Raw Data (ZIP) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/user_study_raw_data_txt.zip }, sorting = { 128 } } @inproceedings{SKTD13, author = { Semmo, Amir and Kyprianidis, Jan Eric and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Rendering of Water Surfaces with Cartography-Oriented Design }, year = { 2013 }, pages = { 5--14 }, month = { 7 }, abstract = {

More than 70% of the Earth's surface is covered by oceans, seas, and lakes, making water surfaces one of the primary elements in geospatial visualization. Traditional approaches in computer graphics simulate and animate water surfaces in the most realistic ways. However, to improve orientation, navigation, and analysis tasks within 3D virtual environments, these surfaces need to be carefully designed to enhance shape perception and land-water distinction. We present an interactive system that renders water surfaces with cartography-oriented design using the conventions of mapmakers. Our approach is based on the observation that hand-drawn maps utilize and align texture features to shorelines with non-linear distance to improve figure-ground perception and express motion. To obtain local orientation and principal curvature directions, first, our system computes distance and feature-aligned distance maps. Given these maps, waterlining, water stippling, contour-hatching, and labeling are applied in real-time with spatial and temporal coherence. The presented methods can be useful for map exploration, landscaping, urban planning, and disaster management, which is demonstrated by various real-world virtual 3D city and landscape models.

© ACM, 2013. This is the authors' version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'13). http://dx.doi.org/10.1145/2487276.2487277.
}, series = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-authors-version-hq.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-slides.pdf }, doi = { 10.1145/2487276.2487277 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=DFjjcMRWWoE }, sorting = { 1536 } } @inproceedings{TSD2013, author = { Trapp, Matthias and Hahn, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering of Complex 3D-Treemaps }, year = { 2013 }, pages = { 165-175 }, month = { 2 }, abstract = { 3D-Treemaps are an important visualization technique for hierarchical views. In contrast to 2D-Treemaps, height can be used to map one additional attribute of the data items. Using the Treemap technique in combination with large datasets (more than 500k) a fast rendering and interaction techniques that are beyond collapsing/uncollapsing nodes is still one of the main challenges. This paper presents a novel rendering technique that enables the image synthesis of geometrical complex 3D-Treemaps in real-time. The fully hardware accelerated approach is based on shape generation using geometry shaders. This approach offers increased rendering performance and low update latency compared to existing techniques and through it enables new real-time interaction techniques to large datasets. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D-treemaps, real-time rendering, performance evaluation }, editor = { Sabine Coquillart, Carlos Andujar, Robert S. 
Laramee, Andreas Kerren and José Braz }, publisher = { SCITEPRESS – Science and Technology Publications }, booktitle = { Proceedings of the 8th International Conference on Computer Graphics Theory and Applications (GRAPP 2013) }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/TSD2013/TreeMap.pdf }, isbn = { 978-989-8565-46-4 }, link1 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1702377-rendering-complex-3d-tree-maps-grapp-2013/ }, sorting = { 2816 } } @inproceedings{Pasewaldt2012a, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Towards Comprehensible Digital 3D Maps }, year = { 2012 }, pages = { 261-276 }, month = { 11 }, abstract = { Digital mapping services have become fundamental tools in economy and society to provide domain experts and non-experts with customized, multi-layered map contents. In particular because of the continuous advancements in the acquisition, provision, and visualization of virtual 3D city and landscape models, 3D mapping services, today, represent key components to a growing number of applications, like car navigation, education, or disaster management. However, current systems and applications providing digital 3D maps are faced by drawbacks and limitations, such as occlusion, visual clutter, or insufficient use of screen space, that impact an effective comprehension of geoinformation. To this end, cartographers and computer graphics engineers developed design guidelines, rendering and visualization techniques that aim to increase the effectiveness and expressiveness of digital 3D maps, but whose seamless combination has yet to be achieved. This work discusses potentials of digital 3D maps that are based on combining cartography-oriented rendering techniques and multi-perspective views. 
For this purpose, a classification of cartographic design principles, visualization techniques, as well as suitable combinations are identified that aid comprehension of digital 3D maps. According to this classification, a prototypical implementation demonstrates the benefits of multi-perspective and non-photorealistic rendering techniques for visualization of 3D map contents. In particular, it enables (1) a seamless combination of cartography-oriented and photorealistic graphic styles while (2) increasing screen-space utilization, and (3) simultaneously directing a viewer’s gaze to important or prioritized information. }, editor = { Markus Jobst }, publisher = { Jobstmedia Management Verlag, Wien }, chapter = { 4 }, booktitle = { Service-Oriented Mapping 2012 (SOMAP2012) }, organization = { Internation Cartographic Association }, project = { NFGII;HPI }, language = { English }, isbn = { 3-9502039-2-3 }, link1 = { Slides http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/somap2012_pasewaldt_towards_comprehensible_3D_maps.pdf }, link2 = { Paper http://www.hpi.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/PSTD_2012_SOMAP.pdf }, sorting = { 32 } } @inproceedings{EPTD12, author = { Engel, Juri and Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { An Immersive Visualization System for Virtual 3D City Models }, year = { 2012 }, month = { 6 }, abstract = { Virtual 3D city models are essential visualization tools for effective communication of complex urban spatial information. Immersive visualization of virtual 3D city models offers an intuitive access to and an effective way of realization of urban spatial information, enabling new collaborative applications and decision-support systems. This paper discusses techniques for and usage of fully immersive environments for visualizing virtual 3D city models by advanced 3D rendering techniques. 
Fully immersive environments imply a number of specific requirements for both hardware and software, which are discussed in detail. Further, we identify and outline conceptual and technical challenges as well as possible solution approaches by visualization system prototypes for large-scale, fully immersive environments. We evaluate the presented concepts using two application examples and discuss the results. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { IEEE GRSS }, booktitle = { 20th International Conference on Geoinformatics (GEOINFORMATICS), 2012 }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/EPTD12/EPTD12_draft.pdf }, sorting = { 3840 } } @inproceedings{GTD11, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Automatic Generalization of Virtual 3D Landscape Models }, journal = { Peer Reviewed Proceedings Digital Landscape Architecture 2011: Teaching & Learning with Digital Methods & Tools }, year = { 2011 }, pages = { 127-135 }, month = { 5 }, abstract = { This paper discusses concepts for the automatic generalization of virtual 3D landscape models. As complexity, heterogeneity, and diversity of geodata that constitute landscape models are constantly growing, the need for landscape models that generalize their contents to a consistent, coherent level-of-abstraction and information density becomes an essential requirement for applications such as in conceptual landscape design, simulation and analysis, and mobile mapping. We discuss concepts of generalization and working principles as well as the concept of level-of-abstraction. 
We furthermore present three exemplary automated techniques for generalizing 3D landscape models, including a geometric generalization technique that generates discrete iso-surfaces of 3D terrain models in real-time, a geometric generalization technique for site and building models, and a real-time generalization lens technique. }, editor = { Erich Buhmann AND Stephen Ervin AND Dana Tomlin AND Matthias Pietsch }, booktitle = { Proceedings of the annual conference of Digital Landscape Architecture (DLA) }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/GTD11/2011_GlanderTrappDoellner_AutomaticGeneralization.pdf }, sorting = { 2304 } } @inproceedings{TSD11, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Colonia3D }, journal = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, year = { 2011 }, pages = { 201-212 }, month = { 5 }, abstract = { Dieser Beitrag stellt die Ergebnisse des interdisziplinären Projektes Colonia3D - Visualisierung des Römischen Kölns vor. Die digitale 3D Rekonstruktion des antiken Köln ist das Ergebnis eines gemeinsamen Forschungsprojekts des Archäologischen Instituts der Universität zu Köln, der Köln International School of Design (KISD) der Fachhochschule Köln, des Hasso-Plattner Instituts an der Universität Potsdam und des Römisch Germanischen Museums (RGM) Köln. Der Beitrag präsentiert die wesentlichen Konzepte dieses interaktiven, auf Museen ausgerichteten 3D-Informationssystems, beschreibt verschiedene Präsentationsmodi und deren technische Umsetzung. Er diskutiert Vorgehensweisen und Interaktionskonzepte, die den Benutzer während der Erkundung und Bewegung im virtuellen 3D-Stadtmodell unterstützen. Weiter werden die Techniken für den Austausch, die Aufbereitung und die Optimierung komplexer 3D-Datensätze beschrieben sowie Potenziale für digitale Museen und Ausstellungen skizziert. 
Der vorgestellte Ansatz stellt insbesondere eine IT-Lösung für einen vereinfachten, räumlich-kontextintegrierten informellen Wissenszugang zu archäologischer Fachinformation dar. }, publisher = { Werner H{\"u}lsbusch Verlag }, booktitle = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, project = { NFG }, sorting = { 1792 } } @inproceedings{STD11, author = { Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Ansätze zur kartographischen Gestaltung von 3D-Stadtmodellen }, journal = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, year = { 2011 }, volume = { 20 }, pages = { 473-482 }, month = { 4 }, abstract = { Interaktive virtuelle 3D-Stadtmodelle haben sich zu einem bewährten Medium für die effektive und effiziente Kommunikation von Geoinformation entwickelt. Sie präsentieren eine spezialisierte Form geovirtueller Umgebungen und sind gekennzeichnet durch ein zugrunde liegendes 3D-Geländemodell, einer darin befindlichen 3D-Bebauung sowie des dazu komplementären Straßen-, Grünflächen- und Naturraumes. 3D-Stadtmodell-Systeme ermöglichen es dem Nutzer, sich im Modell interaktiv zu bewegen und sie stellen die Grundfunktionen für die Exploration, Analyse, Präsentation und das Editieren der raumbezogenen Information bereit. Besonders im Gebiet der kartenähnlichen und kartenverwandten 3D-Darstellungen stellen u.a. automatische Verfahren und Techniken zur Stilisierung und Abstraktion von Objekten eines 3D Stadtmodell ein Hauptproblem für die interaktive 3D-Bildsynthese dar. Hier spielt insbesondere die Abstraktion und Illustration potentiell wichtiger Information und somit die Reduzierung der kognitiven Belastung des Nutzers eine tragende Rolle. 
Diesbezüglich sind Verfahren und Techniken zur nicht-photorealistischen Bildsynthese ein bewährtes Mittel der Computergrafik, deren direkte Anwendung auf ein komplettes 3D-Stadtmodell jedoch häufig monotone sowie gestalterisch und kartographisch stark eingeschränkte Resultate liefert. Eine effiziente und kontextsensitive Kommunikation von 3D-Geoinformation bedarf jedoch der Kopplung von Objektsemantik und Abstraktionsverfahren. Diese Arbeit präsentiert ein Konzept und dessen Umsetzung, das die Auswahl und Parametrisierung von nicht-photorealistischen Darstellungstechniken auf Basis von Objektsemantiken erlaubt (Abbildung 1). Dies ermöglicht die Zuweisung unterschiedlicher automatischer Abstraktionstechniken zu Objekten und Objektgruppen. Der vorgestellte Ansatz ist echtzeitfähig und erlaubt eine interaktive Klassifikation von Objekten und Features zur Laufzeit, wodurch sich u.a. Szenarien zur interaktiven Exploration von thematisch-stilisierten Features bzw. feature-bezogenen Daten visualisieren lassen. Dieser Ansatz eröffnet Möglichkeiten für eine gezielte und systematische kartographische Gestaltung von 3D-Stadtmodellen sowie deren echtzeitfähige Implementierung durch entsprechende 3D-Visualisierungsdienste. }, publisher = { Landesvermessung und Geobasisinformation Brandenburg }, series = { Publikationen der Deutschen Gesellschaft f{\"u}r Photogrammetrie, Fernerkundung und Geoinformation e.V. }, booktitle = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, project = { NFG }, sorting = { 2048 } } @inproceedings{TBPD10, author = { Trapp, Matthias and Beesk, Christian and Pasewaldt, Sebastian and Döllner, Jürgen }, title = { Interactive Rendering Techniques for Highlighting in 3D Geovirtual Environments }, year = { 2010 }, month = { 11 }, abstract = { 3D geovirtual environments (GeoVE), such as virtual 3D city and landscape models became an important tool for the visualization of geospatial information. 
Highlighting is an important component within a visualization framework and is essential for the user interaction within many applications. It enables the user to easily perceive active or selected objects in the context of the current interaction task. With respect to 3D GeoVE, it has a number of applications, such as the visualization of user selections, data base queries, as well as navigation aid by highlighting way points, routes, or to guide the user attention. The geometrical complexity of 3D GeoVE often requires specialized rendering techniques for the real-time image synthesis. This paper presents a framework that unifies various highlighting techniques and is especially suitable for the interactive rendering 3D GeoVE of high geometrical complexity. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBPD10/Highlighting.pdf }, publisher = { Springer }, series = { Lecture Notes in Geoinformation & Cartography }, booktitle = { Proceedings of the 5th 3D GeoInfo Conference }, project = { NFG;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBPD10/Highlighting.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-629065-interactive-rendering-techniques-for-highlighting/ }, sorting = { 1280 } } @inproceedings{TSPHDEH10, author = { Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and Döllner, Jürgen and Eichhorn, Michael and Heinzelmann, Michael }, title = { Communication of Digital Cultural Heritage in Public Spaces by the Example of Roman Cologne }, year = { 2010 }, pages = { 262-276 }, month = { 11 }, abstract = { The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. 
The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artefacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Best-Paper-Award }, editor = { M. 
Ioannides }, publisher = { Springer-Verlag Berlin Heidelberg }, series = { Lecture Notes in Computer Science (LNCS) }, booktitle = { Digital Heritage, Proceedings of 3rd EuroMed Conference }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TSPHDEH10/EuroMed2010Coloniad3D_CRC_HQ.pdf }, issn = { 0302-9743 }, link1 = { Paper (Google Books) http://books.google.de/books?id=lLGWMJc_s24C&lpg=PA262&ots=gFwW_7fmJI&dq=Communication%20of%20Digital%20Cultural%20Heritage%20in%20Public%20Spaces%20by%20the%20Example%20of%20Roman%20Cologne&pg=PA250#v=onepage&q&f=false }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=HoC_mmy51CE }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-645625-colonia3d/ }, sorting = { 1024 } } @inproceedings{TD10, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering to Perspective Texture-Atlases }, year = { 2010 }, pages = { 81-84 }, month = { 5 }, abstract = { The image-based representation of geometry is a well known concept in computer graphics. Due to z-buffering, the derivation of such representations using render-to-texture delivers only information of the nearest fragments. Often, transparency-based visualization techniques, e.g., ghost views, also require information of occluded fragments. These can be captured using multi-pass rendering techniques such as depth-peeling or stencil-routed A-buffers on a per-fragment basis. This paper presents an alternative rendering technique that enables the derivation image-based representations on a per-object or per-primitive level within a single rendering pass. We use a dynamic 3D texture atlas that is parameterized on a per-frame basis. Then, prior to rasterization, the primitives are transformed to their respective position within the texture atlas, using vertex-displacement in screen space. 
}, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TD10/RenderToTextureAtlas.pdf }, editor = { Stefan Seipel and Hendrik Lensch }, publisher = { The Eurographics Association }, address = { Norrköping, Sweden }, booktitle = { Eurographics 2010 Shortpaper }, project = { NFG }, issn = { 1017-4656 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2010/TD10/RenderToTextureAtlas.pdf }, link2 = { Video (Youtube) http://www.youtube.com/user/trappcg#p/a/u/1/llLKU-Oa2iU }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-386462-view-dependent-texture-atlases-atlas-real-time-rendering-render-rttav1-science-technology-ppt-powerpoint/ }, sorting = { 4864 } } @inproceedings{GTD10, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { 3D Isocontours – Real-time Generation and Visualization of 3D Stepped Terrain Models }, year = { 2010 }, pages = { 17-20 }, month = { 5 }, abstract = { Isocontours (also isopleths, isolines, level sets) are commonly used to visualize real-valued data defined over a 2D plane according to a set of given isovalues. To support the 3D landscape metaphor for information visualization, a 3D stepped terrain can be derived by lifting and extruding isolines to their particular isovalue, but typically requires triangulation of the resulting surface representation in a preprocessing step. We present a concept and rendering technique for triangle-based terrain models that provide interactive, adaptive generation and visualization of such stepped terrains without preprocessing. Our fully hardware-accelerated rendering technique creates additional step geometry for each triangle intersecting an iso-plane on-the-fly. 
Further, an additional interpolation schema facilitates smooth transition between established 3D terrain visualization and its stepped variant. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GTD10/3DIsolines_draft.pdf }, editor = { Stefan Seipel and Hendrik Lensch }, publisher = { The Eurographics Association }, address = { Norrköping, Sweden }, booktitle = { Eurographics 2010 Shortpaper }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GTD10/3DIsolines_draft.pdf }, issn = { 1017-4656 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=7w3yRp3Xqp8 }, sorting = { 4608 } } @inproceedings{TSHD09, author = { Trapp, Matthias and Schneider, Lars and Holz, Norman and D{\"o}llner, J{\"u}rgen }, title = { Strategies for Visualizing Points-of-Interest of 3D Virtual Environments on Mobile Devices }, year = { 2009 }, month = { 9 }, abstract = { 3D virtual environments are increasingly used as general-purpose medium for communicating spatial information. In particular, virtual 3D city models have numerous applications such as car navigation, city marketing, tourism, and gaming. In these applications, points-of-interest (POI) play a major role since they typically represent features relevant for specific user tasks and facilitate effective user orientation and navigation through the 3D virtual environment. In this paper, we present strategies that aim at effectively visualizing points-of-interest in a 3D virtual environment used on mobile devices. Here, we additionally have to face the "keyhole" situation, i.e., the users can realize only a small part of the environment due to the limited view space and resolution. For the effective visualization of points-of-interest in 3D virtual environments we propose to combine specialized occlusion management for 3D scenes together with visual cues that handle out-of-frame points-of-interest. 
We also discuss general aspects and definitions of points-of-interest in the scope of 3D models and outline a prototype implementation of the mobile 3D viewer application based on the presented concepts. In addition, we give a first performance evaluation with respect to rendering speed and power consumptions. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, publisher = { Springer }, booktitle = { 6th International Symposium on LBS \& TeleCartography }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TSHD09/MobilePOI.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=9rkykc-sSSI }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-233953-point-interest-visualization-mobilepoi-v2-science-technology-ppt-powerpoint/ }, sorting = { 2816 } } @inproceedings{TLJD09, author = { Trapp, Matthias and Lorenz, Haik and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Enhancing Interactive Non-Planar Projections of 3D Geovirtual Environments with Stereoscopic Imaging }, year = { 2009 }, pages = { 281--296 }, month = { 9 }, abstract = { Stereo rendering, as an additional visual cue for humans, is an important method to increase the immersion into 3D virtual environments. Stereo pairs synthesized for the left and right eye are displayed in a way that the human visual system interprets as 3D perception. Stereoscopy is an emerging field in cinematography and gaming. While generating stereo images is well known for standard projections, the implementation of stereoscopic viewing for interactive non-planar single-center projections, such as cylindrical and spherical projections, is still a challenge. This paper presents the results of adapting an existing image-based approach for generating interactive stereoscopic non-planar projections for polygonal scenes on consumer graphics hardware. 
In particular, it introduces a rendering technique for generating image-based, non-planar stereo pairs within a single rendering pass. Further, this paper presents a comparison between the image-based and a geometry-based approach with respect to selected criteria. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Manfred Buchroithner }, publisher = { Springer Verlag }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { True-3D In Cartography - 1st International Conference on 3D Maps }, project = { NFG }, isbn = { 978-3-642-12271-2 }, issn = { 1863-2246 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=RevbRJD3pPE }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-232625-stereoscopy-non-planar-projections-true-3d-cartography-real-time-rendering-true3d-v3-science-technology-ppt-powerpoint/ }, sorting = { 3328 } } @inproceedings{TD09, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Dynamic Mapping of Raster-Data for 3D Geovirtual Environments }, year = { 2009 }, pages = { 387--392 }, month = { 7 }, abstract = { Interactive 3D geovirtual environments (GeoVE), such as 3D virtual city and landscape models, are important tools to communicate geospatial information. Usually, this includes static polygonal data (e.g., digital terrain model) and raster data (e.g., aerial images) which are composed from multiple data sources during a complex, only partial automatic pre-processing step. When dealing with highly dynamic geo-referenced raster data, such as the propagation of fires or floods, this pre-processing step hinders the direct application of 3D GeoVE for decision support systems. To compensate for this limitation, this paper presents a concept for dynamically mapping multiple layers of raster for interactive GeoVE. 
The implementation of our rendering technique is based on the concept of projective texture mapping and can be implemented efficiently using consumer graphics hardware. Further, this paper demonstrates the flexibility of our technique using a number of typical application examples. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, keywords = { 3D geovirtual environments, spatial-temporal data mapping, projective texture mapping }, publisher = { IEEE Computer Society Press }, booktitle = { 13th International Conference on IEEE Information Visualisation }, project = { NFG }, doi = { 10.1109/IV.2009.28 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TD09/projectivemappings_lores.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-219538-dynamic-mapping-raster-data-3d-ge-dynamicmapping-science-technology-ppt-powerpoint/ }, sorting = { 3840 } } @inproceedings{GPTD09, author = { Glander, Tassilo and Peters, Denise and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { 3D Wayfinding Choremes: A Cognitively Motivated Representation of Route Junctions in Virtual Environments }, year = { 2009 }, pages = { 407--427 }, month = { 6 }, abstract = { Research in cognitive sciences suggests that orientation and navigation along routes can be improved if the graphical representation is aligned with the user’s mental concepts of a route. In this paper, we analyze an existing 2D schematization approach called wayfinding choremes and present an implementation for virtual 3D urban models, transferring the approach to 3D. To create the virtual environment, we transform the junctions of a route defined for a given road network to comply with the eight sector model, that is, outgoing legs of a junction are slightly rotated to align with prototypical directions in 45° increments. 
Then, the adapted road network is decomposed into polygonal block cells, the individual polygons being extruded to blocks and their facades textured. For the evaluation of our 3D wayfinding choreme implementation, we present an experiment framework allowing for training and testing subjects by a route learning task. The experimental framework can be parameterized flexibly, exposing parameters to the conductor. We finally give a sketch of a user study by identifying hypotheses, indicators, and, hence, experiments to be done. }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { 12th AGILE International Conference on GI Science }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/GPTD09/glander_choremes_final_flat.pdf }, isbn = { 978-3-642-00317-2 }, issn = { 1863-2246 }, doi = { 10.1007/978-3-642-00318-9_21 }, sorting = { 4352 } } @inproceedings{HTGD09, author = { Hagedorn, Benjamin and Trapp, Matthias and Glander, Tassilo and D{\"o}llner, J{\"u}rgen }, title = { Towards an Indoor Level-of-Detail Model for Route Visualization }, year = { 2009 }, pages = { 692--697 }, abstract = { Indoor routing represents an essential feature required by applications and systems that provide spatial information about complex sites, buildings and infrastructures such as in the case of visitor guidance for trade fairs and customer navigation at airports or train stations. Apart from up-to-date, precise 3D spatial models these systems and applications need user interfaces as core system components that allow users to efficiently express navigation goals and to effectively visualize routing information. For interoperable and flexible indoor routing systems, common specifications and standards for indoor structures, objects, and relationships are needed as well as for metadata such as data quality and certainty. 
In this paper, we introduce a classification of indoor objects and structures taking into account geometry, semantics, and appearance, and propose a level-of-detail model for them that supports the generation of effective indoor route visualization. }, publisher = { IEEE Computer Society Press }, booktitle = { First International Workshop on Indoor Spatial Awareness (ISA) }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/HTGD09/IndoorLOD08.pdf }, doi = { 10.1109/MDM.2009.118 }, sorting = { 4608 } } @inproceedings{TLD09, author = { Trapp, Matthias and Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Interactive Stereo Rendering For Non-Planar Projections of 3D Virtual Environments }, year = { 2009 }, pages = { 199--204 }, abstract = { Stereo rendering, as an additional visual cue for humans, is an important method to increase the immersion into 3D virtual environments. Stereo pairs synthesized for the left and right eye are displayed in a way that the human visual system interprets as 3D perception. Stereoscopy is an emerging field in cinematography and gaming. While generating stereo images is well known for standard projections, the implementation of stereoscopic viewing for non-planar single-center projections, such as cylindrical and spherical projections in real-time, is still a challenge. This paper presents the results of adapting existing image-based and object-based approaches for generating interactive stereoscopic non-planar projections for polygonal scenes on consumer graphics hardware. In particular, it introduces a rendering technique for generating image-based, non-planar stereo pairs within a single rendering pass. Further, this paper presents a comparison between these both approaches with respect to selected criteria. 
}, publisher = { INSTICC Press }, booktitle = { GRAPP 2009 - 4th International Conference on Computer Graphics Theory and Applications }, project = { NFG }, isbn = { 978-989-8111-67-8 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TLD09/StereoscopicNonPlanarProjections.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=RevbRJD3pPE }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-149181-interactive-stereoscopic-rendering-imaging-non-planar-projections-real-time-3d-virtual-environments-grapp-2009-science-technology-ppt-powerpoint/ }, sorting = { 5120 } } @inproceedings{TD08d, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Relief Clipping Planes for Real-Time Rendering }, year = { 2008 }, month = { 12 }, abstract = { The concept of clipping planes is well known in computer graphics and can be used to create cut-away views. But clipping against just analytical defined planes is not always suitable for communicating every aspect of such visualization. For example, in hand-drawn technical illustrations, artists tend to communicate the difference between a cut and a model feature by using non-regular, sketchy cut lines instead of straight ones. To enable this functionality in computer graphics, we present a technique for rendering relief clip planes in real-time. Therefore, we extend the clip plane equation with an additional offset map, which can be represented by a texture map that contains height values. Clipping is then performed by varying the clip plane equation with respect to such an offset map. Further, we propose a capping technique that enables the rendering of caps onto the clipped area to convey the impression of solid material. It avoids a re-meshing of a solid polygonal mesh after clipping is performed. Our approach is pixel precise, applicable in real-time, and takes fully advantage of graphics accelerators. 
}, affiliation = { Hasso-Plattner-Insitute, University of Potsdam }, address = { Singapore }, booktitle = { ACM SIGGRAPH Asia 2008 - Sketch Program }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08d/ReliefClipPlanes.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=ydZIROOiNb0 }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-145934-reliefclippingplanes-trappdoellner-clipping-plane-real-time-rendering-science-technology-ppt-powerpoint/ }, sorting = { 2304 } } @inproceedings{TD08b, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Efficient Representation of Layered Depth Images for Real-time Volumetric Tests }, year = { 2008 }, pages = { 9--16 }, month = { 8 }, abstract = { Representing Layered Depth Images (LDI) as 3D texture can be used to approximate complex, arbitrarily shaped volumes on graphics hardware. Based on this concept, a number of real-time applications such as collision detection or 3D clipping against multiple volumes can be implemented efficiently using programmable hardware. One major drawback of this image-based representation is the high video memory consumption. To compensate that disadvantage, this paper presents a concept and associated algorithms that enable a lossless, efficient LDI representation which is especially designed for the usage within shader programs. The concept comprises the application of a viewpoint selection, a cropping, and a compression algorithm. We evaluated our algorithm with different test cases and show possible use cases. 
}, editor = { Ik Soo Lim and Wen Tang }, publisher = { The Eurographics Association }, booktitle = { EG UK Theory and Practice of Computer Graphics (2008) Conference }, organization = { UK Chapter of the Eurographics Association }, project = { NFG }, isbn = { 978-3-905673-67-8 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08b/132-Trapp-Efficient-LDI-Representation.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77166-matthias-trapp-efficient-LDI-representation-TPCG-2-EG-UK-Theory-Practice-Computer-Graphics-2008-Conference-Education-ppt-powerpoint/ }, sorting = { 4096 } } @inproceedings{TD08a, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Volumetric Tests Using Layered Depth Images }, year = { 2008 }, pages = { 235--238 }, month = { 4 }, abstract = { This paper presents a new approach for performing efficiently 3D point-in-volume tests for solid and arbitrary complex shapes. It classifies a 3D point as inside or outside of a solid specified by 3D polygonal geometry. Our technique implements a basic functionality that offers a wide range of applications such as clipping, collision detection, interactive rendering of multiple 3D lenses as well as rendering using multiple styles. It is based on an extension of layered depth images (LDI) in combination with shader programs. An LDI contains layers of unique depth complexity and is represented by a 3D volume texture. The test algorithm transforms a 3D point into an LDI texture space and, then, performs ray marching through the depth layers to determine its classification. We show how to apply real-time volumetric tests to implement 3D clipping and rendering using multiple styles. In addition, we discuss limitations and possible improvements. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, editor = { K. Mania and E. 
Reinhard }, publisher = { The Eurographics Association }, booktitle = { Eurographics 2008 Shortpaper }, organization = { Eurographics }, project = { NFG }, issn = { 1017-4656 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08a/VolumetricTest.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=o7NSLQvdghg }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-76989-real-time-volumetric-tests-using-layered-depth-ima-graphics-processors-boundary-representation-data-structures-types-matthias-trapp-eg2008-education-ppt-powerpoint/ }, sorting = { 5888 } } @inproceedings{TD08, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { A Generalization Approach for 3D Viewing Deformations of Single-Center Projections }, year = { 2008 }, number = { 3 }, pages = { 162--170 }, month = { 2 }, abstract = { This paper presents a novel image-based approach to efficiently generate real-time non-planar projections of arbitrary 3D scenes such as panorama and fish-eye views. The real-time creation of such projections has a multitude of applications, e.g., in geovirtual environments and in augmented reality. Our rendering technique is based on dynamically created cube map textures in combination with shader programs that calculate the specific projections. We discuss two different approaches to create such cubemaps and introduce possible optimizations. Our technique can be applied within a single rendering pass, is easy to implement, and exploits the capability of modern programmable graphics hardware completely. Further, we present an approach to customize and combine different planar as well as non-planar projections. We have integrated our technique into an existing real-time rendering framework and demonstrate its performance on large scale datasets such as virtual 3D city and terrain models. 
}, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, keywords = { Real-time Panorama, Non-Planar Projection, Fish-Eye Views, Projection Tiles }, editor = { Jos{\'e} Braz and Nuno Jardim Nunes and Joao Madeiras Pereira }, publisher = { INSTICC Press }, booktitle = { International Conference on Computer Graphics Theory and Applications (GRAPP) }, project = { NFG }, isbn = { 978-989-8111-20-3 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08/NonPlanarProjection.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=Y6SBylq5SFA }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77167-matthias-trapp-non-planar-projection-GRAPP-2008-Funchal-Madeira-Real-time-Panorama-Fish-Eye-Views-Tiles-Education-ppt-powerpoint/ }, sorting = { 6400 } } @inproceedings{MTKDEPBH08, author = { Maass, Stefan and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen and Eichhorn, M. and Pokorski, Rafael and B{\"a}uerlein, Johannes and von Hesberg, H. }, title = { Techniques For The Interactive Exploration Of High-Detail 3D Building Reconstruction Using The Example Of Roman Cologne }, year = { 2008 }, pages = { 223--229 }, abstract = { This paper presents the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. We describe a content creation pipeline established to enable a flexible exchange and enhancement of building models, the applied optimization techniques necessary for real-time rendering, and the design of an application framework that enables the coupling of 3D visualizations with additional information in corresponding Adobe® Flash® widgets. 
Furthermore, we expose challenges arising by incorporating state-of-the-art visualization techniques, such as cut-away views, non-photorealistic rendering (NPR), and automated label placement. These techniques are used to enhance the interactive 3D environments, to enable for the presentation of interior structures, the precise communication what is hypothetic and what proven knowledge, and the integration of meta information. }, keywords = { High-detail 3D Models, Virtual Reality, Real-Time 3D Visualization, Roman Cologne }, editor = { M. Loannides and A. Addison and A. Georgopoulos and L. Kalisperis }, publisher = { Archaeolingua }, booktitle = { 14th International Conference on Virtual Systems and Multimedia (VSMM 2008) }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/MTKDEPBH08/vsmm2008..pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=xAqMM6G3de0 }, sorting = { 2816 } } @inproceedings{TGBD08, author = { Trapp, Matthias and Glander, Tassilo and Buchholz, Henrik and D{\"o}llner, J{\"u}rgen }, title = { 3D Generalization Lenses for Interactive Focus + Context Visualization of Virtual City Models }, year = { 2008 }, pages = { 356--361 }, abstract = { Focus + context visualization facilitates the exploration of complex information spaces. This paper proposes 3D generalization lenses, a new visualization technique for virtual 3D city models that combines different levels of structural abstraction. In an automatic preprocessing step, we derive a generalized representation of a given city model. At runtime, this representation is combined with a full-detail representation within a single view based on one or more 3D lenses of arbitrary shape. Focus areas within lens volumes are shown in full detail while excluding less important details of the surrounding area. 
Our technique supports simultaneous use of multiple lenses associated with different abstraction levels, can handle overlapping and nested lenses, and provides interactive lens modification. }, keywords = { 3D Lenses, Focus + Context Visualisation, Virtual 3D City Model, Generalization }, publisher = { IEEE Computer Society Press }, booktitle = { 12th International Conference on IEEE Information Visualization }, project = { NFG }, doi = { 10.1109/IV.2008.18 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TGBD08/FocusMaps08.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77164-3D-Generalization-Lenses-Interactive-Focus-Con-12th-Conference-Information-Visualization-LSBU-2008-matthias-trapp-Education-ppt-powerpoint/ }, sorting = { 3328 } } @inproceedings{LTJD08, author = { Lorenz, Haik and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Interactive Multi-Perspective Views of Virtual 3D Landscape and City Models }, year = { 2008 }, pages = { 301--321 }, abstract = { Based on principles of panorama maps we present an interactive visualization technique that generates multi-perspective views of complex spatial environments such as virtual 3D landscape and city models. Panorama maps seamlessly combine easily readable maps in the foreground with 3D views in the background – both within a single image. Such nonlinear, non-standard 3D projections enable novel focus \& context views of complex virtual spatial environments. The presented technique relies on global space deformation to model multi-perspective views while using a standard linear projection for rendering which enables single-pass processing by graphics hardware. It automatically configures the deformation in a view-dependent way to maintain the multi-perspective view in an interactive environment. 
The technique supports different distortion schemata beyond classical panorama maps and can seamlessly combine different visualization styles of focus and context areas. We exemplify our approach in an interactive 3D tourist information system. }, note = { Best Paper Award }, keywords = { multi-perspective views, focus \& context visualization, global space deformation, virtual 3D city models, virtual 3D landscape models, geovisualization }, editor = { Lars Bernard and Anders Friis-Christensen and Hardy Pundt }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { 11th AGILE International Conference on GI Science }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/LTJD08/agile08_draft.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=2bYDKbzocSg }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77168-matthias-trapp-multi-perspective-views-AGILE-2008-focus-context-visualization-globalspace-Education-ppt-powerpoint/ }, sorting = { 5376 } } @inproceedings{GTD08, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Konzepte f{\"u}r die Generalisierung von 3D-Geb{\"a}udemodellen }, year = { 2008 }, volume = { 41 }, pages = { 33--45 }, note = { Arbeitsgruppe Automation in Kartographie, Photogrammetrie und GIS }, publisher = { Bundesamt f{\"u}r Kartographie und Geod{\"a}sie }, booktitle = { Mitteilungen des Bundesamtes f{\"u}r Kartographie und Geod{\"a}sie }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2008/GTD08/Glander_Trapp_Doellner-Konzepte_fuer_die_Generalisierung_von_3D-Gebaeudemodellen.pdf }, issn = { 1436-3445 }, sorting = { 7168 } } @inproceedings{TD07, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Automated Combination of Real-Time Shader Programs }, year = { 2007 }, pages = { 53--56 }, 
month = { 9 }, abstract = { This work proposes an approach for automatic and generic runtime-combination of high-level shader programs. Many of recently introduced real-time rendering techniques rely on such programs. The fact that only a single program can be active concurrently becomes a main conceptual problem when embedding these techniques into middleware systems or 3D applications. Their implementations frequently demand for a combined use of individual shader functionality and, therefore, need to combine existing shader programs. Such a task is often timeconsuming, error-prone, requires a skilled software engineer, and needs to be repeated for each further extension. Our extensible approach solves these problems efficiently: It structures a shader program into code fragments, each typed with a predefined semantics. Based on an explicit order of those semantics, the code fragments of different programs can be combined at runtime. This technique facilitates the reuse of shader code as well as the development of extensible rendering frameworks for future hardware generations. We integrated our approach into an object-oriented high-level rendering system. }, editor = { P. Cignoni and J. Sochor }, publisher = { The Eurographics Association }, booktitle = { Eurographics 2007 Shortpaper }, organization = { Eurographics }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/TD07/ShaderCombination_MatthiasTrapp.pdf,user_upload/fachgebiete/doellner/publications/2007/TD07/matthias_trapp_shader_combination_EG_2007_poster.pdf }, issn = { 1017-4656 }, sorting = { 1024 } } @inproceedings{GTD07a, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { A Concept of Effective Landmark Depiction in Geovirtual 3D Environments by View-Dependent Deformation }, year = { 2007 }, abstract = { Landmarks represent elements of geovirtual 3D environments with outstanding importance for user orientation. 
Especially, they facilitate navigation and exploration within virtual 3D city models. This paper presents a novel concept for the real-time depiction of landmarks that effectively emphasizes these 3D objects by improving their visibility with respect to their surrounding areas and the current 3D viewing settings. The concept is based on scaling landmark geometry according to an importance function while simultaneously adjusting the corresponding surrounding region. The amplification of landmarks takes into account the current camera parameters. To reduce visual artifacts caused by this multi-scale presentation, e.g., geometry intersections, the surrounding objects of each landmark are adapted according to a deformation field that encodes the displacement and scaling transformations. An individual weight coefficient can be defined that denotes the landmark’s importance. To render a collection of weighted landmarks within a virtual 3D city model, the technique accumulates their associated, weighted deformation fields in a view-dependent way. Our concept provides a flexible solution for the importance-driven enhancement of objects within interactive geovirtual 3D environments and aims at improving the perceptual and cognitive quality of their display. In particular, the concept can be applied to systems and applications that use abstracted, generalized virtual 3D city models such as in the fields of car and pedestrian navigation, disaster management, and spatial data mining. 
}, note = { CD proceedings }, keywords = { Visualization, Smart Environments and Landmarks, Navigation Systems }, booktitle = { 4th International Symposium on LBS and Telecartography }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/GTD07a/LBS2007_Glander_Trapp_Doellner_Landmark_visualization_draft.pdf }, sorting = { 3840 } } @mastersthesis{Tra07, author = { Trapp, Matthias }, title = { Analysis and Exploration of Virtual 3D-Citymodels using 3D Information Lenses }, year = { 2007 }, month = feb, abstract = { This thesis addresses real-time rendering techniques for 3D Information-Lenses, which are based on the focus \& context metaphor. It analyzes, conceives, implements and reviews its applicability to objects and structures of virtual 3D city models. In contradiction to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis of this field. Programmable hardware enables the implementation of new lens-techniques, which allows the augmentation of the perceptive and cognitive quality of the visualization, compared to classical perspective projections. A set of 3D information-lenses is integrated into a 3D scene graph system: 1.) Occlusion Lenses modify the appearance of virtual 3D city-model objects in order to resolve their occlusion and consequently facilitate the navigation. 2.) Best-View Lenses display city-model objects in a priority-based manner and mediate their meta-information. Thus, they support exploration and navigation of virtual 3D-city-models. 3.) Color and deformation lenses modify the appearance and geometry of 3D city-models to facilitate their perception. 
The present techniques for 3D information lenses and the application to virtual 3D city models clarify their potential for interactive visualization and form a base for further development. }, url = { http://opus.kobv.de/ubp/volltexte/2008/1393/ }, address = { Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH, Prof.-Dr.-Helmert-Str. 2-3, D-14482 Potsdam }, school = { Hasso Plattner Institut, University Potsdam }, files = { user_upload/fachgebiete/doellner/publications/2007/Tra07/diplom.pdf }, link1 = { (PDF) http://opus.kobv.de/ubp/volltexte/2008/1393/pdf/trapp_matthias.pdf }, sorting = { 5376 } } @phdthesis{T2013, author = { Trapp, Matthias }, title = { Interactive Rendering Techniques for Focus+Context Visualization of 3D Geovirtual Environments }, year = { 2013 }, month = { 7 }, abstract = { This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments (see Figure). In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. 
For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in an single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage the available screen space, and reduces the overload of image contents. 
The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for design and development of new applications and systems in the field of 3D geovirtual environments. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, address = { Hasso-Plattner-Institut für Softwaresystemtechnik GmbH, Prof.-Dr.-Helmert-Str. 2-3, D-14482 Potsdam }, school = { Hasso Plattner Institut, University Potsdam }, sorting = { 256 } } @misc{Tra09, author = { Trapp, Matthias }, title = { Interaktive Visualisierung des R{\"o}mischen K{\"o}lns }, year = { 2009 }, note = { german }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/Tra09/HPI Magazin.pdf }, sorting = { 6400 } }