# This BibTeX File has been generated by # the Typo3 extension 'Sixpack-4-T3 by Sixten Boeck' # # URL: # Date: 06/24/2017 # Non-Standard BibTex fields are included. # state: 0 = published, 1 = accepted, 2 = submitted, 3 = to be published // if missing, published is assumed # extern,deleted,hidden: 0 = false, 1 = true // if missing, false is assumed # link format: Title Url // separated by a whitespace @article{SLKD2016, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Interactive Oil Paint Filtering }, journal = { Computers \& Graphics }, year = { 2016 }, volume = { 55 }, pages = { 157--171 }, abstract = {

This paper presents an interactive system for transforming images into an oil paint look. The system comprises two major stages. First, it derives dominant colors from an input image for feature-aware recolorization and quantization to conform with a global color palette. Afterwards, it employs non-linear filtering based on the smoothed structure adapted to the main feature contours of the quantized image to synthesize a paint texture in real-time. Our filtering approach leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting. To this end, our system introduces a generalized brush-based painting interface that operates within parameter spaces to locally adjust the level of abstraction of the filtering effects. Several results demonstrate the various applications of our filtering approach to different genres of photography.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SLKD2016/oilpaint-cag2016_authors_version.pdf }, doi = { 10.1016/j.cag.2015.12.001 }, sorting = { 2560 } } @article{NBDMST2015, author = { Nocke, Thomas and Buschmann, Stefan and Donges, Jonathan and Marwan, Norbert and Schulz, Hans-Jörg and Tominski, Christian }, title = { Review: visual analytics of climate networks }, journal = { Nonlinear Processes in Geophysics }, year = { 2015 }, volume = { 22 }, number = { 5 }, pages = { 545-570 }, month = { 9 }, abstract = { Network analysis has become an important approach in studying complex spatiotemporal behaviour within geophysical observation and simulation data. This new field produces increasing amounts of large geo-referenced networks to be analysed. Particular focus lies currently on the network analysis of the complex statistical interrelationship structure within climatological fields. The standard procedure for such network analyses is the extraction of network measures in combination with static standard visualisation methods. Existing interactive visualisation methods and tools for geo-referenced network exploration are often either not known to the analyst or their potential is not fully exploited. To fill this gap, we illustrate how interactive visual analytics methods in combination with geovisualisation can be tailored for visual climate network investigation. Therefore, the paper provides a problem analysis, relating the multiple visualisation challenges with a survey undertaken with network analysts from the research fields of climate and complex systems science. Then, as an overview for the interested practitioner, we review the state-of-the-art in climate network visualisation and provide an overview of existing tools. 
As a further contribution, we introduce the visual network analytics tools CGV and GTX, providing tailored solutions for climate network analysis, including alternative geographic projections, edge bundling, and 3-D network support. Using these tools, the paper illustrates the application potentials of visual analytics for climate networks based on several use cases including examples from global, regional, and multi-layered climate networks. }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/NBDMST2015/npg-22-545-2015_final.pdf }, sorting = { 512 } } @article{BTD2015A, author = { Buschmann, Stefan and Trapp, Matthias and Döllner, Jürgen }, title = { Animated visualization of spatial-temporal trajectory data for air-traffic analysis }, journal = { The Visual Computer }, year = { 2015 }, volume = { 32 }, number = { 3 }, pages = { 371-381 }, abstract = { With increasing numbers of flights worldwide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. To cope with these challenges, cyber worlds can be used for interactive visual analysis and analytical reasoning based on aircraft trajectory data. However, with growing data size and complexity, visualization requires high computational efficiency to process that data within real-time constraints. This paper presents a technique for real-time animated visualization of massive trajectory data. It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearance, and (3) real-time rendering within 3D virtual environments such as virtual 3D airport or 3D city models. 
Different visualization metaphors can be efficiently built upon this technique such as temporal focus+context, density maps, or overview+detail methods. As a general-purpose visualization technique, it can be applied to general 3D and 3+1D trajectory data, e.g., traffic movement data, geo-referenced networks, or spatio-temporal data, and it supports related visual analytics and data mining tasks within cyber worlds. }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/BTD2015/tvc2015_draft.pdf }, sorting = { 256 } } @article{SD2015, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Image Filtering for Level-of-Abstraction Texturing of Virtual 3D Scenes }, journal = { Computers \& Graphics }, year = { 2015 }, volume = { 52 }, pages = { 181--198 }, abstract = {

Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-process stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping and (2) for each mipmap level separately to enable a progressive level of abstraction, using (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. We demonstrate the potentials of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines, and is extensible for custom interaction techniques.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SD2015/asemmo-cag2015-authors-version.pdf }, doi = { 10.1016/j.cag.2015.02.001 }, sorting = { 2560 } } @article{STJD2015, author = { Semmo, Amir and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Cartography-Oriented Design of 3D Geospatial Information Visualization - Overview and Techniques }, journal = { The Cartographic Journal }, year = { 2015 }, volume = { 52 }, number = { 2 }, pages = { 95--106 }, abstract = {

In economy, society and personal life map-based, interactive geospatial visualization becomes a natural element of a growing number of applications and systems. The visualization of 3D geospatial information, however, raises the question how to represent the information in an effective way. Considerable research has been done in technology-driven directions in the fields of cartography and computer graphics (e.g., design principles, visualization techniques). Here, non-photorealistic rendering represents a promising visualization category–situated between both fields–that offers a large number of degrees for the cartography-oriented visual design of complex 2D and 3D geospatial information for a given application context. Still today, however, specifications and techniques for mapping cartographic design principles to the state-of-the-art rendering pipeline of 3D computer graphics remain to be explored. This paper revisits cartographic design principles for 3D geospatial visualization and introduces an extended 3D semiotic model that complies with the general, interactive visualization pipeline. Based on this model, we propose non-photorealistic rendering techniques to interactively synthesize cartographic renditions of basic feature types, such as terrain, water, and buildings. In particular, it includes a novel iconification concept to seamlessly interpolate between photorealistic and cartographic representations of 3D landmarks. Our work concludes with a discussion of open challenges in this field of research, including topics such as user interaction and evaluation.
}, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/STJD2015/icc2015_semmo_authors_version.pdf }, doi = { 10.1080/00087041.2015.1119462 }, sorting = { 2816 } } @article{KHD2014, author = { Klimke, Jan and Hagedorn, Benjamin and Döllner, Jürgen }, title = { Scalable Multi-Platform Distribution of Spatial 3D Contents }, journal = { International Journal of 3-D Information Modeling }, year = { 2014 }, volume = { 3 }, number = { 3 }, pages = { 35--49 }, month = { 7 }, abstract = { Virtual 3D city models provide powerful user interfaces for communication of 2D and 3D geoinformation. Providing high quality visualization of massive 3D geoinformation in a scalable, fast, and cost efficient manner is still a challenging task. Especially for mobile and web-based system environments, software and hardware configurations of target systems differ significantly. This makes it hard to provide fast, visually appealing renderings of 3D data throughout a variety of platforms and devices. Current mobile or web-based solutions for 3D visualization usually require raw 3D scene data such as triangle meshes together with textures delivered from server to client, what makes them strongly limited in terms of size and complexity of the models they can handle. This paper introduces a new approach for provisioning of massive, virtual 3D city models on different platforms namely web browsers, smartphones or tablets, by means of an interactive map assembled from artificial oblique image tiles. The key concept is to synthesize such images of a virtual 3D city model by a 3D rendering service in a preprocessing step. This service encapsulates model handling and 3D rendering techniques for high quality visualization of massive 3D models. 
By generating image tiles using this service, the 3D rendering process is shifted from the client side, which provides major advantages: (a) The complexity of the 3D city model data is decoupled from data transfer complexity, (b) the implementation of client applications is simplified significantly as 3D rendering is encapsulated on server side, (c) 3D city models can be easily deployed for and used by a large number of concurrent users, leading to a high degree of scalability of the overall approach. All core 3D rendering techniques are performed on a dedicated 3D rendering server, and thin-client applications can be compactly implemented for various devices and platforms. }, keywords = { 3D City Models, CityGML, Slippy Map, Oblique Views, Provisioning }, url = { http://dl.acm.org/citation.cfm?id=2738648 }, editor = { U. Isikdag }, publisher = { IGI Publishing Hershey, PA, USA }, booktitle = { International Journal of 3-D Information Modeling }, organization = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/People/jklimke/klimke_2014_ij3dim.pdf }, issn = { 2156-1710 }, doi = { 10.4018/ij3dim.2014070103 }, sorting = { 16 } } @article{PSTD2014, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective 3D Panoramas }, journal = { International Journal of Geographical Information Science (IJGIS) }, year = { 2014 }, volume = { 28 }, number = { 10 }, pages = { 2030--2051 }, abstract = {
This article presents multi-perspective 3D panoramas that focus on visualizing 3D geovirtual environments (3D GeoVEs) for navigation and exploration tasks. Their key element, a multi-perspective view, seamlessly combines what is seen from multiple viewpoints into a single image. This approach facilitates the presentation of information for virtual 3D city and landscape models, particularly by reducing occlusions, increasing screen-space utilization, and providing additional context within a single image. We complement multi-perspective views with cartographic visualization techniques to stylize features according to their semantics and highlight important or prioritized information. When combined, both techniques constitute the core implementation of interactive, multi-perspective 3D panoramas. They offer a large number of effective means for visual communication of 3D spatial information, a high degree of customization with respect to cartographic design, and manifold applications in different domains. We discuss design decisions of 3D panoramas for the exploration of and navigation in 3D GeoVEs. We also discuss a preliminary user study that indicates that 3D panoramas are a promising approach for navigation systems using 3D GeoVEs.
}, keywords = { multi-perspective visualization, panorama, focus+context visualization, 3D geovirtual environments, cartographic design }, project = { HPI;NFGII }, doi = { 10.1080/13658816.2014.922686 }, link1 = { http://dx.doi.org/10.1080/13658816.2014.922686 }, sorting = { 1792 } } @article{S2014, author = { Semmo, Amir }, title = { Nichtfotorealistische Visualisierung virtueller 3D-Stadtmodelle }, journal = { HPImgzn }, year = { 2014 }, number = { 15 }, pages = { 32-35 }, abstract = { Die nichtfotorealistische Bildsynthese stellt ein umfangreiches, innovatives Repertoire zur grafischen Gestaltung bereit, die eine wirkungsvolle Visualisierung komplexer raumbezogener Informationen ermöglicht. Der Fachbereich für computergrafische Systeme beschäftigt sich u.a. mit dem Design, der Implementierung und Evaluierung von nichtfotorealistischen Visualisierungstechniken für virtuelle 3D-Umgebungen – ein Forschungsbericht von Amir Semmo. }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/S2014/asemmo-hpimgzn2014.pdf }, sorting = { 1280 } } @article{DEREKD2013, author = { Delikostidis, Ioannis and Engel, Juri and Retsios, Bas and van Elzakker, Corné P.J.M. and Kraak, Menno-Jan and Döllner, Jürgen }, title = { Increasing the Usability of Pedestrian Navigation Interfaces by means of Landmark Visibility Analysis }, journal = { The Journal of Navigation }, year = { 2013 }, volume = { 66 }, number = { 04 }, pages = { 523--537 }, month = { 6 }, abstract = { Communicating location-specific information to pedestrians is a challenging task which can be aided by user-friendly digital technologies. In this paper, landmark visibility analysis, as a means for developing more usable pedestrian navigation systems is discussed. Using an algorithmic framework for image-based 3D analysis, this method integrates a 3D city model with identified landmarks and produces raster visibility layers for each one. 
This output enables an Android phone prototype application to indicate the visibility of landmarks from the user’s actual position. Tested in the field, the method achieves sufficient accuracy for the context of use and improves navigation efficiency and effectiveness. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { pedestrian navigation, landmark visibility, user-centred design, usability testing }, publisher = { The Royal Institute of Navigation }, project = { NFGII }, issn = { 1469-7785 }, doi = { 10.1017/S0373463313000209 }, link1 = { http://journals.cambridge.org/article_S0373463313000209 }, sorting = { 3072 } } @article{TD2013, author = { Trapp, Matthias and D\"ollner, J\"urgen }, title = { 2.5D Clip-Surfaces for Technical Visualization }, journal = { Journal of WSCG }, year = { 2013 }, volume = { 21 }, number = { 1 }, pages = { 89-96 }, month = { 6 }, abstract = { The concept of clipping planes is well known in computer graphics and can be used to create cut-away views. But clipping against just analytical defined planes is not always suitable for communicating every aspect of such visualization. For example, in hand-drawn technical illustrations, artists tend to communicate the difference between a cut and a model feature by using non-regular, sketchy cut lines instead of straight ones. To enable this functionality in computer graphics, this paper presents a technique for applying 2.5D clip-surfaces in real-time. Therefore, the clip plane equation is extended with an additional offset map, which can be represented by a texture map that contains height values. Clipping is then performed by varying the clip plane equation with respect to such an offset map. Further, a capping technique is proposed that enables the rendering of caps onto the clipped area to convey the impression of solid material. It avoids a re-meshing of a solid polygonal mesh after clipping is performed. 
Our approach is pixel precise, applicable in real-time, and takes fully advantage of graphics accelerators. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { clipping planes, real-time rendering, technical 3D visualization }, editor = { Václav Skala }, publisher = { Union Agency }, address = { Na Mazinach 9, CZ 322 00 Plzen, Czech Republic }, booktitle = { Proceedings of WSCG 2013: 21st International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision }, project = { NFGII }, issn = { ISSN 1213 – 6972 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=mBasfz37VoY }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1861946-5d-clip-surfaces-technical-visualization/ }, link3 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/TD2013/clipping.pdf }, sorting = { 1792 } } @article{KCWI13, author = { Kyprianidis, Jan Eric and Collomosse, John and Wang, Tinghuai and Isenberg, Tobias }, title = { State of the ‘Art’: A Taxonomy of Artistic Stylization Techniques for Images and Video }, journal = { IEEE Transactions on Visualization and Computer Graphics }, year = { 2013 }, volume = { 19 }, number = { 5 }, pages = { 866--885 }, abstract = { This paper surveys the field of non-photorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semi-automatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. 
First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation. }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/KCWI13/jkyprian-tvcg2013.pdf }, doi = { 10.1109/TVCG.2012.160 }, sorting = { 3328 } } @article{RD13, author = { Richter, Rico and Döllner, Jürgen }, title = { Concepts and techniques for integration, analysis and visualization of massive 3D point clouds }, journal = { Computers, Environment and Urban Systems }, year = { 2013 }, volume = { 45 }, pages = { 114-124 }, abstract = { Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. 
Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges. }, project = { NFGII }, doi = { 10.1016/j.compenvurbsys.2013.07.004 }, link1 = { http://www.sciencedirect.com/science/article/pii/S0198971513000653 }, sorting = { 64 } } @article{RBD13, author = { Richter, Rico and Behrens, Markus and Döllner, Jürgen }, title = { Object class segmentation of massive 3D point clouds of urban areas using point cloud topology }, journal = { International Journal of Remote Sensing }, year = { 2013 }, volume = { 34 }, number = { 23 }, pages = { 8408--8424 }, abstract = { A large number of remote-sensing techniques and image-based photogrammetric approaches allow an efficient generation of massive 3D point clouds of our physical environment. The efficient processing, analysis, exploration, and visualization of massive 3D point clouds constitute challenging tasks for applications, systems, and workflows in disciplines such as urban planning, environmental monitoring, disaster management, and homeland security. 
We present an approach to segment massive 3D point clouds according to object classes of virtual urban environments including terrain, building, vegetation, water, and infrastructure. The classification relies on analysing the point cloud topology; it does not require per-point attributes or representative training data. The approach is based on an iterative multi-pass processing scheme, where each pass focuses on different topological features and considers already detected object classes from previous passes. To cope with the massive amount of data, out-of-core spatial data structures and graphics processing unit (GPU)-accelerated algorithms are utilized. Classification results are discussed based on a massive 3D point cloud with almost 5 billion points of a city. The results indicate that object-class-enriched 3D point clouds can substantially improve analysis algorithms and applications as well as enhance visualization techniques. }, project = { NFGII }, doi = { 10.1080/01431161.2013.838710 }, link1 = { http://www.tandfonline.com/doi/full/10.1080/01431161.2013.838710 }, sorting = { 32 } } @article{RKD13, author = { Richter, Rico and Kyprianidis, Jan Eric and Döllner, Jürgen }, title = { Out-of-Core GPU-based Change Detection in Massive 3D Point Clouds }, journal = { Transactions in GIS }, year = { 2013 }, volume = { 17 }, number = { 5 }, pages = { 724--741 }, abstract = { If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. 
Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results. Our approach enables conclusions to be drawn about temporal changes in large highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications. }, project = { NFGII }, doi = { DOI: 10.1111/j.1467-9671.2012.01362.x }, link1 = { http://onlinelibrary.wiley.com/doi/10.1111/j.1467-9671.2012.01362.x/abstract }, sorting = { 256 } } @article{GTD12, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Automatic Generalization of Virtual 3D Landscape Models }, journal = { gis.SCIENCE }, year = { 2012 }, volume = { 25 }, number = { 1 }, pages = { 18-23 }, month = { 3 }, abstract = { This paper discusses concepts for the automatic generalization of virtual 3D landscape models. As complexity, heterogeneity, and diversity of geodata that constitute landscape models are constantly growing, the need for landscape models that generalize their contents to a consistent, coherent level-of-abstraction and information density becomes an essential requirement for applications such as in conceptual landscape design, simulation and analysis, and mobile mapping. We discuss concepts of generalization and working principles as well as the concept of level-of-abstraction. 
We furthermore present three exemplary automated techniques for generalizing 3D landscape models, including a geometric generalization technique that generates discrete iso-surfaces of 3D terrain models in real-time, a geometric generalization technique for site and building models, and a real-time generalization lens technique. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Cover Image }, publisher = { Wichmann Verlag }, issn = { 1869-9391 }, sorting = { 4096 } } @article{TSPHDEH12, author = { Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and Döllner, Jürgen and Eichhorn, Michael and Heinzelmann, Michael }, title = { Colonia 3D - Communication of Virtual 3D Reconstructions in Public Spaces }, journal = { International Journal of Heritage in the Digital Era (IJHDE) }, year = { 2012 }, volume = { 1 }, number = { 1 }, pages = { 45-74 }, month = { 1 }, abstract = { The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artifacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Cover Image }, editor = { Marinos Ioannides }, publisher = { Multi-Science Publishing }, issn = { 2047-4970 }, doi = { 10.1260/2047-4970.1.1.45 }, link1 = { Paper (HQ) http://multi-science.metapress.com/content/b4wn417605744380/fulltext.pdf }, sorting = { 4608 } } @article{ED12_2, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Immersive Visualization of Virtual 3D City Models and its Applications in E-Planning }, journal = { International Journal of E-Planning Research (IJEPR) }, year = { 2012 }, volume = { 1 }, number = { 4 }, pages = { 17-34 }, abstract = { Immersive visualization offers an intuitive access to and an effective way of realizing, exploring, and analyzing virtual 3D city models, which are essential tools for effective communication and management of complex urban spatial information in e-planning. In particular, immersive visualization allows for simulating planning scenarios and to receive a close-to-reality impression by both non-expert and expert stakeholders. This contribution is concerned with the main requirements and technical concepts of a system for visualizing virtual 3D city models in large-scale, fully immersive environments. It allows stakeholders ranging from citizens to decision-makers to explore and examine the virtual 3D city model and embedded planning models “in situ”. Fully immersive environments involve a number of specific requirements for both hardware and 3D rendering including enhanced 3D rendering techniques, an immersion-aware, autonomous, and assistive 3D camera system, and a synthetic, immersion-supporting soundscape. Based on these requirements, we have implemented a prototypical visualization system that we present in this article. 
The characteristics of fully immersive visualization enable a number of new applications within e-planning workflows and processes, in particular, with respect to public participation, decision support, and location marketing. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { IGI Global }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/ED12-2/engel_IJEPR_2012_draft.pdf }, sorting = { 3872 } } @article{Klimke2012a, author = { Klimke, Jan and Döllner, Jürgen }, title = { Service-oriented Visualization of Virtual 3D City Models }, journal = { Directions Magazine }, year = { 2012 }, abstract = { Virtual 3D city models can effectively communicate complex 2D and 3D geospatial data. Myriad applications, from urban planning to solar potential of roof surfaces to noise pollution, can be modeled and visualized. However, massive amounts of data need to be processed. Authors Jan Klimke and Jürgen Döllner of the Hasso-Plattner-Institut, University of Potsdam, Germany detail a new, service-oriented approach that may solve some of the challenges of visualizing 3D data. }, note = { http://www.directionsmag.com/articles/service-oriented-visualization-of-virtual-3d-city-models/226560 }, url = { http://www.directionsmag.com/articles/service-oriented-visualization-of-virtual-3d-city-models/226560 }, howpublished = { Online Magazin Article }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/KD2012a/directionsmag.pdf }, sorting = { 4352 } } @article{STKD12, author = { Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Interactive Visualization of Generalized Virtual 3D City Models using Level-of-Abstraction Transitions }, journal = { Computer Graphics Forum }, year = { 2012 }, volume = { 31 }, number = { 3 }, pages = { 885--894 }, abstract = {

Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary. Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.
}, note = { Proceedings EuroVis 2012 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/STKD12/asemmo-eurovis2012.pdf }, doi = { 10.1111/j.1467-8659.2012.03081.x }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=VXqtw44KxY4 }, sorting = { 2304 } } @article{WKO12, author = { Winnemöller, Holger and Kyprianidis, Jan Eric and Olsen, Sven C. }, title = { XDoG: An eXtended difference-of-Gaussians compendium including advanced image stylization }, journal = { Computers \& Graphics }, year = { 2012 }, volume = { 36 }, number = { 6 }, pages = { 740--753 }, abstract = { Recent extensions to the standard Difference-of-Gaussians (DoG) edge detection operator have rendered it less susceptible to noise and increased its aesthetic appeal. Despite these advances, the technical subtleties and stylistic potential of the DoG operator are often overlooked. This paper offers a detailed review of the DoG operator and its extensions, highlighting useful relationships to other image processing techniques. It also presents many new results spanning a variety of styles, including pencil-shading, pastel, hatching, and woodcut. Additionally, we demonstrate a range of subtle artistic effects, such as ghosting, speed-lines, negative edges, indication, and abstraction, all of which are obtained using an extended DoG formulation, or slight modifications thereof. In all cases, the visual quality achieved by the extended DoG operator is comparable to or better than those of systems dedicated to a single style. }, project = { flowabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/WKO12/winnemoeller-cag2012.pdf }, doi = { 10.1016/j.cag.2012.03.004 }, sorting = { 2048 } } @article{PBABD2012, author = { Paredes, E.G. and Bóo, M. and Amor, M. and Bruguera, J.D. and Döllner, J. 
}, title = { Extended hybrid meshing algorithm for multiresolution terrain models }, journal = { International Journal of Geographical Information Science }, year = { 2012 }, volume = { 26 }, number = { 5 }, pages = { 771--793 }, abstract = { Hybrid terrains are a convenient approach for the representation of digital terrain models, integrating heterogeneous data from different sources. In this article, we present a general, efficient scheme for achieving interactive level-of-detail rendering of hybrid terrain models, without the need for a costly preprocessing or resampling of the original data. The presented method works with hybrid digital terrains combining regular grid data and local high-resolution triangulated irregular networks. Since grid and triangulated irregular network data may belong to different datasets, a straightforward combination of both geometries would lead to meshes with holes and overlapping triangles. Our method generates a single multiresolution model integrating the different parts in a coherent way, by performing an adaptive tessellation of the region between their boundaries. Hence, our solution is one of the few existing approaches for integrating different multiresolution algorithms within the same terrain model, achieving a simple interactive rendering of complex hybrid terrains. }, keywords = { 3D modeling; 3D visualization; geovisualization; triangulated irregular networks }, doi = { 10.1080/13658816.2011.615317 }, sorting = { 1536 } } @article{SHTD2012, author = { Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Cartography-Oriented Visualization of Virtual 3D City Models }, journal = { Photogrammetrie - Fernerkundung - Geoinformation (PFG) }, year = { 2012 }, number = { 4 }, pages = { 455--465 }, abstract = {

Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.
}, keywords = { 3D city models, cartography-oriented visualization, style description languages, real-time rendering }, publisher = { E. Schweizerbart'sche Verlagsbuchhandlung }, address = { Johannesstrasse 3A, D-70176 Stuttgart, Germany }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/SHTD2012/asemmo-PFG2012.pdf }, issn = { 1432-8364 }, doi = { 10.1127/1432-8364/2012/0131 }, sorting = { 16 } } @article{TSLHD11, author = { Trapp, Matthias and Schneider, Lars and Lehmann, Christine and Holz, Norman and D{\"o}llner, J{\"u}rgen }, title = { Strategies for Visualizing 3D Points-of-Interest on Mobile Devices }, journal = { Journal of Location Based Services (JLBS) }, year = { 2011 }, volume = { 5 }, number = { 2 }, pages = { 79-99 }, month = { 6 }, abstract = { 3D virtual environments are increasingly used as general-purpose medium for communicating spatial information. In particular, virtual 3D city models have numerous applications such as car navigation, city marketing, tourism, and gaming. In these applications, points-of-interest (POI) play a major role since they typically represent features relevant for specific user tasks and facilitate effective user orientation and navigation through the 3D virtual environment. In this paper, we present strategies that aim at effectively visualizing points-of-interest in a 3D virtual environment used on mobile devices. Here, we additionally have to face the "keyhole" situation, i.e., the users can realize only a small part of the environment due to the limited view space and resolution. For the effective visualization of points-of-interest in 3D virtual environments we propose to combine specialized occlusion management for 3D scenes together with visual cues that handle out-of-frame points-of-interest. 
We also discuss general aspects and definitions of points-of-interest in the scope of 3D models and outline a prototype implementation of the mobile 3D viewer application based on the presented concepts. In addition, we give a first performance evaluation with respect to rendering speed and power consumptions. }, keywords = { 3D visualisation, mobile devices, points-of-interest, real-time rendering }, publisher = { Taylor \& Francis, Inc. }, address = { Bristol, PA, USA }, booktitle = { Journal of Location Based Services (JLBS) }, project = { NFG }, issn = { 1748-9725 }, doi = { 10.1080/17489725.2011.579579 }, sorting = { 1536 } } @article{HHD11, author = { Hildebrandt, Dieter and Hagedorn, Benjamin and Döllner, Jürgen }, title = { Image-Based Strategies for Interactive Visualization of Complex 3D Geovirtual Environments on Lightweight Devices }, journal = { Journal of Location Based Services (JLBS) }, year = { 2011 }, volume = { 5 }, number = { 2 }, pages = { 100--120 }, month = { 6 }, abstract = { In this article, we present strategies for service-oriented, standards and image-based 3D geovisualisation that have the potential to provide interactive visualisation of complex 3D geovirtual environments (3DGeoVE) on lightweight devices. In our approach, interactive geovisualisation clients retrieve sets of 2D images of projective views of 3DGeoVEs generated by a 3D rendering service. As the key advantage of the image-based approach, the complexity that a client is exposed for displaying a visual representation is reduced to a constant factor primarily depending on the image resolution. To provide users with a high degree of interactivity, we propose strategies that are based on additional service-side functionality and on exploiting multiple layers of information encoded into the images for the local reconstruction of visual representations of the remote 3DGeoVE. 
The use of service-orientation and standards facilitates designing distributed 3D geovisualisation systems that are open, interoperable and can easily be adapted to changing requirements. We demonstrate the validity of the proposed strategies by presenting proof-of-concept implementations of several image-based 3D clients for the case of virtual 3D city models. }, keywords = { 3D geovirtual environments, distributed 3D geovisualisation, image-based representation }, publisher = { Taylor \& Francis, Inc. }, address = { Bristol, PA, USA }, sorting = { 2 } } @article{HKHD11, author = { Hildebrandt, Dieter and Klimke, Jan and Hagedorn, Benjamin and Döllner, Jürgen }, title = { Service-Oriented Interactive 3D Visualization of Massive 3D City Models on Thin Clients }, journal = { 2nd Int. Conference on Computing for Geospatial Research \& Application COM.Geo 2011 }, year = { 2011 }, month = { 5 }, abstract = { Virtual 3D city models serve as integration platforms for complex geospatial and georeferenced information and as medium for effective communication of spatial information. In this paper, we present a system architecture for service-oriented, interactive 3D visualization of massive 3D city models on thin clients such as mobile phones and tablets. It is based on high performance, server-side 3D rendering of extended cube maps, which are interactively visualized by corresponding 3D thin clients. As key property, the complexity of the cube map data transmitted between server and client does not depend on the model's complexity. In addition, the system allows the integration of thematic raster and vector geodata into the visualization process. Users have extensive control over the contents and styling of the visual representations. The approach provides a solution for safely, robustly distributing and interactively presenting massive 3D city models. 
A case study related to city marketing based on our prototype implementation shows the potentials of both server-side 3D rendering and fully interactive 3D thin clients on mobile phones. }, url = { dl.acm.org/authorize?432087 }, booktitle = { COM.Geo '11 Proceedings of the 2nd International Conference on Computing for Geospatial Research & Applications }, project = { NFG HPI }, isbn = { 978-1-4503-0681-2 }, doi = { 10.1145/1999320.1999326 }, sorting = { 1024 } } @article{PTD11, author = { Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multiscale Visualization of 3D Geovirtual Environments Using View-Dependent Multi-Perspective Views }, journal = { Journal of WSCG }, year = { 2011 }, volume = { 19 }, number = { 3 }, pages = { 111-118 }, month = { 2 }, abstract = { 3D geovirtual environments (GeoVEs), such as virtual 3D city models or landscape models, are essential visualization tools for effectively communicating complex spatial information. In this paper, we discuss how these environments can be visualized using multi-perspective projections based on view-dependent global deformations. Multi-perspective projections enable 3D visualization similar to panoramic maps, increasing overview and information density in depictions of 3D GeoVEs. To make multi-perspective views an effective medium, they must adjust to the orientation of the virtual camera controlled by the user and constrained by the environment. Thus, changing multi-perspective camera configurations typically require the user to manually adapt the global deformation — an error prone, non-intuitive, and often time-consuming task. Our main contribution comprises a concept for the automatic and view-dependent interpolation of different global deformation preset configurations. Applications and systems that implement such view-dependent global deformations, allow users to smoothly and steadily interact with and navigate through multi-perspective 3D GeoVEs. 
}, keywords = { multi-perspective views, view-dependence, global space deformation, realtime rendering, virtual 3D environments, geovisualization. }, editor = { Václav Skala }, publisher = { UNION Agency – Science Press }, project = { NFG;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/PTD11/PTD11.pdf }, isbn = { 978-80-86943-84-8 }, issn = { 1213-6072 }, link2 = { Video [YouTube] http://www.youtube.com/watch?v=gzZXTXBwccY }, sorting = { 2816 } } @article{RD11b, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Integrierte Echtzeit-Visualisierung von massiven 3D-Punktwolken und georeferenzierten Texturdaten }, journal = { Photogrammetrie Fernerkundung Geoinformation (PFG) }, year = { 2011 }, volume = { 2011 }, number = { 3 }, pages = { 145--154 }, abstract = { Dieser Beitrag stellt ein Verfahren vor, das die integrierte Echtzeit-Visualisierung von massiven 3D-Punktwolkendaten zusammen mit georeferenzierten Texturdaten ermöglicht. Beides sind wesentliche Kategorien von Geodaten, die insbesondere aufgrund ihrer wachsenden Verfügbarkeit und sinkender Kosten, generiert mit flugzeuggestützten oder mobilen Scan-Systemen, zunehmend Einsatz finden. Die Massivität der entstehenden Daten stellt eine Herausforderung sowohl für die Algorithmik wie auch für die Hardware-Ressourcen dar. Um die Daten effizient verarbeiten und in Echtzeit visualisieren zu können, werden sog. Out-of-Core-Algorithmen und Level-of-Detail-Strukturen benötigt. Das vorgestellte Out-of-Core- und LOD-Konzept für unbeschränkt große 3D-Punktwolkendaten ermöglicht deren Echtzeit-Visualisierung zusammen mit georeferenzierten Texturdaten, z. B. digitalen Luftbildern. Dieser Ansatz erlaubt es, 3D-Punktwolkendaten in der Visualisierung mit beliebigen georeferenzierten thematischen Information zu attributieren, sodass z. B. Objekte und Bereiche nicht nur geometrisch, sondern auch thematisch präsentiert, exploriert und analysiert werden können. 
In einem Vorverarbeitungsschritt werden dazu die Daten aufbereitet und in eine räumliche LOD-Struktur überführt, auf der die echtzeitfähige, punktbasierte 3D-Rendering-Technik operiert. Mit dem Ansatz wird es möglich, interaktive 3D-Analyse-Werkzeuge und Simulationsverfahren zu realisieren, die direkt und effizient auf 3D-Punktwolkenrohdaten zugreifen. }, project = { NFG }, sorting = { 512 } } @article{KK11, author = { Kyprianidis, Jan Eric and Kang, Henry }, title = { Image and Video Abstraction by Coherence-Enhancing Filtering }, journal = { Computer Graphics Forum }, year = { 2011 }, volume = { 30 }, number = { 2 }, pages = { 593--602 }, abstract = { In this work, we present a non-photorealistic rendering technique to create stylized abstractions from color images and videos. Our approach is based on adaptive line integral convolution in combination with directional shock filtering. The smoothing process regularizes directional image features while the shock filter provides a sharpening effect. Both operations are guided by a flow field derived from the structure tensor. To obtain a high-quality flow field, we present a novel smoothing scheme for the structure tensor based on Poisson's equation. Our approach effectively regularizes anisotropic image regions while preserving the overall image structure and achieving a consistent level of abstraction. Moreover, it is suitable for per-frame filtering of video and can be efficiently implemented to process content in real-time. 
}, note = { Proceedings Eurographics 2011 }, project = { cefabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/KK11/jkyprian-eg2011.pdf }, sorting = { 2560 } } @article{GKD10, author = { Glander, Tassilo and Kramer, Martin and Döllner, Jürgen }, title = { Erreichbarkeitskarten zur Visualisierung der Mobilitätsqualität im ÖPNV }, journal = { Kartographische Nachrichten }, year = { 2010 }, abstract = { Mobilitätsqualität wird von ÖPNV-Nutzern situativ unmittelbar, aber systematisch nur diffus wahrgenommen. Wir stellen einen Ansatz vor, bei dem das vom ÖPNV-Betreiber bereitgestellte Verbindungsauskunftssystem genutzt wird, um durch eine Menge von Einzelverbindungsanfragen eine Erreichbarkeitskarte für das gesamte Verkehrsgebiet zu erstellen. Neben Erreichbarkeitskarten mit einem Startpunkt lassen sich auch mehrere Startpunkte mit gemittelter Erreichbarkeit berechnen. Die Visualisierung als farbkodierte Erreichbarkeitskarte oder Erreichbarkeitsterrain ermöglicht eine effektive Kommunikation der Mobilitätsqualität einer Region. }, publisher = { Kirschbaum Verlag }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GKD10/KartographischeNachrichten.pdf }, sorting = { 3584 } } @article{ED10a, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Effiziente Verschattungsberechnung für die Solarpotenzialanalyse durch bildbasierte 3D-Analyse }, journal = { Proceedings of the GeoInformatik 2010 }, year = { 2010 }, abstract = { In diesem Paper stellen wir ein neues Verfahren zur effizienten Berechnung von Verschattungsdaten als zentralen Bestandteil einer Solarpotenzialanalyse vor. Erneuerbare Energien tragen maßgeblich dazu bei, die CO2 Emissionen zu reduzieren und von fossilen Rohstoffen unabhängiger zu werden. Private Investoren sowie auch Kommunen sind daran interessiert, potenzielle Flächen für die Anbringung von Solaranlagen zu identifizieren. 
Eine großflächige Identifikation von Flächen ist nur durch eine vollautomatische Solarpotenzialanalyse möglich. Der hier vorgestellte Algorithmus für die Verschattungsberechnung basiert auf der 3D-Analyse einer virtuellen 3D-Umgebung. Die Genauigkeit der Verschattungsberechnung ist maßgeblich von dem Zeitintervall für die Diskretisierung eines untersuchten Zeitraums abhängig. Mit unserem Verfahren ist die Berechnungszeit jedoch nahezu unabhängig von dem Zeitintervall. Zusätzlich nutzt es moderne Grafikhardware um die Berechnung zu beschleunigen, indem die 3D-Analyserechnungen durch bildbasierte Verfahren approximiert werden. We-der Erkennung noch explizite Modellierung von Gebäuden oder Vegetation sind für die Solarpotentialanalyse notwendig, was den Einsatz der effizienten Verschattungsanalyse in einer Vielzahl von Anwendungen und Systemen ermöglicht. }, publisher = { Wichmann Verlag }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/ED10/Engel_Doellner_Verschattungsberechnung_draft.pdf }, sorting = { 3328 } } @article{LD10a, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { 3D Feature Surface Properties and Their Application in Geovisualization }, journal = { Computers, Environment and Urban Systems (CEUS) }, year = { 2010 }, volume = { 34 }, number = { 6 }, pages = { 476--483 }, abstract = { New acquisition methods have increased the availability of surface property data that capture location-dependent data on feature surfaces. However, these data are not supported as fully in the geovisualization of the Digital City as established data categories such as feature attributes, 2D rasters, or geometry. Consequently, 3D surface properties are largely excluded from the information extraction and knowledge creation process of geovisualization despite their potential for being an effective tool in many such tasks. 
To overcome this situation, this paper examines the benefits of a better integration into geovisualization systems in terms of two examples and discusses technological foundations for surface property support. The main contribution is the identification of computer graphics techniques as a suitable basis for such support. This way, the processing of surface property data fits well into existing visualization systems. This finding is demonstrated through an interactive prototypic visualization system that extends an existing system with surface property support. While this prototype concentrates on technology and neglects user-related and task-related aspects, the paper includes a discussion on challenges for making surface properties accessible to a wider audience. }, keywords = { geovisualization, exploratory data analysis, 3D surface properties, textures, computer graphics, GPU }, publisher = { Elsevier }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/LD10a/ceus09.pdf }, doi = { 10.1016/j.compenvurbsys.2010.04.003 }, sorting = { 2816 } } @article{GD09, author = { Glander, Tassilo and Döllner, Jürgen }, title = { Abstract representations for interactive visualization of virtual 3D city models }, journal = { Computers, Environment and Urban Systems }, year = { 2009 }, volume = { 33 }, number = { 5 }, pages = { 375 -- 387 }, month = { 9 }, abstract = { Virtual 3D city models increasingly cover whole city areas; hence, the perception of complex urban structures becomes increasingly difficult. Using abstract visualization, complexity of these models can be hidden where its visibility is unnecessary, while important features are maintained and highlighted for better comprehension and communication. 
We present a technique to automatically generalize a given virtual 3D city model consisting of building models, an infrastructure network and optional land coverage data; this technique creates several representations of increasing levels of abstraction. Using the infrastructure network, our technique groups building models and replaces them with cell blocks, while preserving local landmarks. By computing a landmark hierarchy, we reduce the set of initial landmarks in a spatially balanced manner for use in higher levels of abstraction. In four application examples, we demonstrate smooth visualization of transitions between precomputed representations; dynamic landmark highlighting according to virtual camera distance; an implementation of a cognitively enhanced route representation, and generalization lenses to combine precomputed representations in focus + context visualization. }, keywords = { Generalization, 3D city models, Landmarks, Interactive visualization }, url = { www.sciencedirect.com/science/article/B6V9K-4WXB4VV-2/2/a83c6d398b20e98143a17638455ecd82 }, editor = { Peter van Oosterom }, publisher = { Elsevier }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/GD09/glander_doellner_abstract_visualization_draft.pdf }, doi = { 10.1016/j.compenvurbsys.2009.07.003 }, sorting = { 1536 } } @article{LTD09, author = { Lorenz, Haik and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interaktive, multiperspektivische Ansichten für geovirtuelle 3D-Umgebungen }, journal = { Kartographische Nachrichten }, year = { 2009 }, volume = { 04 }, pages = { 175-181 }, month = { 9 }, abstract = { In diesem Beitrag werden Visualisierungstechniken vorgestellt, die auf den Gestaltungsprinzipien von Panoramakarten und Detail- und Überblicksdarstellungen beruhen. 
Die Techniken generieren multiperspektivische Ansichten für geovirtuelle 3D-Umgebungen, insbesondere für zwei häufig benötigte Ansichtsformen, die Vogelperspektive und die Fußgängerperspektive. Die Techniken tragen dazu bei, die Bandbreite der computergestützten, interaktiven 3D-Darstellungen zur Visualisierung von virtuellen 3D-Raummodellen zu erweitern und die Effektivität raumbezogener Informationsdarstellungen im Hinblick auf Ortsbewusstsein und Informationsgehalt zu verbessern. }, publisher = { Kirschbaum Verlag GmbH Bonn, Fachverlag für Verkehr und Technik }, project = { NFG }, sorting = { 1280 } } @article{HD09, author = { Hagedorn, Benjamin and D{\"o}llner, J{\"u}rgen }, title = { Sketch-Based Navigation in 3D Virtual Environments }, journal = { it - Information Technology }, year = { 2009 }, number = { 3 }, pages = { 163--170 }, month = { 5 }, abstract = { Navigation represents a fundamental interaction technique for 3D virtual environments (3DVEs) – it enables users to explore the virtual world and to interact with its objects. For this, effective navigation techniques are required that take into account users and their goals. They also should prevent common problems of 3D navigation, such as an intricate camera control or “getting-lost” situations due to incoherent and confusing camera views. Smart navigation aims at this category of navigation techniques; it represents a special type of smart interaction and, thus, an essential element of smart graphics. This article presents the conceptual building blocks of smart navigation for 3DVEs and describes a navigation technique that allows for specifying navigation commands by sketches and gestures: Users sketch their navigation intentions on top of the perspective projection of the 3D scene; the system interprets these sketches regarding the affected scene geometry, as well as the spatial and temporal context. Based on this interpretation, navigation activities are derived and automatically performed. 
By providing assistance and automation, the smart navigation approach can substantially simplify user interfaces for 3DVEs and, thus, represents an essential component for novel applications of 3DVEs, e.g., based on touch-sensitive displays. }, publisher = { Oldenburger Wissenschaftsverlag }, doi = { 10.1524/itit.2009.0537 }, sorting = { 1792 } } @article{KKD09b, author = { Kyprianidis, Jan Eric and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Image and Video Abstraction by Anisotropic Kuwahara Filtering }, journal = { Computer Graphics Forum }, year = { 2009 }, volume = { 28 }, number = { 7 }, pages = { 1955--1963 }, abstract = { We present a non-photorealistic rendering technique to transform colorimages and videos into painterly abstractions. It is based on a generalization of the Kuwahara filter that is adapted to the local shape of features, derived from the smoothed structure tensor. Contrary to conventional edge-preserving filters, our filter generates a painting-like flattening effect along the local feature directions while preserving shape boundaries. As opposed to conventional painting algorithms, it produces temporally coherent video abstraction without extra processing. The GPU implementation of our method processes video in real-time. The results have the clearness of cartoon illustrations but also exhibit directional information as found in oil paintings. }, note = { Special issue on Pacific Graphics 2009 }, project = { gpuakf }, files = { user_upload/fachgebiete/doellner/publications/2009/KKD09b/jkyprian-pg2009.pdf }, sorting = { 6656 } } @article{ED09, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Approaches Towards Visual 3D Analysis for Digital Landscapes and Its Applications }, journal = { Digital Landscape Architecture Proceedings 2009 }, year = { 2009 }, pages = { 33-41 }, abstract = { This contribution outlines approaches towards automatic visual 3D analysis of virtual 3D landscape models. 
We introduce different notions and definitions of concepts and functions for visual 3D analysis and outline a generic function that determines visual analytic measures among objects of landscape models. This class of functions can be used as a general tool to derive various landscape characteristics and measures on a per-object basis. We also show how these tools can be efficiently implemented to cope with large, complex landscape models. The approach exemplifies how landscape models can be used in the scope of analysis and simulation, which possibly will lead to a class of metrics and characteristics that can be automatically determined and, thereby, may help to systematically characterize and compare landscape plans and concepts. }, publisher = { Anhalt University of Applied Sciences }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/ED10/3DVisualAnalysisForDigitalLandscapes_color.pdf }, sorting = { 768 } } @article{BAD07, author = { Boo, Montserrat and Amor, Margarita and D{\"o}llner, J{\"u}rgen }, title = { Unified Hybrid Terrain Representation Based on Local Convexifications }, journal = { GeoInformatica }, year = { 2007 }, volume = { 11 }, number = { 3 }, pages = { 331--357 }, doi = { 10.1007/s10707-006-0003-y }, sorting = { 16 } } @article{DH07a, author = { D{\"o}llner, J{\"u}rgen and Hagedorn, Benjamin }, title = { Integration von GIS-, CAD- und BIM-Daten mit dienstbasierten virtuellen 3D-Stadtmodellen }, journal = { GIS - Zeitschrift f{\"u}r Geoinformatik }, year = { 2007 }, number = { 11 }, pages = { 28--37 }, abstract = { Dieser Beitrag beschreibt wie urbane räumliche Daten aus verschiedenen Anwendungsdomänen wie dem Computer Aided Design (CAD), Geographischen Informationssystemen (GIS) und Bauwerksinformationssystemen (BIM) mit Hilfe eines virtuellen 3D-Stadtmodells innerhalb einer servicebasierten Systemarchitektur kombiniert und integriert werden können. 
Der Ansatz beruht auf einem 3D-Viewer für komplexe Stadtmodelle, der es dem Nutzer ermöglicht auf die verschiedenen, von einer dienstbasierten Geodateninfrastruktur bereitgestellten, urbanen Geodaten zuzugreifen, diese zu importieren und zu integrieren. Der 3D-Viewer enthält eine Reihe von Adaptern für verschiedenen OGC Web Services, verwaltet das kombinierte virtuelle 3D-Stadtmodell auf der Basis von CityGML und ermöglicht dem Benutzer die Exploration und Analyse dieser integrierten Daten. Der Ansatz zeigt, wie urbane Geodaten unterschiedlicher Herkunft und Granularität auf der Visualisierungsebene integriert werden können. Diese Arbeit beruht auf der Entwicklung eines interoperablen 3D-Viewer-Clients innerhalb des CAD/GIS/BIM-Threads der 4. Phase der Web Services Initiative (OWS-4) des Open Geospatial Consortiums. }, publisher = { abcverlag }, sorting = { 32 } } @article{DH07b, author = { D{\"o}llner, J{\"u}rgen and Hagedorn, Benjamin }, title = { Welten verbinden - Virtuelle 3D-Stadtmodelle im OGC-Projekt OWS-4 }, journal = { GIS Business - Geoinformationstechnologie f{\"u}r die Praxis }, year = { 2007 }, number = { 9 }, pages = { 14--17 }, abstract = { Virtuelle 3D-Stadtmodelle bieten einen konzeptionellen und technischen Rahmen um unabhängige, heterogene und verteilte raumbezogene Daten situations- und aufgabengetrieben effizient zu integrieren und effektiv zu nutzen. Im OGC-Projekt OWS-4 wurde dies demonstriert. 
}, publisher = { abcverlag }, sorting = { 4096 } } @article{Do06, author = { D{\"o}llner, J{\"u}rgen }, title = { LandXplorer – ein Werkzeug f{\"u}r komplexe Geoinformationen auf Grundlage virtueller 3D-Stadt- und 3D-Landschaftsmodelle }, journal = { Vermessung Brandenburg }, year = { 2006 }, volume = { 1 }, pages = { 32--39 }, sorting = { 4 } } @article{DBL06, author = { D{\"o}llner, J{\"u}rgen and Buchholz, Henrik and Lorenz, Haik }, title = { Ambient Occlusion - ein Schritt zur realistischen Beleuchtung von 3D-Stadtmodellen }, journal = { GIS - Zeitschrift f{\"u}r Geoinformatik }, year = { 2006 }, pages = { 7--13 }, abstract = { [Deutsch] Der Beitrag gibt einen Überblick über innovative, echtzeitfähige Verfahren zur realistischen Beleuchtung von 3D-Stadtmodellen. Diese Verfahren nutzen globale Beleuchtungsmodelle und ermöglichen dadurch für die Wahrnehmung und Kognition optimierte grafische Darstellungen komplexer 3D-Modelle. Im Folgenden werden die theoretischen Grundlagen sowie Algorithmen zur approximativen Berechnung des ambienten Lichts beschrieben. Die vorgestellten Algorithmen wurden in ein Viewer- und 3D-Content-Management-System für CityGML-basierte virtuelle Stadtmodelle erfolgreich integriert. [English] This contribution gives an overview of innovative real-time techniques for realistic illumination of 3D city models. The techniques rely on global illumination models and enable graphical representations of complex 3D models that are optimized with respect to human perception and cognition. We describe the theoretical background and algorithms for approximating the calculation of ambient light. The techniques have been implemented as part of a viewer and 3D content management system for CityGML-based virtual city models. 
}, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2006/DBL06/GlobIllum_GIS_v10.pdf }, sorting = { 1536 } } @article{Do05c, author = { D{\"o}llner, J{\"u}rgen }, title = { Inkrementelle Entwicklung von 3D-Stadtmodellen }, journal = { GeoBIT }, year = { 2005 }, volume = { 3 }, sorting = { 16 } } @article{DP05, author = { D{\"o}llner, J{\"u}rgen and Paar, Philip }, title = { Die Landschaft aus neuer Sicht }, journal = { GeoBIT }, year = { 2005 }, volume = { 5 }, pages = { 20--22 }, sorting = { 32 } } @article{Do05b, author = { D{\"o}llner, J{\"u}rgen }, title = { Geospatial Digital Rights Management in Geovisualization }, journal = { The Cartographic Journal }, year = { 2005 }, volume = { 42 }, number = { 1 }, pages = { 27--34 }, abstract = { Geovisualization offers powerful tools, techniques, and strategies to present, explore, analyze, and manage geoinformation. Interactive geovirtual environments such as virtual 3D maps or virtual 3D city models, however, raise the question how to control geodata usage and distribution. We present a concept for embedding digital rights in geovisualizations. It is based on geo-documents, an object-oriented scheme to specify a wide range of geovisualizations. Geo-documents are assembled by building blocks categorized into presentation, structure, interaction, animation, and Digital Rights Management (DRM) classes. DRM objects allow for defining permissions and constraints for all objects contained in geo-documents. In this way, authors of geovisualizations can control how their geo-documents are used, personalized, and redistributed by users. The strengths of the presented concept include the ability to integrate heterogeneous 2D and 3D geodata within a compact design scheme and the ability to cope with privacy, security, and copyright issues. 
Embedded digital rights in geovisualizations can be applied to improve the usability of geodata user interfaces, to implement publisher-subscriber communication systems for geodata, and to establish business models for geodata trading systems. }, files = { user_upload/fachgebiete/doellner/publications/2005/Do05b/2005_CJ_Doellner_DigitalRights.pdf }, sorting = { 64 } } @article{ND05, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Depicting Dynamics Using Principles of Visual Art and Narrations }, journal = { IEEE Computer Graphics and Applications }, year = { 2005 }, volume = { 25 }, number = { 3 }, pages = { 40--51 }, files = { user_upload/fachgebiete/doellner/publications/2005/ND05/g3040.pdf }, sorting = { 128 } } @article{Do05a, author = { D{\"o}llner, J{\"u}rgen }, title = { Constraints as Means of Controlling Usage of Geovirtual Environments }, journal = { Journal of Cartography and Geographic Information Science }, year = { 2005 }, volume = { 32 }, number = { 2 }, pages = { 69--80 }, abstract = { In this paper a concept for controlling the usage of geovirtual environments by means of constraints is developed. Constraints serve to improve the usability of geovirtual environments by guarding the navigation and interaction processes of users. In addition, they facilitate the implementation of Digital Rights Management for geovirtual environments. The presented approach distinguishes spatial constraints, structural constraints, and redistribution constraints. Several types of spatial constraints have been identified for navigation in geovirtual environments. To demonstrate their applications, this paper reports on using constraints in virtual 3D city models. 
}, files = { user_upload/fachgebiete/doellner/publications/2005/Do05a/2005_cagis_doellner_constraints.pdf }, sorting = { 256 } } @article{KD03, author = { Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Soft Shadows Using a Single Light Sample }, journal = { International Winter School of Computer Graphics, Journal of WSCG }, year = { 2003 }, volume = { 11 }, number = { 2 }, pages = { 255--262 }, abstract = { We present a real-time rendering algorithm that generates soft shadows of dynamic scenes using a single light sample. As a depth-map algorithm it can handle arbitrary shadowed surfaces. The shadow-casting surfaces, however, should satisfy a few geometric properties to prevent artifacts. Our algorithm is based on a bivariate attenuation function, whose result modulates the intensity of a light causing shadows. The first argument specifies the distance of the occluding point to the shadowed point; the second argument measures how deep the shadowed point is inside the shadow. The attenuation function can be implemented using dependent texture accesses; the complete implementation of the algorithm can be accelerated by today’s graphics hardware. We outline the implementation, and discuss details of artifact prevention and filtering. }, files = { user_upload/fachgebiete/doellner/publications/2003/KD03/2003-realtime soft shadows.pdf,user_upload/fachgebiete/doellner/publications/2003/KD03/softshadows_wscg03_talk.pdf }, sorting = { 768 } } @article{ND03b, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Edge-Enhancement - An Algorithm for Real-Time Non-Photorealistic Rendering }, journal = { International Winter School of Computer Graphics, Journal of WSCG }, year = { 2003 }, volume = { 11 }, number = { 2 }, pages = { 346--353 }, abstract = { In this paper, we propose an algorithm for enhancing edges of real-time non-photorealistic renderings. 
It is based on the edge map, a 2D texture that encodes visually important edges of 3D scene objects, and includes silhouette edges, border edges, and crease edges. The edge map allows us to derive and augment a wide range of nonphotorealistic rendering styles. Furthermore, the algorithm is designed to be orthogonal to complementary realtime rendering techniques. The implementation is based on multipass rendering: First, we extract geometrical properties of 3D scene objects generating image-space data similar to G-buffers. Next, we extract discontinuities in the image-space data using common graphics hardware to emulate image-processing operations. In subsequent rendering passes, the algorithm applies texture mapping to combine the edge map with 3D scene objects. }, files = { user_upload/fachgebiete/doellner/publications/2003/ND03b/2003-edgeenhancement.pdf }, sorting = { 1024 } } @article{DBK03, author = { D{\"o}llner, J{\"u}rgen and Baumann, Konstantin and Kersting, Oliver }, title = { LandExplorer - Ein System f{\"u}r interaktive 3D-Karten }, journal = { Kartographische Schriften }, year = { 2003 }, volume = { 7 }, pages = { 67--76 }, abstract = { Das Projekt LandExplorer befasst sich mit theoretischen und technischen Fragestellungen der Kommunikation raumbezogener Informationen. In diesem Kontext sind interaktive 3D-Karten ein zentrales Konstrukt, das zur Präsentation, Exploration, Analyse und Editierung von raumbezogenen Daten dient. Eine einzelne 3D-Karte setzt sich aus Kartenbauelementen zusammen, die visuelle, strukturelle und verhaltensgebende Aspekte spezifizieren. Durch Instanziierung, Komposition und Assoziation von Kartenbauelementen entstehen konkrete 3D-Karten. Diese Aufgaben werden von einem 3D-Karten-Editor unterstützt, der 3D-Karten als in sich abgeschlossene digitale Dokumente verwaltet. Zur Nutzung von 3D-Karten-Dokumenten steht ein 3D-Karten-Viewer bereit. 
Somit können 3D-Karten-Dokumente plattform- und anwendungsübergreifend weitergegeben werden. 3D-Karten des LandExplorer-Systems enthalten damit sowohl die raumbezogenen Daten als auch die Werkzeuge und Strategien zu deren Kommunikation. }, files = { user_upload/fachgebiete/doellner/publications/2003/DBK03/DoellnerBaumannKersting_LandExplorer_GeoVis2003.pdf }, sorting = { 1280 } } @article{Do03b, author = { D{\"o}llner, J{\"u}rgen }, title = { Die Landschaft aus neuer Sicht. Visualisierung raumbezogener Information mittels 3D-Karten }, journal = { Journal GeoBit - Magazin f{\"u}r raumbezogene Informationstechnologie }, year = { 2003 }, number = { 5 }, pages = { 29--31 }, sorting = { 1536 } } @article{Do02a, author = { D{\"o}llner, J{\"u}rgen }, title = { Geovisualization: The Role of Computer Graphics and Software Engineering }, journal = { Geoinformatics }, year = { 2002 }, volume = { 5 }, number = { 12 }, pages = { 28--29 }, sorting = { 8 } } @article{DH02, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { A Generic Rendering System }, journal = { IEEE Transactions on Visualization and Computer Graphics }, year = { 2002 }, volume = { 8 }, number = { 2 }, pages = { 99--118 }, abstract = { We describe the software architecture of a rendering system that follows a pragmatic approach to integrating and bundling the power of different low-level rendering systems within an object-oriented framework. The generic rendering system provides higher-level abstractions to existing rendering systems and serves as a framework for developing new rendering techniques. It wraps the functionality of several, widely used rendering systems, defines a unified, object-oriented application programming interface, and provides an extensible, customizable apparatus for evaluating and interpreting hierarchical scene information. 
As a fundamental property, individual features of a specific rendering system can be integrated into the generic rendering system in a transparent way. The system is based on a state machine, called engine, which operates on rendering components. Four major categories of rendering components constitute the generic rendering system: shapes represent geometries; attributes specify properties assigned to geometries and scenes; handlers encapsulate rendering algorithms, and techniques represent evaluation strategies for rendering components. As a proof of concept, we have implemented the described software architecture by the Virtual Rendering System which currently wraps OpenGL, Radiance, POV Ray, and RenderMan. }, keywords = { Rendering systems, object-oriented graphics, generic rendering, rendering framework, multi-pass rendering }, files = { user_upload/fachgebiete/doellner/publications/2002/DH02/2002_DollnerHinrichs_GenericRendering_draft.pdf }, sorting = { 16 } } @article{Do01a, author = { D{\"o}llner, J{\"u}rgen }, title = { Raumbezogene Informationsvisualisierung mit dynamischen, interaktiven 3D-Karten }, journal = { Kartographische Nachrichten }, year = { 2001 }, volume = { 51 }, number = { 4 }, pages = { 80--85 }, abstract = { Dieser Beitrag beschreibt computergraphische Konzepte für die Visualisierung und Kommunikation raumbezogener Informationen auf der Grundlage kartographischer Prinzipien. Die Kartographie mit ihrem über Jahrhunderte entwickelten Repertoire an Methoden und Verfahren bildet die Grundlage für das in diesem Beitrag vorgestellte Werkzeug zur Präsentation, Exploration und Analyse raumbezogener Daten und Prozesse, den dynamischen, interaktiven 3D-Karten. Der Beitrag gibt einen Überblick über die zum Einsatz gelangenden computergraphischen Verfahren, die sowohl eine hohe visuelle Bildkomplexität als auch eine Darstellung in Echtzeit ermöglichen. 
Interaktive, dynamische 3D-Karten lassen sich als kartenverwandte Darstellungen begreifen, die raumbezogene Daten und Prozesse auf der Grundlage eines digitalen Geländemodells dreidimensional computergraphisch visualisieren, die Interaktion mit dem Dargestellten ermöglichen und den Kartenaufbau und die Kartengestaltung in Abhängigkeit von der Sichtsituation, den Benutzerwünschen und dem Benutzerverhalten dynamisch festlegen. Der Begriff „3D-Karte“ wurde gewählt, da mit ihm – im heutigen Sprachgebrauch auf dem Gebiet der Computergraphik und Visualisierung – intuitiv ein dreidimensionales Kartenmodell und dessen geometrische Projektion auf eine Zeichenfläche verstanden wird. Arbeiten, die sich mit kartenverwandten Darstellungen in 3D befassen, finden sich u.a. bei Ervin (1993), bei Häberling (2000) in Form der topographischen 3D-Karte und bei Terribilini (1999) in Form des kartographischen 3D-Modells. Kartenverwandte Darstellungen in virtuellen 3D-Umgebungen erlauben allgemein die Interaktion zwischen Benutzer und Karten, die Immersion des Benutzers in die Umgebung (in Abhängigkeit von der eingesetzten VRHardware), die Variierung der Informationsintensität und schließlich die Ausstattung von Kartenobjekten mit Intelligenz (MacEachren et al. 1999) - neue Formen von Karten und Kartennutzung entstehen. Kennzeichnend für die hier vorgestellten 3D-Karten ist es, dass sie auf einem hybriden Geländemodell basieren, dynamisch texturiert werden und softwaretechnisch durch Zusammenschluss von vorgefertigten Kartenbauelementen systematisch und konstruktiv erstellt werden können. In den folgenden Abschnitten wird die Multiresolutionsmodellierung der Geländeoberfläche, Verfahren zur Texturierung der Geländeoberfläche und die Kartenbauelemente besprochen. 
}, files = { user_upload/fachgebiete/doellner/publications/2001/Do01a/d_kartonachr.pdf }, sorting = { 128 } } @article{DH00b, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { An Object-Oriented Approach for Integrating 3D Visualization Systems and GIS }, journal = { Computers \& Geosciences }, year = { 2000 }, volume = { 26 }, number = { 1 }, pages = { 67--76 }, abstract = { Visualization has become an integral part in many applications of GIS. Due to the rapid development of computer graphics, visualization and animation techniques, general-purpose GIS can no longer satisfy the multitude of visualization demands. Therefore, GIS have to utilize independent visualization toolkits. This article examines how visualization systems can be used with and integrated into GIS. We analyze several key characteristics visualization toolkits should satisfy in order to be used efficiently by GIS. We show how GIS can provide visualization and animation features for geo objects by embedding the visualization system using object-oriented techniques. The concepts are described along with a new visualization and animation toolkit which provides extensible objectoriented technology for the development of visualization components for 2D, 3D, and timevarying data. The design of this visualization toolkit concentrates on a seamless integration of application-specific geo-data into visualization components, an open interface for different rendering techniques, and an advanced management of data dynamics. 
}, keywords = { ViSC, Visualization Framework, Object-oriented Visualization, Computer Animation }, publisher = { Elsevier Science Verlag }, files = { user_upload/fachgebiete/doellner/publications/2000/DH00b/dh_geoscience.pdf }, sorting = { 256 } } @article{BBDHHKSS00, author = { Becker, Ludger and Bernard, Lars and D{\"o}llner, J{\"u}rgen and Hammelbeck, Stefan and Hinrichs, Klaus and Kr{\"u}ger, Thomas and Schmidt, Benno and Streit, Ulrich }, title = { Integration of Dynamic Atmospheric Modeling and Object-Oriented GIS }, journal = { Geoinformationssysteme }, year = { 2000 }, volume = { 13 }, number = { 2 }, pages = { 19--23 }, abstract = { Efforts to integrate Geographic Information Systems (GIS) and environmental simulation models often fail due to the structure of current GIS which do not meet the requirements of complex numerical simulation techniques. Therefore research in the field of environmental modeling focuses on adequate flexible simulation methods and powerful interoperable GIS interfaces to implement these methods. This contribution shows approaches towards an interoperable object-oriented GIS interface for atmospheric modeling and analysis. The architecture and specifics of a first prototype system, called AtmoGIS, are presented. }, files = { user_upload/fachgebiete/doellner/publications/2000/BBDHHKSS00/GIS_becker.pdf }, sorting = { 2816 } } @article{DU98, author = { D{\"o}llner, J{\"u}rgen and Uhlenk{\"u}ken, C. 
}, title = { Von Bits {\"u}ber Bilder zur Bedeutung }, journal = { Forschungsjournal der Westf{\"a}lischen Wilhelms-Universit{\"a}t M{\"u}nster }, year = { 1998 }, number = { 1 }, pages = { 29--31 }, sorting = { 2048 } } @article{DH97a, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Object-Oriented 3D Modeling, Animation and Interaction }, journal = { The Journal of Visualization and Computer Animation (JVCA) }, year = { 1997 }, volume = { 8 }, number = { 1 }, pages = { 33--64 }, abstract = { We present an object-oriented 3D graphics and animation framework which provides a new methodology for the symmetric modeling of geometry and behavior. The toolkit separates the specification of geometry and behavior by two types of directed acyclic graphs, the geometry graph and the behavior graph, which are linked together through constraint relations. All geometry objects and behavior objects are represented as DAG nodes. The geometry graph provides a renderer-independent hierarchical description of 3D scenes and rendering processes. The behavior graph specifies time- and event-dependent constraints applied to graphics objects. Behavior graphs simplify the specification of complex animations and 3D interactions by providing nodes for the management of the time and event flow (e.g. durations, time layouts, time repeaters, actions). Nodes contain, manipulate and share instances of constrainable graphical abstract data types. Geometry nodes and behavior nodes are used to configure high-level 3D widgets, i.e. highlevel building blocks for constructing 3D applications. The fine-grained object structure of the system leads to an extensible reusable framework which can be implemented efficiently. 
}, keywords = { Computer Animation, Object-oriented Visualization, 3D interaction, Virtual Reality }, files = { user_upload/fachgebiete/doellner/publications/1997/DH97a/dh_mam_jvca.pdf }, sorting = { 1280 } } @book{Do01c, author = { D{\"o}llner, J{\"u}rgen }, title = { Software-Architektur computergrafischer Systeme }, year = { 2001 }, abstract = { Die vorliegende Arbeit diskutiert den Entwurf und die Implementierung von Software-Architekturen computergraphischer Systeme und stellt einen Ansatz zur Integration heterogener computergraphischer Software-Architekturen vor. Der Begriff computergraphische Systeme bezeichnet im Allgemeinen Software und Hardware, die die computerunterstützte Modellierung realer oder imaginärer Objekte und die Bilderzeugung auf der Grundlage dieser Modelle übernehmen. Im Zentrum der vorliegenden Arbeit steht die diesen Systemen zugrundeliegende Software-Architektur. Die Software-Architektur eines Systems beschreibt die Struktur der das System implementierenden Software durch ein Software-Modell sowie das statische und dynamische Zusammenspiel der Modellkomponenten. Die Arbeit analysiert zunächst die Software-Architekturen ausgewählter computergraphischer Systeme und evaluiert sie hinsichtlich ihrer Verständlichkeit, Wiederverwendbarkeit und Erweiterbarkeit. Betrachtet werden hauptsächlich solche Systeme, die allgemein als Renderingsysteme bezeichnet werden: Sie besitzen überwiegend eine auf die Bilderzeugung ausgerichtete Funktionalität. Als wesentliche Aspekte der Software-Architektur von Renderingsystemen identifizieren wir die Modellierung von Renderingkomponenten und Renderingverfahren. Ein verallgemeinernder Entwurf eines generischen Renderingsystems, das unterschiedliche Renderingsysteme inkorporiert und durch eine methodisch einheitliche Schnittstelle abstrahiert, schließt sich an. 
Der Entwurf wird ergänzt durch ein Konzept für einen generischen Szenengraphen, der die Beschreibung graphisch-geometrischer Sachverhalte und ihre Umsetzung mit unterschiedlichen Renderingverfahren erlaubt. Die Leistungsfähigkeit dieser Software-Architektur wird an mehreren Fallstudien aufgezeigt. Sie untersuchen die Integration von Multi-Pass-Rendering, von nichtphotorealistischem Rendering und von Terrain- Rendering in Verbindung mit dem generischen Renderingsystem und generischen Szenengraphen. Schließlich zeigt die Arbeit, welche Konsequenzen und welche Möglichkeiten aus dem verallgemeinernden Entwurf des generischen Renderingsystems und des generischen Szenengraphen sich für die Software-Architektur zukünftiger computergraphischer Systeme ergeben. }, note = { Habilitationsschrift }, keywords = { Software-Architektur, Computer Grafik, Szenengraph, Multi-Pass-Rendering, NPR, Terrain-Rendering }, publisher = { Westf{\"a}lische Wilhelms-Universit{\"a}t M{\"u}nster }, files = { user_upload/fachgebiete/doellner/publications/2001/Do01c/dollner_habil.pdf }, sorting = { 64 } } @inbook{SBTD2017, author = { Scheibel, Willy and Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Attributed Vertex Clouds }, year = { 2017 }, month = { 3 }, abstract = {

In today's computer graphics applications, large 3D scenes are rendered which consist of polygonal geometries such as triangle meshes. Using state-of-the-art techniques, this geometry is often represented on the GPU using vertex and index buffers, as well as additional auxiliary data such as textures or uniform buffers. For polygonal meshes of arbitrary complexity, the described approach is indispensable. However, there are several types of simpler geometries (e.g., cuboids, spheres, tubes, or splats) that can be generated procedurally. We present an efficient data representation and rendering concept for such geometries, denoted as attributed vertex clouds (AVCs). Using this approach, geometry is generated on the GPU during execution of the programmable rendering pipeline. Each vertex is used as the argument for a function that procedurally generates the target geometry. This function is called a transfer function, and it is implemented using shader programs and therefore executed as part of the rendering process. This approach allows for compact geometry representation and results in reduced memory footprints in comparison to traditional representations. By shifting geometry generation to the GPU, the resulting volatile geometry can be controlled flexibly, i.e., its position, parameterization, and even the type of geometry can be modified without requiring state changes or uploading new data to the GPU. Performance measurements suggest improved rendering times and reduced memory transmission through the rendering pipeline.
}, editor = { Christopher Oat }, publisher = { Wolfgang Engel }, series = { GPU Pro }, edition = { 8 }, booktitle = { GPU Zen }, project = { HPI;NFGII;BIMAP }, institution = { Hasso Plattner Institute, University of Potsdam }, sorting = { 4 }, state = { 3 } } @inbook{LD10b, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { High-Quality Non-Planar Projections Using Real-time Piecewise Perspective Projections }, year = { 2010 }, volume = { 68 }, pages = { 45--58 }, abstract = { This paper presents an approach to real-time rendering of non-planar projections with a single center and straight projection rays. Its goal is to provide the same optimal and consistent image quality GPUs deliver for perspective projections. It therefor renders the result directly without image resampling. In contrast to most object-space approaches, it does not evaluate non-linear functions on the GPU, but approximates the projection itself by a set of perspective projection pieces. Within each piece, graphics hardware can provide optimal image quality. The result is a coherent and crisp rendering. Procedural textures and stylization effects greatly benefit from our method as they usually rely on screen-space operations. The real-time implementation runs entirely on GPU. It replicates input primitives on demand and renders them into all relevant projection pieces. The method is independent of the input mesh density and is not restricted to static meshes. Thus, it is well suited for interactive applications. We demonstrate an analytic and a freely designed projection based on our method. }, editor = { Alpesh Kumar Ranchordas and Jo{\~a}o Madeiras Pereira and H{\'e}lder J. Ara{\'u}jo and Jo{\~a}o Manuel R. S. Tavares }, publisher = { Springer }, series = { Communications in Computer and Information Science }, booktitle = { Computer Vision, Imaging and Computer Graphics. Theory and Applications. International Joint Conference, VISIGRAPP 2009, Lisboa, Portugal, February 5-8, 2009. 
Revised Selected Papers }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/LD10b/ppp_rev.pdf }, sorting = { 128 } } @inproceedings{BTD2014, author = { Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Animated Visualization of Massive Air-Traffic Trajectories }, year = { 2014 }, pages = { 172--181 }, abstract = { With increasing numbers of flights world-wide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. Interactive and dynamic 3D visualization and visual analysis of massive aircraft trajectories, i.e., analytical reasoning enabled by interactive cyber worlds, can be used to approach these challenges. To facilitate this kind of analysis, especially in the context of real-time data, interactive tools for filtering, mapping, and rendering are required. In particular, the mapping process should be configurable at run-time and support both static mappings and animations to allow users to effectively explore and realize movement dynamics. However, with growing data size and complexity, these stages of the visualization pipeline require computationally efficient implementations to be capable of processing within real-time constraints. This paper presents an approach for real-time animated visualization of massive air-traffic data, that implements all stages of the visualization pipeline based on GPU techniques for efficient processing. It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearances, as well as (3) real-time rendering within 3D virtual environments, such as virtual 3D airport and city models. 
Based on this pipeline, different visualization metaphors (e.g., temporal focus+context, density maps, and overview+detail visualization) are implemented and discussed. The presented concepts and implementation can be generally used as visual analytics and data mining techniques in cyber worlds, e.g., to visualize movement data, geo-referenced networks, or other spatio-temporal data. }, keywords = { spatio-temporal visualization, trajectory visualization, 3D visualization, visual analytics, real-time rendering }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of CyberWorlds 2014 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/cw2014_draft.pdf }, isbn = { 978-1-4799-4677-8/14 }, doi = { 10.1109/CW.2014.32 }, sorting = { 1024 } } @incollection{LSHD2017, author = { Limberger, Daniel and Scheibel, Willy and Hahn, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Reducing Visual Complexity in Software Maps using Importance-based Aggregation of Nodes }, year = { 2017 }, abstract = {

Depicting massive software system data using software maps can result in visual clutter and increased cognitive load. This paper introduces an adaptive level-of-detail (LoD) technique that uses scoring for interactive aggregation on a per-node basis. The scoring approximates importance by degree-of-interest measures as well as screen and user-interaction scores. The technique adheres to established aggregation guidelines and was evaluated by means of two user studies. The first user study investigates task completion time in visual search. The second evaluates the readability of the presented nesting level contouring for aggregates. With the adaptive LoD technique software maps allow for multi-resolution depictions of software system information. It facilitates efficient identification of important nodes and allows for additional annotation.

© The Authors 2017. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 8th International Conference on Information Visualization Theory and Applications (IVAPP 2017).
}, affiliation = { Hasso Plattner Institute, University of Potsdam }, series = { IVAPP 2017 }, booktitle = { Proceedings of the 8th International Conference on Information Visualization Theory and Applications }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2017/LSHD2017/LSHD2017.pdf }, sorting = { 1 } } @incollection{SKD10, author = { Semmo, Amir and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Automated Image-Based Abstraction of Aerial Images }, year = { 2010 }, pages = { 359--378 }, month = { 5 }, abstract = {

Aerial images represent a fundamental type of geodata with a broad range of applications in GIS and geovisualization. The perception and cognitive processing of aerial images by the human, however, still is faced with the specific limitations of photorealistic depictions such as low contrast areas, unsharp object borders as well as visual noise. In this paper we present a novel technique to automatically abstract aerial images that enhances visual clarity and generalizes the contents of aerial images to improve their perception and recognition. The technique applies non-photorealistic image processing by smoothing local image regions with low contrast and emphasizing edges in image regions with high contrast. To handle the abstraction of large images, we introduce an image tiling procedure that is optimized for post-processing images on GPUs and avoids visible artifacts across junctions. This is technically achieved by filtering additional connection tiles that overlap the main tiles of the input image. The technique also allows the generation of different levels of abstraction for aerial images by computing a mipmap pyramid, where each of the mipmap levels is filtered with adapted abstraction parameters. These mipmaps can then be used to perform level-of-detail rendering of abstracted aerial images. Finally, the paper contributes a study to aerial image abstraction by analyzing the results of the abstraction process on distinctive visible elements in common aerial image types. In particular, we have identified a high abstraction potential in landscape images and a higher benefit from edge enhancement in urban environments.
}, editor = { Painho, Marco and Santos, Maribel Yasmina and Pundt, Hardy }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Geospatial Thinking }, project = { flowabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/SKD10/asemmo-agile2010.pdf }, doi = { 10.1007/978-3-642-12326-9_19 }, sorting = { 4352 } } @incollection{KKD10, author = { Kyprianidis, Jan Eric and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Anisotropic Kuwahara Filtering on the GPU }, year = { 2010 }, pages = { 247--264 }, url = { http://www.akpeters.com/product.asp?ProdCode=4728 }, editor = { W. Engel }, publisher = { AK Peters }, booktitle = { GPU Pro – Advanced Rendering Techniques }, project = { gpuakf }, sorting = { 4096 } } @incollection{KD09, author = { Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Image Abstraction by Directed Filtering }, year = { 2009 }, pages = { 285--302 }, abstract = { In this chapter we present a framework of automatic image processing techniques that create high-quality, simplified, stylistic illustrations from color images, videos, and 3D renderings. Our method extends the approach of [Winnemöller06] to use iterated bilateral filtering for abstraction and difference-of-Gaussians (DoG) for edge detection. We present enhancements to these techniques to improve the quality of the output by adapting them to the local orientation of the input. }, editor = { W. Engel }, publisher = { Charles River Media }, booktitle = { ShaderX7 - Advanced Rendering Techniques }, project = { flowabs }, sorting = { 5632 } } @incollection{TD08c, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Generalization of Single-Center Projections Using Projection Tile Screens }, year = { 2008 }, editor = { Jos\'e Braz and Alpesh Kumar Ranchordas and João Madeiras Pereira and H\'elder J. 
Ara\'ujo }, publisher = { Springer }, series = { Communications in Computer and Information Science (CCIS) }, booktitle = { Advances in Computer Graphics and Computer Vision (VISIGRAPP) }, project = { NFG }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=Y6SBylq5SFA }, sorting = { 128 } } @incollection{WKDLB05, author = { Wood, Jo and Kirschenbauer, Sabine and D{\"o}llner, J{\"u}rgen and Lopes, Adriano and Bodum, Lars }, title = { Using 3D in Visualization }, year = { 2005 }, pages = { 295--312 }, editor = { Dykes, J. and MacEachren, A.M. and Kraak, M.J. }, publisher = { Elsevier Amsterdam }, chapter = { 14 }, booktitle = { Exploring Geovisualization }, sorting = { 1 } } @incollection{DB05a, author = { D{\"o}llner, J{\"u}rgen and Baumann, Konstantin }, title = { Gel{\"a}ndetexturen als Mittel f{\"u}r die Pr{\"a}sentation, Exploration und Analyse komplexer r{\"a}umlicher Informationen in 3D-GIS }, year = { 2005 }, pages = { 217--230 }, editor = { A. Zipf and V. Coors }, publisher = { Wichmann Verlag }, booktitle = { 3D-Geoinformationssysteme }, sorting = { 2 } } @incollection{Do05, author = { D{\"o}llner, J{\"u}rgen }, title = { Geo-Visualization and Real-Time Computer Graphics }, year = { 2005 }, pages = { 325--344 }, editor = { J. Dykes and A.M. MacEachren and M. J. Kraak }, publisher = { Elsevier Amsterdam }, chapter = { 16 }, booktitle = { Exploring Geovisualization }, sorting = { 4 } } @incollection{ND05a, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Blueprint Rendering and Sketchy Drawings }, year = { 2005 }, pages = { 235--252 }, editor = { M. Pharr }, publisher = { Addison-Wesley Professional }, booktitle = { GPU Gems II: Programming Techniques for High Performance Graphics and General-Purpose Computation }, sorting = { 8 } } @incollection{Do03a, author = { D{\"o}llner, J{\"u}rgen }, title = { Virtuelle 3D-Kartenmodelle }, year = { 2003 }, pages = { 77--88 }, editor = { H. 
Asche and C. Herrmann }, publisher = { Wichmann Verlag }, booktitle = { Web.Mapping 2. Telekartographie, Geovisualisierung und mobile Dienste }, sorting = { 256 } } @incollection{Do03, author = { D{\"o}llner, J{\"u}rgen }, title = { Informationsvisualisierung raumbezogener Inhalte }, year = { 2003 }, pages = { 65--74 }, editor = { Ch. Herrman }, publisher = { FH Karlsruhe }, booktitle = { Festschrift 25 Jahre Kartographie und Geomatik in Karlsruhe }, sorting = { 512 } } @incollection{Do00b, author = { D{\"o}llner, J{\"u}rgen }, title = { Objektorientierte kartographische Visualisierung }, year = { 2000 }, pages = { 61--82 }, editor = { G. Buziek and D. Dransch and W.-D. Rase }, publisher = { Springer-Verlag }, booktitle = { Dynamische Visualisierung. Grundlagen mit Anwendungsbeispielen aus der Kartographie }, sorting = { 128 } } @incollection{DH98a, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Support of Explicit Time and Event Flows in the Object-Oriented Visualization Toolkit MAM/VRS }, year = { 1998 }, pages = { 153--166 }, editor = { H.-C. Hege and K. Polthier }, publisher = { Springer Verlag }, booktitle = { Mathematical Visualization. Algorithms, Applications and Numerics }, sorting = { 2304 } } @inproceedings{Buschmann2012a, author = { Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Challenges and Approaches for the Visualization of Movement Trajectories in 3D Geovirtual Environments }, year = { 2012 }, abstract = { The visualization of trajectories and their attributes represents an essential functionality for spatio-temporal data visualization and analysis. Many visualization methods, however, focus mainly on sparse 2D movements or consider only the 2D components of movements. This paper is concerned with true 3D movement data, i.e., movements that take place in the three-dimensional space and which characteristics significantly depend on all dimensions. 
In this case, spatio-temporal visualization approaches need to map all three spatial dimensions together with required mappings for associated attributes. We describe visualization approaches for true 3D movement data and evaluate their application within 3D geovirtual environments. We also identify challenges and propose approaches for the interactive visualization of 3D movement data using 3D geovirtual environments as scenery. }, keywords = { spatio-temporal data, trajectories, interactive 3D visualization, visual analytics }, booktitle = { GIScience workshop on GeoVisual Analytics, Time to Focus on Time }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/BTD2012/geovat2012_paper.pdf }, sorting = { 1024 } } @inproceedings{HBD2017, author = { Hahn, Sebastian and Bethge, Joseph and D{\"o}llner, J{\"u}rgen }, title = { Relative Direction Change: A Topology-based Metric for Layout Stability in Treemaps }, year = { 2017 }, month = { 2 }, abstract = {

This paper presents a topology-based metric for layout stability in treemaps—the Relative Direction Change (RDC). The presented metric considers the adjacency and arrangement of single shapes in a treemap, and allows for a rotation-invariant description of layout changes between two snapshots of a dataset depicted with treemaps. A user study was conducted that shows the applicability of the Relative Direction Change in comparison and addition to established layout metrics, such as Average Distance Change (ADC) and Average Aspect Ratio (AAR), with respect to human perception of treemaps. This work contributes to the establishment of a more precise model for the replicable and reliable comparison of treemap layout algorithms.

© The Authors 2017. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 8th International Conference on Information Visualization Theory and Applications (IVAPP 2017).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Layout Stability, Treemaps, Evaluation }, booktitle = { Proceedings of the 8th International Conference of Information Visualization Theory and Applications (IVAPP 2017) }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2017/HBD2017/rdc_ivapp2017.pdf }, sorting = { 2 } } @inproceedings{SDTKDP2016, author = { Semmo, Amir and D{\"u}rschmid, Tobias and Trapp, Matthias and Klingbeil, Mandy and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Image Filtering with Multiple Levels-of-Control on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = {
With the continuous development of mobile graphics hardware, interactive high-quality image stylization based on nonlinear filtering is becoming feasible and increasingly used in casual creativity apps. However, these apps often only serve high-level controls to parameterize image filters and generally lack support for low-level (artistic) control, thus automating art creation rather than assisting it. This work presents a GPU-based framework that enables to parameterize image filters at three levels of control: (1) presets followed by (2) global parameter adjustments can be interactively refined by (3) complementary on-screen painting that operates within the filters' parameter spaces for local adjustments. The framework provides a modular XML-based effect scheme to effectively build complex image processing chains-using these interactive filters as building blocks-that can be efficiently processed on mobile devices. Thereby, global and local parameterizations are directed with higher-level algorithmic support to ease the interactive editing process, which is demonstrated by state-of-the-art stylization effects, such as oil paint filtering and watercolor rendering.
}, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDTKDP2016/asemmo-mgia2016-authors-version.pdf }, doi = { 10.1145/2999508.2999521 }, sorting = { 768 } } @inproceedings{PSDS2016, author = { Pasewaldt, Sebastian and Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Artistic Image Processing and Editing on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = { BeCasso is a mobile app that enables users to transform photos into high-quality, high-resolution non-photorealistic renditions, such as oil and watercolor paintings, cartoons, and colored pencil drawings, which are inspired by real-world paintings or drawing techniques. In contrast to neuronal network and physically-based approaches, the app employs state-of-the-art nonlinear image filtering. For example, oil paint and cartoon effects are based on smoothed structure information to interactively synthesize renderings with soft color transitions. BeCasso empowers users to easily create aesthetic renderings by implementing a two-fold strategy: First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Thereby, users can obtain initial renditions that may be fine-tuned afterwards. Second, it enables local style adjustments: using on-screen painting metaphors, users are able to locally adjust different stylization features, e.g., to vary the level of abstraction, pen, brush and stroke direction or the contour lines. In this way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications (Demo) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/PSDS2016/mgia-demo2016_authors_version.pdf }, doi = { 10.1145/2999508.2999518 }, sorting = { 512 }, state = { 1 } } @inproceedings{SDS2016, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Image Stylization by Interactive Oil Paint Filtering on Mobile Devices }, year = { 2016 }, month = { 7 }, abstract = { BeCasso is a mobile app that enables users to transform photos into an oil paint look that is inspired by traditional painting elements. In contrast to stroke-based approaches, the app uses state-of-the-art nonlinear image filtering techniques based on smoothed structure information to interactively synthesize oil paint renderings with soft color transitions. BeCasso empowers users to easily create aesthetic oil paint renderings by implementing a two-fold strategy. First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Second, it introduces a novel interaction approach that operates within the parameter spaces of the stylization effect to facilitate creative control over the visual output: on-screen painting enables users to locally adjust the appearance in image regions, e.g., to vary the level of abstraction, brush and stroke direction. This way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Appy Hour }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDS2016/asemmo-siggraph2016-appyhour.pdf }, doi = { 10.1145/2936744.2936750 }, sorting = { 1792 } } @inproceedings{STHD2016, author = { Schoedon, Alexander and Trapp, Matthias and Hollburg, Henning and Döllner, Jürgen }, title = { Interactive Web-based Visualization for Accessibility Mapping of Transportation Networks }, year = { 2016 }, month = { 6 }, abstract = { Accessibility is a fundamental aspect in transportation, routing, and spare-time activity planning concerning traveling in modern cities. In this context, interactive web-based accessibility-map visualization techniques and systems are important tools for provisioning, exploration, analysis, and assessment of multi-modal and location-based travel time data and routing information. To enable their effective application, such interactive visualization techniques demands for flexible mappings with respect to user-adjustable parameters such as maximum travel times, the types of transportation used, or used color schemes. However, traditional approaches for web-based visualization of accessibility-maps do not allow this degree of parametrization without significant latencies introduced by required data processing and transmission between the routing server and the visualization client. This paper presents a novel web-based visualization technique that allows for efficient client-side mapping and rendering of accessibility data onto transportation networks using WebGL and the OpenGL transmission format. A performance evaluation and comparison shows the superior performance of the approach over alternative implementations. 
}, booktitle = { Proceedings of EuroVis 2016 - Short Papers }, project = { NFGII;MOBIE }, sorting = { 2304 } } @inproceedings{LFHTD, author = { Limberger, Daniel and Fiedler, Carolin and Hahn, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluation of Sketchiness as a Visual Variable for 2.5D Treemaps }, year = { 2016 }, month = { 5 }, abstract = {

Interactive 2.5D treemaps serve as an effective tool for the visualization of attributed hierarchies, enabling exploration of non-spatial, multi-variate, hierarchical data. In this paper the suitability of sketchiness as a visual variable, e.g., for uncertainty, is evaluated. Therefore, a design space for sketchy rendering in 2.5D and integration details for real-time applications are presented. The results of three user studies indicate, that sketchiness is a promising candidate for a visual variable that can be used independently and in addition to others, e.g., color and height.

© The Authors 2016. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 20th International Conference on Information Visualization (IV'16).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Analytics, 2.5D Treemaps, Sketchiness, Visual Variables, Uncertainty }, booktitle = { Proceedings of the 20th International Conference of Information Visualization (IV'16) }, project = { HPI;NFGII }, sorting = { 2048 } } @inproceedings{STDDP2016, author = { Semmo, Amir and Trapp, Matthias and D{\"u}rschmid, Tobias and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Multi-scale Oil Paint Filtering on Mobile Devices }, year = { 2016 }, abstract = {
This work presents an interactive mobile implementation of a filter that transforms images into an oil paint look. At this, a multi-scale approach that processes image pyramids is introduced that uses flow-based joint bilateral upsampling to achieve deliberate levels of abstraction at multiple scales and interactive frame rates. The approach facilitates the implementation of interactive tools that adjust the appearance of filtering effects at run-time, which is demonstrated by an on-screen painting interface for per-pixel parameterization that fosters the casual creativity of non-artists.
}, booktitle = { Proceedings ACM SIGGRAPH Posters }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STDDP2016/asemmo-siggraph2016-poster.pdf }, doi = { 10.1145/2945078.2945120 }, sorting = { 1536 } } @inproceedings{STPD2016, author = { Semmo, Amir and Trapp, Matthias and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Oil Paint Filtering On Mobile Devices }, year = { 2016 }, abstract = {
Image stylization enjoys a growing popularity on mobile devices to foster casual creativity. However, the implementation and provision of high-quality image filters for artistic rendering is still faced by the inherent limitations of mobile graphics hardware such as computing power and memory resources. This work presents a mobile implementation of a filter that transforms images into an oil paint look, thereby highlighting concepts and techniques on how to perform multi-stage nonlinear image filtering on mobile devices. The proposed implementation is based on OpenGL ES and the OpenGL ES shading language, and supports on-screen painting to interactively adjust the appearance in local image regions, e.g., to vary the level of abstraction, brush, and stroke direction. Evaluations of the implementation indicate interactive performance and results that are of similar aesthetic quality to its original desktop variant.
}, booktitle = { Expressive 2016 - Posters, Artworks, and Bridging Papers }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STPD2016/asemmo-exressive2016-poster.pdf }, doi = { 10.2312/exp.20161255 }, sorting = { 1280 } } @inproceedings{LSLD2016, author = { Limberger, Daniel and Scheibel, Willy and Lemme, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Dynamic 2.5D Treemaps using Declarative 3D on the Web }, year = { 2016 }, pages = { 33--36 }, abstract = { The 2.5D treemap represents a general purpose visualization technique to map multi-variate hierarchical data in a scalable, interactive, and consistent way used in a number of application fields. In this paper, we explore the capabilities of Declarative 3D for the web-based implementation of 2.5D treemap clients. Particularly, we investigate how X3DOM and XML3D can be used to implement clients with equivalent features that interactively display 2.5D treemaps with dynamic mapping of attributes. We also show a first step towards a glTF-based implementation. These approaches are benchmarked focusing on their interaction capabilities with respect to rendering and speed of dynamic data mapping. We discuss the results for our representative example of a complex 3D interactive visualization technique and summarize recommendations for improvements towards operational web clients. }, keywords = { 2.5D treemap, Dec3D, X3DOM, XML3D, glTF }, booktitle = { Proceedings of the 21st International Conference on Web3D Technology }, project = { HPI;NFGII }, isbn = { 978-1-4503-4428-9 }, doi = { 10.1145/2945292.2945313 }, sorting = { 1024 } } @inproceedings{VTD2016, author = { Vollmer, Jan Ole and Trapp, Matthias and Döllner, Jürgen }, title = { Interactive GPU-based Image Deformation for Mobile Devices }, year = { 2016 }, abstract = { Interactive image deformation is an important feature of modern image processing pipelines. 
It is often used to create caricatures and animation for input images, especially photos. State-of-the-art image deformation techniques are based on transforming vertices of a mesh, which is textured by the input image, using affine transformations such as translation, and scaling. However, the resulting visual quality of the output image depends on the geometric resolution of the mesh. Performing these transformations on the CPU often further inhibits performance and quality. This is especially problematic on mobile devices where the limited computational power reduces the maximum achievable quality. To overcome these issues, we propose the concept of an intermediate deformation buffer that stores deformation information at a resolution independent of the mesh resolution. This allows the combination of a high-resolution buffer with a low-resolution mesh for interactive preview, as well as a high-resolution mesh to export the final image. Further, we present a fully GPU-based implementation of this concept, taking advantage of modern OpenGL ES features, such as compute shaders. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { image deformation, image warping }, publisher = { The Eurographics Association }, booktitle = { Computer Graphics and Visual Computing (CGVC) }, isbn = { 978-3-03868-022-2 }, doi = { 10.2312/cgvc.20161303 }, sorting = { 3072 } } @inproceedings{DRD2016, author = { Discher, Sören and Richter, Rico and Döllner, Jürgen }, title = { Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds }, year = { 2016 }, booktitle = { Advances in 3D Geoinformation }, project = { NFGII }, sorting = { 1408 } } @inproceedings{STD2016, author = { Scheibel, Willy and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interactive Revision Exploration using Small Multiples of Software Maps }, year = { 2016 }, pages = { 131-138 }, abstract = { To explore and to compare different revisions of complex software systems is a challenging task as it requires to constantly switch between different revisions and the corresponding information visualization. This paper proposes to combine the concept of small multiples and focus+context techniques for software maps to facilitate the comparison of multiple software map themes and revisions simultaneously on a single screen. This approach reduces the amount of switches and helps to preserve the mental map of the user. Given a software project the small multiples are based on a common dataset but are specialized by specific revisions and themes. The small multiples are arranged in a matrix where rows and columns represents different themes and revisions, respectively. To ensure scalability of the visualization technique we also discuss two rendering pipelines to ensure interactive frame-rates. The capabilities of the proposed visualization technique are demonstrated in a collaborative exploration setting using a high-resolution, multi-touch display. 
}, affiliation = { Hasso Plattner Institute, University of Potsdam }, keywords = { Software visualization, visual analytics, software maps, small multiples, interactive visualization techniques }, series = { IVAPP 2016 }, booktitle = { 7th International Conference on Information Visualization Theory and Applications }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples_ivapp2016-short.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples-poster-landscape.pdf }, sorting = { 2816 } } @inproceedings{HTWD15, author = { Hahn, Sebastian and Trapp, Matthias and Wuttke, Nikolai and D{\"o}llner, J{\"u}rgen }, title = { ThreadCity: Combined Visualization of Structure and Activity for the Exploration of Multi-threaded Software Systems }, year = { 2015 }, month = { 7 }, abstract = {

This paper presents a novel visualization technique for the interactive exploration of multi-threaded software systems. It combines the visualization of static system structure based on the EvoStreets approach with an additional traffic metaphor to communicate the runtime characteristics of multiple threads simultaneously. To improve visual scalability with respect to the visualization of complex software systems, we further present an effective level-of-detail visualization based on hierarchical aggregation of system components by taking viewing parameters into account. We demonstrate our technique by means of a prototypical implementation and compare our result with existing visualization techniques.

© The Authors 2015. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 19th International Conference on Information Visualization (IV'15).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Software Analytics, Trace-Visualization, Multi-threaded Software Systems }, booktitle = { Proceedings of the 19th International Conference of Information Visualization (IV'15) }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/HTWD2015/ThreadCity.pdf }, sorting = { 2048 } } @inproceedings{SLKD15, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Oil Paint Filtering using Color Palettes }, year = { 2015 }, pages = { 149--158 }, month = { 6 }, abstract = {
This paper presents an approach for transforming images into an oil paint look. To this end, a color quantization scheme is proposed that performs feature-aware recolorization using the dominant colors of the input image. In addition, an approach for real-time computation of paint textures is presented that builds on the smoothed structure adapted to the main feature contours of the quantized image. Our stylization technique leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting.

© The Authors 2015. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'15).
}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SLKD2015/asemmo-cae2015-authors-version.pdf }, doi = { 10.2312/exp.20151188 }, sorting = { 2304 } } @inproceedings{RDD2014, author = { Richter, Rico and Discher, Sören and Döllner, Jürgen }, title = { Out-of-Core Visualization of Classified 3D Point Clouds }, year = { 2015 }, pages = { 227-242 }, abstract = { 3D point clouds represent an essential category of geodata used in a variety of geoinformation applications and systems. We present a novel, interactive out-of-core rendering technique for massive 3D point clouds based on a layered, multi-resolution kd-tree, whereby point-based rendering techniques are selected according to each point's classification (e.g., vegetation, buildings, terrain). The classification-dependent rendering leads to an improved visual representation, enhances recognition of objects within 3D point cloud depictions, and facilitates visual filtering and highlighting. To interactively explore objects, structures, and relations represented by 3D point clouds, our technique provides efficient means for an instantaneous, ad-hoc visualization compared to approaches that visualize 3D point clouds by deriving mesh-based 3D models. We have evaluated our approach for massive laser scan datasets of urban areas. The results show the scalability of the technique and how different configurations allow for designing task and domain-specific analysis and inspection tools.
© The Authors 2014. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in 3D Geoinformation Science: The Selected Papers of the 3D GeoInfo 2014 by Springer International Publishing. http://dx.doi.org/10.1007/978-3-319-12181-9.
}, keywords = { 3D point clouds, LiDAR, visualization, point-based rendering }, publisher = { Cham: Springer International Publishing }, booktitle = { 3D Geoinformation Science: The Selected Papers of the 3D GeoInfo 2014 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/3DGeoInfo2014/richter_discher_doeelner_3dgeoinfo2014_draft.pdf }, isbn = { 978-3-319-12180-2 }, doi = { 10.1007/978-3-319-12181-9 }, sorting = { 768 } } @inproceedings{TSD2015, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering and Stylization of Transportation Networks Using Distance Fields }, year = { 2015 }, pages = { 207-219 }, abstract = {
Transportation networks, such as streets, railroads or metro systems, constitute primary elements in cartography for reckoning and navigation. In recent years, they have become an increasingly important part of 3D virtual environments for the interactive analysis and communication of complex hierarchical information, for example in routing, logistics optimization, and disaster management. A variety of rendering techniques have been proposed that deal with integrating transportation networks within these environments, but have so far neglected the many challenges of an interactive design process to adapt their spatial and thematic granularity (i.e., level-of-detail and level-of-abstraction) according to a user's context. This paper presents an efficient real-time rendering technique for the view-dependent rendering of geometrically complex transportation networks within 3D virtual environments. Our technique is based on distance fields using deferred texturing that shifts the design process to the shading stage for real-time stylization. We demonstrate and discuss our approach by means of street networks using cartographic design principles for context-aware stylization, including view-dependent scaling for clutter reduction, contour-lining to provide figure-ground, handling of street crossings via shading-based blending, and task-dependent colorization. Finally, we present potential usage scenarios and applications together with a performance evaluation of our implementation.
}, keywords = { transportation networks, 3D visualization, image-based rendering, distance fields, shading, map design }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TSD2015/streets.pdf }, sorting = { 3072 } } @inproceedings{TD2015, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Geometry Batching Using Texture-Arrays }, year = { 2015 }, pages = { 239-246 }, abstract = {
High-quality rendering of 3D virtual environments typically depends on high-quality 3D models with significant geometric complexity and texture data. One major bottleneck for real-time image-synthesis represents the number of state changes, which a specific rendering API has to perform. To improve performance, batching can be used to group and sort geometric primitives into batches to reduce the number of required state changes, whereas the size of the batches determines the number of required draw-calls, and therefore, is critical for rendering performance. For example, in the case of texture atlases, which provide an approach for efficient texture management, the batch size is limited by the efficiency of the texture-packing algorithm and the texture resolution itself. This paper presents a pre-processing approach and rendering technique that overcomes these limitations by further grouping textures or texture atlases and thus enables the creation of larger geometry batches. It is based on texture arrays in combination with an additional indexing schema that is evaluated at run-time using shader programs. This type of texture management is especially suitable for real-time rendering of large-scale texture-rich 3D virtual environments, such as virtual city and landscape models.
}, keywords = { Batching, Texture-array Processing, Real-time Rendering. }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TD2015/TextureStacks.pdf }, sorting = { 3328 } } @inproceedings{MTD2015, author = { Meier, Benjamin-Heinz and Trapp, Matthias and Döllner, Jürgen }, title = { VideoMR: A Map and Reduce Framework for Real-time Video Processing }, year = { 2015 }, abstract = { This paper presents VideoMR: a novel map and reduce framework for real-time video processing on graphic processing units (GPUs). Using the advantages of implicit parallelism and bounded memory allocation, our approach enables developers to focus on implementing video operations without taking care of GPU memory handling or the details of code parallelization. Therefore, a new concept for map and reduce is introduced, redefining both operations to fit to the specific requirements of video processing. A prototypical implementation using OpenGL facilitates various operating platforms, including mobile development, and will be widely interoperable with other state-of-the-art video processing frameworks. 
}, url = { file:195536 }, booktitle = { International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/MTD2015/wscgVideoMR.pdf }, sorting = { 1792 } } @inproceedings{BTD2015, author = { Buschmann, Stefan and Trapp, Matthias and Döllner, Jürgen }, title = { Real-Time Visualization of Massive Movement Data in Digital Landscapes }, year = { 2015 }, pages = { 213-220 }, abstract = { Due to continuing advances in sensor technology and increasing availability of digital infrastructure that allows for acquisition, transfer, and storage of big data sets, large amounts of movement data (e.g., road, naval, or air-traffic) become available. In the near future, movement data such as traffic data may even be available in real-time. In a growing number of application fields (e.g., landscape planning and design, urban development, and infrastructure planning), movement data enables new analysis and simulation applications. In this paper, we present an interactive technique for visualizing massive 3D movement trajectories. It is based on mapping massive movement data to graphics primitives and their visual variables in real-time, supporting a number of visualization schemes such as sphere, line, or tube-based trajectories, including animations of direction and speed. This generic technique enhances the functionality of VR and interactive 3D systems using virtual environments such as digital landscape models, city models, or virtual globes by adding support for this important category of spatio-temporal data. 
}, booktitle = { 16th Conference on Digital Landscape Architecture (DLA 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/BTD2015/dla2015-draft.pdf }, sorting = { 1536 } } @inproceedings{ORD2015, author = { Oehlke, Christoph and Richter, Rico and Döllner, Jürgen }, title = { Automatic Detection and Large-Scale Visualization of Trees for Digital Landscapes and City Models based on 3D Point Clouds }, year = { 2015 }, pages = { 151-160 }, booktitle = { 16th Conference on Digital Landscape Architecture (DLA 2015) }, project = { NFGII }, sorting = { 1280 } } @inproceedings{DRD2015, author = { Discher, Sören }, title = { Echtzeit-Rendering-Techniken für 3D-Punktwolken basierend auf semantischen und topologischen Attributen }, year = { 2015 }, url = { file:201365 }, booktitle = { Shortlist Karl Kraus Young Scientists Award 2015, 35. Wissenschaftlich-Technische Jahrestagung der DGPF }, sorting = { 1024 } } @inproceedings{WTLD2015, author = { Würfel, Hannes and Trapp, Matthias and Limberger, Daniel and Döllner, Jürgen }, title = { Natural Phenomena as Metaphors for Visualization of Trend Data in Interactive Software Maps }, year = { 2015 }, abstract = { Software maps are a commonly used tool for code quality monitoring in software-development projects and decision making processes. While providing an important visualization technique for the hierarchical system structure of a single software revision, they lack capabilities with respect to the visualization of changes over multiple revisions. This paper presents a novel technique for visualizing the evolution of the software system structure based on software metric trends. These trend maps extend software maps by using real-time rendering techniques for natural phenomena yielding additional visual variables that can be effectively used for the communication of changes. Therefore, trend data is automatically computed by hierarchically aggregating software metrics. 
We demonstrate and discuss the presented technique using two real world data sets of complex software systems. }, url = { file:195534 }, booktitle = { Computer Graphics and Visual Computing (CGVC) }, organization = { The Eurographics Association }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/WTLD2015/natural-metaphors-cgvc2015-final.pdf }, doi = { 10.2312/cgvc.20151246 }, sorting = { 768 } } @inproceedings{SD2014_3, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { An Interaction Framework for Level-of-Abstraction Visualization of 3D Geovirtual Environments }, year = { 2014 }, pages = { 43--49 }, month = { 11 }, abstract = {
3D geovirtual environments constitute effective media for the analysis and communication of complex geospatial data. Today, these environments are often visualized using static graphical variants (e.g., 2D maps, 3D photorealistic) from which a user is able to choose. To serve the different interests of users in specific information, however, the spatial and thematic granularity at which model contents are represented (i.e., level of abstraction) should be dynamically adapted to the user's context, which requires specialized interaction techniques for parameterization. In this work, we present a framework that enables interaction interfaces to parameterize the level-of-abstraction visualization according to spatial, semantic, and thematic data. The framework is implemented in a visualization system that provides image-based rendering techniques for context-aware abstraction and highlighting. Using touch and natural language interfaces, we demonstrate its versatile application to geospatial tasks, including exploration, navigation, and orientation.
}, booktitle = { Proceedings 2nd ACM SIGSPATIAL Workshop on MapInteraction }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_3/asemmo-mapinteract2014-authors-version.pdf }, doi = { 10.1145/2677068.2677072 }, sorting = { 256 } } @inproceedings{DRST2014, author = { D{\"u}bel, Steve and R{\"o}hlig, Martin and Schumann, Heidrun and Trapp, Matthias }, title = { 2D and 3D Presentation of Spatial Data: A Systematic Review }, year = { 2014 }, month = { 11 }, abstract = { The question whether to use 2D or 3D for data visualization is generally difficult to decide. Two-dimensional and three-dimensional visualization techniques exhibit different advantages and disadvantages related to various perceptual and technical aspects such as occlusion, clutter, distortion, or scalability. To facilitate problem understanding and comparison of existing visualization techniques with regard to these aspects, this report introduces a systematization based on presentation characteristics. It enables a categorization with respect to combinations of static 2D and 3D presentations of attributes and their spatial reference. Further, it complements ex-isting systematizations of data in an effort to formalize a common terminology and theoretical framework for this problem domain. We demonstrate our approach by reviewing different visualization techniques of spatial data according to the presented systematization. }, url = { file:195535 }, booktitle = { IEEE VIS International Workshop on 3DVis }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/DRST2014/survey2d3d.pdf }, sorting = { 64 } } @inproceedings{SD14, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Image Filtering for Interactive Level-of-Abstraction Visualization of 3D Scenes }, year = { 2014 }, pages = { 5--14 }, month = { 8 }, abstract = {

Texture mapping is a key technology in computer graphics for visual design of rendered 3D scenes. An effective information transfer of surface properties, encoded by textures, however, depends significantly on how important information is highlighted and cognitively processed by the user in an application context. Edge-preserving image filtering is a promising approach to address this concern while preserving global salient structures. Much research has focused on applying image filters in a post-process stage to foster an artistically stylized rendering, but these approaches are generally not able to preserve depth cues important for 3D visualization (e.g., texture gradient). To this end, filtering that processes texture data coherently with respect to linear perspective and spatial relationships is required. In this work, we present a system that enables to process textured 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping, and (2) for each mipmap level separately to enable a progressive level of abstraction. We demonstrate the potentials of our methods on several applications, including illustrative visualization, focus+context visualization, geometric detail removal, and depth of field. Our system supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering.

© The Authors 2014. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'14). http://dx.doi.org/10.1145/2630099.2630101.
}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-authors-version.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-additional_material.pdf }, doi = { 10.1145/2630099.2630101 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=Vqh3OQbWPpI }, sorting = { 1536 } } @inproceedings{KHTD2014, author = { Klimke, Jan and Hagedorn, Benjamin and Trapp, Matthias and Döllner, Jürgen }, title = { Web-based and Mobile Provisioning of Virtual 3D Reconstructions }, year = { 2014 }, pages = { 17--28 }, month = { 5 }, abstract = { Communication of cultural heritage by means of digital information systems has been gaining more and more importance over recent years. Interactive virtual 3D applications enable users to explore 3D virtual reconstructions in real-time, to directly interact with the contained digital cultural heritage artifacts, and to obtain insights into this data. Nevertheless, these artifacts are usually very detailed and complex 3D models that are hard to handle for end-user systems. This paper presents the concept and a prototypical implementation of an image-based, web-based approach for the communication of digital cultural heritage and its provisioning for the Web and mobile devices by the example of the project Colonia3D – a high-detail, virtual reconstruction and high-detail 3D city model of Roman Cologne. Through this web-based and mobile provisioning, complex digital reconstructions can be used, e.g., on-site to match local findings and reconstructions. }, editor = { R. Franken-Wendelstorf and E. Lindinger and J. Sieck }, publisher = { Werner Hülsbusch Verlag }, chapter = { 2 }, booktitle = { Tagungsband der 12. 
Konferenz Kultur und Informatik: Reality and Virtuality }, files = { fileadmin/user_upload/fachgebiete/doellner/People/jklimke/Web-based-and-Mobile-Provisioning-of-Virtual-3D-Reconstructions.pdf }, isbn = { 978-3-86488-064-3 }, sorting = { 2048 } } @inproceedings{STMD2014, author = { Hahn, Sebastian and Tr{\"u}mper, Jonas and Moritz, Dominik and D{\"o}llner, J{\"u}rgen }, title = { Visualization of Varying Hierarchies by Stable Layout of Voronoi Treemaps }, year = { 2014 }, pages = { 50-58 }, month = { 1 }, abstract = { Space-restricted techniques for visualizing hierarchies generally achieve high scalability and readability (e.g., tree maps, bundle views, sunburst). However, the visualization layout directly depends on the hierarchy, that is, small changes to the hierarchy can cause wide-ranging changes to the layout. For this reason, it is difficult to use these techniques to compare similar variants of a hierarchy because users are confronted with layouts that do not expose the expected similarity. Voronoi treemaps appear to be promising candidates to overcome this limitation. However, existing Voronoi treemap algorithms do not provide deterministic layouts or assume a fixed hierarchy. In this paper we present an extended layout algorithm for Voronoi treemaps that provides a high degree of layout similiarity for varying hierarchies, such as software-system hierarchies. The implementation uses a deterministic initial-distribution approach that reduces the variation in node positioning even if changes in the underlying hierarchy data occur. Compared to existing layout algorithms, our algorithm achieves lower error rates with respect to node areas in the case of weighted Voronoi diagrams, which we show in a comparative study. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Hierarchical Visualization, Voronoi Treemaps, Stable Layout, Changing Hierarchies }, publisher = { SCITEPRESS – Science and Technology Publications }, booktitle = { Proceedings of the 5th International Conference on Information Visualization Theory and Applications (IVAPP 2014) }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/STMD2014/Visualization_of_Varying_Hierarchies_by_Stable_Layout_of_Voronoi_Treemaps.pdf }, sorting = { 2816 } } @inproceedings{BTLD2014, author = { Buschmann, Stefan and Trapp, Matthias and L{\"u}hne, Patrick and D{\"o}llner, J{\"u}rgen }, title = { Hardware-Accelerated Attribute Mapping for Interactive Visualization of Complex 3D Trajectories }, year = { 2014 }, pages = { 355--363 }, month = { 1 }, abstract = { The visualization of 3D trajectories of moving objects and related attributes in 3D virtual environments represents a fundamental functionality in various visualization domains. Interactive rendering and visual analytics of such attributed trajectories involve both conceptual questions as well as technical challenges. Specifically, the mapping of trajectory attributes to rendering primitives and appearance represents a challenging task in the case of large data sets of high geometric complexity. There are various visualization approaches and rendering techniques considering specific aspects of these mappings to facilitate visualization and analysis of this kind of data. To solve the underlying general mapping problem efficiently, we developed an approach that uses and combines diverse types of visualizations, rather than being tailored to a specific use case. This paper describes an interactive rendering system for the visualization of 3D trajectories that enables the combinations of different mappings as well as their dynamic configuration at runtime. 
A fully hardware-accelerated implementation enables the processing of large sets of attributed 3D trajectories in real-time. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D Attributed Trajectories, Real-time Rendering, Attribute Mapping }, publisher = { SCITEPRESS – Science and Technology Publications }, booktitle = { Proceedings of the 5th International Conference on Information Visualization Theory and Applications (IVAPP 2014) }, project = { NFGII }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/appearance-mapping.pdf }, sorting = { 2560 } } @inproceedings{SD2014_2, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Oil Paint Filtering Using Color Palettes For Colorization }, year = { 2014 }, abstract = {
We present a novel technique for oil paint filtering that uses color palettes for colorization. First, dominant feature-aware colors are derived from the input image via entropy-based metrics. Seed pixels are then determined and propagated to the remaining pixels by adopting the optimization framework of Levin et al. [2004] for feature-aware colorization. Finally, the quantized output is combined with flow-based highlights and contour lines to simulate paint texture. Our technique leads to homogeneous outputs in the color domain and enables interactive control over color definitions.
}, booktitle = { Expressive Poster Session }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_2/expressive_2014_semmo.pdf }, sorting = { 512 } } @inproceedings{LD2014, author = { Limberger, Daniel and D{\"o}llner, J{\"u}rgen }, title = { Painting Per-Pixel Parametrization for Interactive Image Filtering }, year = { 2014 }, abstract = {
We present a photo-editing method that enables per-pixel parameter manipulation of image filtering by means of interactive painting. Predefined as well as custom image filters are exposed to the user, as a parametrizable composition of image operations. Brushes, as sequences of actions mapping user inputs (in terms of brush shape, flow, pressure, etc.) to arbitrary functions or convolution operators, are used to draw within the parameter space. Our tool demonstrates that interactive painting can be used to, e.g., locally tweak inadequate parametrization and, furthermore, provides a blueprint for an open, collaborative photo-editing platform.
}, booktitle = { Expressive Poster Session }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/LD2014/expressive_2014_limberger.pdf }, sorting = { 128 } } @inproceedings{DRD2014, author = { Discher, Sören and Richter, Rico and Döllner, Jürgen }, title = { Konzepte für eine Service-basierte Systemarchitektur zur Integration, Prozessierung und Analyse von massiven 3D-Punktwolken }, year = { 2014 }, abstract = { Die Nutzung von hoch aufgelösten, räumlich überlappenden und multitemporalen 3D-Punktwolken im Kontext von Geoinformationssystemen stellt hohe Anforderungen an die Leistungsfähigkeit der zugrundeliegenden Software- und Hardwaresysteme. Um angesichts eines weiter zunehmenden Datenaufkommens ein effizientes und wirtschaftliches Arbeiten mit solchen Daten zu ermöglichen, schlagen wir die Nutzung einer service-basierten Software- und Geodateninfrastruktur vor, die eine Erfassung, Aktualisierung und Bereitstellung von 3D-Punktwolken im Sinne eines kontinuierlichen Prozesses ermöglicht. In diesem Beitrag erläutern wir die grundlegenden Anforderungen und den konzeptionellen Aufbau einer entsprechenden Infrastruktur, die unter anderem die bedarfsgerechte Bereitstellung ausgewählter Bereiche einer 3D Punktwolke anhand von semantischen oder temporalen Attributen unterstützt. }, booktitle = { Tagungsbände der 34. Wissenschaftlich-Technischen Jahrestagung der DGPF }, project = { NFGII }, sorting = { 2304 } } @inproceedings{KTD2013, author = { Benjamin Karran and Jonas Trümper and Jürgen Döllner }, title = { SyncTrace: Visual Thread-Interplay Analysis }, year = { 2013 }, pages = { 10 }, month = { 9 }, abstract = { In software comprehension, program traces are important to gain insight into certain aspects of concurrent runtime behavior, e.g., thread-interplay. 
Here, key tasks are finding usages of blocking operations, such as synchronization and I/O operations, assessing temporal order of such operations, and analyzing their effects. This is a hard task for large and complex program traces due to their size and number of threads involved. In this paper, we present SyncTrace, a new visualization technique based on (bended) activity diagrams and edge bundles that allows for parallel analysis of multiple threads and their inter-thread correspondences. We demonstrate how the technique, implemented as a tool, can be applied on real-world trace datasets to support understanding concurrent behavior. }, keywords = { trace analysis, software visualization, program comprehension, software maintenance, visualization }, publisher = { IEEE Computer Society }, booktitle = { Proceedings (electronic) of the 1st Working Conference on Software Visualization (VISSOFT) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/KTD2013/synctrace_preprint.pdf }, doi = { 10.1109/VISSOFT.2013.6650534 }, link1 = { Video (YouTube) http://youtu.be/rTQlyVMre_w }, sorting = { 1024 } } @inproceedings{LTSD13, author = { Lux, Roland and Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Projective Texturing for Non-Photorealistic Shading of Technical 3D Models }, year = { 2013 }, pages = { 101--108 }, month = { 9 }, abstract = { This paper presents a novel interactive rendering technique for creating and editing shadings for man-made objects in technical 3D visualizations. In contrast to shading approaches that use intensities computed based on surface normals (e.g., Phong, Gooch, Toon shading), the presented approach uses one-dimensional gradient textures, which can be parametrized and interactively manipulated based on per-object bounding volume approximations. 
The fully hardware-accelerated rendering technique is based on projective texture mapping and customizable intensity transfer functions. A provided performance evaluation shows comparable results to traditional normal-based shading approaches. The work also introduces simple direct-manipulation metaphors that enable interactive user control of the gradient texture alignment and intensity transfer functions. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Silvester Czanner, Wen Tang }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { NFGII }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=PmBTK8TbpPA }, sorting = { 768 } } @inproceedings{PTD2013, author = { Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective Detail+Overview Visualization for 3D Building Exploration }, year = { 2013 }, pages = { 57--64 }, month = { 9 }, abstract = { This paper presents a multi-perspective rendering technique that enables detail+overview visualization and interactive exploration of virtual 3D building models. Virtual 3D building models, as main elements of virtual 3D city models, are used in a growing number of application domains, such as geoanalysis, disaster management and architectural planning. Visualization systems for such building models often rely on perspective or orthogonal projections using a single viewpoint. Therefore, the exploration of a complete model requires a user to change the viewpoint multiple times and to memorize the content of each view to obtain a comprehensive mental model. Since this is usually a time-consuming task, which implies context switching, current visualization systems use multiple viewports to simultaneously depict an object from different perspectives. 
Our approach extends the idea of multiple viewports by combining two linked views for the interactive exploration of virtual 3D building models and their facades. In contrast to traditional approaches, we automatically generate a multi-perspective view that simultaneously depicts all facades of the building in one overview image. This facilitates the process of obtaining overviews and supports fast and direct navigation to various points-of-interest. We describe the concept and implementations of our Multiple-Center-of-Projection camera model for real-time multi-perspective image synthesis. Further, we provide insights into different interaction techniques for linked multi-perspective views and outline approaches of future work. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Silvester Czanner, Wen Tang }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { HPI; NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/PTD2013/PTD2013.pdf }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=Ywo4gpx0rE8&feature=share&list=UURf7yK_n8IfSBtpWh8uP0mA }, sorting = { 512 } } @inproceedings{ESTD2013, author = { Engel, Juri and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluating the Perceptual Impact of Rendering Techniques on Thematic Color Mappings in 3D Virtual Environments }, year = { 2013 }, pages = { 25--32 }, month = { 9 }, abstract = {

Using colors for thematic mapping is a fundamental approach in visualization, and has become essential for 3D virtual environments to effectively communicate multidimensional, thematic information. Preserving depth cues within these environments to emphasize spatial relations between geospatial features remains an important issue. A variety of rendering techniques have been developed to preserve depth cues in 3D information visualization, including shading, global illumination, and image stylization. However, these techniques alter color values, which may lead to ambiguity in a color mapping and loss of information. Depending on the applied rendering techniques and color mapping, this loss should be reduced while still preserving depth cues when communicating thematic information. This paper presents the results of a quantitative and qualitative user study that evaluates the impact of rendering techniques on information and spatial perception when using visualization of thematic data in 3D virtual environments. We report the results of this study with respect to four perception-related tasks, showing significant differences in error rate and task completion time for different rendering techniques and color mappings.
}, editor = { Michael Bronstein, Jean Favre, and Kai Hormann }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 18th International Workshop on Vision, Modeling and Visualization (VMV 2013) }, project = { NFGII }, doi = { 10.2312/PE.VMV.VMV13.025-032 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/jengel-vmv2013-authors-version-hq.pdf }, link2 = { User Study Raw Data (ZIP) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/user_study_raw_data_txt.zip }, sorting = { 128 } } @inproceedings{SKTD13, author = { Semmo, Amir and Kyprianidis, Jan Eric and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Rendering of Water Surfaces with Cartography-Oriented Design }, year = { 2013 }, pages = { 5--14 }, month = { 7 }, abstract = {

More than 70% of the Earth's surface is covered by oceans, seas, and lakes, making water surfaces one of the primary elements in geospatial visualization. Traditional approaches in computer graphics simulate and animate water surfaces in the most realistic ways. However, to improve orientation, navigation, and analysis tasks within 3D virtual environments, these surfaces need to be carefully designed to enhance shape perception and land-water distinction. We present an interactive system that renders water surfaces with cartography-oriented design using the conventions of mapmakers. Our approach is based on the observation that hand-drawn maps utilize and align texture features to shorelines with non-linear distance to improve figure-ground perception and express motion. To obtain local orientation and principal curvature directions, first, our system computes distance and feature-aligned distance maps. Given these maps, waterlining, water stippling, contour-hatching, and labeling are applied in real-time with spatial and temporal coherence. The presented methods can be useful for map exploration, landscaping, urban planning, and disaster management, which is demonstrated by various real-world virtual 3D city and landscape models.

© ACM, 2013. This is the authors' version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'13). http://dx.doi.org/10.1145/2487276.2487277.
}, series = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-authors-version-hq.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-slides.pdf }, doi = { 10.1145/2487276.2487277 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=DFjjcMRWWoE }, sorting = { 1536 } } @inproceedings{TDT2013, author = { Jonas Trümper and Jürgen Döllner and Alexandru Telea }, title = { Multiscale Visual Comparison of Execution Traces }, year = { 2013 }, pages = { 53-62 }, month = { 5 }, abstract = { Understanding the execution of programs by means of program traces is a key strategy in software comprehension. An important task in this context is comparing two traces in order to find similarities and differences in terms of executed code, execution order, and execution duration. For large and complex program traces, this is a difficult task due to the cardinality of the trace data. In this paper, we propose a new visualization method based on icicle plots and edge bundles. We address visual scalability by several multiscale visualization metaphors, which help users navigating from the main differences between two traces to intermediate structural-difference levels, and, finally fine-grained function call levels. We show how our approach, implemented in a tool called TraceDiff, is applicable in several scenarios for trace difference comprehension on real-world trace datasets. 
}, keywords = { trace analysis, software visualization, program comprehension, software maintenance, visualization }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the International Conference on Program Comprehension }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/TDT2013/truemper2013_tracediff_preprint.pdf }, link1 = { Video (YouTube) http://youtu.be/9x2mOsf_fjU }, sorting = { 2304 } } @inproceedings{LWTD2013, author = { Limberger, Daniel and Wasty, Benjamin and Jonas Trümper and Döllner, Jürgen }, title = { Interactive Software Maps for Web-Based Source Code Analysis }, year = { 2013 }, pages = { 8 }, month = { 5 }, abstract = { Software maps -- linking rectangular 3D-Treemaps, software system structure, and performance indicators -- are commonly used to support informed decision making in software-engineering processes. A key aspect for this decision making is that software maps provide the structural context required for correct interpretation of these performance indicators. In parallel, source code repositories and collaboration platforms are an integral part of today's software-engineering tool set, but cannot properly incorporate software maps since implementations are only available as stand-alone applications. Hence, software maps are 'disconnected' from the main body of this tool set, rendering their use and provisioning overly complicated, which is one of the main reasons against regular use. We thus present a web-based rendering system for software maps that achieves both fast client-side page load time and interactive frame rates even with large software maps. We significantly reduce page load time by efficiently encoding hierarchy and geometry data for the net transport. Apart from that, appropriate interaction, layouting, and labeling techniques as well as common image enhancements aid evaluation of project-related quality aspects. 
Metrics provisioning can further be implemented by predefined attribute mappings to simplify communication of project specific quality aspects. The system is integrated into dashboards to demonstrate how our web-based approach makes software maps more accessible to many different stakeholders in software-engineering projects. }, publisher = { ACM }, booktitle = { In Proceedings of the International Web3D Conference }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/LWTD2013/web3d2013-treemaps-limberger.pdf }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=AaHJRVQ3Z1E }, sorting = { 2048 } } @inproceedings{TSD2013, author = { Trapp, Matthias and Hahn, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering of Complex 3D-Treemaps }, year = { 2013 }, pages = { 165-175 }, month = { 2 }, abstract = { 3D-Treemaps are an important visualization technique for hierarchical views. In contrast to 2D-Treemaps, height can be used to map one additional attribute of the data items. Using the Treemap technique in combination with large datasets (more than 500k) a fast rendering and interaction techniques that are beyond collapsing/uncollapsing nodes is still one of the main challenges. This paper presents a novel rendering technique that enables the image synthesis of geometrical complex 3D-Treemaps in real-time. The fully hardware accelerated approach is based on shape generation using geometry shaders. This approach offers increased rendering performance and low update latency compared to existing techniques and through it enables new real-time interaction techniques to large datasets. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D-treemaps, real-time rendering, performance evaluation }, editor = { Sabine Coquillart, Carlos Andujar, Robert S. 
Laramee, Andreas Kerren and José Braz }, publisher = { SCITEPRESS – Science and Technology Publications }, booktitle = { Proceedings of the 8th International Conference on Computer Graphics Theory and Applications (GRAPP 2013) }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/TSD2013/TreeMap.pdf }, isbn = { 978-989-8565-46-4 }, link1 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1702377-rendering-complex-3d-tree-maps-grapp-2013/ }, sorting = { 2816 } } @inproceedings{Buschmann2013b, author = { Buschmann, Stefan and Nocke, Thomas and Tominski, Christian and Döllner, Jürgen }, title = { Towards Visualizing Geo-Referenced Climate Networks }, year = { 2013 }, abstract = { In the last few years, network construction from climate data has developed to a promising analysis method. We discuss challenges for the interactive visual analysis of such geo-referenced networks and present first ideas for addressing the challenges. In particular, we present visualizations of 3D geo-spatial networks and of large networks within cartographic contexts. }, booktitle = { Proceedings of Workshop GeoViz Hamburg 2013 }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/BNTD13/geoviz2013_paper.pdf }, sorting = { 1280 } } @inproceedings{Pasewaldt2012a, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Towards Comprehensible Digital 3D Maps }, year = { 2012 }, pages = { 261-276 }, month = { 11 }, abstract = { Digital mapping services have become fundamental tools in economy and society to provide domain experts and non-experts with customized, multi-layered map contents. 
In particular because of the continuous advancements in the acquisition, provision, and visualization of virtual 3D city and landscape models, 3D mapping services, today, represent key components to a growing number of applications, like car navigation, education, or disaster management. However, current systems and applications providing digital 3D maps are faced by drawbacks and limitations, such as occlusion, visual clutter, or insufficient use of screen space, that impact an effective comprehension of geoinformation. To this end, cartographers and computer graphics engineers developed design guidelines, rendering and visualization techniques that aim to increase the effectiveness and expressiveness of digital 3D maps, but whose seamless combination has yet to be achieved. This work discusses potentials of digital 3D maps that are based on combining cartography-oriented rendering techniques and multi-perspective views. For this purpose, a classification of cartographic design principles, visualization techniques, as well as suitable combinations are identified that aid comprehension of digital 3D maps. According to this classification, a prototypical implementation demonstrates the benefits of multi-perspective and non-photorealistic rendering techniques for visualization of 3D map contents. In particular, it enables (1) a seamless combination of cartography-oriented and photorealistic graphic styles while (2) increasing screen-space utilization, and (3) simultaneously directing a viewer’s gaze to important or prioritized information. 
}, editor = { Markus Jobst }, publisher = { Jobstmedia Management Verlag, Wien }, chapter = { 4 }, booktitle = { Service-Oriented Mapping 2012 (SOMAP2012) }, organization = { International Cartographic Association }, project = { NFGII;HPI }, language = { English }, isbn = { 3-9502039-2-3 }, link1 = { Slides http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/somap2012_pasewaldt_towards_comprehensible_3D_maps.pdf }, link2 = { Paper http://www.hpi.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/PSTD_2012_SOMAP.pdf }, sorting = { 32 } } @inproceedings{KHD2012, author = { Klimke, Jan and Hagedorn, Benjamin and Döllner, Jürgen }, title = { A Service-Oriented Platform for Interactive 3D Web Mapping }, year = { 2012 }, month = { 11 }, abstract = { Design, implementation, and operation of interactive 3D map services are faced with a large number of challenges including (a) processing and integration of massive amounts of heterogeneous and distributed 2D and 3D geodata such as terrain models, buildings models, and thematic georeferenced data, (b) assembling, styling, and rendering 3D map contents according to application requirements and design principles, and (c) interactive provisioning of created 3D maps on mobile devices and thin clients as well as their integration as third-party components into domain-specific web and information systems. This paper discusses concept and implementation of a service-oriented platform that addresses these major requirements of 3D web mapping systems. It is based on a separation of concerns for data management, 3D rendering, application logic, and user interaction. The main idea is to divide 3D rendering process into two stages. 
In the first stage, at the server side, we construct an image-based, omni-directional approximation of the 3D scene by means of multi-layered virtual 3D panoramas; in the second stage, at the client side, we interactively reconstruct the 3D scene based on the panorama. We demonstrate the prototype implementation for real-time 3D rendering service and related iOS 3D client applications. In our case study, we show how to interactively visualize a complex, large-scale 3D city model based on our service-oriented platform. }, keywords = { Service-oriented 3D Mapping, 3D Map Creation, 3D Map Delivery, 3D Map Styling, 3D City Models }, booktitle = { Proceedings of the Symposium of Service-oriented Mapping }, project = { HPI }, sorting = { 8 } } @inproceedings{MED12, author = { Limberger, Daniel and Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Single-Pass Rendering of Day and Night Sky Phenomena }, year = { 2012 }, pages = { 55-62 }, month = { 11 }, abstract = { This paper presents astronomical based rendering of skies as seen from low altitudes on earth, in respect to location, date, and time. The technique allows to compose an atmosphere with sun, multiple cloud layers, moon, bright stars, and Milky Way, into a holistic sky with unprecedented high level of detail and diversity. GPU generated, viewpoint-aligned billboards are used to render stars with approximated color, brightness, and scintillations. A similar approach is used to synthesize the moon considering lunar phase, earthshine, shading, and lunar eclipses. Atmosphere and clouds are rendered using existing methods adapted to our needs. Rendering is done in a single pass supporting interactive day-night cycles with low performance impact, and allows for easy integration in existing rendering systems. Results of our approach are compared to related renderings and photos, and the performance impact is discussed. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { Eurographics Association }, booktitle = { Proceedings of the Vision, Modeling, and Visualization Workshop 2012 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/MED12/paper_1056_cr.pdf }, link1 = { https://code.google.com/p/osghimmel/ }, sorting = { 3968 } } @inproceedings{TTD2012, author = { Jonas Trümper and Alexandru Telea and Jürgen Döllner }, title = { ViewFusion: Correlating Structure and Activity Views for Execution Traces }, year = { 2012 }, pages = { 45-52 }, month = { 9 }, abstract = { Visualization of data on structure and related temporal activity supports the analysis of correlations between the two types of data. This is typically done by linked views. This has shortcomings with respect to efficient space usage and makes mapping the effect of user input into one view into the other view difficult. We propose here a novel, space-efficient technique that `fuses' the two information spaces -- structure and activity -- in one view. We base our technique on the idea that user interaction should be simple, yet easy to understand and follow. We apply our technique, implemented in a prototype tool, for the understanding of software engineering datasets, namely static structure and execution traces of the Chromium web browser. 
}, keywords = { visualization, linking, correlation, software, execution traces, program comprehension, correlation, view, fusion, information space, best student application-paper }, publisher = { European Association for Computer Graphics }, note = { Best Application-Paper }, booktitle = { Proceedings of the 10th Theory and Practice of Computer Graphics Conference }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/TTD2012/truemper_tpcg2012_preprint.pdf }, doi = { 10.2312/LocalChapterEvents/TPCG/TPCG12/045-052 }, link1 = { Definitive version http://diglib.eg.org/EG/DL/LocalChapterEvents/TPCG/TPCG12/045-052.pdf.abstract.pdf;internal&action=action.digitallibrary.ShowPaperAbstract }, link2 = { Video (Youtube) http://youtu.be/czhXPtt-Eoo }, link3 = { Project page at University of Groningen, NL http://www.cs.rug.nl/svcg/SoftVis/ViewFusion }, sorting = { 512 } } @inproceedings{DHK2012, author = { Döllner, Jürgen and Hagedorn, Benjamin and Klimke, Jan }, title = { Server-Based Rendering of Large 3D Scenes for Mobile Devices Using G-Buffer Cube Maps }, year = { 2012 }, pages = { 97--100 }, month = { 8 }, abstract = { Large virtual 3D scenes play a major role in growing number of applications, systems, and technologies to effectively communicate complex spatial information. Their web-based provision, in particular on mobile devices, represents a key challenge for system and application development. In contrast to approaches based on streaming 3D scene data to clients, our approach splits 3D rendering into two processes: A server process is responsible for realtime rendering of virtual panoramas, represented by G-buffer cube maps, for a requested camera setting. The client reconstruction process uses these cube maps to reconstruct the 3D scene and allows users to operate on and interact with that representation. 
The key properties of this approach include that (a) the complexity of transmitted data not depend on the 3D scene’s complexity; (b) 3D rendering can take place within a controlled and a-priori known server environment; (c) crucial 3D model data never leaves the server environment; and (d) the clients can flexibly extend the 3D cube map viewer by adding both local 3D models and specialized 3D operations. }, booktitle = { Web3D '12 Proceedings of the 17th International Conference on 3D Web Technology }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/DHK2012/paper.pdf }, isbn = { 978-1-4503-1432-9 }, doi = { 10.1145/2338714.2338729 }, link1 = { http://dl.acm.org/citation.cfm?id=2338729 }, sorting = { 64 } } @inproceedings{TBD2012, author = { Jonas Trümper and Martin Beck and Jürgen Döllner }, title = { A Visual Analysis Approach to Support Perfective Software Maintenance }, year = { 2012 }, pages = { 308-315 }, month = { 7 }, abstract = { Ensuring code quality represents an essential task in «perfective software maintenance», which aims at keeping future maintenance costs low by facilitating adaptations of complex software systems. For this purpose, changes and related efforts have to be identified that imply high positive impact on future maintainability. In this paper, we propose a novel assessment method that applies visual analysis techniques to combine multiple indicators for low maintainability, including code complexity and entanglement with other parts of the system, and recent changes applied to the code. The approach generally helps to identify modules that impose a high risk by causing increased future maintenance efforts. Consequently, it allows for exploration, analysis, and planning of different preventive measures that, e.g., most likely will have a high return on investment. In our tool implementation, we use circular bundle views, extended by the third dimension in which indicators can be mapped to. 
We have evaluated our approach by conducting a case study based on our tool for a large-scale software system of an industry partner. }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the 16th International Conference on Information Visualisation }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/TBD2012/truemper_iv2012_preprint.pdf }, doi = { 10.1109/IV.2012.59 }, link1 = { Video (Youtube) http://youtu.be/XixapoI2JVI }, link2 = { Definitive version http://dx.doi.org/10.1109/IV.2012.59 }, sorting = { 1792 } } @inproceedings{EPTD12, author = { Engel, Juri and Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { An Immersive Visualization System for Virtual 3D City Models }, year = { 2012 }, month = { 6 }, abstract = { Virtual 3D city models are essential visualization tools for effective communication of complex urban spatial information. Immersive visualization of virtual 3D city models offers an intuitive access to and an effective way of realization of urban spatial information, enabling new collaborative applications and decision-support systems. This paper discusses techniques for and usage of fully immersive environments for visualizing virtual 3D city models by advanced 3D rendering techniques. Fully immersive environments imply a number of specific requirements for both hardware and software, which are discussed in detail. Further, we identify and outline conceptual and technical challenges as well as possible solution approaches by visualization system prototypes for large-scale, fully immersive environments. We evaluate the presented concepts using two application examples and discuss the results. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { IEEE GRSS }, booktitle = { 20th International Conference on Geoinformatics (GEOINFORMATICS), 2012 }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/EPTD12/EPTD12_draft.pdf }, sorting = { 3840 } } @inproceedings{RD12a, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Potentiale von massiven 3D Punktwolkendatenstr{\"o}men }, year = { 2012 }, pages = { 215-222 }, month = { 3 }, abstract = { Dieser Beitrag stellt eine Systemarchitektur zur effizienten Verwaltung, Aufbereitung, Analyse, und Visualisierung von 3D-Punktwolkendaten vor. Die zugrunde liegende Idee ist die, dass zukünftig durch eine große Zahl von Geräten kontinuierlich, aktuell und redundant die räumliche Umgebung in Form von 3D-Punktwolken erfasst werden kann; die dadurch entstehenden massiven 3D-Punktwolkendatenströme erzeugen ein diskretes, vierdimensionales Modell der räumlichen Umgebung. Zum Beispiel können dazu mobile Erfassungssysteme eingesetzt werden, die zukünftig einen festen Bestandteil der Ausstattung von Fahrzeugen darstellen. Wesentliche Herausforderungen liegen in der Integration von Einzeldatensätzen in den Gesamtdatenbestand, der interaktiven 3D-Visualisierung und der effizienten Analyse der Daten. }, publisher = { Shaker Verlag }, booktitle = { Geoinformatik 2012 - „Mobilität und Umwelt“ }, isbn = { 978-3-8440-0888-3 }, sorting = { 3072 } } @inproceedings{BD12, author = { Buschmann, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Dichte- und Distanzanalyse massiver raumzeitlicher Bewegungsdaten }, year = { 2012 }, pages = { 67-74 }, month = { 3 }, abstract = { Dieser Beitrag stellt einen Ansatz zur interaktiven Analyse massiver raumzeitlicher Bewegungsdaten vor. 
Der Ansatz ermöglicht es, auf Bewegungstrajektorien basierende Daten interaktiv zu visualisieren und mit flächenbasierten Verfahren auf Basis von Dichte- und Distanzkarten zu analysieren. Ziel ist es, dadurch unterschiedliche raumzeitliche Phänomene in massiven Bewegungsdaten aufspüren, extrahieren und untersuchen zu können. Diese Verfahren stellen generische Basisfunktionen für die Auswertung massiver raumzeitlicher Bewegungsdaten, z. B. für entscheidungsunterstützende Geoinformationssysteme, bereit. Anwendungsfelder umfassen die Echtzeit-Bewegungsüberwachung und die Auswertung von Bewegungsdaten, z. B. im Straßen-, Schiffs- oder Flugverkehr. }, publisher = { Shaker Verlag }, booktitle = { Geoinformatik 2012 - „Mobilität und Umwelt“ }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/BD12/geoinformatik2012_paper_v03.pdf }, isbn = { 978-3-8440-0888-3 }, sorting = { 2560 } } @inproceedings{ED12, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Immersive Visualisierung von virtuellen 3D-Stadtmodellen und ihr Einsatz in der Stadtplanung }, year = { 2012 }, volume = { 21 }, pages = { 165-172 }, abstract = { Virtuelle 3D-Stadtmodelle ermöglichen die effektive Kommunikation komplexer stadträumlicher Informationen. Immersive Visualisierung von virtuellen 3D-Stadtmodellen bietet einen intuitiven Zugang zu diesen Informationen und eröffnet neue Anwendungsfelder in der Stadtplanung, z. B. bei der Entscheidungsfindung, dem Marketing und der Öffentlichkeitspräsentation von Projekten, Vorgängen oder Konzepten. Immersive Visualisierung impliziert zahlreiche Anforderungen an das Softwaresystem. In diesem Beitrag untersuchen wir die softwaretechnischen Herausforderungen bei der Entwicklung eines solchen Systems und zeigen anhand eines Prototyps zur immersiven Visualisierung von virtuellen 3D-Stadtmodellen wie man diese Herausforderungen bewältigen kann. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { DGPF }, booktitle = { Publikationen der Deutschen Gesellschaft für Photogrammetrie, Fernerkundung und Geoinformation }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/ED12/jengel_dgpf2012_draft.pdf }, issn = { 0942-2870 }, sorting = { 3904 } } @inproceedings{RD12, author = { Richter, Rico and Döllner, Jürgen }, title = { Semantische Klassifizierung von 3D-Punktwolken für Stadtgebiete }, journal = { 121. DVW-Seminar Terrestrisches-Laser-Scanning 2012 }, year = { 2012 }, pages = { 127 - 134 }, abstract = { 3D-Scanner-Technologien und bildbasierte Verfahren ermöglichen die flächen-deckende Erfassung von Städten und Metropolregionen in Form von georefe-renzierten 3D-Punktwolken. Diese Geobasisdaten stellen eine diskrete Oberflä-chenrepräsentation dar und verfügen über punktbezogene Metadaten, die bei der Erfassung generiert werden (z.B. Farbe, Intensität). Zentrale Herausforderungen für Systeme und Anwendungen, die mit diesen Daten arbeiten, sind das massive Datenaufkommen und die daraus resultierenden Verarbeitungszeiten. Die ziel-gerichtete und anwendungsspezifische Prozessierung von Teilmengen ist in der Regel nicht möglich, da die 3D-Punktwolken keine Informationen über zu Grunde liegenden Objektklassen (z.B. Bebauung, Vegetation, Gelände) beinhal-ten. In diesem Beitrag werden Konzepte und Techniken vorgestellt, die eine Klassifizierung von 3D-Punktwolken für ein Stadtgebiet ermöglichen. Durch die Anwendung von Out-of-Core Techniken können 3D Punktwolke mit mehreren Milliarden Punkten klassifiziert und in Objektklassen unterteilt werden. Analyse- und Visualisierungswerkzeuge können mit diesen zusätzlichen Informationen die benötigten Daten reduzieren, Verarbeitungszeiten verringern, Algorithmen optimieren sowie 3D Punktwolken selbst effektiver visualisieren. 
}, publisher = { Wißner-Verlag }, edition = { 1 }, booktitle = { Terrestrisches Laserscanning 2012 (TLS 2012) }, project = { NFGII }, isbn = { 978-3-89639-899-4 }, sorting = { 4 } } @inproceedings{PBADB12, author = { Paredes, E. G. and Boo, M. and Amor, M. and Döllner, J. and Bruguera, J. D. }, title = { GPU-Based Visualization of Hybrid Terrain Models }, year = { 2012 }, abstract = { Hybrid terrain models formed by a large regular mesh refined with detailed local TIN meshes represent an interesting and efficient approach for the representation of complex terrains. However, direct rendering of the component meshes would lead to overlapping geometries and discontinuities around their boundaries. The Hybrid Meshing algorithm solves this problem by generating an adaptive tessellation between the boundaries of the component meshes in real-time. In this paper, we present a highly parallel implementation of this algorithm using the Geometry Shader on the GPU. }, booktitle = { International Conference on Computer Graphics Theory and Applications (GRAPP) }, project = { CGS }, sorting = { 5120 } } @inproceedings{Klimke2012a, author = { Klimke, Jan and Döllner, Jürgen }, title = { Datenintegration in dienstbasierte 3D-Geovisualisierungssysteme für mobile Geräte }, year = { 2012 }, booktitle = { Tagungsbände der 32. Wissenschaftlich-Technischen Jahrestagung der DGPF }, project = { HPI }, sorting = { 4864 }, state = { 1 } } @inproceedings{TVD2012, author = { Jonas Trümper and Stefan Voigt and Jürgen Döllner }, title = { Maintenance of Embedded Systems: Supporting Program Comprehension Using Dynamic Analysis }, year = { 2012 }, pages = { 58--64 }, abstract = { Maintenance of embedded software systems is faced with multiple challenges, including the exploration and analysis of the actual system's runtime behavior. 
As a fundamental technique, tracing can be used to capture data about runtime behavior as a whole, and represents one of the few methods to observe and record data about embedded systems within their production environments. In this paper we present a software-based, function-boundary tracing approach for embedded software systems. It uses static binary instrumentation, which implies only lightweight memory and performance overheads. To further reduce these overheads, instrumentation can be configured per trace, i.e., activated only for a specified group of functions without having to recompile the system. The technique can be characterized by its robust implementation and its versatile usage. It is complemented by a visualization framework that allows for analysis and exploration of a system's runtime behavior, e.g., to examine thread interaction. To show the technique's applicability, we conclude with a case study that has been applied to an industrial embedded software system. }, keywords = { Computerized instrumentation; Performance analysis; Software maintenance; Embedded software }, publisher = { IEEE Computer Society }, booktitle = { In Proceedings of the 2nd International ICSE Workshop on Software Engineering for Embedded Systems (SEES) }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/TVD2012/truemper_sees2012_preprint.pdf }, doi = { 10.1109/SEES.2012.6225492 }, link1 = { Definitive version http://dx.doi.org/10.1109/SEES.2012.6225492 }, sorting = { 3584 } } @inproceedings{TD2012, author = { Jonas Trümper and Jürgen Döllner }, title = { Extending Recommendation Systems with Software Maps }, year = { 2012 }, pages = { 92-96 }, abstract = { In practice, recommendation systems have evolved as helpful tools to facilitate and optimize software engineering processes. Serving both developers and managers, specific recommendation systems address their individual problems. 
Yet, in a number of cases complementing them with other techniques can enhance their use and extend their scope. In this paper, we first discuss different perspectives on software-engineering processes and examples of recommendation systems that support representatives of these perspectives. We then identify how select software-map techniques can extend recommendation systems to facilitate decision making by addressing the perspectives' information and communication needs. }, keywords = { Decision making, Context, Computer aided analysis, Visualization, Recommendation Systems }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the 3rd International ICSE Workshop on Recommendation Systems for Software Engineering (RSSE) }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/TD2012/truemper_rsse2012_preprint.pdf }, doi = { 10.1109/RSSE.2012.6233420 }, link1 = { Definitive version http://dx.doi.org/10.1109/RSSE.2012.6233420 }, sorting = { 3328 } } @inproceedings{KHD12, author = { Klimke, Jan and Hagedorn, Benjamin and D{\"o}llner, J{\"u}rgen }, title = { A Service-Based Concept for Camera Control in 3D Geovirtual Environments }, year = { 2012 }, abstract = { 3D geovirtual environments (3D GeoVEs) such as virtual 3D city models serve as integration platforms for complex geospatial information and facilitate effective use and communication of that information. Recent developments towards standards and service-based, interactive 3D geovisualization systems enable the large-scale distribution of 3D GeoVEs also by thin client applications that work on mobile devices or in web browsers. To construct such systems, 3D portrayal services can be used as building blocks for service-based rendering. Service-based approaches for 3D user interaction, however, have not been formalized and specified to a similar degree. 
In this paper, we present a concept for service-based 3D camera control as a key element of 3D user interaction used to explore and manipulate 3D GeoVEs and their objects. It is based on the decomposition of 3D user interaction functionality into a set of services that can be flexibly combined to build automated, assisting, and application-specific 3D user interaction tools, which fit into service-oriented architectures of GIS and SDI based IT solutions. We discuss 3D camera techniques, categories of 3D camera tasks, and derive a collection of general-purpose 3D interaction services. We also explain how to efficiently compose these services and discuss their impact on the architecture of service-based visualization systems. Furthermore, we outline an example of a distributed 3D geovisualization system that shows how the concepts can be applied to applications based on virtual 3D city models. }, booktitle = { Proceedings of the 7th 3D GeoInfo Conference 2012 }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/KHD12/300878_1_En_6_Chapter_OnlinePDF.pdf }, sorting = { 2816 } } @inproceedings{GTD11, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Automatic Generalization of Virtual 3D Landscape Models }, journal = { Peer Reviewed Proceedings Digital Landscape Architecture 2011: Teaching \& Learning with Digital Methods \& Tools }, year = { 2011 }, pages = { 127--135 }, month = { 5 }, abstract = { This paper discusses concepts for the automatic generalization of virtual 3D landscape models. As complexity, heterogeneity, and diversity of geodata that constitute landscape models are constantly growing, the need for landscape models that generalize their contents to a consistent, coherent level-of-abstraction and information density becomes an essential requirement for applications such as in conceptual landscape design, simulation and analysis, and mobile mapping. 
We discuss concepts of generalization and working principles as well as the concept of level-of-abstraction. We furthermore present three exemplary automated techniques for generalizing 3D landscape models, including a geometric generalization technique that generates discrete iso-surfaces of 3D terrain models in real-time, a geometric generalization technique for site and building models, and a real-time generalization lens technique. }, editor = { Erich Buhmann AND Stephen Ervin AND Dana Tomlin AND Matthias Pietsch }, booktitle = { Proceedings of the annual conference of Digital Landscape Architecture (DLA) }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/GTD11/2011_GlanderTrappDoellner_AutomaticGeneralization.pdf }, sorting = { 2304 } } @inproceedings{TSD11, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Colonia3D }, journal = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, year = { 2011 }, pages = { 201-212 }, month = { 5 }, abstract = { Dieser Beitrag stellt die Ergebnisse des interdisziplinären Projektes Colonia3D - Visualisierung des Römischen Kölns vor. Die digitale 3D Rekonstruktion des antiken Köln ist das Ergebnis eines gemeinsamen Forschungsprojekts des Archäologischen Instituts der Universität zu Köln, der Köln International School of Design (KISD) der Fachhochschule Köln, des Hasso-Plattner Instituts an der Universität Potsdam und des Römisch Germanischen Museums (RGM) Köln. Der Beitrag präsentiert die wesentlichen Konzepte dieses interaktiven, auf Museen ausgerichteten 3D-Informationssystems, beschreibt verschiedene Präsentationsmodi und deren technische Umsetzung. Er diskutiert Vorgehensweisen und Interaktionskonzepte, die den Benutzer während der Erkundung und Bewegung im virtuellen 3D-Stadtmodell unterstützen. 
Weiter werden die Techniken für den Austausch, die Aufbereitung und die Optimierung komplexer 3D-Datensätze beschrieben sowie Potenziale für digitale Museen und Ausstellungen skizziert. Der vorgestellte Ansatz stellt insbesondere eine IT-Lösung für einen vereinfachten, räumlich-kontextintegrierten informellen Wissenszugang zu archäologischer Fachinformation dar. }, publisher = { Werner H{\"u}lsbusch Verlag }, booktitle = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, project = { NFG }, sorting = { 1792 } } @inproceedings{BD2011, author = { Johannes Bohnet and D{\"o}llner, J{\"u}rgen }, title = { Monitoring Code Quality and Development Activity by Software Maps }, year = { 2011 }, pages = { 9-16 }, month = { 5 }, abstract = { Software development projects are difficult to manage, in general, due to the friction between completing system features and, at the same time, obtaining a high degree of code quality to ensure maintainability of the system in the future. A major challenge of this optimization problem is that code quality is less visible to stakeholders in the development process, particularly, to the management. In this paper, we describe an approach for automated software analysis and monitoring of both quality-related code metrics and development activities by means of software maps. A software map represents an adaptive, hierarchical representation of software implementation artifacts such as source code files being organized in a modular hierarchy. The maps can express and combine information about software development, software quality, and system dynamics; they can systematically be specified, automatically generated, and organized by templates. The maps aim at supporting decision-making processes. For example, they facilitate to decide where in the code an increase of quality would be beneficial both for speeding up current development activities and for reducing risks of future maintenance problems. 
Due to their high degree of expressiveness and their instantaneous generation, the maps additionally serve as up-to-date information tools, bridging an essential information gap between management and development, improve awareness, and serve as early risk detection instrument. The software map concept and its tool implementation are evaluated by means of two case studies on large industrially developed software systems. }, booktitle = { Proceedings of the IEEE ACM ICSE Workshop on Managing Technical Debt }, sorting = { 1280 }, priority = { 1 } } @inproceedings{QDFTHJ11, author = { Joachim Quantz and Jürgen Döllner and Rolf Fricke and Robert Tolksdorf and Thomas Hoppe and Ingolf Jung }, title = { DigiPolis - An Approach for Interactive 3D Building \& Interior Models as Communication Tools }, year = { 2011 }, month = { 5 }, abstract = { This paper gives an overview over the research project DigiPolis, which develops a framework integrating 3D models of buildings, including interior models, with semantic annotations and intuitive, touch-based interaction and navigation. Results obtained in the first year of the project range from touch-based interaction in 3D building and interior models over ontology browser to interactive 3D annotations. The current DigiPolis showcase comprises a 3D model of the Berlin Hauptbahnhof, Berlin’s central station, embedded in a generic virtual 3D Berlin model and Open Street Map. Application areas for DigiPolis technology include visitor information, controlling \& monitoring, event management and facility management as well as all IT solutions that require a sophisticated, interactive 3D front end for complex, detailed 3D geospatial and related georeferenced information. 
}, editor = { Jürgen Sieck }, publisher = { Verlag Werner Hülsbusch }, series = { Kultur und Informatik }, booktitle = { Multimediale Systeme }, sorting = { 128 } } @inproceedings{STD11, author = { Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Ansätze zur kartographischen Gestaltung von 3D-Stadtmodellen }, journal = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, year = { 2011 }, volume = { 20 }, pages = { 473-482 }, month = { 4 }, abstract = { Interaktive virtuelle 3D-Stadtmodelle haben sich zu einem bewährten Medium für die effektive und effiziente Kommunikation von Geoinformation entwickelt. Sie präsentieren eine spezialisierte Form geovirtueller Umgebungen und sind gekennzeichnet durch ein zugrunde liegendes 3D-Geländemodell, einer darin befindlichen 3D-Bebauung sowie des dazu komplementären Straßen-, Grünflächen- und Naturraumes. 3D-Stadtmodell-Systeme ermöglichen es dem Nutzer, sich im Modell interaktiv zu bewegen und sie stellen die Grundfunktionen für die Exploration, Analyse, Präsentation und das Editieren der raumbezogenen Information bereit. Besonders im Gebiet der kartenähnlichen und kartenverwandten 3D-Darstellungen stellen u.a. automatische Verfahren und Techniken zur Stilisierung und Abstraktion von Objekten eines 3D Stadtmodell ein Hauptproblem für die interaktive 3D-Bildsynthese dar. Hier spielt insbesondere die Abstraktion und Illustration potentiell wichtiger Information und somit die Reduzierung der kognitiven Belastung des Nutzers eine tragende Rolle. Diesbezüglich sind Verfahren und Techniken zur nicht-photorealistischen Bildsynthese ein bewährtes Mittel der Computergrafik, deren direkte Anwendung auf ein komplettes 3D-Stadtmodell jedoch häufig monotone sowie gestalterisch und kartographisch stark eingeschränkte Resultate liefert. Eine effiziente und kontextsensitive Kommunikation von 3D-Geoinformation bedarf jedoch der Kopplung von Objektsemantik und Abstraktionsverfahren. 
Diese Arbeit präsentiert ein Konzept und dessen Umsetzung, das die Auswahl und Parametrisierung von nicht-photorealistischen Darstellungstechniken auf Basis von Objektsemantiken erlaubt (Abbildung 1). Dies ermöglicht die Zuweisung unterschiedlicher automatischer Abstraktionstechniken zu Objekten und Objektgruppen. Der vorgestellte Ansatz ist echtzeitfähig und erlaubt eine interaktive Klassifikation von Objekten und Features zur Laufzeit, wodurch sich u.a. Szenarien zur interaktiven Exploration von thematisch-stilisierten Features bzw. feature-bezogenen Daten visualisieren lassen. Dieser Ansatz eröffnet Möglichkeiten für eine gezielte und systematische kartographische Gestaltung von 3D-Stadtmodellen sowie deren echtzeitfähige Implementierung durch entsprechende 3D-Visualisierungsdienste. }, publisher = { Landesvermessung und Geobasisinformation Brandenburg }, series = { Publikationen der Deutschen Gesellschaft f{\"u}r Photogrammetrie, Fernerkundung und Geoinformation e.V. }, booktitle = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, project = { NFG }, sorting = { 2048 } } @inproceedings{RD11a, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Ein Ansatz für die Differenzanalyse zwischen 3D-Punktwolken und 3D-Referenzgeometrie }, journal = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, year = { 2011 }, pages = { 463 - 471 }, month = { 4 }, abstract = { Dieser Beitrag stellt ein Verfahren zur Detektion, Visualisierung und Analyse von Differenzen zwischen massiven 3D-Punktwolken und explizit spezifizierten 3D-Referenz-Geometrien vor. Das Verfahren basiert darauf 3D-Punktwolkendaten mit Abstandsinformationen zu attributieren. Die Berechnung dieser Abstandsinformationen wird in einem Vorverarbeitungsschritt durchgeführt und kann auf beliebig große 3D-Punktwolken angewendet werden. Für die Visualisierung der Ergebnisse wird eine echtzeitfähige, punktbasierte 3D-Rendering-Technik verwendet. 
Mit diesem Verfahren können insbesondere interaktive 3D-Explorations- und Analysewerkzeuge entwickelt werden, die z. B. Aussagen über Veränderungen von Gebäuden, Bauwerken und Infrastruktureinrichtungen bzw. die Abweichungen zwischen Planungsmodell und Ist-Zustand sichtbar werden lassen. }, booktitle = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, project = { NFG }, sorting = { 768 } } @inproceedings{CK11, author = { Collomosse, John and Kyprianidis, Jan Eric }, title = { Artistic Stylization of Images and Video }, year = { 2011 }, abstract = { The half-day tutorial provides an introduction to Non-Photorealistic Rendering (NPR), targeted at both students and experienced researchers of Computer Graphics who have not previously explored NPR in their work. The tutorial focuses on two-dimensional (2D) NPR, specifically the transformation of photos or videos into synthetic artwork (e.g. paintings or cartoons). Consequently the course will touch not only on computer graphics topics, but also on the image processing and computer vision techniques that drive such algorithms. However the latter concepts will be introduced gently and no prior knowledge is assumed beyond a working knowledge of filtering and convolution operations. Some elements of the course will touch upon GPU implementation, but GPU concepts will be described at a high level of abstraction without need for detailed working knowledge of GPU programming. }, booktitle = { Tutorial at Eurographics }, link1 = { http://kahlan.eps.surrey.ac.uk/EG2011/ }, sorting = { 256 } } @inproceedings{Klimke2011b, author = { Klimke, Jan and Hildebrandt, Dieter and Hagedorn, Benjamin and Döllner, Jürgen }, title = { Integrating 3D Data in Service-based Visualization Systems }, year = { 2011 }, abstract = { Georeferenced data is available from a wide range of sources, e.g., Directory Services, Sensor Observation Services, Web Feature Services or even proprietary interfaces. 
Many of the data originating from an Internet of things will be threedimensional representing outdoor as well as indoor geographic features and their properties. Based on this data, its integration, and its visualization totally new applications and systems could be designed and implemented supporting various applications domains. Recent work in the area of service-based 3D visualization enables high-quality visualization of complex 3D geodata, e.g., 3D city models and 3D indoor building models, on thin clients as well as mobile devices such as smartphones and tablets. This work uses a service-based, image-based visualization approach that decouples the server-side resource-intensive management and rendering of complex, massive 3D geodata from client-side display functionalities: A Web View Service provides image representations of a 3D scene; these images, which can contain different types of information per pixel, are transmitted to a client application that can reconstruct a 3D representation of this scene. – In this talk, we will describe how to combine 3D geodata originating from the Internet of Things with this service-based approach in a way that allows for the interactive exploration of and interaction with 3D worlds and objects of interest. In detail, this 3D geodata can be integrated into the visualization process a) at the rendering stage of a portrayal service, b) through an image post processing step or c) in the client application itself. Moreover, this data can be visually represented directly by modifying the appearance of existing features, e.g., for visualizing measurements, or indirectly by introducing additional objects, e.g., icons, into the 3D scene. We will discuss advantages and disadvantages of these different approaches for implementing visualization applications using live geodata sources. 
}, publisher = { ACM }, booktitle = { COM.Geo '11 Proceedings of the 2nd International Conference on Computing for Geospatial Research & Applications }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/KD2012a/Abstract_ComGeoWorkshop_klimke.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2012/KD2012a/Jan_Klimke_Integrating_3D_Data_in_Service-based.pdf }, isbn = { 978-1-4503-0681-2 }, doi = { 10.1145/1999320.1999395 }, sorting = { 64 } } @inproceedings{Kyp11, author = { Kyprianidis, Jan Eric }, title = { Image and Video Abstraction by Multi-scale Anisotropic Kuwahara Filtering }, year = { 2011 }, abstract = { The anisotropic Kuwahara filter is an edge-preserving filter that is especially useful for creating stylized abstractions from images or videos. It is based on a generalization of the Kuwahara filter that is adapted to the local structure of image features. In this work, two limitations of the anisotropic Kuwahara filter are addressed. First, it is shown that by adding thresholding to the weighting term computation of the sectors, artifacts are avoided and smooth results in noise-corrupted regions are achieved. Second, a multi-scale computation scheme is proposed that simultaneously propagates local orientation estimates and filtering results up a low-pass filtered pyramid. This allows for a strong abstraction effect and avoids artifacts in large low-contrast regions. The propagation is controlled by the local variances and anisotropies that are derived during the computation without extra overhead, resulting in a highly efficient scheme that is particularly suitable for real-time processing on a GPU. }, booktitle = { Proc. 
9th Symposium on Non-Photorealistic Animation and Rendering (NPAR) }, project = { gpuakf }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/Kyp11/jkyprian-npar2011.pdf }, sorting = { 32 } } @inproceedings{LTD2011, author = { Christine Lehmann and Jonas Trümper and Jürgen Döllner }, title = { Interactive Areal Annotations for 3D Treemaps of Large-Scale Software Systems }, year = { 2011 }, abstract = { Exploration of large-scale software systems typically poses a challenge to human mind and perception. Among other approaches to this challenge, visualizing such tree-structured data using treemaps is a common solution. Especially three-dimensional treemaps enable intuitive exploration through a large-scale software system using the landscape metaphor for navigation. Annotations of treemap nodes contribute essential semantic information, e.g., class or method names. However, textual annotations in three-dimensional environments typically suffer from ambiguousness, illegibility and instability. In this paper, we propose an interactive labeling algorithm suitable for 3D areal annotation of large-scale software systems that are visualized using three-dimensional treemaps. We demonstrate how the algorithm generates an unambiguous and stable layout with respect to legibility using 3D treemaps of a software visualization tool that visualizes the hierarchical structure of a large software system, e.g., Google Chromium. 
}, keywords = { treemaps, labeling, software visualization }, booktitle = { Proceedings (CD-ROM) of the Workshop on Geovisualization }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/LTD2011/clehmann_geoviz2011.pdf }, sorting = { 6 }, priority = { 1 } } @inproceedings{BTD2011, author = { Martin Beck and Jonas Trümper and Jürgen Döllner }, title = { A Visual Analysis and Design Tool for Planning Software Reengineerings }, year = { 2011 }, pages = { 54-61 }, abstract = { Reengineering complex software systems represents a non-trivial process. As a fundamental technique in software engineering, reengineering includes (a) reverse engineering the as-is system design, (b) identifying a set of transformations to the design, and (c) applying these transformations. While methods a) and c) are widely supported by existing tools, identifying possible transformations to improve architectural quality is not well supported and, therefore, becomes increasingly complex in aged and large software systems. In this paper we present a novel visual analysis and design tool to support software architects during reengineering tasks in identifying a given software's design and in visually planning quality-improving changes to its design. The tool eases estimating effort and change impact of a planned reengineering. A prototype implementation shows the proposed technique's feasibility. Three case studies conducted on industrial software systems demonstrate usage and scalability of our approach. 
}, keywords = { software, visualization, reengineering, what-if analysis }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the 6th IEEE International Workshop on Visualizing Software for Understanding and Analysis }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/BTD2011/beck_vissoft2011_preprint.pdf }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=4rkgfRaCS20&context=C36131dcADOEgsToPDskISqJ8lSK3-CBkmMZEdHKPa }, link2 = { Definitive version http://dx.doi.org/10.1109/VISSOF.2011.6069458 }, sorting = { 1 }, priority = { 1 } } @inproceedings{TBPD10, author = { Trapp, Matthias and Beesk, Christian and Pasewaldt, Sebastian and Döllner, Jürgen }, title = { Interactive Rendering Techniques for Highlighting in 3D Geovirtual Environments }, year = { 2010 }, month = { 11 }, abstract = { 3D geovirtual environments (GeoVE), such as virtual 3D city and landscape models became an important tool for the visualization of geospatial information. Highlighting is an important component within a visualization framework and is essential for the user interaction within many applications. It enables the user to easily perceive active or selected objects in the context of the current interaction task. With respect to 3D GeoVE, it has a number of applications, such as the visualization of user selections, data base queries, as well as navigation aid by highlighting way points, routes, or to guide the user attention. The geometrical complexity of 3D GeoVE often requires specialized rendering techniques for the real-time image synthesis. This paper presents a framework that unifies various highlighting techniques and is especially suitable for the interactive rendering 3D GeoVE of high geometrical complexity. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBPD10/Highlighting.pdf }, publisher = { Springer }, series = { Lecture Notes in Geoinformation & Cartography }, booktitle = { Proceedings of the 5th 3D GeoInfo Conference }, project = { NFG;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBPD10/Highlighting.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-629065-interactive-rendering-techniques-for-highlighting/ }, sorting = { 1280 } } @inproceedings{TSPHDEH10, author = { Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and Döllner, Jürgen and Eichhorn, Michael and Heinzelmann, Michael }, title = { Communication of Digital Cultural Heritage in Public Spaces by the Example of Roman Cologne }, year = { 2010 }, pages = { 262-276 }, month = { 11 }, abstract = { The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artefacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. 
It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Best-Paper-Award }, editor = { M. Ioannides }, publisher = { Springer-Verlag Berlin Heidelberg }, series = { Lecture Notes in Computer Science (LNCS) }, booktitle = { Digital Heritage, Proceedings of 3rd EuroMed Conference }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TSPHDEH10/EuroMed2010Coloniad3D_CRC_HQ.pdf }, issn = { 0302-9743 }, link1 = { Paper (Google Books) http://books.google.de/books?id=lLGWMJc_s24C&lpg=PA262&ots=gFwW_7fmJI&dq=Communication%20of%20Digital%20Cultural%20Heritage%20in%20Public%20Spaces%20by%20the%20Example%20of%20Roman%20Cologne&pg=PA250#v=onepage&q&f=false }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=HoC_mmy51CE }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-645625-colonia3d/ }, sorting = { 1024 } } @inproceedings{TBD10, author = { Jonas Trümper and Johannes Bohnet and Jürgen Döllner }, title = { Understanding Complex Multithreaded Software Systems by Using Trace Visualization }, year = { 2010 }, pages = { 133-142 }, month = { 10 }, abstract = { Understanding multithreaded software systems is typically a tedious task: Due to parallel execution and interactions between multiple threads, such a system's runtime behavior is often much more complex than the behavior of a single-threaded system. For many maintenance activities, system understanding is a prerequisite. Hence, tasks such as bug fixing or performance optimization are highly demanding in the case of multithreaded systems. Unfortunately, state-of-the-art tools for system understanding and debuggers provide only limited support for these systems. 
We present a dynamic analysis and visualization technique that helps developers in understanding multithreaded software systems in general and in identifying performance bottlenecks in particular. The technique first performs method boundary tracing. Second, developers perform a post-mortem analysis of a system's behavior using visualization optimized for trace data of multithreaded software systems. The technique enables developers to understand how multiple threads collaborate at runtime. The technique is integrated into a professional and scalable tool for visualizing the behavior of complex software systems. In case studies, we have tested the technique with industrially developed, multithreaded software systems to understand system behavior and to identify multithreading-related performance bottlenecks. }, keywords = { multithreaded, software, visualization, comprehension, trace compaction }, publisher = { ACM }, booktitle = { Proceedings of the 5th International Symposium on Software Visualization }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBD10/jtruemper_softvis2010_preprint.pdf }, link1 = { Definitive version http://dx.doi.org/10.1145/1879211.1879232 }, sorting = { 256 }, priority = { 2 } } @inproceedings{MG10, author = { Müller, Matthias and Glander, Tassilo }, title = { Distance Transformations for Accessibility Mapping in the Public Transport Domain: A Performance Assessment }, year = { 2010 }, month = { 9 }, abstract = { In this work, we will conduct an experiment to assess accuracy and computation speed of two distance transformation algorithms, capturing the performance range of state-of-the-art distance transformations for application in public transport scenarios. The first one is a fast but possibly inaccurate modified Euclidean Distance transformation implemented on graphics hardware. 
The second is a slower but very accurate algorithm realised with the PCRaster library, incorporating road network and additional surface constraints. }, booktitle = { CD-Proceedings of GIScience }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/MG10/Mueller_Glander_DistanceTransformations_for_AccessibilityMapping.pdf }, sorting = { 768 } } @inproceedings{TBVD10, author = { Jonas Trümper and Johannes Bohnet and Stefan Voigt and Jürgen Döllner }, title = { Visualization of Multithreaded Behavior to Facilitate Maintenance of Complex Software Systems }, year = { 2010 }, pages = { 325-330 }, month = { 9 }, abstract = { Maintenance accounts for the major part of a software system's total costs. Therein, program comprehension is an important, but complex activity: Typically, up-to-date documentation is not available, so the main reliable source of information on the implementation represent the artifacts of the system's implementation. Understanding software systems is difficult, in particular, if multithreading concepts are involved because state-of-the art development tools provide only limited support for maintenance activities. In addition, concurrency is often not directly reflected by the source code, i.e., there is only a non-obvious correlation between control structures in the source code and a system's runtime behavior. We present a program comprehension technique that helps to analyze and understand runtime behavior of multithreaded software systems and, thereby, facilitates software maintenance tasks. Our approach contains the following concepts: First, light-weight dynamic analysis records executed method calls at runtime. Second, visualization of multithreading trace data allows developers to explore the system behavior post-mortem. The technique forms part of a scalable tool suite for understanding the behavior of complex software systems. 
We also show how to apply the technique on industrial software systems to solve common maintenance problems. }, keywords = { multithreaded, software, visualization, comprehension, maintenance }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the 7th International Conference on the Quality of Information and Communications Technology }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TBVD10/jtruemper_quatic2010_preprint.pdf }, link1 = { Definitive version http://dx.doi.org/10.1109/QUATIC.2010.59 }, sorting = { 512 }, priority = { 1 } } @inproceedings{KSKD10b, author = { Kyprianidis, Jan Eric and Semmo, Amir and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Anisotropic Kuwahara Filtering with Polynomial Weighting Functions }, year = { 2010 }, pages = { 25--30 }, month = { 9 }, abstract = { In this work we present new weighting functions for the anisotropic Kuwahara filter. The anisotropic Kuwahara filter is an edge-preserving filter that is especially useful for creating stylized abstractions from images or videos. It is based on a generalization of the Kuwahara filter that is adapted to the local shape of features. For the smoothing process, the anisotropic Kuwahara filter uses weighting functions that use convolution in their definition. For an efficient implementation, these weighting functions are therefore usually sampled into a texture map. By contrast, our new weighting functions do not require convolution and can be efficiently computed directly during the filtering in real-time. We show that our approach creates output of similar quality as the original anisotropic Kuwahara filter and present an evaluation scheme to compute the new weighting functions efficiently by using rotational symmetries. }, booktitle = { Proc. 
EG UK Theory and Practice of Computer Graphics }, project = { gpuakf }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KSKD10b/jkyprian-tpcg2010.pdf }, sorting = { 2048 } } @inproceedings{TD10, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering to Perspective Texture-Atlases }, year = { 2010 }, pages = { 81-84 }, month = { 5 }, abstract = { The image-based representation of geometry is a well known concept in computer graphics. Due to z-buffering, the derivation of such representations using render-to-texture delivers only information of the nearest fragments. Often, transparency-based visualization techniques, e.g., ghost views, also require information of occluded fragments. These can be captured using multi-pass rendering techniques such as depth-peeling or stencil-routed A-buffers on a per-fragment basis. This paper presents an alternative rendering technique that enables the derivation image-based representations on a per-object or per-primitive level within a single rendering pass. We use a dynamic 3D texture atlas that is parameterized on a per-frame basis. Then, prior to rasterization, the primitives are transformed to their respective position within the texture atlas, using vertex-displacement in screen space. 
}, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TD10/RenderToTextureAtlas.pdf }, editor = { Stefan Seipel and Hendrik Lensch }, publisher = { The Eurographics Association }, address = { Norrköping, Sweden }, booktitle = { Eurographics 2010 Shortpaper }, project = { NFG }, issn = { 1017-4656 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2010/TD10/RenderToTextureAtlas.pdf }, link2 = { Video (Youtube) http://www.youtube.com/user/trappcg#p/a/u/1/llLKU-Oa2iU }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-386462-view-dependent-texture-atlases-atlas-real-time-rendering-render-rttav1-science-technology-ppt-powerpoint/ }, sorting = { 4864 } } @inproceedings{GTD10, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { 3D Isocontours – Real-time Generation and Visualization of 3D Stepped Terrain Models }, year = { 2010 }, pages = { 17-20 }, month = { 5 }, abstract = { Isocontours (also isopleths, isolines, level sets) are commonly used to visualize real-valued data defined over a 2D plane according to a set of given isovalues. To support the 3D landscape metaphor for information visualization, a 3D stepped terrain can be derived by lifting and extruding isolines to their particular isovalue, but typically requires triangulation of the resulting surface representation in a preprocessing step. We present a concept and rendering technique for triangle-based terrain models that provide interactive, adaptive generation and visualization of such stepped terrains without preprocessing. Our fully hardware-accelerated rendering technique creates additional step geometry for each triangle intersecting an iso-plane on-the-fly. 
Further, an additional interpolation schema facilitates smooth transition between established 3D terrain visualization and its stepped variant. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, url = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GTD10/3DIsolines_draft.pdf }, editor = { Stefan Seipel and Hendrik Lensch }, publisher = { The Eurographics Association }, address = { Norrköping, Sweden }, booktitle = { Eurographics 2010 Shortpaper }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GTD10/3DIsolines_draft.pdf }, issn = { 1017-4656 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=7w3yRp3Xqp8 }, sorting = { 4608 } } @inproceedings{BD10, author = { Beck, Martin and Döllner, Jürgen }, title = { Towards Automated Analysis and Visualization of Distributed Software Systems }, year = { 2010 }, pages = { 213-214 }, abstract = { This paper sketches one approach to facilitate comprehension of distributed software systems. These systems gain more and more importance due to a paradigm shift in software systems and applications, evolving from single-chip solutions to multi-tiered web-based applications. For a single developer, it becomes increasingly difficult to cope with the complexity of such software systems. We propose a novel automated analysis and visualization technique that aims at the interactive exploration of static structures and behavior of distributed software systems. First, it automatically gathers communication data from the instrumented system components. Second, it generates a visual representation using a heuristic layout approach. Finally, it allows developers to interactively refine and explore this structural and behavioral information. 
}, booktitle = { Proceedings of the International Symposium on Software Visualization }, project = { HPI }, sorting = { 64 } } @inproceedings{GBD10, author = { Glander, Tassilo and Baresel, Janett and Döllner, Jürgen }, title = { Überlegungen zum stufenlosen Übergang zwischen verschieden generalisierten 3D-Stadtmodellrepräsentationen }, year = { 2010 }, abstract = { Generalisierungsalgorithmen für virtuelle 3D-Gebäude- und Stadtmodelle leiten aus detaillierten Repräsentationen weniger detaillierte Repräsentationen in unterschiedlich starkem Generalisierungsgrad ab. Für eine kartographisch optimierte Darstellung von 3D-Stadtmodellen in interaktiven Anwendungen muss abhängig vom Maßstab bzw. vom Abstand zum Betrachter eine geeignete Abstraktionsstufe ausgewählt werden. In diesem Beitrag betrachten wir das Problem der Abbildung zwischen kontinuierlichem Maßstab und diskreten generalisierten Modellen und diskutieren Ideen zu Lösungen für den dynamischen Übergang zwischen verschiedenen Generalisierungsstufen eines 3D-Stadtmodells. }, affiliation = { Hasso-Plattner-Institut, Universität Potsdam }, address = { Vienna }, howpublished = { CD-Proceedings }, booktitle = { Tagungsband der 3-Ländertagung DGPF }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/GBD10/gbd10.pdf }, sorting = { 32 } } @inproceedings{RD10a, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Bestandsaktualisierung von 3D-Stadtmodellen durch Analyse von 3D-Punktwolken }, year = { 2010 }, abstract = { In dieser Arbeit wird ein Verfahren zur Fortführung und Aktualisierung virtueller 3D-Stadtmodelle auf Basis automatischer Auswertung von LiDAR-Daten vorgestellt. Scanner-Technologien (z. B. LiDAR) ermöglichen die regelmäßige und kosteneffiziente Erfassung von Städten und Metropolregionen durch Befliegungen oder terrestrische Aufnahmen. 
Die Konstruktion eines 3D-Stadtmodells ist hingegen im Allgemeinen aufwändig, so dass die Aktualisierung eines bereits bestehenden 3D-Stadtmodells aus wirtschaftlicher Sicht effizienter durchgeführt werden muss. Mit dem hier vorgestellten Verfahren ist es möglich, bestehende 3D-Stadtmodelle und erfasste LiDAR-Daten in Echtzeit zu visualisieren und Veränderungen im Datenbestand automatisiert zu ermitteln. Unterschiede zwischen der tatsächlichen Bebauung und des im 3D-Stadtmodell vorhandenen Datenbestandes werden so zuverlässig erkannt, so dass eine nachfolgende manuelle Bestandsaktualisierung durch 3D-Stadtmodellwerkzeuge fokussiert durchgeführt werden kann. }, affiliation = { Hasso-Plattner-Institut, Universität Potsdam }, address = { Vienna }, howpublished = { CD-Proceedings }, booktitle = { Tagungsband der 3-Ländertagung DGPF }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/RR10a/3Laendertagung-2010_Richter-Doellner.pdf }, sorting = { 16 } } @inproceedings{DK10a, author = { Döllner, Jürgen and Kyprianidis, Jan Eric }, title = { Approaches to Image Abstraction for Photorealistic Depictions of Virtual 3D Models }, year = { 2010 }, pages = { 263--277 }, abstract = { In our contribution, we present approaches of automatic image abstraction, applied to images and image sequences derived as views of virtual 3D city models and landscape models. We first discuss the requirements of cartography-specific visualization based on the virtual globe metaphor as well as the specific characteristics and deficiencies of visualization based on photorealism. We introduce a concept that extends the classical visualization pipeline by cartography-specific functionality, object-space and image-space abstraction, which also represent the two principle ways for implementing cartographic visualization systems. 
Abstraction provides the prerequisites to visually communicate uncertainty, to simplify and filter detailed elements, and to clearly encode displayed information of complex geospatial information. In addition, it offers many degrees of freedom for artistic and stylistic design of cartographic products. Furthermore, we outline general working principles and implementation of an automatic image-space abstraction technique we developed that creates high-quality, simplified, stylistic illustrations from color images, videos, and 3D renderings. }, keywords = { Non-photorealistic rendering, image abstraction, virtual 3D city models }, editor = { Georg Gartner and Felix Ortag }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Cartography in Central and Eastern Europe }, project = { flowabs }, doi = { 10.1007/978-3-642-03294-3_17 }, sorting = { 5120 } } @inproceedings{KD10a, author = { Klimke, Jan and Döllner, Jürgen }, title = { Geospatial Annotations for 3D Environments and their WFS-based Implementation }, year = { 2010 }, pages = { 379-397 }, abstract = { Collaborative geovisualization provides effective means to communicate spatial information among a group of users. Annotations as one key element of collaborative geovisualization systems enable comprehension of collaboration processes and support time-shifted communication. By annotations we refer to user-generated information such as remarks, comments, findings and any other information related to the 3D environment. They have to be efficiently modeled, stored and visualized while precisely retaining their spatial reference and creation context. Existing models for an-notations generally do not fully support spatial references and, therefore, do not fully take advantage of the spatial relationships associated with annotations. 
This paper presents a GML-based data model for geospatial annotations that explicitly incorporates spatial references and allows different types of annotations to be stored together with their context of creation. With this approach annotations can be represented as first-class spatial features. Consequently, annotations can be seamlessly integrated into their 3D environment and the author's original intention and message can be better expressed and understood. An OGC Web Feature Service is used as standardized interface for storage and retrieval of annotations, which assures data interoperability with existing geodata infrastructures. We have identi-fied three types of annotation subjects, namely geographic features, geometry, and scene views, represented by their corresponding 2D/3D geometry. The model also defines a point-based approximation for complex geometry, such that annotations can also be used by client application with limited abilities regarding display size, bandwidth or geometry handling. Furthermore we extended our model by annotations that can contain 3D geometry besides textual information. In this way the expressiveness of annotations can be further enhanced for communicating spatial relationships such as distances or arrangements of geographic features. }, editor = { Painho, M. and Santos, M.Y. and Pundt, H. 
}, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Geospatial Thinking }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KD10a/GeoCommentsAGILE2010.pdf }, isbn = { 978-3-642-12326-2 }, issn = { 1863-2246 }, doi = { 10.1007/978-3-642-12326-9_20 }, sorting = { 5376 } } @inproceedings{KD10b, author = { Klimke, Jan and Döllner, Jürgen }, title = { Combining Synchronous and Asynchronous Collaboration within 3D City Models }, year = { 2010 }, number = { 6292 }, abstract = { This paper presents an approach for combining spatially distributed synchronous and asynchronous collaboration within 3D city models. Applications use these models as additional communication medium - beside traditional ones like chat, audio, and video conferences - to facilitate communication of georeferenced and geospatial information. Among many challenges collaboration tools should support both the communication with other collaborators and their awareness of the current collaboration context. To support knowledge construction and gathering, we have designed a collaboration tool that facilitates (a) creation of comments that have 3D references to the virtual 3D city model and (b) information about the context in which these comments are created. The synchronous collaboration in connection with the creation of non volatile, precisely georeferenced units of information allows users a comprehensible form of cooperation in spatially distributed settings. In our approach media breaks are avoided by using a single 3D user interface to avoid media switches and disruption in working processes. By visualizing the information associated with a geographic feature or 3D geometry of a virtual 3D city model a coherent representation of content and reference is possible. }, editor = { Fabrikant, S.I. and Reichenbacher, T. and van Kreveld, M. and Schlieder, C. 
}, publisher = { Springer }, series = { LNCS }, booktitle = { Sixth International Conference, GIScience 2010, Zürich, Switzerland, Sep. 14-17 2010, Proceedings }, project = { HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KD10b/PaperGIScience-Draft.pdf }, sorting = { 3840 } } @inproceedings{RD10, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Out-of-Core Real-Time Visualization of Massive 3D Point Clouds }, year = { 2010 }, pages = { 121--128 }, abstract = { This paper presents a point-based rendering approach to visualize massive sets of 3D points in real-time. In many disciplines such as architecture, engineering, and archeology LiDAR technology is used to capture sites and landscapes; the resulting massive 3D point clouds pose challenges for traditional storage, processing, and presentation techniques. The available hardware resources of CPU and GPU are limited, and the 3D point cloud data exceeds available memory size in general. Hence out-of-core strategies are required to overcome the limit of memory. We discuss concepts and implementations of rendering algorithms and interaction techniques that make out-of-core real-time visualization and exploration of massive 3D point clouds feasible. We demonstrate with our implementation real-time visualization of arbitrarily sized 3D point clouds with current PC hardware using a spatial data structure in combination with a point-based rendering algorithm. A rendering front is used to increase the performance taking into account user interaction as well as available hardware resources. Furthermore, we evaluate our approach, describe its characteristics, and report on applications. 
}, keywords = { out-of-core visualization, point-based rendering, 3D point clouds, LiDAR }, booktitle = { 7th International Conference on Virtual Reality, Computer Graphics, Visualisation and Interaction in Africa }, project = { NFG }, sorting = { 3072 } } @inproceedings{ED10b, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Effiziente Verschattungsberechnung für die Solarpotenzialanalyse unter Berücksichtigung der Einstrahlungsintensität }, year = { 2010 }, abstract = { In diesem Beitrag stellen wir ein Verfahren zur effizienten Berechnung von Verschattungsdaten vor, das insbesondere die Einstrahlungsintensität berücksichtigt und einen zentralen Bestandteil von Solarpotenzialanalysen bildet. Das Verfahren basiert auf der 3D-Analyse einer virtuellen 3D-Umgebung und nutzt dabei die Funktionalität von 3DGrafikhardware, um die Berechnung zu beschleunigen, indem 3D-Analyserechnungen durch bildbasierte Verfahren approximiert werden. Bisherige Verfahren berechnen den Verschattungsgrad für eine definierte Zeitspanne, berücksichtigen aber nicht wann in dieser Zeitspanne die analysierte Fläche verschattet ist. Dies bestimmt jedoch maßgeblich die potenziell erzeugbare Energiemenge. Im vorgestellten Verfahren werden hierfür die für die Analyse herangezogenen Sonnenpositionen anhand der Einstrahlungsintensität gewichtet. Aus den somit erzeugten Verschattungsdaten mit gewichteten Werten kann die Reduktion der potenziell erzeugbaren Energiemenge aufgrund von zeitweiliger Verschattung präzise errechnet werden. 
}, affiliation = { Hasso-Plattner-Institut, Universität Potsdam }, address = { Vienna }, booktitle = { Tagungsband der 3-Ländertagung DGPF }, project = { NFG }, sorting = { 3456 } } @inproceedings{WSASH, author = { Hosain Wasty, Benjamin and Semmo, Amir and Appeltauer, Malte and Steinert, Bastian and Hirschfeld, Robert }, title = { ContextLua: Dynamic Behavioral Variations in Computer Games }, year = { 2010 }, pages = { 5:1--5:6 }, abstract = { Behavioral variations are central to modern computer games as they are making the gameplay a more interesting user experience. However, these variations significantly add to the implementation complexity. We discuss the domain of computer games with respect to dynamic behavioral variations and argue that context-oriented programming is of special interest for this domain. This motivates our extension to the dynamic scripting language Lua, which is frequently used in the development of computer games. Our newly provided programming constructs allow game developers to use layers for defining and activating variations of the basic gameplay. }, booktitle = { Proceedings of the 2nd International Workshop on Context-Oriented Programming }, doi = { 10.1145/1930021.1930026 }, sorting = { 3712 } } @inproceedings{TSHD09, author = { Trapp, Matthias and Schneider, Lars and Holz, Norman and D{\"o}llner, J{\"u}rgen }, title = { Strategies for Visualizing Points-of-Interest of 3D Virtual Environments on Mobile Devices }, year = { 2009 }, month = { 9 }, abstract = { 3D virtual environments are increasingly used as general-purpose medium for communicating spatial information. In particular, virtual 3D city models have numerous applications such as car navigation, city marketing, tourism, and gaming. In these applications, points-of-interest (POI) play a major role since they typically represent features relevant for specific user tasks and facilitate effective user orientation and navigation through the 3D virtual environment. 
In this paper, we present strategies that aim at effectively visualizing points-of-interest in a 3D virtual environment used on mobile devices. Here, we additionally have to face the "keyhole" situation, i.e., the users can realize only a small part of the environment due to the limited view space and resolution. For the effective visualization of points-of-interest in 3D virtual environments we propose to combine specialized occlusion management for 3D scenes together with visual cues that handle out-of-frame points-of-interest. We also discuss general aspects and definitions of points-of-interest in the scope of 3D models and outline a prototype implementation of the mobile 3D viewer application based on the presented concepts. In addition, we give a first performance evaluation with respect to rendering speed and power consumptions. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, publisher = { Springer }, booktitle = { 6th International Symposium on LBS \& TeleCartography }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TSHD09/MobilePOI.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=9rkykc-sSSI }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-233953-point-interest-visualization-mobilepoi-v2-science-technology-ppt-powerpoint/ }, sorting = { 2816 } } @inproceedings{TLJD09, author = { Trapp, Matthias and Lorenz, Haik and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Enhancing Interactive Non-Planar Projections of 3D Geovirtual Environments with Stereoscopic Imaging }, year = { 2009 }, pages = { 281--296 }, month = { 9 }, abstract = { Stereo rendering, as an additional visual cue for humans, is an important method to increase the immersion into 3D virtual environments. Stereo pairs synthesized for the left and right eye are displayed in a way that the human visual system interprets as 3D perception. 
Stereoscopy is an emerging field in cinematography and gaming. While generating stereo images is well known for standard projections, the implementation of stereoscopic viewing for interactive non-planar single-center projections, such as cylindrical and spherical projections, is still a challenge. This paper presents the results of adapting an existing image-based approach for generating interactive stereoscopic non-planar projections for polygonal scenes on consumer graphics hardware. In particular, it introduces a rendering technique for generating image-based, non-planar stereo pairs within a single rendering pass. Further, this paper presents a comparison between the image-based and a geometry-based approach with respect to selected criteria. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Manfred Buchroithner }, publisher = { Springer Verlag }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { True-3D In Cartography - 1st International Conference on 3D Maps }, project = { NFG }, isbn = { 978-3-642-12271-2 }, issn = { 1863-2246 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=RevbRJD3pPE }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-232625-stereoscopy-non-planar-projections-true-3d-cartography-real-time-rendering-true3d-v3-science-technology-ppt-powerpoint/ }, sorting = { 3328 } } @inproceedings{TD09, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Dynamic Mapping of Raster-Data for 3D Geovirtual Environments }, year = { 2009 }, pages = { 387--392 }, month = { 7 }, abstract = { Interactive 3D geovirtual environments (GeoVE), such as 3D virtual city and landscape models, are important tools to communicate geospatial information. 
Usually, this includes static polygonal data (e.g., digital terrain model) and raster data (e.g., aerial images) which are composed from multiple data sources during a complex, only partial automatic pre-processing step. When dealing with highly dynamic geo-referenced raster data, such as the propagation of fires or floods, this pre-processing step hinders the direct application of 3D GeoVE for decision support systems. To compensate for this limitation, this paper presents a concept for dynamically mapping multiple layers of raster for interactive GeoVE. The implementation of our rendering technique is based on the concept of projective texture mapping and can be implemented efficiently using consumer graphics hardware. Further, this paper demonstrates the flexibility of our technique using a number of typical application examples. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, keywords = { 3D geovirtual environments, spatial-temporal data mapping, projective texture mapping }, publisher = { IEEE Computer Society Press }, booktitle = { 13th International Conference on IEEE Information Visualisation }, project = { NFG }, doi = { 10.1109/IV.2009.28 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TD09/projectivemappings_lores.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-219538-dynamic-mapping-raster-data-3d-ge-dynamicmapping-science-technology-ppt-powerpoint/ }, sorting = { 3840 } } @inproceedings{GPTD09, author = { Glander, Tassilo and Peters, Denise and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { 3D Wayfinding Choremes: A Cognitively Motivated Representation of Route Junctions in Virtual Environments }, year = { 2009 }, pages = { 407--427 }, month = { 6 }, abstract = { Research in cognitive sciences suggests that orientation and navigation along routes can be improved if the graphical 
representation is aligned with the user’s mental concepts of a route. In this paper, we analyze an existing 2D schematization approach called wayfinding choremes and present an implementation for virtual 3D urban models, transferring the approach to 3D. To create the virtual environment, we transform the junctions of a route defined for a given road network to comply with the eight sector model, that is, outgoing legs of a junction are slightly rotated to align with prototypical directions in 45° increments. Then, the adapted road network is decomposed into polygonal block cells, the individual polygons being extruded to blocks and their facades textured. For the evaluation of our 3D wayfinding choreme implementation, we present an experiment framework allowing for training and testing subjects by a route learning task. The experimental framework can be parameterized flexibly, exposing parameters to the conductor. We finally give a sketch of a user study by identifying hypotheses, indicators, and, hence, experiments to be done. 
}, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { 12th AGILE International Conference on GI Science }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/GPTD09/glander_choremes_final_flat.pdf }, isbn = { 978-3-642-00317-2 }, issn = { 1863-2246 }, doi = { 10.1007/978-3-642-00318-9_21 }, sorting = { 4352 } } @inproceedings{RDKS09, author = { Ross, Lutz and Döllner, Jürgen and Kleinschmit, Birgit and Schroth, Olaf }, title = { E-Collaboration Between the Private and the Civil Sector: Support of Long-Term Utilization and Update of Official 3D City Models }, year = { 2009 }, volume = { XXXVIII-3-4/C3 }, editor = { Kolbe and Zhang and Zlatanova }, publisher = { International Archives of Photogrammetry, Remote Sensing and Spatial Information Science }, address = { Vancouver, Canada }, booktitle = { Proceedings of Geoweb 2009 Conference }, issn = { 1682-1777 }, sorting = { 1024 } } @inproceedings{D09, author = { D{\"o}llner, J{\"u}rgen }, title = { Towards the Automated Construction of Digital Cities }, year = { 2009 }, pages = { 341--348 }, editor = { Dieter Fritch }, publisher = { Wichmann }, address = { Heidelberg }, booktitle = { Photogrammetric Week '09 }, sorting = { 256 } } @inproceedings{HHD09, author = { Hagedorn, Benjamin and Hildebrandt, Dieter and D{\"o}llner, J{\"u}rgen }, title = { Towards Advanced and Interactive Web Perspective View Services }, year = { 2009 }, pages = { 33--51 }, abstract = { The Web Perspective View Service (WPVS) generates 2D images of perspective views of 3D geovirtual environments (e.g., virtual 3D city models) and represents one fundamental class of portrayal services. As key advantage, this image-based approach can be deployed across arbitrary networks due to server-side 3D rendering and 3D model management. However, restricted visualization and interaction capabilities of WPVS-based applications represent its main weaknesses. 
To overcome these limitations, we present the concept and an implementation of the WPVS++, a WPVS extension, which provides A) additional thematic information layers for generated images and B) additional service operations for requesting spatial and thematic information. Based on these functional extensions, WPVS++ clients can implement various 3D visualization and interaction features without changing the underlying working principle, which leads to an increased degree of interactivity and is demonstrated by prototypic web-based client applications. }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Developments in 3D Geo-Information Sciences }, files = { user_upload/fachgebiete/doellner/publications/2009/HHD09/3dgeoinfo2009.pdf }, doi = { 10.1007/978-3-642-04791-6 }, sorting = { 2048 } } @inproceedings{VBD09, author = { Voigt, Stefan and Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Enhancing Structural Views of Software Systems by Dynamic Information }, year = { 2009 }, pages = { 47 - 50 }, abstract = { Understanding software systems comprises the analysis of different aspects of the respective systems, such as dynamic and static analysis with all their facets. Consequently, developers study different kinds of information and create different mental models. We introduce a visualization technique that facilitates cross referencing mental models, in particular models that describe the structure and models describing the behavior of software systems. To achieve this goal, we enhance structural views by runtime information depending on the current focus of a sequential view. Animation enables developers to explore how the system’s state changes over time, by this, supporting developers in understanding program behavior. 
}, keywords = { Dynamic Analysis, Animation, Program Comprehension }, publisher = { IEEE Computer Society Press }, booktitle = { 5th IEEE International Workshop on Visualizing Software for Understanding and Analysis }, sorting = { 2304 }, state = { 1 }, priority = { 1 } } @inproceedings{BKD09, author = { Bohnet, Johannes and Koeleman, Martin and D{\"o}llner, J{\"u}rgen }, title = { Visualizing Massively Pruned Execution Traces to Facilitate Trace Exploration }, year = { 2009 }, pages = { 57-64 }, abstract = { Execution traces provide precise pictures of the inner workings of software systems. They therefore support programmers in performing various maintenance tasks. However, exploring traces is difficult due to their size. They typically consist of thousands of participating functions and millions of control flow events. When exploring traces, it is particularly time-consuming to identify those time ranges within the trace that are relevant for the current maintenance task. In this paper, we propose a technique that supports programmers in exploring traces in that it first prunes less relevant calls from the trace and then provides condensed and repetition-aware visualizations that facilitate fast and accurate navigation even within very large traces. Repetitions in the trace are detected by a novel metrics to measure similarity between function calls in a fuzzy and adjustable way. The metrics helps to identify outlier calls in repetitive call sequences and guides programmers on control paths being likely relevant for their comprehension task. The technique is implemented within a prototypical analysis tool that copes with large C/C++ software systems. We demonstrate the concepts by means of a case study with our industrial partner. 
}, keywords = { software visualization, trace visualization, reverse engineering, dynamic analysis }, publisher = { IEEE Computer Society Press }, booktitle = { 5th IEEE International Workshop on Visualizing Software for Understanding and Analysis }, sorting = { 2560 }, priority = { 1 } } @inproceedings{VBD09a, author = { Voigt, Stefan and Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Object Aware Execution Trace Exploration }, year = { 2009 }, pages = { 201 - 210 }, abstract = { To understand software systems it is common practice to explore runtime information such as method calls. System behavior analysis can further be facilitated by additionally taking runtime data dependencies into account. In object oriented systems, a typical data dependency is the information about which objects are accessed by the traced method calls. To support software engineers in handling the massive amount of information that execution traces typically consist of, highly scalable visualizations are needed. In this paper, we propose a trace-visualization technique that (a) explicitly visualizes both, method calls and object accesses, and (b) provides high scalability to handle large execution traces. With regard to the visualization technique proposed, we give a systematic overview of visual patterns that are to be expected and of their meanings with respect to system behavior. Additionally, we present the results of three case-studies to show how our approach facilitates developers in comprehending the behavior of complex C++ software systems. 
}, keywords = { Reverse Engineering; Dynamic Analysis; Execution Trace Exploration; Object Orientation; Software Visualization }, publisher = { IEEE Computer Society Press }, booktitle = { 25th IEEE International Conference on Software Maintenance }, sorting = { 3072 }, state = { 1 }, priority = { 1 } } @inproceedings{RBDK09, author = { Ross, Lutz and Bolling, Jannes and D{\"o}llner, J{\"u}rgen and Kleinschmit, Birgit }, title = { Enhancing 3D City Models with Heterogeneous Spatial Information: Towards 3D Land Information Systems }, year = { 2009 }, pages = { 113--133 }, abstract = { Spatial and georeferenced information plays an important role in urban land management processes such as spatial planning and environmental management. As many of the processes are increasingly coined by participation of and collaboration between multiple stakeholders, a common medium capable of integrating different types and sources of spatial information is necessary. It is argued that 3D city models provide such a framework and medium into which heterogeneous information can be integrated. Therefore, the main research question of this contribution is to identify and develop methods for integrating heterogeneous spatial and georeferenced information into 3D city models in the context of urban land management. We present a prototype 3D Land Information System and a use case for the city centre of Potsdam, Germany. In addition, constraints within administrations regarding the systematic, sustainable use of such a system are discussed. 
}, editor = { Sester, Monika and Bernard, Lars and Paelke, Volker }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { 12th AGILE International Conference on GI Science }, files = { user_upload/fachgebiete/doellner/publications/2009/RBDK09/2009_AGILE_Ross_et_al.pdf }, sorting = { 4096 } } @inproceedings{HTGD09, author = { Hagedorn, Benjamin and Trapp, Matthias and Glander, Tassilo and D{\"o}llner, J{\"u}rgen }, title = { Towards an Indoor Level-of-Detail Model for Route Visualization }, year = { 2009 }, pages = { 692--697 }, abstract = { Indoor routing represents an essential feature required by applications and systems that provide spatial information about complex sites, buildings and infrastructures such as in the case of visitor guidance for trade fairs and customer navigation at airports or train stations. Apart from up-to-date, precise 3D spatial models these systems and applications need user interfaces as core system components that allow users to efficiently express navigation goals and to effectively visualize routing information. For interoperable and flexible indoor routing systems, common specifications and standards for indoor structures, objects, and relationships are needed as well as for metadata such as data quality and certainty. In this paper, we introduce a classification of indoor objects and structures taking into account geometry, semantics, and appearance, and propose a level-of-detail model for them that supports the generation of effective indoor route visualization. 
}, publisher = { IEEE Computer Society Press }, booktitle = { First International Workshop on Indoor Spatial Awareness (ISA) }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/HTGD09/IndoorLOD08.pdf }, doi = { 10.1109/MDM.2009.118 }, sorting = { 4608 } } @inproceedings{BVD09, author = { Bohnet, Johannes and Voigt, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Projecting Code Changes onto Execution Traces to Support Localization of Recently Introduced Bugs }, year = { 2009 }, pages = { 438--442 }, abstract = { Working collaboratively on complex software systems often leads to situations where a developer enhances or extends system functionality, thereby however, introducing bugs. At best the unintentional changes are caught immediately by regression tests. Often however, the bugs are detected days or weeks later by other developers noticing strange system behavior while working on different parts of the system. Then it is a highly time-consuming task to trace back this behavior change to code changes in the past. In this paper we propose a technique for identifying the recently introduced change that is responsible for the unexpected behavior. The key idea is to combine dynamic, static, and code change information on the system to reduce the possibly great amount of code modifications to those that may affect the system while running its faulty behavior. After having applied this massive automated filtering step, developers receive support in semi-automatically identifying the root cause change by means of a trace exploration frontend. Within multiple synchronized views, developers explore when, how and why modified code locations are executed. The technique is implemented within a prototypical analysis tool that copes with large (> MLOC) C/C++ software systems. We demonstrate the approach by means of industrial case studies. 
}, keywords = { software visualization, fault localization, reverse engineering }, publisher = { ACM }, booktitle = { 24th ACM Symposium on Applied Computing }, sorting = { 4864 }, priority = { 1 } } @inproceedings{TLD09, author = { Trapp, Matthias and Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Interactive Stereo Rendering For Non-Planar Projections of 3D Virtual Environments }, year = { 2009 }, pages = { 199--204 }, abstract = { Stereo rendering, as an additional visual cue for humans, is an important method to increase the immersion into 3D virtual environments. Stereo pairs synthesized for the left and right eye are displayed in a way that the human visual system interprets as 3D perception. Stereoscopy is an emerging field in cinematography and gaming. While generating stereo images is well known for standard projections, the implementation of stereoscopic viewing for non-planar single-center projections, such as cylindrical and spherical projections in real-time, is still a challenge. This paper presents the results of adapting existing image-based and object-based approaches for generating interactive stereoscopic non-planar projections for polygonal scenes on consumer graphics hardware. In particular, it introduces a rendering technique for generating image-based, non-planar stereo pairs within a single rendering pass. Further, this paper presents a comparison between these both approaches with respect to selected criteria. 
}, publisher = { INSTICC Press }, booktitle = { GRAPP 2009 - 4th International Conference on Computer Graphics Theory and Applications }, project = { NFG }, isbn = { 978-989-8111-67-8 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2009/TLD09/StereoscopicNonPlanarProjections.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=RevbRJD3pPE }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-149181-interactive-stereoscopic-rendering-imaging-non-planar-projections-real-time-3d-virtual-environments-grapp-2009-science-technology-ppt-powerpoint/ }, sorting = { 5120 } } @inproceedings{LD09, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Real-time Piecewise Perspective Projections }, year = { 2009 }, pages = { 147--155 }, abstract = { This paper presents an approach to real-time rendering of non-planar projections with a single center and straight projection rays. Its goal is to provide optimal and consistent image quality. It operates entirely in object space to remove the need for image resampling. In contrast to most other object-space approaches, it does not evaluate non-linear functions on the GPU, but approximates the projection itself by a set of perspective projection pieces. Within each piece, graphics hardware can provide optimal image quality. The result is a coherent and crisp rendering. Procedural textures and stylization effects greatly benefit from our method as they usually rely on screen-space operations. The real-time implementation runs entirely on GPU. It replicates input primitives on demand and renders them into all relevant projection pieces. The method is independent of the input mesh density and is not restricted to static meshes. Thus, it is well suited for interactive applications. We demonstrate it for an analytic and a freely designed projection. 
}, publisher = { INSTICC Press }, booktitle = { GRAPP 2009 - International Conference on Computer Graphics Theory and Applications }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/LD09/ppp_grapp09.pdf }, sorting = { 5376 } } @inproceedings{JMKD09, author = { Jahnke, Mathias and Meng, Liqiu and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Non-photorealistic Visualizations on Mobile Devices and Usability Concerns }, year = { 2009 }, pages = { 168--181 }, editor = { Lin, H. and Batty, M. }, publisher = { Science Press, Beijing, China }, booktitle = { Virtual Geographic Environments }, sorting = { 6144 } } @inproceedings{DK09, author = { D{\"o}llner, J{\"u}rgen and Kyprianidis, Jan Eric }, title = { Approaches to Image Abstraction for Photorealistic Depictions of Virtual 3D Models }, year = { 2009 }, pages = { 371--385 }, abstract = { In our contribution, we present approaches of automatic image abstraction, applied to images and image sequences derived as views of virtual 3D city models and landscape models. We first discuss the requirements of cartography-specific visualization based on the virtual globe metaphor as well as the specific characteristics and deficiencies of visualization based on photorealism. We introduce a concept that extends the classical visualization pipeline by cartography-specific functionality, object-space and image-space abstraction, which also represent the two principle ways for implementing cartographic visualization systems. Abstraction provides the prerequisites to visually communicate uncertainty, to simplify and filter detailed elements, and to clearly encode displayed information of complex geospatial information. In addition, it offers many degrees of freedoms for artistic and stylistic design of cartographic products. 
Furthermore, we outline general working principles and implementation of an automatic image-space abstraction technique we developed that creates high-quality, simplified, stylistic illustrations from color images, videos, and 3D renderings. }, editor = { Georg Gartner and Felix Ortag }, booktitle = { Proceedings of the First ICA Symposium for Central and Eastern Europe 2009 }, organization = { Vienna University of Technology }, project = { flowabs }, sorting = { 5888 } } @inproceedings{HD2009, author = { Dieter Hildebrandt and Jürgen Döllner }, title = { Implementing 3D Geovisualization in Spatial Data Infrastructures: The Pros and Cons of 3D Portrayal Services }, year = { 2009 }, pages = { 9 }, abstract = { Visual representations of geospatial information proved to be valuable means to facilitate thinking, understanding, and knowledge construction about human and physical environments, at geographic scales of measurement. Massive amounts of distributed and heterogeneous geospatial information and geospatial computing functionality are increasingly available as distributed resources that can be accessed through the Internet. This increased availability has created the demand and feasibility to build distributed systems that leverage these resources for visualizing and interacting with geospatial information. In this paper, we characterize, discuss, and compare the W3DS and WPVS portrayal services as proposed by the OGC with a particular focus on their application to portray complex virtual 3D city models. We discuss the potentials and limitations of the different approaches and the conditions under which they can be applied in an effective and value adding way. With this contribution, we aim at supporting decision makers in choosing portrayal services meeting their requirements for spatial data infrastructures (SDI), the present process of standardizing 3D portrayal services and related research. 
}, editor = { Wolfgang Reinhardt and Antonio Krüger and Manfred Ehlers }, booktitle = { Geoinformatik 2009 }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2009/HD2009/_Hildebrandt2009__Geoinformatik2009.pdf }, sorting = { 128 } } @inproceedings{TD08d, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Relief Clipping Planes for Real-Time Rendering }, year = { 2008 }, month = { 12 }, abstract = { The concept of clipping planes is well known in computer graphics and can be used to create cut-away views. But clipping against just analytical defined planes is not always suitable for communicating every aspect of such visualization. For example, in hand-drawn technical illustrations, artists tend to communicate the difference between a cut and a model feature by using non-regular, sketchy cut lines instead of straight ones. To enable this functionality in computer graphics, we present a technique for rendering relief clip planes in real-time. Therefore, we extend the clip plane equation with an additional offset map, which can be represented by a texture map that contains height values. Clipping is then performed by varying the clip plane equation with respect to such an offset map. Further, we propose a capping technique that enables the rendering of caps onto the clipped area to convey the impression of solid material. It avoids a re-meshing of a solid polygonal mesh after clipping is performed. Our approach is pixel precise, applicable in real-time, and takes fully advantage of graphics accelerators. 
}, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, address = { Singapore }, booktitle = { ACM SIGGRAPH Asia 2008 - Sketch Program }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08d/ReliefClipPlanes.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=ydZIROOiNb0 }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-145934-reliefclippingplanes-trappdoellner-clipping-plane-real-time-rendering-science-technology-ppt-powerpoint/ }, sorting = { 2304 } } @inproceedings{GD08a, author = { Glander, Tassilo and D{\"o}llner, J{\"u}rgen }, title = { Techniques for Generalizing Building Geometry of Complex Virtual 3D City Models }, year = { 2008 }, pages = { 381--400 }, month = { 12 }, abstract = { Comprehensible and effective visualization of complex virtual 3D city models requires an abstraction of city model components to provide different degrees of generalization. This paper discusses generalization techniques that achieve clustering, simplification, aggregation and accentuation of 3D building ensembles. In a preprocessing step, individual building models are clustered into cells defined by and derived from its surrounding infrastructure network such as streets and rivers. If the infrastructure network is organized hierarchically, the granularity of the cells can be varied correspondingly. Three fundamental approaches have been identified, implemented, and analyzed: The first technique uses cell generalization; from a given cell it extrudes a 3D block, whose height is calculated as the weighted average of the contained buildings; as optimization, outliers can be managed separately. The second technique is based on convex-hull generalization, which approximates the contained buildings by creating the convex hull for the building ensemble. 
The third technique relies on voxelization, which converts the buildings’ geometry into a regular 3D raster data representation. Through morphological operations and Gaussian blurring, aggregation and simplification is yielded; polygonal geometry is created through a marching cubes algorithm. The paper closes with conclusions drawn with respect to the characteristics and applicability of the presented generalization techniques for interactive 3D systems based on complex virtual 3D city models. }, editor = { Peter van Oosterom and Sisi Zlatanova and Friso Penninga and Elfriede M. Fendel }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Advances in 3D Geoinformation Systems }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2008/GD08a/3dgeoinfo07_glander_doellner.pdf }, issn = { 978-3-540-72134-5 }, doi = { 10.1007/978-3-540-72135-2_21 }, sorting = { 2560 } } @inproceedings{TD08b, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Efficient Representation of Layered Depth Images for Real-time Volumetric Tests }, year = { 2008 }, pages = { 9--16 }, month = { 8 }, abstract = { Representing Layered Depth Images (LDI) as 3D texture can be used to approximate complex, arbitrarily shaped volumes on graphics hardware. Based on this concept, a number of real-time applications such as collision detection or 3D clipping against multiple volumes can be implemented efficiently using programmable hardware. One major drawback of this image-based representation is the high video memory consumption. To compensate that disadvantage, this paper presents a concept and associated algorithms that enable a lossless, efficient LDI representation which is especially designed for the usage within shader programs. The concept comprises the application of a viewpoint selection, a cropping, and a compression algorithm. 
We evaluated our algorithm with different test cases and show possible use cases. }, editor = { Ik Soo Lim and Wen Tang }, publisher = { The Eurographics Association }, booktitle = { EG UK Theory and Practice of Computer Graphics (2008) Conference }, organization = { UK Chapter of the Eurographics Association }, project = { NFG }, issn = { 978-3-905673-67-8 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08b/132-Trapp-Efficient-LDI-Representation.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77166-matthias-trapp-efficient-LDI-representation-TPCG-2-EG-UK-Theory-Practice-Computer-Graphics-2008-Conference-Education-ppt-powerpoint/ }, sorting = { 4096 } } @inproceedings{KD08b, author = { Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Abstraction by Structure Adaptive Filtering }, year = { 2008 }, pages = { 51--58 }, month = { 6 }, abstract = { In this work we present a framework of automatic non-photorealistic image processing techniques that create simplified stylistic illustrations from color images, videos and 3D renderings. To smooth low-contrast regions while preserving edges, we present a new fast separated implementation of the bilateral filter. Our approach works by filtering in direction of the gradient and then filtering the intermediate result in perpendicular direction. When applied iteratively, our approach does not suffer from horizontal or vertical artifacts and creates smooth output at curved boundaries. To extract salient important edges we first apply a one-dimensional difference-of-Gaussians filter in direction of the gradient and then apply smoothing along a flow field which we derive from the smoothed structure tensor. Our method creates smooth coherent output for line and curve segments. 
}, editor = { Ik Soo Lim and Wen Tang }, publisher = { Eurographics Association }, booktitle = { EG UK Theory and Practice of Computer Graphics }, project = { flowabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2008/KD08b/jkyprian-tpcg2008.pdf }, sorting = { 4608 } } @inproceedings{TD08a, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Volumetric Tests Using Layered Depth Images }, year = { 2008 }, pages = { 235--238 }, month = { 4 }, abstract = { This paper presents a new approach for performing efficiently 3D point-in-volume tests for solid and arbitrary complex shapes. It classifies a 3D point as inside or outside of a solid specified by 3D polygonal geometry. Our technique implements a basic functionality that offers a wide range of applications such as clipping, collision detection, interactive rendering of multiple 3D lenses as well as rendering using multiple styles. It is based on an extension of layered depth images (LDI) in combination with shader programs. An LDI contains layers of unique depth complexity and is represented by a 3D volume texture. The test algorithm transforms a 3D point into an LDI texture space and, then, performs ray marching through the depth layers to determine its classification. We show how to apply real-time volumetric tests to implement 3D clipping and rendering using multiple styles. In addition, we discuss limitations and possible improvements. }, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, editor = { K. Mania and E. 
Reinhard }, publisher = { The Eurographics Association }, booktitle = { Eurographics 2008 Shortpaper }, organization = { Eurographics }, project = { NFG }, issn = { 1017-4656 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08a/VolumetricTest.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=o7NSLQvdghg }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-76989-real-time-volumetric-tests-using-layered-depth-ima-graphics-processors-boundary-representation-data-structures-types-matthias-trapp-eg2008-education-ppt-powerpoint/ }, sorting = { 5888 } } @inproceedings{TD08, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { A Generalization Approach for 3D Viewing Deformations of Single-Center Projections }, year = { 2008 }, number = { 3 }, pages = { 162--170 }, month = { 2 }, abstract = { This paper presents a novel image-based approach to efficiently generate real-time non-planar projections of arbitrary 3D scenes such as panorama and fish-eye views. The real-time creation of such projections has a multitude of applications, e.g., in geovirtual environments and in augmented reality. Our rendering technique is based on dynamically created cube map textures in combination with shader programs that calculate the specific projections. We discuss two different approaches to create such cubemaps and introduce possible optimizations. Our technique can be applied within a single rendering pass, is easy to implement, and exploits the capability of modern programmable graphics hardware completely. Further, we present an approach to customize and combine different planar as well as non-planar projections. We have integrated our technique into an existing real-time rendering framework and demonstrate its performance on large scale datasets such as virtual 3D city and terrain models. 
}, affiliation = { Hasso-Plattner-Institute, University of Potsdam }, keywords = { Real-time Panorama, Non-Planar Projection, Fish-Eye Views, Projection Tiles }, editor = { Jos\'e Braz and Nuno Jardim Nunes and Joao Madeiras Pereira }, publisher = { INSTICC Press }, booktitle = { International Conference on Computer Graphics Theory and Applications (GRAPP) }, project = { NFG }, issn = { 978-989-8111-20-3 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TD08/NonPlanarProjection.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=Y6SBylq5SFA }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77167-matthias-trapp-non-planar-projection-GRAPP-2008-Funchal-Madeira-Real-time-Panorama-Fish-Eye-Views-Tiles-Education-ppt-powerpoint/ }, sorting = { 6400 } } @inproceedings{JD08b, author = { Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { 3D City Model Visualization with Cartography-Oriented Design }, year = { 2008 }, pages = { 507--516 }, abstract = { This paper investigates and discusses concepts and techniques to enhance spatial knowledge transmission of 3D city model representations based on cartography-oriented design. 3D city models have evolved to important tools for urban decision processes and information systems, especially in planning, simulation, networks, and navigation. For example, planning tools analyze visibility characteristics of inner urban areas and allow planers to estimate whether a minimum amount of light is needed in intensely covered areas to avoid “Gotham city effect“, i.e., when these areas become too dark due to shadowing. For radio network planning, 3D city models are required to configure wireless network services, i.e., to calculate and analyze network coverage and connectivity features. 3D city model visualization often lacks effectiveness and expressiveness. 
For example, if we analyze common 3D views, large areas of the graphical presentations contain useless or even “misused” pixels with respect to information content and transfer (e.g., pixels that represent several hundreds of buildings at once or pixels that show sky). Typical avatar perspectives frequently show too many details at once and do not distinguish between areas in focus and surrounding areas. In this case the perceptual and cognitive quality of visualized virtual 3D city model could be enhanced by cartographic models and semiotic adaptations. For example, we can integrate strongly perceivable landmarks as referencing marks to the real world, which establish more effective presentations and improve efficient interaction. The referencing aspect in depictions of 3D city models is essential: Only correctly perceived elements can fulfill referencing tasks. With misleading perception, the same elements may cause false understanding. This circumstance leads to media-dependent semiotic models that aim at supporting effective transmission of spatial contents. This contribution compares different dynamic 3D visualization approaches for virtual 3D city models and demonstrates applied cartographic techniques to enhance information transfer. The underlying concept is based on a model for 3D semiotics, includes a number of pragmatic components, and presents examples of visualizations of adapted 3D city model visualizations. }, editor = { Manfred Schrenk, Vasily V. 
Popovich, Dirk Engelke, Pietro Elisei }, publisher = { CORP – Competence Center of Urban and Regional Planning }, booktitle = { 13th International Conference on Urban Planning, Regional Development and Information Society (REAL CORP) }, files = { user_upload/fachgebiete/doellner/publications/2008/JD08b/2008-03-15_corp08_jobst_preprint.pdf }, issn = { 978-39502139-5-9 }, sorting = { 256 } } @inproceedings{LD08a, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Modellierung oberfl{\"a}chenbezogener Informationen virtueller 3D Stadtmodelle in CityGML }, year = { 2008 }, volume = { 41 }, pages = { 69--82 }, abstract = { Dieser Beitrag beschreibt die Modellierung oberflächenbezogener Daten für geometrische Objekte in CityGML, einem XML-basierten Standardentwurf für den Austausch und die Speicherung von virtuellen 3D-Stadtmodellen. Im Unterschied zu 2D-Spezifikationen von geometrischen Objekten, denen sich im Allgemeinen direkt, z. B. eckpunkt- oder objektbezogen, Fachinformationen zuordnen lassen, ist eine derartige Zuordnung raumbezogener Fachinformationen zu dreidimensionalen Geometrien schwieriger. Oberflächenbezogene Daten ergeben sich z. B. aus Messungen (z. B. Photographie oder Infrarot-Aufnahmen), Simulationen (z. B. Lärmausbreitung) oder raumbezogenen Datenbanken (z. B. Fassadenmaterial und -eigenschaften). Eine Möglichkeit einer Zuordnung liegt darin, die Oberflächen der geometrischen Objekte als Träger dieser Fachinformationen zu verwenden. Daher ist es notwendig, diese Fachinformationen in Bezug zu den Oberflächen der geometrischen Objekte des 3D-Stadtmodells zu speichern. Ein der Signaturierung aus der Kartografie vergleichbares Konzept, welches lediglich die Präsentation von Objektkategorien losgelöst von den eigentlichen Objekten beschreibt, reicht dafür nicht aus. Stattdessen müssen solche oberflächenbezogenen Daten als Teil des 3D-Stadtmodells modelliert werden. 
Dieser Beitrag geht insbesondere auf Entwicklungsentscheidungen ein, um das in CityGML verwendete Datenmodell nachvollziehbar und transparent zu machen. Dazu werden die konkreten Ziele beschrieben, unterschiedliche Modellierungsansätze bewertet, das Datenmodell erläutert und das Ergebnis diskutiert. }, publisher = { Verlag des Bundesamtes f{\"u}r Kartographie und Geod{\"a}sie }, booktitle = { Mitteilungen des Bundesamtes f{\"u}r Kartographie und Geod{\"a}sie }, project = { NFG }, issn = { 1436-3445 }, sorting = { 512 } } @inproceedings{JD08, author = { Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Better Perception of 3D-Spatial Relations by Viewport Variations }, year = { 2008 }, volume = { 5188/2008 }, pages = { 7--18 }, abstract = { Computer-graphics and especially virtual 3D worlds evolve to important tools for digital cartography, where the main aim of efficient spatial communication rules processes of conception, design and dissemination. This paper investigates the enhancement of visual spatial relations in virtual 3D worlds, which’s limitation on standard displays is a main drawback of digital cartography. The main limitations of digital cartography concern the extension of viewing plane and its resolution, which have impact on information depth of the map content, transmitting an overview, thus the highlighting of spatial relations and additional request for cognitive load. These drawbacks are not only limited to 2D maps, but are also existing in virtual 3D environments, where additional geometric characteristics, like perspective distortions, multiple scales or overriding, may influence a correct extraction of spatial-related content. On the other hand these specific geometric disadvantages should be formulated as benefit of 3D, especially when infinite numbers of scale can be combined in a “natural way” or spatial content becomes extracted by naïve interaction. 
One main limitation of digital presentations generally persists: the limitation of the presentation area on standard displays, which leads to a very restricted overview and fewer visible relations of spatial content. View-port variations that modify perspective and/or orthographic projections are one possibility to enhance rendering methods in a way that the main disadvantages of regular perspective views become decreased and the perceptibility for an overview and spatial relations expanded. These variations cover progressive and degressive central-perspectives as well as progressive and degressive “parallel-perspectives”, which provide very specific characteristics in use with spatial information transmission. This contribution focuses on enhancing virtual spatial relations in 3D environments by using view-port variations, that modify perspective and orthographic views in a progressive and degressive way. Provided that standard displays deliver significant limitations for effective and expressive geo-communication with virtual 3D environments, offers by 3D cartography are discussed. An exemplary comparison of actual 3D city models allows to identify “dead values” and gives one clue for the requested modification. The description and exemplary visualization of view-port varia-tions lead to their theoretical communication aspects, which will guide pragmatic (user) studies in future. }, note = { 10th International Conference, VISUAL 2008, Salerno, Italy }, keywords = { 3D geovisualization, geo-mediatechnique, perception, graphical design, geo-communication }, publisher = { Springer }, series = { Lecture Notes in Computer Science }, booktitle = { Visual Information Systems. 
Web-Based Visual Information Search and Management }, files = { user_upload/fachgebiete/doellner/publications/2008/JD08/2008-05-21_viewport variations_jobst-doellner_preprint.pdf }, doi = { 10.1007/978-3-540-85891-1_4 }, sorting = { 768 } } @inproceedings{JD08a, author = { Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Neo-Cartographic Influence on Map Communication in LBS }, year = { 2008 }, pages = { 207--219 }, abstract = { Neo-cartography spans ubiquitous cartography, user participation and considerations for geo-media techniques. This new expansion of multimedia and internet cartography combines the latest Web developments with traditional cartography and imagery research. Therefore the focus within the conceptual communication model shifts and leads to a separate investigation of information-carrier and information-content. At least this separation helps to prepare geospatial data effi ciently, especially within virtual 3D presentation methods. But it is also a crucial aspect of geo-media techniques whenever detailed information is put on lower resolution interfaces. This contribution introduces the notion neo-cartography, discusses the mutation of importance within the conceptual communication model and explains its consequences for geo-media techniques in LBS. 
}, keywords = { neo-cartography, communication models, mass-communication, dead information pixels, geomedia-techniques }, editor = { Georg Gartner and Karl Rehrl }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Location Based Services and TeleCartography II, From Sensor Fusion to Context Models }, files = { user_upload/fachgebiete/doellner/publications/2008/JD08a/2008-09-23_lbs08_jobst-doellner_preprint.pdf }, doi = { 10.1007/978-3-540-87393-8_13 }, sorting = { 1280 } } @inproceedings{JD08c, author = { Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { A Conceptual Cartographic Heritage Architecture }, year = { 2008 }, abstract = { The development of digital cartography, geospatial services and productive recording methods allows for detailed worldwide analysis, global earth documentation and new forms of cartographic products, which integrate users´ recordings in combination with remote sensing and digital cartography. By integration of geospatial web-services a global network and basis for unusual maps calls for cartographic attention. This network does not only focus on “fresh” data and information, but also includes historic documents and ancient spatial sources. The precondition to use dissemination methods like these is digitalisation: geospatial webservices and digital cartography rely on digital source-material. Thus interesting sources have to be digitized, referenced and anyhow provided/incorporated for access. With this step of digitalisation and dissemination, new aspects for a cartographic heritage occur: How to archive digital cartographic heritage? Is it worth to define digital artworks or geospatial webservices as cartographic heritage? If we agree that digital cartography is some kind of cartographic heritage of the future, we have to consider procedures, dependencies and strategies to keep this heritage accessible. 
Dissemination is only one keyfactor to keep the knowledge of cartographic heritage. Technical procedures are another one. }, booktitle = { 3rd International Workshop on Digital Approaches to Cartographic Heritage }, sorting = { 1792 } } @inproceedings{JKD08, author = { Jobst, Markus and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Mechanisms on Graphical Core Variables in the Design of Cartographic 3D City Presentations }, year = { 2008 }, pages = { 45--59 }, abstract = { Virtual 3D city models are increasingly used in geospatial planning and discussion processes. This is one reason that the effectiveness and expressiveness of the presentation form has to be investigated from a cartographic point of view. This investigation reaches from recording and modelling procedures for 3D city models to a semiotic model, which has to be adapted for the use of 3D. This contribution focuses on mechanisms on graphical core variables that play an important role for the precise geospatial information transmission with 3D. Methods of non-photorealistic rendering can be combined with cartographic requirements. By this means a new potential for the graphic design of cartographic 3D city presentations can be shown. }, keywords = { virtual 3D - non-photorealistic rendering - effectiveness - expressiveness }, editor = { Moore, Antoni and Drecki, Igor }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Geospatial Vision }, files = { user_upload/fachgebiete/doellner/publications/2008/JKD08/2008-09_mechanisms on graphical core variables_jobst_geocart08_preversion.pdf }, doi = { 10.1007/978-3-540-70970-1_3 }, sorting = { 2048 } } @inproceedings{MTKDEPBH08, author = { Maass, Stefan and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen and Eichhorn, M. and Pokorski, Rafael and B{\"a}uerlein, Johannes and Hesberg, H. v. 
}, title = { Techniques For The Interactive Exploration Of High-Detail 3D Building Reconstruction Using The Example Of Roman Cologne }, year = { 2008 }, pages = { 223--229 }, abstract = { This paper presents the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. We describe a content creation pipeline established to enable a flexible exchange and enhancement of building models, the applied optimization techniques necessary for real-time rendering, and the design of an application framework that enables the coupling of 3D visualizations with additional information in corresponding Adobe® Flash® widgets. Furthermore, we expose challenges arising by incorporating state-of-the-art visualization techniques, such as cut-away views, non-photorealistic rendering (NPR), and automated label placement. These techniques are used to enhance the interactive 3D environments, to enable for the presentation of interior structures, the precise communication what is hypothetic and what proven knowledge, and the integration of meta information. }, keywords = { High-detail 3D Models, Virtual Reality, Real-Time 3D Visualization, Roman Cologne }, editor = { M. Ioannides and A. Addison and A. Georgopoulos and L. 
Kalisperis }, publisher = { Archaeolingua }, booktitle = { 14th International Conference on Virtual Systems and Multimedia (VSMM 2008) }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/MTKDEPBH08/vsmm2008..pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=xAqMM6G3de0 }, sorting = { 2816 } } @inproceedings{HD08, author = { Hagedorn, Benjamin and D{\"o}llner, J{\"u}rgen }, title = { Sketch-Based Navigation in 3D Virtual Environments }, year = { 2008 }, pages = { 239--246 }, abstract = { Navigation represents the fundamental interaction technique in 3D virtual environments (3D VEs) as it enables the users to explore the 3D world and to interact with its objects. Efficient navigation strategies and techniques are required, which take account of the users and their goals and avoid problems of general navigation methods, such as ''getting-lost'' situations and confusing view configurations. This paper presents a novel method for specifying and controlling navigation in 3D VEs based on sketching navigation commands. The users sketch their navigation intentions on top of the perspective projection of the 3D scene. The system interprets these sketches regarding their geometry, spatial context, and temporal context. Unlike other sketchy navigation techniques, our approach identifies the hit objects of the underlying 3D scene and takes advantage of their semantics and inherent navigation affordances. The approach has been prototypically implemented for the exploration of a virtual 3D city model with a touch-sensitive display. 
}, publisher = { Springer }, series = { Lecture Notes in Computer Science }, booktitle = { 8th International Symposium on Smart Graphics 2008 }, sorting = { 3072 } } @inproceedings{TGBD08, author = { Trapp, Matthias and Glander, Tassilo and Buchholz, Henrik and D{\"o}llner, J{\"u}rgen }, title = { 3D Generalization Lenses for Interactive Focus + Context Visualization of Virtual City Models }, year = { 2008 }, pages = { 356--361 }, abstract = { Focus + context visualization facilitates the exploration of complex information spaces. This paper proposes 3D generalization lenses, a new visualization technique for virtual 3D city models that combines different levels of structural abstraction. In an automatic preprocessing step, we derive a generalized representation of a given city model. At runtime, this representation is combined with a full-detail representation within a single view based on one or more 3D lenses of arbitrary shape. Focus areas within lens volumes are shown in full detail while excluding less important details of the surrounding area. Our technique supports simultaneous use of multiple lenses associated with different abstraction levels, can handle overlapping and nested lenses, and provides interactive lens modification. 
}, keywords = { 3D Lenses, Focus + Context Visualisation, Virtual 3D City Model, Generalization }, publisher = { IEEE Computer Society Press }, booktitle = { 12th International Conference on IEEE Information Visualization }, project = { NFG }, doi = { 10.1109/IV.2008.18 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/TGBD08/FocusMaps08.pdf }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77164-3D-Generalization-Lenses-Interactive-Focus-Con-12th-Conference-Information-Visualization-LSBU-2008-matthias-trapp-Education-ppt-powerpoint/ }, sorting = { 3328 } } @inproceedings{BVD08, author = { Bohnet, Johannes and Voigt, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Locating and Understanding Features of Complex Software Systems by Synchronizing Time-, Collaboration- and Code-focused Views on Execution Traces }, year = { 2008 }, pages = { 268--271 }, keywords = { software visualization, program comprehension, reverse engineering, feature location }, publisher = { IEEE Computer Society Press }, booktitle = { 16th IEEE International Conference on Program Comprehension }, organization = { IEEE }, sorting = { 3584 }, priority = { 1 } } @inproceedings{MD08, author = { Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Seamless Integration of Labels into Interactive Virtual 3D Environments Using Parameterized Hulls }, year = { 2008 }, pages = { 33--40 }, abstract = { This paper presents an approach for the automated, dynamic placement of labels attached to objects of 3D scenes. These labels are seamlessly integrated into the 3D scene by textured polygons aligned to parameterized hulls, which generalize an object’s geometry. This way, the labels follow the principle shape of the annotated objects, emphasize the label-object relationship, reduce ambiguities of interpretation, and achieve visual concise and aesthetic results. 
The algorithm first sets up candidate positions across the hull. Cascaded filtering and presorting steps reject early improper placement candidates and order the remaining ones according to their quality. Then, they are iteratively tested to find positions that allow for a visible and legible label placement. If more than one position exists, the selection is controlled by a layout strategy, for which we present efficient evaluation techniques and discuss the impact on the aesthetic appearance. As proof of concept, we have implemented a 3D viewer that annotates building models of 3D city models. Compared to view plane-based labeling approaches, our approach achieves a strong visual relation between label and annotated object, treats labels as first-class objects of virtual 3D environments, and offers a high degree of customization and stylization. }, keywords = { Labeling, Annotation, Interactive Virtual 3D Environments }, editor = { P. Brown and D. W. Cunningham and V. Interrante and J. 
MacCormack }, publisher = { The Eurographics Association }, address = { Lisbon, Portugal }, booktitle = { 4th International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2008/MD08/cae2008.pdf }, issn = { 1816-0859 }, sorting = { 3840 } } @inproceedings{BD08, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Analyzing Dynamic Call Graphs Enhanced with Program State Information for Feature Location and Understanding }, year = { 2008 }, pages = { 915--916 }, keywords = { software visualization, feature location, reverse engineering, maintenance }, publisher = { IEEE Computer Society Press }, booktitle = { 30th IEEE International Conference on Software Engineering }, organization = { IEEE }, sorting = { 4864 }, priority = { 1 } } @inproceedings{RKDK08, author = { Ross, Lutz and Kleinschmit, Birgit and D{\"o}llner, J{\"u}rgen and Kegel, Anselm }, title = { Entwicklung von Fl{\"a}cheninformationssystemen auf Basis virtueller 3D-Stadtmodelle }, year = { 2008 }, pages = { 565--570 }, editor = { Manfred Schrenk and Vasily V. 
Popovich and Dirk Engelke and Pietro Elisei }, publisher = { CORP – Competence Center of Urban and Regional Planning }, booktitle = { 13th International Conference on Urban Planning, Regional Development and Information Society (REAL CORP) }, project = { REFINA }, files = { user_upload/fachgebiete/doellner/publications/2008/RKDK08/CORP2008_78.pdf }, isbn = { 978-39502139-5-9 }, sorting = { 5120 } } @inproceedings{LTJD08, author = { Lorenz, Haik and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Interactive Multi-Perspective Views of Virtual 3D Landscape and City Models }, year = { 2008 }, pages = { 301--321 }, abstract = { Based on principles of panorama maps we present an interactive visualization technique that generates multi-perspective views of complex spatial environments such as virtual 3D landscape and city models. Panorama maps seamlessly combine easily readable maps in the foreground with 3D views in the background – both within a single image. Such nonlinear, non-standard 3D projections enable novel focus \& context views of complex virtual spatial environments. The presented technique relies on global space deformation to model multi-perspective views while using a standard linear projection for rendering which enables single-pass processing by graphics hardware. It automatically configures the deformation in a view-dependent way to maintain the multi-perspective view in an interactive environment. The technique supports different distortion schemata beyond classical panorama maps and can seamlessly combine different visualization styles of focus and context areas. We exemplify our approach in an interactive 3D tourist information system. 
}, note = { Best Paper Award }, keywords = { multi-perspective views, focus \& context visualization, global space deformation, virtual 3D city models, virtual 3D landscape models, geovisualization }, editor = { Lars Bernard and Anders Friis-Christensen and Hardy Pundt }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { 11th AGILE International Conference on GI Science }, project = { NFG }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2008/LTJD08/agile08_draft.pdf }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=2bYDKbzocSg }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77168-matthias-trapp-multi-perspective-views-AGILE-2008-focus-context-visualization-globalspace-Education-ppt-powerpoint/ }, sorting = { 5376 } } @inproceedings{RK08, author = { Ross, Lutz and Kleinschmit, Birgit }, title = { Virtuelle 3D-Stadtmodelle im kommunalen Einsatz - Entwicklungen, Trends und Perspektiven }, year = { 2008 }, abstract = { Der Beitrag diskutiert an Hand von Beispielen aus der Praxis und der Forschung gegenwärtige Entwicklungen beim Aufbau und Einsatz von virtuellen 3D-Stadtmodellen für kommunale Anwendungen und leitet daraus aktuelle Trends und Perspektiven ab. Als wesentliche technische Entwicklungen werden die zunehmende Etablierung von CityGML (City Geography Markup Language) als Datenschema für die Modellierung von 3D-Stadtmodellen und erste Beispiele für dienstebasierte Integrationslösungen identifiziert. Im Hinblick auf die Inhalte von 3D-Stadtmodellen wird festgestellt, dass in den meisten Praxisanwendungen bisher auf die Modellierung der Flächeninfrastruktur und der Vegetation verzichtet wird. 
Aufkommende Anwendungen aus dem Bereich der Planung, etwa Planinformationssysteme und Systeme für die Unterstützung von Beteiligungsprozessen oder für die kollaborative Nutzung von 3D-Stadtmodellen durch verschiedene Nutzer über das Internet fordern aber die Integration eben dieser Inhalte. Für die aktive Nutzung von virtuellen 3D-Stadtmodellen in Planungsprozessen sind zukünftig innovative Systeme und Fachfunktionen zu entwickeln. }, booktitle = { 28. Jahrestagung der DGPF }, project = { REFINA }, files = { user_upload/fachgebiete/doellner/publications/2008/RK08/DGPF_Ross_Kleinschmit.pdf }, sorting = { 5632 } } @inproceedings{LD08, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Dynamic Mesh Refinement on GPU using Geometry Shaders }, year = { 2008 }, pages = { 97--104 }, abstract = { This paper presents a real-time rendering technique for dynamic, incremental 3D mesh refinement. The technique can be applied to any triangulated 3D mesh with arbitrary topology and connectivity. The functionality relies on geometry shaders that are used to amplify or remove geometry based on precalculated refinement patterns. For each triangle, the instantiated refinement pattern is selected dynamically. Due to limitations of current hardware, on-the-fly pattern instantiation cannot be implemented on the GPU. Instead, the complete refined mesh must be generated through pattern copying. We propose an incremental approach where the refined mesh is generated by using the previous refined mesh as primitive source. This algorithm runs exclusively on the GPU and requires no continuous data exchange between CPU and GPU. Due to the necessary mesh generation, the approach is particularly suitable for applications with small refinement levels. It complements traditional pattern-based refinement approaches that deliver high throughput for large refinement levels, but incur a substantial CPU-GPU communication overhead otherwise. 
Interesting applications include view-dependent mesh smoothing and interactive non-planar projections. In these areas, our algorithm enables efficient vertex-based implementations due to adaptive refinement. }, booktitle = { 16-th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG) - Full Papers }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2008/LD08/wscg08.pdf }, sorting = { 6144 } } @inproceedings{GTD08, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Konzepte f{\"u}r die Generalisierung von 3D-Geb{\"a}udemodellen }, year = { 2008 }, volume = { 41 }, pages = { 33--45 }, note = { Arbeitsgruppe Automation in Kartographie, Photogrammetrie und GIS }, publisher = { Bundesamt f{\"u}r Kartographie und Geod{\"a}sie }, booktitle = { Mitteilungen des Bundesamtes f{\"u}r Kartographie und Geod{\"a}sie }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2008/GTD08/Glander_Trapp_Doellner-Konzepte_fuer_die_Generalisierung_von_3D-Gebaeudemodellen.pdf }, issn = { 1436-3445 }, sorting = { 7168 } } @inproceedings{JMKD08, author = { Jahnke, Mathias and Meng, Liqiu and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Non-photorealistic Rendering on Mobile Devices and its Usability Concerns }, year = { 2008 }, booktitle = { Virtual Geographic Environments }, organization = { Chinese University of Hong Kong }, sorting = { 7424 } } @inproceedings{GD07, author = { Glander, Tassilo and D{\"o}llner, J{\"u}rgen }, title = { Cell-Based Generalization of 3D Building Groups with Outlier Management }, year = { 2007 }, month = { 11 }, abstract = { In this paper, we present a technique that generalizes 3D building groups of virtual 3D city models according to a cell structure that is derived from infrastructure networks. In addition, the technique handles vegetation areas and outliers such as landmark buildings. 
Generalized 3D representations abstract from complex, detailed 3D city models and enable storage, analysis, exploration, and interaction at varying levels of scales. Our technique implements the cartographic generalization operators clustering, aggregation, and accentuation; it performs the generalization in four steps: 1) City model components are clustered based on the cell structure. 2) For each cell, the weighted average height is calculated, which is also used to automatically identify outliers. 3) Free space is subtracted from the cells such as in the case of outliers or vegetation areas. 4) The modified cells are extruded to building blocks; vegetation areas and outliers are modeled or, respectively, integrated separately. The paper demonstrates the application of the presented technique by a case study. }, keywords = { Generalization, 3D City Models, Clustering, Aggregation, Accentuation, Level-of-Detail, Outliers, Cells }, publisher = { ACM Press }, booktitle = { 15th International Symposium on Advances in Geographic Information Systems (ACM GIS) }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/GD07/glander_doellner_generalization_posterPaper_final_draft.pdf }, doi = { 10.1145/1341012.1341078 }, sorting = { 256 } } @inproceedings{TD07, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Automated Combination of Real-Time Shader Programs }, year = { 2007 }, pages = { 53--56 }, month = { 9 }, abstract = { This work proposes an approach for automatic and generic runtime-combination of high-level shader programs. Many of recently introduced real-time rendering techniques rely on such programs. The fact that only a single program can be active concurrently becomes a main conceptual problem when embedding these techniques into middleware systems or 3D applications. 
Their implementations frequently demand for a combined use of individual shader functionality and, therefore, need to combine existing shader programs. Such a task is often timeconsuming, error-prone, requires a skilled software engineer, and needs to be repeated for each further extension. Our extensible approach solves these problems efficiently: It structures a shader program into code fragments, each typed with a predefined semantics. Based on an explicit order of those semantics, the code fragments of different programs can be combined at runtime. This technique facilitates the reuse of shader code as well as the development of extensible rendering frameworks for future hardware generations. We integrated our approach into an object-oriented high-level rendering system. }, editor = { P. Cignoni and J. Sochor }, publisher = { The Eurographics Association }, booktitle = { Eurographics 2007 Shortpaper }, organization = { Eurographics }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/TD07/ShaderCombination_MatthiasTrapp.pdf,user_upload/fachgebiete/doellner/publications/2007/TD07/matthias_trapp_shader_combination_EG_2007_poster.pdf }, issn = { 1017-4656 }, sorting = { 1024 } } @inproceedings{NGD07, author = { Nienhaus, Marc and Gooch, Bruce and D{\"o}llner, J{\"u}rgen }, title = { Light threads: illustrating movement dynamics in city models }, year = { 2007 }, pages = { 1 }, publisher = { ACM Press }, booktitle = { ACM SIGGRAPH sketches }, doi = { 10.1145/1278780.1278782 }, sorting = { 64 } } @inproceedings{HMD07, author = { Hagedorn, Benjamin and Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Chaining Geoinformation Services for the Visualization and Annotation of 3D Geovirtual Environments }, year = { 2007 }, abstract = { Visualization of 3D geovirtual environments enables users to gain insight into and to interactively operate with complex 3D geoinformation as well as to explore and analyze underlying structures, 
relationships, and associated thematic georeferenced data. It represents a key functionality for a growing number of applications and systems such as for way-finding, urban city planning, geo-marketing, and tourist information. For the systematic implementation of these systems, geoinformation services provide an interoperable and standardized way to access, process, and view distributed, heterogeneous 2D and 3D geospatial data. To broaden the use and scope of 3D geovirtual environments, we have to support the management and visualization of user-defined or application-defined data associated with geospatial objects. For this purpose, we can embed annotations such as texts or symbols as meta elements into images of 3D geovirtual environments. For example, annotations can represent georeferenced comments added interactively by the user to explicitly denote spatial objects and locations the comments refer to. This paper presents the design of a chain of web services for the creation of annotated perspective views. First, a Web Perspective View Service (WPVS) is outlined that synthesizes images of a given 3D geovirtual environment and delivers the images to the service consumer. Second, the Web View Annotation Service (WVAS) is introduced that processes these images as input along with a set of spatial locations and content definitions. To ensure high legibility, annotations are embedded into the view plane and placed such that they avoid occlusions among themselves and with geospatial objects of the scene. As result of this service composition, the service consumer receives an enhanced image depicting the current perspective view with seamlessly embedded annotations. We describe the design and system architecture by the example of a prototypic interactive campus information system. 
}, booktitle = { 4th International Symposium on LBS and Telecartography }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/HMD07/hmd_telecarto2007_draft.pdf }, sorting = { 128 } } @inproceedings{DH07, author = { D{\"o}llner, J{\"u}rgen and Hagedorn, Benjamin }, title = { Integrating Urban GIS, CAD, and BIM Data By Service-Based Virtual 3D City-Models }, year = { 2007 }, pages = { 157--170 }, abstract = { In this paper we describe how urban data from different system and application domains such as Computer Aided Design (CAD), Geographic Information Systems (GIS), and Building Information Models (BIM) can be integrated by a service-based virtual 3D city model system. The 3D viewer client allows users to access, to import, and to integrate semantic-enhanced information models from the CAD, GIS, and BIM domain that are provided within a service-based geodata infrastructure. The 3D viewer client represents the core component that implements a number of adaptors for various OGC web service types, manages the resulting virtual 3D city model based on CityGML, and can act itself as a higher-level service delivering integrated information. This approach shows how urban data from different scales, different domains, and different stakeholders can be seamlessly integrated at visualization level and how corresponding services can be setup. The work is based on our development of an interoperable 3D viewer client within the CAD/GIS/BIM thread of the Web Services Initiative Phase 4 of the Open Geospatial Consortium. }, editor = { Massimo Rumor and Volker Coors and Elfriede M. 
Fendel and Sisi Zlatanova }, publisher = { Taylor \& Francis Ltd }, address = { Stuttgart, Germany }, booktitle = { Urban and Regional Data Management: UDMS 2007 Annual }, sorting = { 512 } } @inproceedings{MD07, author = { Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Embedded Labels for Line Features in Interactive 3D Virtual Environments }, year = { 2007 }, pages = { 53--59 }, abstract = { This paper presents a novel method for labeling line features in interactive virtual 3D environments. It embeds labels into the surfaces of the annotated objects, whereas occlusion by other scene elements is minimized and overlaps between labels are resolved. Embedded labels provide a high correlation between label and annotated object – they are specifically useful in environments, where available screen-space for annotations is limited (e.g., small displays). To determine optimal positions for the annotation of line features, the degree of occlusion for each position is estimated during the realtime rendering process. We discuss a number of sampling schemes that are used to approximate the visibility measure, including an adapted variant that is particularly suitable for the integration of text based on Latin alphabets. Overlaps between embedded labels are resolved with a conflict graph, which is calculated in a preprocessing step and stores all possible overlap conflicts. To prove the applicability of our approach, we have implemented a prototype application that visualizes street names as embedded labels within a 3D virtual city model in real-time. 
}, keywords = { Labeling, Annotation, Interactive Virtual 3D Environments }, publisher = { ACM Press }, address = { New York, NY, USA }, booktitle = { 5th International Conference on Computer Graphics, Virtual Reality, Visualization and Interaction in Africa (ACM AFRIGRAPH 2007) }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/MD07/maass - embedde labels for line features in interactive 3d environments - [ACM Afrigraph 2007].pdf }, isbn = { 978-1-59593-906-7 }, doi = { 10.1145/1294685.1294695 }, sorting = { 768 } } @inproceedings{MJD07, author = { Maass, Stefan and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Depth Cue of Occlusion Information as Criterion for the Quality of Annotation Placement in Perspective Views }, year = { 2007 }, pages = { 473--486 }, abstract = { In cartography and computational geometry, concepts and techniques for automated label placement have been developed for two-dimensional maps. Less is known whether these methods can be applied to annotate geovirtual 3D environments. In this paper we discuss the application of these methods within geovirtual 3D environments and investigate the effects that can possibly harm the information transfer. To achieve high quality labeling readability, visibility, and the unambiguous correlation to the reference have to be ensured. Illustrated by examples, we show that perspective attributes inherently encoded in a depiction have to be considered as well. In particular, we focus on overriding occlusion information by added annotations and the impact on the complexity of the cognition process. A user test verifies our hypothesis that this disturbance is actually noticeable by users. 
}, keywords = { annotation, labeling, depth-cues, 3d geo-virtual environments }, editor = { Sara Irina Fabrikant and Monica Wachowicz }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { The European Information Society - Leading the Way with Geo-information }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/MJD07/2007-agile.pdf }, issn = { 1863-2246 }, sorting = { 1280 } } @inproceedings{BD07, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Facilitating Exploration of Unfamiliar Source Code by Providing 2-1/2-D Visualizations of Dynamic Call Graphs }, year = { 2007 }, pages = { 63--66 }, keywords = { software visualization, program understanding }, publisher = { IEEE Computer Society Press }, booktitle = { 4th IEEE International Workshop on Visualizing Software for Understanding and Analysis }, organization = { IEEE }, sorting = { 1536 }, priority = { 1 } } @inproceedings{BD07a, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { CGA Call Graph Analyzer - Locating and Understanding Functionality within the Gnu Compiler Collection's Million Lines of Code }, year = { 2007 }, pages = { 161--162 }, keywords = { software visualization, program understanding }, publisher = { IEEE Computer Society Press }, booktitle = { 4th IEEE International Workshop on Visualizing Software for Understanding and Analysis }, organization = { IEEE }, sorting = { 1792 }, priority = { 1 } } @inproceedings{RKDB07, author = { Ross, Lutz and Kleinschmit, Birgit and D{\"o}llner, J{\"u}rgen and Buchholz, Henrik }, title = { Creation and Management of Object-Based Terrain Models }, year = { 2007 }, pages = { 273--282 }, editor = { Manfred Schrenk and Vasily V. 
Popovich and Josef Benedikt }, publisher = { CORP – Competence Center of Urban and Regional Planning }, booktitle = { 12th International Conference on Urban Planning, Regional Development and Information Society (REAL CORP) }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/RKDB07/Ross_Kleinschmit_Doellner_Buchholz_CORP2007.pdf }, isbn = { 978-39502139-3-5 }, sorting = { 2048 } } @inproceedings{RKDK07, author = { Ross, Lutz and Kleinschmit, Birgit and D{\"o}llner, J{\"u}rgen and Kegel, Anselm }, title = { Geovirtual Urban Environments as Media for the Communication of Information related to Managing Urban Land }, year = { 2007 }, pages = { 577--582 }, abstract = { This contribution outlines a research project targeting at the utilization of virtual 3D city models in the context of urban land management. It explains the projects background and the research concept as well as basic methods, which will be applied during the project runtime. The project will build upon findings in environmental and urban planning, computer graphics science, and geoinformation science; it contributes to a German research framework (REFINA) aiming at the development of guidelines, best practice examples, and tools for the reduction of land consumption. }, publisher = { Bundesministerium f{\"u}r Bildung und Forschung }, booktitle = { 2nd International Conference on Managing Urban Land - Towards More Effective And Sustainable Brownfield Revitalisation Policies. 
}, project = { REFINA }, files = { user_upload/fachgebiete/doellner/publications/2007/RKDK07/Revit2007_paper_draft.pdf }, isbn = { 978-3-934409-33-4 }, sorting = { 2304 } } @inproceedings{BD07b, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Visually Exploring Control Flow Graphs to Support Legacy Software Migration }, year = { 2007 }, pages = { 245--246 }, keywords = { software visualization, program understanding }, publisher = { Springer }, series = { GI-Edition Lecture Notes in Informatics }, booktitle = { Conference on Software Engineering (SE) }, organization = { GI }, sorting = { 2560 }, priority = { 1 } } @inproceedings{BD07c, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Planning an Experiment on User Performance for Exploration of Diagrams Displayed in 2 1/2 Dimensions }, year = { 2007 }, pages = { 223--230 }, keywords = { software visualization, program understanding }, publisher = { Springer }, series = { GI-Edition Lecture Notes in Informatics }, booktitle = { Workshop on SE07 - Empirische Untersuchungen von Visualisierungswerkzeugen zur Software-Analyse }, organization = { GI }, sorting = { 3072 }, priority = { 1 } } @inproceedings{KD07, author = { Kegel, Anselm and D{\"o}llner, J{\"u}rgen }, title = { Photorealistische Echtzeit-Visualisierung geovirtueller Umgebungen - Ausarbeitung }, year = { 2007 }, abstract = { Dieser Beitrag präsentiert eine Reihe aktueller Rendering-Verfahren und deren Einsatz in der Visualisierung von interaktiven, virtuellen 3D Stadt- und Landschaftsmodellen. Er erhebt nicht den Anspruch eines technischen Artikels, in dem Details dieser Verfahren erläutert werden, vielmehr soll es einen groben Einblick in die Thematik eröffnen und die Möglichkeiten der 3D-Echtzeit-Grafik für die Visualisierung aktueller und zukünftiger Stadtmodelle aufzeigen. 
Die behandelten Themen umfassen das Rendering natürlicher Phänomene wie Wasser und Himmel, verschiedene Arten von Vegetation sowie physikalisch orientierte Beleuchtungsberechnungen. Die Rendering-Verfahren dienen in erster Linie, jedoch nicht ausschließlich, der Steigerung der visuellen Qualität geovirtueller Umgebungen und bilden eine technische Basis für die Entwicklung zukünftiger interaktiver 3D-Anwendungen und -Systeme. }, booktitle = { Arbeitsgruppe Automation in Kartographie, Photogrammetrie und GIS (AgA) }, organization = { Bundesamt f{\"u}r Kartographie und Geod{\"a}sie }, project = { REFINA }, files = { user_upload/fachgebiete/doellner/publications/2007/KD07/AgA 2007 - Photorealistische Echtzeit-Visualisierung geovirtueller Umgebungen.pdf }, sorting = { 3328 } } @inproceedings{HD07, author = { Hagedorn, Benjamin and D{\"o}llner, J{\"u}rgen }, title = { High-Level Web Service for 3D Building Information Visualization and Analysis }, year = { 2007 }, pages = { 1--8 }, abstract = { This paper presents an approach to visualize and analyze 3D building information models within virtual 3D city models. Building information models (BIMs) formalize and represent detailed information related to the lifecycle of buildings, e.g., information about composition, facilities, equipment, usage, maintenance, and workflows such as rescue scenarios. Complementary, virtual 3D city models represent objects and phenomena of urban areas, typically at a lower level of information detail; virtual 3D city models as a general framework and platform for spatial data allow us to seamlessly combine GIS and BIM data. In our approach, BIM data and 3D geodata, both provided by possibly distributed, heterogeneous web services, are efficiently integrated by the underlying real-time 3D geovisualization system. 
To facilitate insights into complex spatial scenarios, two configurable BIM-specific visualization techniques have been developed, which map BIM data onto 3D building graphics variables respectively geometrically distort 3D building representations. The visualization functionality can itself be accessed as a specialized web 3D perspective view service. We demonstrate our approach by a fire and rescue scenario for a part of a 3D campus model. }, publisher = { ACM Press }, address = { Seattle, WA }, booktitle = { 15th International Symposium on Advances in Geographic Information Systems (ACM GIS) }, sorting = { 3584 } } @inproceedings{GTD07a, author = { Glander, Tassilo and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { A Concept of Effective Landmark Depiction in Geovirtual 3D Environments by View-Dependent Deformation }, year = { 2007 }, abstract = { Landmarks represent elements of geovirtual 3D environments with outstanding importance for user orientation. Especially, they facilitate navigation and exploration within virtual 3D city models. This paper presents a novel concept for the real-time depiction of landmarks that effectively emphasizes these 3D objects by improving their visibility with respect to their surrounding areas and the current 3D viewing settings. The concept is based on scaling landmark geometry according to an importance function while simultaneously adjusting the corresponding surrounding region. The amplification of landmarks takes into account the current camera parameters. To reduce visual artifacts caused by this multi-scale presentation, e.g., geometry intersections, the surrounding objects of each landmark are adapted according to a deformation field that encodes the displacement and scaling transformations. An individual weight coefficient can be defined that denotes the landmark’s importance. 
To render a collection of weighted landmarks within a virtual 3D city model, the technique accumulates their associated, weighted deformation fields in a view-dependent way. Our concept provides a flexible solution for the importance-driven enhancement of objects within interactive geovirtual 3D environments and aims at improving the perceptual and cognitive quality of their display. In particular, the concept can be applied to systems and applications that use abstracted, generalized virtual 3D city models such as in the fields of car and pedestrian navigation, disaster management, and spatial data mining. }, note = { CD proceedings }, keywords = { Visualization, Smart Environments and Landmarks, Navigation Systems }, booktitle = { 4th International Symposium on LBS and Telecartography }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/GTD07a/LBS2007_Glander_Trapp_Doellner_Landmark_visualization_draft.pdf }, sorting = { 3840 } } @inproceedings{MJD07a, author = { Maass, Stefan and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Use of Depth Cues for the Annotation of 3D Geo-Virtual Environments }, year = { 2007 }, abstract = { An increasing number IT applications and systems applies 3D geo-virtual environments as user interfaces, that facilitate the intuitive communication of spatial information, whereby depth cues play an important role for mental reconstruction of the depicted scene. These applications require the integration of application-specific information, represented by annotations such as textual labels or symbols, in analogy to labeling in traditional 2D maps. This contribution presents first results on how to improve information transfer with respect to annotations used within. We found that automated annotation techniques, used in traditional 2D map presentations, if applied to 3D geo-virtual environments, can damage depth cues and, thereby, possibly harm the human perception. 
Illustrated by examples, we show the override of different depth cues and discuss possible solutions. Finally, we propose a number of user tests, whose results are needed to improve the quality of automatic label placement techniques in the future. }, note = { CD proceedings }, address = { Moskow, Russia }, booktitle = { 23rd International Cartographic Conference }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2007/MJD07a/maass-icc2007-final.pdf }, sorting = { 4352 } } @inproceedings{BPMD07, author = { B{\"a}uerlein, Johannes and Pokorski, Rafael and Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Visualization Project of Roman Cologne - How to Make VR models Available for Scientific Work }, year = { 2007 }, pages = { 121--126 }, editor = { Posluschny, Axel and Lambers, Karsten and Herzog, Irmela }, publisher = { Dr. Rudolf Habelt GmbH }, booktitle = { Layers of Perception. Proceedings of the 35th International Conference on Computer Applications and Quantitative Methods in Archaeology (CAA) }, project = { NFG }, sorting = { 4864 } } @inproceedings{WBDA07, author = { Markus Wolff, Henrik Buchholz, Jürgen Döllner, Hartmut Asche }, title = { Geovisualisierungsmethoden zur Gefahrenabwehr auf Basis von 3D-Stadtmodellen }, year = { 2007 }, address = { Königslutter }, booktitle = { Tagungsband Symposium der DGfK }, project = { NFG }, sorting = { 8 } } @inproceedings{DBB06, author = { D{\"o}llner, J{\"u}rgen and Baumann, Konstantin and Buchholz, Henrik }, title = { Virtual 3D City Models as Foundation of Complex Urban Information Spaces }, year = { 2006 }, pages = { 107--112 }, editor = { Manfred Schrenk }, publisher = { CORP – Competence Center of Urban and Regional Planning }, booktitle = { 11th international conference on Urban Planning and Spatial Development in the Information Society (REAL CORP) }, files = { user_upload/fachgebiete/doellner/publications/2006/DBB06/CORP2006_doellner.pdf }, sorting = { 8 } } @inproceedings{DKLST06, 
author = { D{\"o}llner, J{\"u}rgen and Kolbe, Thomas H. and Liecke, Falko and Sgouros, Takis and Teichmann, Karin }, title = { The Virtual 3D City Model of Berlin - Managing, Integrating, and Communicating Complex Urban Information }, year = { 2006 }, note = { Online proceedings }, booktitle = { 25th Urban Data Management Symposium (UDMS) }, sorting = { 16 } } @inproceedings{NKD06, author = { Nienhaus, Marc and Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { Illustrating Design and Spatial Assembly of Interactive CSG }, year = { 2006 }, pages = { 91--98 }, abstract = { Illustrating in a sketchy manner is essential to communicate visual ideas and can be used to present and reconsider drafts and concepts in product design. This paper introduces a real-time illustration technique that sketches the design and spatial assembly of CSG models. The illustration technique generates a graphical decomposition of the CSG model into disjunctive layers to extract 1) the perceptually important edges that outline the model’s outer and inner features and 2) the surface shading of the outer and inner faces. Then, the technique applies uncertainty to these layers to simulate a sketchy effect. Finally, the technique composes the sketched layers in depth-sorted order while ensuring a correct depth behavior in the frame buffer. Because the sketchy illustrations are frame-to-frame coherent, the technique can be used as a tool for interactive presentation and reconsideration of the design and spatial assembly of CSG models. }, keywords = { Illustrative Visualization, NPR }, editor = { Stephen N. 
Spencer }, publisher = { ACM Press }, booktitle = { 4th International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa (ACM AFRIGRAPH) }, files = { user_upload/fachgebiete/doellner/publications/2006/NKD06/2006_IV_nienhaus.pdf }, issn = { 1-59593-288-7 }, sorting = { 32 } } @inproceedings{BDRK06, author = { Buchholz, Henrik and D{\"o}llner, J{\"u}rgen and Ross, Lutz and Kleinschmit, Birgit }, title = { Automated Construction of Urban Terrain Models }, year = { 2006 }, pages = { 547--562 }, abstract = { Elements of urban terrain models such as streets, pavements, lawns, walls, and fences are fundamental for effective recognition and convincing ap-pearance of virtual 3D cities and virtual 3D landscapes. These elements complement important other components such as 3D building models and 3D vegetation models. This paper introduces an object-oriented, rule-based and heuristic-based approach for modeling detailed virtual 3D terrains in an automated way. Terrain models are derived from 2D vector-based plans based on generation rules, which can be controlled by attributes assigned to 2D vector elements. The individual parts of the resulting urban terrain models are represented as “first-class” objects. These objects remain linked to the underlying 2D vector-based plan elements and, therefore, preserve data semantics and associated thematic information. With urban terrain models, we can achieve high-quality photorealistic 3D geovirtual environments and support interactive creation and manipulation. The automated construction represents a systematic solution for the bi-directional linkage of 2D plans and 3D geovirtual environments and over-comes cost-intensive CAD-based construction processes. The approach both simplifies the geometric construction of detailed urban terrain models and provides a seamless integration into traditional GIS-based workflows. 
The resulting 3D geovirtual environments are well suited for a variety of applications including urban and open-space planning, information sys-tems for tourism and marketing, and navigation systems. As a case study, we demonstrate our approach applied to an urban development area of downtown Potsdam, Germany. }, editor = { Andreas Riedl and Wolfgang Kainz and Gregory Elmes }, publisher = { Springer }, booktitle = { Progress in Spatial Data Handling. 12th International Symposium on Spatial Data Handling (SDH) }, files = { user_upload/fachgebiete/doellner/publications/2006/BDRK06/smartterrain_sdh2006.pdf }, sorting = { 64 } } @inproceedings{NKD06a, author = { Nienhaus, Marc and Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { Sketchy Illustrations for Presenting the Design of Interactive {CSG} }, year = { 2006 }, pages = { 772--777 }, publisher = { IEEE Computer Society Press }, booktitle = { IV }, sorting = { 128 } } @inproceedings{BD06, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Visual Exploration of Function Call Graphs for Feature Location in Complex Software Systems }, year = { 2006 }, pages = { 95--104 }, keywords = { software visualization, program understanding }, publisher = { ACM Press }, booktitle = { ACM Symposium on Software Visualization }, organization = { ACM }, sorting = { 256 }, priority = { 1 } } @inproceedings{MD06, author = { Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Efficient View Management for Dynamic Annotation Placement in Virtual Landscapes }, year = { 2006 }, volume = { 4073 }, pages = { 1--12 }, abstract = { We present a dynamic placement technique for annotations of virtual landscapes that is based on efficient view management. Annotations represent textual or symbolic descriptions and provide explanatory or thematic information associated with spatial positions. 
The technique handles external annotations as 2.5 dimensional objects and adjusts their positions with respect to available space in the view-plane. The approach intends to place labels without occlusions and, if this cannot be achieved, favors those annotations that are close to the observer. This technique solves the visibility problem of annotations in an approximate but user-centric way. It operates in real-time and therefore can be applied to interactive virtual landscapes. Additionally, the approach can be configured to fine tune the trade off between placement quality and processing time with a single parameter. }, editor = { Andreas Butz and Brian Fischer and Antonio Kr{\"u}ger and Patrick Oliver }, publisher = { Springer }, series = { Lecture Notes in Computer Science }, booktitle = { 6th International Symposium on Smart Graphics 2006 }, files = { user_upload/fachgebiete/doellner/publications/2006/MD06/2006-sg-maass.pdf }, sorting = { 512 } } @inproceedings{BD06a, author = { Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Analyzing Feature Implementation by Visual Exploration of Architecturally-Embedded Call-Graphs }, year = { 2006 }, pages = { 41--48 }, keywords = { software visualization, program understanding }, publisher = { ACM Press }, booktitle = { 4th International Workshop on Dynamic Analysis }, organization = { ACM }, sorting = { 768 }, priority = { 1 } } @inproceedings{MD06a, author = { Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Dynamic Annotation of Interactive Environments using Object-Integrated Billboards }, year = { 2006 }, pages = { 327--334 }, abstract = { We present a technique for the dynamic annotation of three-dimensional objects in interactive virtual environments. Annotations represent textual or symbolic descriptions providing explanatory or thematic information associated with scene objects. 
In contrast to techniques that treat annotations as two-dimensional view-plane elements, our technique models annotations as separate three-dimensional scene elements that are automatically positioned and oriented according to the shape of the referenced object. The shape of such an object is generalized by an annotation hull and skeleton used to determine an adequate position and orientation of the annotation with respect to the viewing direction. During camera movements, annotations float along the surface of the annotation hull. Additional constraints for the generalizations provide further control about geometric and dynamical properties. In a case study, we show how this technique can be applied for annotating buildings and other components of virtual 3D city models. }, editor = { Joaquim Jorge and Vaclav Skala }, address = { Plzen, Czech Republic }, booktitle = { 14th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG) }, files = { user_upload/fachgebiete/doellner/publications/2006/MD06a/2006-maass-wscg.pdf }, sorting = { 1024 } } @inproceedings{DNB06, author = { D{\"o}llner, J{\"u}rgen and Nienhaus, Marc and Buchholz, Henrik }, title = { Potenziale nichtphotorealistischer 3D-Darstellungen f{\"u}r Geoinformationen }, year = { 2006 }, booktitle = { Symposium Praktische Kartographie }, sorting = { 1280 } } @inproceedings{MD06b, author = { Maass, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Ein Konzept zur dynamischen Annotation virtueller 3D-Stadtmodelle }, year = { 2006 }, volume = { 10 }, pages = { 19--26 }, abstract = { Mit der zunehmenden automatisierten Herstellung und Verfügbarkeit virtueller 3D-Stadtmodelle ergibt sich eine wachsende Zahl von Anwendungsfeldern. 
Neben der Visualisierung klassischer Komponenten von Stadtmodellen – zum Beispiel Geländeoberflächen, Gebäude, Straßenraum-Elemente, Vegetation – spielt die Integration von Annotationen eine entscheidende Rolle, um die Darstellung dieser Modelle mit anwendungsspezifischen Daten, Hinweisen und Kommentaren zu erweitern und somit ihre fachspezifische Nutzung zu ermöglichen. Im einfachsten Fall kann dies zum Beispiel bedeuten, Gebäudemodelle einzeln zu beschriften. Der Beitrag charakterisiert Annotationen im Kontext virtueller Stadtmodelle und stellt ein Verfahren zur dynamischen objektintegrierten Annotation von Gebäuden vor. }, publisher = { Kirschbaum Verlag, Bonn }, address = { Potsdam, Germany }, booktitle = { Kartographische Schriften - Aktuelle Entwicklungen in Geoinformation und Visualisierung (GeoVis) }, organization = { Deutsche Gesellschaft f{\"u}r Kartographie e.V. }, files = { user_upload/fachgebiete/doellner/publications/2006/MD06b/2006-geovis-maass.pdf }, sorting = { 1792 } } @inproceedings{BBD05a, author = { Buchholz, Henrik and Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Smart Navigation Strategies for Virtual Landscapes }, year = { 2005 }, pages = { 124--131 }, abstract = { Navigation is a key factor for user acceptance of virtual 3D landscapes. Existing geovirtual environments (GeoVEs) frequently suffer from the lack of a proper handling and prevention of confusing or disorientating situations. 
As FUHRMANN \& MACEACHREN (2001) point out
Proceedings at Anhalt University of Applied Sciences }, files = { user_upload/fachgebiete/doellner/publications/2005/BBD05a/SmartNavForVirtualLandscapes.pdf }, sorting = { 512 } } @inproceedings{BBD05, author = { Buchholz, Henrik and Bohnet, Johannes and D{\"o}llner, J{\"u}rgen }, title = { Smart and Physically-Based Navigation in 3D Geovirtual Environments }, year = { 2005 }, pages = { 629--635 }, abstract = { This paper describes an approach for smart and physically-based navigation, which aims at supporting effective and intuitive user interactions with 3D geovirtual environments (GeoVEs). The approach is based on two aligned concepts: 1) All navigation techniques are controlled by constraints that ensure user orientation and avoid “getting lost” situations. 2) All navigation techniques are handled in a time-coherent way achieving steady, continuous user movements using a physically-based motion model. Based on these concepts, we demonstrate several ways to improve commonly used navigation techniques for geovirtual environments. }, publisher = { IEEE Computer Society Press }, booktitle = { 9th International Conference on Information Visualization }, files = { user_upload/fachgebiete/doellner/publications/2005/BBD05/SmartNavigation_Buchholz_draft.pdf }, sorting = { 768 } } @inproceedings{HZMAOPWKND05, author = { Hentschel, Christian and Zahn, M. and Merettig, Ralf and Anke, A. and Oschwald, Mario and Porscha, R. 
and Wiegand, Frank and Kirsch, Florian and Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { SVG2MHP: A Content Transformation System for the Multimedia Home Platform }, year = { 2005 }, note = { Online proceedings }, booktitle = { 4th Annual Conference on Scalable Vector Graphics (SVG Open) }, sorting = { 1024 } } @inproceedings{BBD05b, author = { Buchholz, Henrik and Baumann, Konstantin and D{\"o}llner, J{\"u}rgen }, title = { Representation and Interactive Editing of Vector Data in Virtual Landscapes }, year = { 2005 }, pages = { 115--123 }, editor = { Buhmann, E. and Paar, P. and Bishop, I.D. and Lange, E. }, publisher = { Wichmann }, booktitle = { Trends in Real-time Visualization and Participation. Proceedings at Anhalt University of Applied Sciences }, sorting = { 1280 } } @inproceedings{BD05, author = { Buchholz, Henrik and D{\"o}llner, J{\"u}rgen }, title = { View-Dependent Rendering of Multiresolution Texture-Atlases }, year = { 2005 }, pages = { 215--222 }, abstract = { Real-time rendering of massively textured 3D scenes usually involves two major problems: Large numbers of texture switches are a well-known performance bottleneck and the set of simultaneously visible textures is limited by the graphics memory. This paper presents a level-of-detail texturing technique that overcomes both problems. In a preprocessing step, the technique creates a hierarchical data structure for all textures used by scene objects, and it derives texture atlases at different resolutions. At runtime, our texturing technique requires only a small set of these texture atlases, which represent scene textures in an appropriate size depending on the current camera position and screen resolution. Independent of the number and total size of all simultaneously visible textures, the achieved frame rates are similar to that of rendering the scene without any texture switches. 
Since the approach includes dynamic texture loading, the total size of the textures is only limited by the hard disk capacity. The technique is applicable for any 3D scenes whose scene objects are primarily distributed in a plane, such as in the case of 3D city models or outdoor scenes in computer games. Our approach has been successfully applied to massively textured, large-scale 3D city models. }, publisher = { IEEE Computer Society Press }, booktitle = { IEEE Visualization 2005 }, files = { user_upload/fachgebiete/doellner/publications/2005/BD05/MultiresolutionTextureAtlases_stamped.pdf }, sorting = { 1536 } } @inproceedings{KD05, author = { Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { OpenCSG: A Library for Image-Based CSG Rendering }, year = { 2005 }, pages = { 129--140 }, abstract = { We present the design and implementation of a real-time 3D graphics library for image-based Constructive Solid Geometry (CSG). This major approach of 3D modeling has not been supported by real-time computer graphics until recently. We explain two essential image-based CSG rendering algorithms, and we introduce an API that provides a compact access to their complex functionality and implementation. As an important feature, the CSG library seamlessly integrates application-defined 3D shapes as primitives of CSG operations to ensure high adaptability and openness. We also outline optimization techniques to improve the performance in the case of complex CSG models. A number of use cases demonstrate potential applications of the library. 
}, booktitle = { USENIX 2005 Annual Technical Conference, FREENIX Track }, files = { user_upload/fachgebiete/doellner/publications/2005/KD05/csg_freenix2005.pdf,user_upload/fachgebiete/doellner/publications/2005/KD05/csg_freenix2005_paper.pdf }, sorting = { 1792 } } @inproceedings{WDDHPR05, author = { Werner, Armin and Deussen, Oliver and D{\"o}llner, J{\"u}rgen and Hege, Hans-Christian and Paar, Philip and Rekittke, J{\"o}rg }, title = { Lenn{\'e}3D - Walking through Landscape Plans }, year = { 2005 }, pages = { 48--59 }, editor = { Buhmann, E. and Paar, P. and Bishop, I.D. and Lange, E. }, publisher = { Wichmann }, booktitle = { Trends in Real-time Visualization and Participation. Proceedings at Anhalt University of Applied Sciences }, sorting = { 2048 } } @inproceedings{DBNK05, author = { D{\"o}llner, J{\"u}rgen and Buchholz, Henrik and Nienhaus, Marc and Kirsch, Florian }, title = { Illustrative Visualization of 3D City Models }, year = { 2005 }, volume = { 5669 }, pages = { 42--51 }, abstract = { This paper presents an illustrative visualization technique that provides expressive representations of large-scale 3D city models, inspired by the tradition of artistic and cartographic visualizations typically found in bird’s-eye view and panoramic maps. We define a collection of city model components and a real-time multi-pass rendering algorithm that achieves comprehensible, abstract 3D city model depictions based on edge enhancement, color-based and shadow-based depth cues, and procedural facade texturing. Illustrative visualization provides an effective visual interface to urban spatial information and associated thematic information complementing visual interfaces based on the Virtual Reality paradigm, offering a huge potential for graphics design. Primary application areas include city and landscape planning, cartoon worlds in computer games, and tourist information systems. }, editor = { Erbacher, Robert F. and Roberts, Jonathan C.and Gr{\"o}hn, Matti T. 
and B{\"o}rner, Katy }, publisher = { International Society for Optical Engine (SPIE) }, series = { Proceedings of the SPIE }, booktitle = { Visualization and Data Analysis }, files = { user_upload/fachgebiete/doellner/publications/2005/DBNK05/NprPaperVDM_stamped.pdf }, sorting = { 2304 } } @inproceedings{DB05, author = { D{\"o}llner, J{\"u}rgen and Buchholz, Henrik }, title = { Continuous Level-of-Detail Modeling of Buildings in Virtual 3D City Models }, year = { 2005 }, pages = { 173--181 }, abstract = { This paper introduces a concept for representing and modeling buildings in GIS at continuous levels of quality. Buildings are essential objects of virtual 3D city models, which serve as platforms for integrated, urban geoinformation. Existing concepts for the representation of buildings are restricted to a specific level-ofquality such as block models, roof-including models, architectural models, and indoor virtual reality models. The continuous level-of-quality approach unifies the representation of heterogeneous sets of buildings, which occur in most virtual 3D city models. It also leads to a systematic method for the incremental refinement of buildings – an important requirement of the long-term management of virtual city models. In our concept, a building's geometry is structured on a per-floor basis; each floor refers to a floor prototype, which is defined by a ground plan, walls, and wall segments. To specify the appearance projective textures across floors and textures per wall segment are supported. Application-specific data can be associated similar to appearance information. These few components already allow us to express efficiently most common building features. Furthermore, the approach seamlessly integrates into CityGML, an upcoming standard for virtual city model data. 
}, publisher = { ACM Press }, booktitle = { 13th ACM International Symposium of Geographical Information Systems (ACM GIS) }, files = { user_upload/fachgebiete/doellner/publications/2005/DB05/2005_Doellner_ContinuousLODModeling.pdf }, sorting = { 2560 } } @inproceedings{DHS05, author = { D{\"o}llner, J{\"u}rgen and Hagedorn, Benjamin and Schmidt, Steffen }, title = { An Approach towards Semantics-Based Navigation in 3D City Models on Mobile Devices }, year = { 2005 }, number = { 74 }, pages = { 171--176 }, abstract = { This paper outlines a novel approach for user navigation in complex virtual 3D city models on mobile devices. Users navigate within the virtual 3D city model by sketching navigation commands in the perspective view on the mobile client. The sketches are sent to the server, which reprojects the sketches onto the 3D scene, interprets these sketches in terms of naviga-tion commands, and sends the resulting video-encoded image stream to the mobile client. This approach allows us to provide interactivity for complex virtual 3D city models on resource and bandwidth limited mobile clients. A high degree of usability is achieved because users can trigger complex navigation commands in a task and goal oriented way taking advantage of the navigation properties and affordances inherent to elements of geovirtual environments. 
}, editor = { Georg Gartner }, publisher = { TU Wien }, series = { Schriftenreihe der Studienrichtung Vermessungswesen und Geoinformation an der TU Wien }, address = { Vienna }, booktitle = { Proceedings of the 3rd Symposium on LBS \& TeleCartography 2005 }, files = { user_upload/fachgebiete/doellner/publications/2005/DHS05/dhs2005_draft.pdf }, sorting = { 2816 } } @inproceedings{DB05b, author = { D{\"o}llner, J{\"u}rgen and Buchholz, Henrik }, title = { Expressive Virtual 3D City Models }, year = { 2005 }, abstract = { Virtual 3D city models include components such as 3D building models, transportation network models, and vegetation models. Traditional applications of virtual 3D city models focus on presenting photorealistic views. The potential of 3D city models, however, goes beyond virtual reality. In general, virtual 3D city models can serve as user interfaces to complex, spatial urban information, that is, for exploration, analysis, and communication of thematic and spatial information. For the last decades, the intrinsic concepts of real-time 3D computer graphics lead to near-photorealistic, interactive virtual 3D city models. With the recent advent of non-photorealistic 3D rendering, a new genre in computer graphics, a repertoire of illustrative, expressive, and artistic graphics techniques becomes viable to developers and designers of virtual city models. This contribution outlines main techniques and discusses consequences for cartographic information display based on 3D city models. 
}, note = { CD proceedings }, booktitle = { International Cartographic Conference }, organization = { International Cartographic Association (ICA - ACI) }, files = { user_upload/fachgebiete/doellner/publications/2005/DB05b/icc_2005_doellner.pdf }, sorting = { 3072 } } @inproceedings{DBBGJK05, author = { D{\"o}llner, J{\"u}rgen and Buchholz, Henrik and Brodersen, Florian and Glander, Tassilo and J{\"u}tterschenke, Sascha and Klimetschek, Alexander }, title = { SmartBuildings - A Concept for Ad-Hoc Creation and Refinement of 3D Building Models }, year = { 2005 }, abstract = { This paper presents smart buildings, a concept for ad-hoc creation and refinement of 3D building models. A smart building represents a building’s geometry and appearance on a per-floor basis. A building’s floor basically consists of a ground plan and walls, whereby each floor and its parts can be specified independently. In addition, smart buildings provide and maintain detailed building semantics and allow for attaching application-specific semantics not just to the whole building but also to specific floors and sections of façades. Smart buildings provide intuitive means for constructing, reshaping, and refining 3D building models. In particular, they provide an effective solution for integrating building models of different level-of-detail within a uniform framework. With smart buildings, authoring systems for 3D city models can implement cost-effective intuitive tools for the maintenance and incremental development of 3D city models. The concept can serve both as a schema for implementing 3D city model systems as well as provide a suggestion for further extensions to standards regarding 3D city models such as CityGML. }, note = { Online proceedings }, editor = { Gr{\"o}ger, G. and Kolbe, T.H. 
}, publisher = { EuroSDR }, address = { Bonn }, booktitle = { 1st International Workshop of 3D City Models }, organization = { SPRS/EuroSDR/DGPF }, files = { user_upload/fachgebiete/doellner/publications/2005/DBBGJK05/SmartBuildings.pdf }, sorting = { 3328 } } @inproceedings{BDNK05, author = { Buchholz, Henrik and D{\"o}llner, J{\"u}rgen and Nienhaus, Marc and Kirsch, Florian }, title = { Real-Time Non-Photorealistic Rendering of 3D City Models }, year = { 2005 }, abstract = { This paper presents a real-time non-photorealistic rendering technique that provides expressive depictions of 3D city models, inspired by the tradition of artistic and cartographic depictions typically found in bird’s-eye view and panoramic maps. We define a collection of city model components and a real-time multi-pass rendering algorithm that achieves comprehensible, abstract 3D city model depictions based on edge enhancement and stylization, color-based and shadow-based depth cues, and procedural facade texturing. Non-photorealistic rendering facilitates the implementation of effective visual interfaces to urban spatial information and associated thematic information going beyond the Virtual Reality paradigm and offering a huge potential for graphics design. Primary application areas include city and landscape planning, cartoon worlds in computer games, and tourist information systems. }, note = { Online proceedings }, editor = { Gr{\"o}ger, G. and Kolbe, T.H. 
}, publisher = { EuroSDR }, booktitle = { 1st International Workshop on Next Generation 3D City Models }, files = { user_upload/fachgebiete/doellner/publications/2005/DBNK05/NprPaperVDM_stamped.pdf }, sorting = { 3584 } } @inproceedings{DBBP05, author = { D{\"o}llner, J{\"u}rgen and Baumann, Konstantin and Buchholz, Henrik and Paar, Philip }, title = { Real-Time Virtual Landscapes in Landscape and Urban Planning }, year = { 2005 }, abstract = { In landscape and urban planning, public participation and interactivity become more and more an issue. Real-time virtual 3D landscapes represent communication tools that allow experts as well as non-experts to use, explore, analyze, and understand landscape information. In our contribution, we describe the architecture and functionality of an interactive, participatory system, Lenné3D, which facilitates creation, management, and distribution of real-time virtual landscapes. Systems supporting virtual landscapes must handle the inherent complexity of their components, in particular building and vegetation objects. In our approach, real-time virtual landscapes are based on 3D maps as underlying concept for composing and managing virtual landscape models. They are complemented by 3D vegetation models, which use botanical-based 3D plant models. To cope with the massive geometric complexity of virtual landscapes, multiresolution schemes need to be applied to buildings as well as to vegetation objects. The concepts have been successfully implemented within the Lenné3D system as demonstrated by a case study. 
}, note = { CD proceedings }, booktitle = { International Conference on Geographic Information (GIS Planet) }, files = { user_upload/fachgebiete/doellner/publications/2005/DBBP05/Gisplanet_doellner_et_al.pdf }, sorting = { 3840 } } @inproceedings{ND05b, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Dynamik in Bildern - Neue Wege der automatisierten Illustration }, year = { 2005 }, booktitle = { Tagungsband der Potsdamer Multimedia Tage }, sorting = { 4096 } } @inproceedings{HMAPOWZKND05, author = { Hentschel, Christian and Merettig, Ralf and Anke, A. and Porscha, R. and Oschwald, Mario and Wiegand, Frank and Zahn, M. and Kirsch, Florian and Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { SVG als Beschreibungssprache f{\"u}r interaktive TV-Applikationen auf Basis von MHP }, year = { 2005 }, booktitle = { Tagungsband der Potsdamer Multimedia Tage }, sorting = { 4352 } } @inproceedings{ND04a, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Sketchy Drawings }, year = { 2004 }, pages = { 73--81 }, abstract = { In non-photorealistic rendering sketchiness is essential to communicate visual ideas and can be used to illustrate drafts and concepts in, for instance, architecture and product design. In this paper, we present a hardware-accelerated real-time rendering algorithm for drawings that sketches visually important edges as well as inner color patches of arbitrary 3D objects even beyond the geometrical boundary. The algorithm preserves edges and color patches as intermediate rendering results using textures. To achieve sketchiness it applies uncertainty values in imagespace to perturb texture coordinates when accessing intermediate rendering results. The algorithm adjusts depth information derived from 3D objects to ensure visibility when composing sketchy drawings with arbitrary 3D scene contents. 
Rendering correct depth values while sketching edges and colors beyond the boundary of 3D objects is achieved by depth sprite rendering. Moreover, we maintain frame-to-frame coherence because consecutive uncertainty values have been determined by a Perlin noise function, so that they are correlated in image-space. Finally, we introduce a solution to control and predetermine sketchiness by preserving geometrical properties of 3D objects in order to calculate associated uncertainty values. This method significantly reduces the inherent shower-door effect. }, keywords = { Non-photorealistic rendering, sketching, real-time rendering, image-space, hardware-acceleration, depth sprites. }, editor = { Lynette van Zijl and Patrick Marais }, publisher = { ACM Press }, booktitle = { 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa (ACM AFRIGRAPH) }, files = { user_upload/fachgebiete/doellner/publications/2004/ND04a/afrigraph_sketchydrawing_final.pdf }, isbn = { 1-58113-863-6 }, sorting = { 4 } } @inproceedings{ND04b, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Blueprints - Illustrating Architecture and Technical Parts using Hardware-Accelerated Non-Photorealistic Rendering }, year = { 2004 }, pages = { 49--56 }, abstract = { Outlining and enhancing visible and occluded features in drafts of architecture and technical parts are essential techniques to visualize complex aggregated objects and to illustrate position, layout, and relations of their components. In this paper, we present blueprints, a novel nonphotorealistic hardware-accelerated rendering technique that outlines visible and non-visible perceptually important edges of 3D objects. Our technique is based on the edge map algorithm and the depth peeling technique to extract these edges from arbitrary 3D scene geometry in depth-sorted order. 
After edge maps have been generated, they are composed in image space using depth sprites, which allow us to combine blueprints with further 3D scene contents. We introduce depth masking to dynamically adapt the number of rendering passes for highlighting and illustrating features of particular importance and their relation to the entire assembly. Finally, we give an example of blueprints that visualize and illustrate ancient architecture in the scope of cultural heritage. }, editor = { Heidrich, W and Balakrishnan, R }, publisher = { AK Peters }, address = { Canada }, booktitle = { Graphics Interface }, files = { user_upload/fachgebiete/doellner/publications/2004/ND04b/blueprints_final.pdf }, sorting = { 8 } } @inproceedings{KD04, author = { Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { Rendering Techniques for Hardware-Accelerated Image-Based CSG }, year = { 2004 }, pages = { 221--228 }, abstract = { Image-based CSG rendering algorithms for standard graphics hardware rely on multipass rendering that includes reading and writing large amounts of pixel data from and to the frame buffer. Since the performance of this data path has hardly improved over the last years, we describe new implementation techniques that efficiently use modern graphics hardware. 1) The render-to-texture ability is used to temporarily store shape visibility, avoiding the expensive copy of z-buffer content to external memory. Shape visibility is encoded discretely instead of using depth values. Hence, the technique is also not susceptible to artifacts in contrast to previously described methods. 2) We present an image-based technique for calculating the depth complexity of a CSG shape that avoids reading and analyzing pixel data from the frame buffer. Both techniques optimize various CSG rendering algorithms, namely the Goldfeather and the layered Goldfeather algorithm, and the Sequenced-Convex- Subtraction (SCS) algorithm. 
This way, these image-based CSG algorithms now operate accelerated by graphics hardware and, therefore, represent a significant improvement towards real-time image-based CSG rendering for complex models. }, keywords = { Constructive Solid Geometry, CSG Rendering, Image-Based Rendering, Rendering Algorithms, Solid Modeling }, booktitle = { Journal of WSCG }, files = { user_upload/fachgebiete/doellner/publications/2004/KD04/1_wscg04_KirschDoellner.pdf,user_upload/fachgebiete/doellner/publications/2004/KD04/2_csg_wscg04_talk.pdf }, sorting = { 16 } } @inproceedings{Do04, author = { D{\"o}llner, J{\"u}rgen }, title = { Embedding Digital Rights in Geovisualizations }, year = { 2004 }, pages = { 375--382 }, address = { G{\"a}vle, Sweden }, booktitle = { 12th International Conference on Geoinformatics - Geospatial Information Research }, sorting = { 32 } } @inproceedings{ND04, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Visualizing Design and Spatial Structure of Ancient Architecture using Blueprint Rendering }, year = { 2004 }, pages = { 63--64 }, abstract = { We present the blueprint rendering technique as an effective tool for interactively visualizing, exploring, and communicating the design and spatial structure of ancient architecture by outlining and enhancing their visible and occluded features. The term blueprint in its original meaning denotes “a photographic print in white on a bright blue ground or blue on a white ground used especially for copying maps, mechanical drawings, and architects' plans” (Merriam Webster). Blueprints consist of transparently rendered features, represented by their outlines. This way, blueprints allow for realizing complex, hierarchical object assemblies such as architectural drafts. Our technique renders 3D models of architecture to automatically generate blueprints that provide spatial insights, and generates plan views that provide a systematic overview, and enhances these drafts using glyphs. 
Additionally, blueprint rendering can highlight features of particular importance and their relation to the entire structure, and can reduce visual complexity if the structural complexity of the 3D model is excessive. }, editor = { K. Cain and Y. Chrysanthou and F. Niccolucci and N. Silberman }, booktitle = { 5th International Symposium on Virtual Reality, Archaeology and Cultural Heritage (VAST) }, files = { user_upload/fachgebiete/doellner/publications/2004/ND04/vast04_nienhaus_doellner.pdf }, sorting = { 64 } } @inproceedings{Do04a, author = { D{\"o}llner, J{\"u}rgen }, title = { Interaktive 3D-Geoinformationsdokumente }, year = { 2004 }, address = { K{\"o}nigslutter }, booktitle = { Symposium Praktische Kartographie }, sorting = { 128 } } @inproceedings{BSDSW04, author = { Buchin, Kevin and Sousa, Mario Costa and D{\"o}llner, J{\"u}rgen and Samavati, Faramarz and Walther, Maike }, title = { Illustrating Terrains using Direction of Slope and Lighting }, year = { 2004 }, pages = { 259--269 }, abstract = { Landscape illustrations and cartographic maps depict terrain surface in a qualitatively effective way. In this paper, we present a framework for line drawing techniques for automatically reproducing traditional illustrations of terrain by means of slope lines and tonal variations. Given a digital elevation model, surface measures are computed and slope lines of the terrain are hierarchically traced and stored. At run-time slope lines are rendered by stylized procedural and texture-based strokes. The stroke density of the final image is determined according to the light intensities. Using a texture based approach, the line drawing pipeline is encapsulated from the rendering of the terrain geometry. Our system operates on terrain data at interactive rates while maintaining frame-to-frame coherence. 
}, keywords = { Terrain visualization, non-photorealistic rendering }, booktitle = { 4th ICA Mountain Carthography Workshop }, files = { user_upload/fachgebiete/doellner/publications/2004/BSDSW04/buchin.pdf }, sorting = { 256 } } @inproceedings{BD03, author = { Buchholz, Henrik and D{\"o}llner, J{\"u}rgen }, title = { Efficient Handling of Shading Discontinuities for Progressive Meshes }, year = { 2003 }, pages = { 65--71 }, abstract = { For visual perception of 3D models, shading plays one of the major roles. The shading quality of level-ofdetail models is limited generally because existing LOD algorithms assume a conceptually smooth surface. A complex mesh, however, is likely to have conceptually smooth and angular parts. We introduce an extension to the progressive mesh LOD approach that efficiently handles triangle meshes with large numbers of shading discontinuities. For this the algorithm distinguishes three principal shading situations for mesh vertices: completely continuous shading, completely discontinuous shading, and mixed continuous/discontinuous shading. Remarkably, the algorithm does not introduce any overhead for completely smooth surfaces. As one field of application, we briefly outline its application for LOD representations of 3D city models. 
}, booktitle = { Proceedings of the Seventh International Conference on Information Visualization 2003 }, files = { user_upload/fachgebiete/doellner/publications/2003/BD03/BuchholzDoellner_Shading_draft.pdf }, sorting = { 1792 } } @inproceedings{ND03a, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Sketchy Drawings - A Hardware-Accelerated Approach for Real-Time Non-Photorealistic Rendering }, year = { 2003 }, address = { San Diego, CA, USA }, booktitle = { ACM SIGGRAPH 2003 - Sketches and Applications }, files = { user_upload/fachgebiete/doellner/publications/2003/ND03a/nienhaus_sketchyDrawings.pdf }, sorting = { 2048 } } @inproceedings{DW03, author = { D{\"o}llner, J{\"u}rgen and Walther, Maike }, title = { Real-Time Expressive Rendering of City Models }, year = { 2003 }, pages = { 245-250 }, abstract = { City models have become central elements for visually communicating spatial information related to urban areas and have manifold applications. Our real-time non-photorealistic rendering technique aims at abstract, comprehensible, and vivid drawings of assemblies of polygonal 3D urban objects. It takes into account related principles in cartography, cognition, and nonphotorealism. Technically, the geometry of a building is rendered using expressive line drawings to enhance the edges, two-tone or three-tone shading to draw the faces, and simulated shadows. The edge enhancement offers several degrees of freedom, such as interactively changing the style, width, tilt, color, transparency, and length of the strokes. Traditional drawings of cities and panoramas inspired the tone shading that achieves a pleasing visual color effect. The rendering technique can be applied not only to city models but to polygonal shapes in general. 
}, booktitle = { Proceedings of the IEEE Information Visualization }, files = { user_upload/fachgebiete/doellner/publications/2003/DW03/DoellnerWalther_CityModels_draft.pdf }, sorting = { 2304 } } @inproceedings{ND03, author = { Nienhaus, Marc and D{\"o}llner, J{\"u}rgen }, title = { Dynamic Glyphs - Depicting Dynamics in Images of 3D Scenes }, year = { 2003 }, pages = { 102--111 }, abstract = { Depicting dynamics offers manifold ways to visualize dynamics in static media, to understand dynamics in the whole, and to relate dynamics of the past and the future with the current state of a 3D scene. The depiction strategy we propose is based on visual elements, called dynamic glyphs, which are integrated in the 3D scene as additional 2D and 3D geometric objects. They are derived from a formal specification of dynamics based on acyclic, directed graphs, called behavior graphs. Different types of dynamics and corresponding mappings to dynamic glyphs can be identified, for instance, scene events at a discrete point in time, transformation processes of scene objects, and activities of scene actors. The designer or the application can control the visual mapping of dynamics to dynamic glyphs, and, thereby, create own styles of dynamic depiction. Applications of dynamic glyphs include the automated production of instruction manuals, illustrations, and storyboards. }, booktitle = { 3rd Int. Symposium on Smart Graphics 2003 }, files = { user_upload/fachgebiete/doellner/publications/2003/ND03/2003-dynamicglyphs.pdf }, sorting = { 2560 } } @inproceedings{KD02, author = { Kersting, Oliver and D{\"o}llner, J{\"u}rgen }, title = { Interactive Visualization of 3D Vector Data in GIS }, year = { 2002 }, pages = { 107--112 }, abstract = { Vector data represents one major category of data managed by GIS. This paper presents a new technique for vector-data display that is able to precisely and efficiently map vector data on 3D objects such as digital terrain models. 
The technique allows the system to adapt the visual mapping to the context and user needs and enables users to interactively modify vector data through the visual representation. It represents a basic mechanism for GIS interface technology and facilitates the development of visual analysis and exploration tools. }, booktitle = { Proceedings of the ACM GIS }, files = { user_upload/fachgebiete/doellner/publications/2002/KD02/acmgis2002_kersting_doellner_draft.pdf }, sorting = { 32 } } @inproceedings{KD02a, author = { Kersting, Oliver and D{\"o}llner, J{\"u}rgen }, title = { Interactively Developing 3D Graphics in Tcl }, year = { 2002 }, pages = { 1-12 }, abstract = { This paper presents an approach to integrate interactive real-time 3D graphics into the scripting language Tcl. 3D graphics libraries are typically implemented in system programming languages such as C or C++ in order to be type safe and fast. We have developed a technique that analyzes the C++ application programming interface of such a library and maps it to appropriate scripting commands and structures. As 3D graphics library, we apply the Virtual Rendering System, an objectoriented library that supports 3D modeling, interaction, and animation. The mapped API represents a complete and powerful development tool for interactive, animated 3D graphics applications. The mapping technique takes advantage of the weak typing and dynamic features of the scripting language, preserves all usability- critical features of the C++ API, and has no impact on performance so that even real-time 3D applications can be developed. The mapping technique can be applied in general to all kinds of C++ APIs and automated. It also gathers reflection information of the API classes and supports interactive management of API objects. Consequently, interactive development environments can be built easily based on this information. We illustrate the approach by several examples of 3D graphics applications. 
}, booktitle = { Usenix Annual Technical Conference }, files = { user_upload/fachgebiete/doellner/publications/2002/KD02a/kersting_doellner_freenix.pdf }, sorting = { 64 } } @inproceedings{Do02, author = { D{\"o}llner, J{\"u}rgen }, title = { Grafische, kognitive und algorithmische Aspekte niedrig aufgel{\"o}ster Bilder }, year = { 2002 }, booktitle = { WebMapping Symposium }, sorting = { 128 } } @inproceedings{Do01, author = { D{\"o}llner, J{\"u}rgen }, title = { Virtuelle 3D-Kartenwelten }, year = { 2001 }, pages = { X.1-X.14 }, editor = { Hermann, Asche }, publisher = { Wichmann-Verlag }, booktitle = { WebMapping Symposium, In: Raumbezogenen Information und Kommunikation im Internet }, sorting = { 256 } } @inproceedings{DH00, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { A Generalized Scene Graph API. Vision, Modeling, Visualization }, year = { 2000 }, pages = { 247--254 }, abstract = { Scene graphs are fundamental data structures for hierarchical scene modeling. The generalized scene graph overcomes various limitations of current scene graph architectures such as support for different 3D rendering systems, integration of multi-pass rendering, and declarative modeling of scenes. The main idea is to separate scene specification from scene evaluation. To specify scenes, scene graph nodes are arranged and equipped with rendering objects, e.g., shapes, attributes, and algorithms. To evaluate scenes, the contents of scene graphs nodes, the rendering objects, are evaluated by rendering engines, which use the algorithm objects to interpret shapes and attributes. Using generalized scene graphs, most real-time rendering techniques for OpenGL and several 3D rendering systems can be integrated in a single scene representation without loosing control over or limiting individual strengths of rendering systems. 
}, address = { Saarbr{\"u}cken }, booktitle = { 5th Fall Workshop (GI/IEEE) }, files = { user_upload/fachgebiete/doellner/publications/2000/DH00/dh_generalizedscenegraph_vmv00.pdf }, sorting = { 512 } } @inproceedings{BDH00, author = { Baumann, Konstantin and D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Integrated Multiresolution Geometry and Texture Models }, year = { 2000 }, pages = { 157-166 }, abstract = { In this paper, an approach for integrating multiresolution representations of terrain geometry and terrain texture data is presented. A terrain is modeled by a regular grid, which can be partially refined by local TINs in order to represent morphologically complex terrain parts. The multiresolution models for terrain texture data and geometry data are closely related: The rendering algorithm selects geometry and texture patches based on screen-space error criteria. Multiple texture hierarchies, which may represent different thematic information layers, can be bound to one terrain model. Multiple textures lead to a drastic improvement of visual quality: Topographic textures can be used to provide pixel-precise shading, alpha textures can be used to restrict or to highlight thematic textures. Multiple textures facilitate the development of visual interaction tools such as magic lenses, and texture animations. Multitexturing permits an efficient implementation of these concepts. 
}, address = { Amsterdam }, booktitle = { Eurographics/IEEE TCVG Symposium on Visualization }, files = { user_upload/fachgebiete/doellner/publications/2000/BDH00/bdh_approxtree_vissym_00_draft.pdf }, sorting = { 768 } } @inproceedings{DKH00, author = { D{\"o}llner, J{\"u}rgen and Kersting, Oliver and Hinrichs, Klaus }, title = { Programmierbare, interaktive 3D-Karten zur Kommunikation raumbezogener Information }, year = { 2000 }, volume = { 1 }, pages = { 131-145 }, abstract = { Raumbezogene Objekte und Prozesse beschreiben den Zustand und die Veränderung in der Umwelt und spielen damit eine zentrale Rolle in der Umweltinformatik. Zur Durchführung von Maßnahmen des Umweltschutzes und der Umweltgestaltung ist es notwendig, diese Objekte und Prozesse zu visualisieren und zu steuern. Dieser Beitrag stellt ein Konzept für programmierbare, interaktive 3D-Karten vor, die die Präsentation, Exploration und Manipulation räumlicher und raumzeitlicher Objekte und Prozesse ermöglichen. Eine 3D-Karte basiert auf einem Geländemodell, in dessen Kontext raumbezogene Informationen durch Kartentexturen und geometrische Kartenobjekte visualisiert sind. Die Interaktivität der Kartenobjekte wird mit Hilfe einer integrierten Skriptsprache spezifiziert, die außerdem die Konfiguration bestehender und die Konstruktion neuer Kartenobjekte einer 3D-Karte durch den Benutzer ermöglicht. Derart programmierbare, interaktive 3D-Karten können zum Bau von Werkzeugen zur effizienten Kommunikation von Umweltinformation eingesetzt werden. }, editor = { A.B.Cremers, K. Greve }, publisher = { Metropolis Verlag }, booktitle = { 14. Internationales Symposium "Informatik f{\"u}r den Umweltschutz", Bonn. 
In: Umweltinformation f{\"u}r Planung, Politik und Öffentlichkeit }, files = { user_upload/fachgebiete/doellner/publications/2000/DKH00/dhk_umwinf00_final.pdf }, sorting = { 1024 } } @inproceedings{Do00a, author = { D{\"o}llner, J{\"u}rgen }, title = { Abstract Image Representation for Non-Photorealistic 3D Graphics }, year = { 2000 }, pages = { 76--85 }, booktitle = { SCCG 2000 Spring Conference on Computer Graphics }, sorting = { 1280 } } @inproceedings{DB00, author = { D{\"o}llner, J{\"u}rgen and Baumann, Konstantin }, title = { Texturing Techniques for Terrain Visualization }, year = { 2000 }, pages = { 227--234 }, booktitle = { Proceedings of IEEE Visualization }, sorting = { 1536 } } @inproceedings{DK00, author = { D{\"o}llner, J{\"u}rgen and Kersting, Oliver }, title = { Dynamic 3D Maps as Visual Interfaces for Spatio-Temporal Data }, year = { 2000 }, pages = { 115-120 }, booktitle = { Proceedings of the ACM GIS }, sorting = { 1792 } } @inproceedings{Do00, author = { D{\"o}llner, J{\"u}rgen }, title = { Integrated Multiresolution Modeling of Terrain Geometry and Terrain Texture Data }, year = { 2000 }, abstract = { The increasing capabilities of today's 3D graphics hardware raise the question how visualization software can take actually advantage of these developments. In this paper, an approach for integrating multiresolution representations of terrain geometry and terrain texture is presented which provides high image quality and high rendering speed focussing on PC graphics hardware. A terrain is modeled by a regular grid which can be partially refined by local TINs in order to represent morphologically complex terrain parts. The multiresolution model for texture data of a terrain and the multiresolution model for its geometry data are closely related: The rendering algorithm selects geometry terrain patches based on a criterion which takes into account geometry and texture approximation errors. 
Multiple texture hierarchies, which may represent different thematic information layers, can be bound to a terrain model and rendered using multitexturing. For example, the terrain shading can be provided by a pre-calculated shading texture which permits the implementation of different shading schemes and improves visual quality compared to a geometry-based shading. }, address = { Scottsdale, Arizona }, booktitle = { IMAGE 2000 Conference }, files = { user_upload/fachgebiete/doellner/publications/2000/Do00/DollnerImage2000WidthAppendix_draft.pdf }, sorting = { 2048 } } @inproceedings{DH00a, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Dynamic 3D Maps and Their Texture-Based Design }, year = { 2000 }, pages = { 325--334 }, abstract = { Three-dimensional maps are fundamental tools for presenting, exploring, and manipulating geo data. This paper describes multiresolution concepts for 3D maps and their texture-based design. In our approach, 3D maps are based on a hybrid, multiresolution terrain model composed of data sets having different topological structure, for example a coarse regular grid combined with by triangulated microstructures. Any number of texture layers can be associated with the terrain model. For each texture layer, the multiresolution structure builds a texture tree which is linked to geometry patches of the multiresolution terrain model. The terrain model together with multiple texture layers can be rendered in real-time, in particular if multitexturing is available. Texture layers can be combined by high-level operations such as blending and masking, and can be rebuilt at run-time. This mechanism simplifies the implementation of visual exploration tools and of procedural, automated map designs. 3D maps facilitate the visual simulation of environmental issues in spatial support systems, virtual reality applications, real-time GIS, and interactive cartography. 
}, booktitle = { IEEE Computer Graphics International CGI }, files = { user_upload/fachgebiete/doellner/publications/2000/DH00a/dollner_cgi2000_draft.pdf }, sorting = { 2304 } } @inproceedings{DK00a, author = { D{\"o}llner, J{\"u}rgen and Kersting, Oliver }, title = { Visual Support of Navigation and Orientation in 3D Maps }, year = { 2000 }, address = { Paderborn }, booktitle = { Workshop "Guiding Users through Interactive Experiences" }, sorting = { 2560 } } @inproceedings{DKB99, author = { D{\"o}llner, J{\"u}rgen and Kersting, Oliver and Baumann, Konstantin }, title = { Konzepte und Implementierung eines kartographischen 3D-Visualisierungssystems }, year = { 1999 }, pages = { 89--107 }, editor = { B. Schmidt, U. Uhlenk{\"u}ken }, publisher = { Natur \& Wissenschaft }, booktitle = { GeoViSC'99 Workshop Visualisierung raumbezogener Daten: Methoden und Anwendungen }, sorting = { 128 } } @inproceedings{BBDHHKSS99, author = { Becker, Ludger and Bernard, Lars and D{\"o}llner, J{\"u}rgen and Hammelbeck, Stefan and Hinrichs, Klaus and Kr{\"u}ger, Thomas and Schmidt, Benno and Streit, Ulrich }, title = { Integration dynamischer Atmosph{\"a}renmodelle mit einem (3+1)-dimensionalen objektorientierten GIS-Kern }, year = { 1999 }, pages = { 429--442 }, editor = { C. Rautenstrauch, M. Schenk }, publisher = { Metropolis }, booktitle = { 13. Internationales Symposium "Informatik f{\"u}r den Umweltschutz" }, sorting = { 256 } } @inproceedings{BD99, author = { Buziek, G. 
and D{\"o}llner, J{\"u}rgen }, title = { Concept and Implementation of an Interactive, Cartographic Virtual Reality System }, year = { 1999 }, pages = { 637--648 }, address = { Ottawa }, booktitle = { International Cartographic Conference ICA´99 }, sorting = { 512 } } @inproceedings{DKHB99, author = { D{\"o}llner, J{\"u}rgen and Kersting, Oliver and Hinrichs, Klaus and Baumann, Konstantin }, title = { Konzepte und 3D-Visualisierung interaktiver, perspektivischer Karten }, year = { 1999 }, pages = { 128--139 }, editor = { Strobl, Blaschke }, address = { Salzburg }, booktitle = { Angewandte Geographische Informationsverarbeitung XI }, sorting = { 2816 } } @inproceedings{BDHK99, author = { Baumann, Konstantin and D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus and Kersting, Oliver }, title = { A Hybrid, Hierarchical Data Structure for Real-Time Terrain Visualization }, year = { 1999 }, pages = { 85--92 }, booktitle = { IEEE Proceedings Computer Graphics International }, sorting = { 2560 } } @inproceedings{BDDHRV98, author = { Becker, Ludger and Ditt, H. and D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus and Reiberg, A. and Voigtmann, A. }, title = { Building an Interoperable GIS: Integration of an Object-Oriented GIS Database Kernel and a Visualization Framework }, year = { 1998 }, pages = { 386--395 }, abstract = { Geo-Information Systems (GIS) are used in various application areas of the geosciences. Advanced GIS-applications like meteorological simulations operate on three-dimensional and time-varying data. An extensible database kernel supporting three-dimensional timevarying data forms a promising base for implementing such applications since it can be adapted to the individual data modeling needs of an application. An extensible visualization framework supporting the visualization of data and their dynamics can be adapted to the specific visualization needs of GIS-applications. 
The Geo Object-Oriented DAtabase Core GOODAC is an extensible database core which supports the development of new-generation GIS-applications. We describe the integration of GOODAC with the visualization and animation framework MAM/VRS which provides extensible object-oriented technology for the development of scientific visualization components for 2D, 3D, and time-varying data. }, booktitle = { 8th International Symposium on Spatial Data Handling (SDH '98) }, files = { user_upload/fachgebiete/doellner/publications/1998/BDDHRV98/interoperable_gis.pdf }, sorting = { 1792 } } @inproceedings{DH98, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Interactive, Animated 3D Widgets }, year = { 1998 }, pages = { 278--286 }, abstract = { If 3D applications become large, hierarchical networks of geometric objects lead to messy specifications. Furthermore, if time- and event-dependencies are merged with the geometric modeling, the global layout of animation and interaction can hardly be achieved. We present an objectoriented architecture for interactive, animated 3D widgets which reduces the complexity of building 3D applications. 3D widgets encapsulate look (geometry) and feel (behavior) into high-level building blocks based on two types of directed acyclic graphs, geometry graphs and behavior graphs. 3D widgets construct internal geometry graphs and behavior graphs, perform operations on these graphs through high-level interfaces which hide details and raise the level of abstraction. 3D widgets define object ports which are used to link together 3D widgets. A visual language for 3D widgets allows the developer the interactive construction of 3D applications. }, booktitle = { IEEE Computer Graphics International }, files = { user_upload/fachgebiete/doellner/publications/1998/DH98/dh_3dwidgets_cgi98.pdf }, sorting = { 1536 } } @inproceedings{DHS97, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus and Spiegel, H. 
}, title = { An Interactive Environment for Visualizing and Animating Algorithms }, year = { 1997 }, pages = { 409--411 }, address = { Nice }, booktitle = { ACM Proceedings of the 13th Annual Symposium on Computational Geometry }, sorting = { 1024 } } @inproceedings{DH97, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { The Design of a 3D Rendering Meta System }, year = { 1997 }, pages = { 43--54 }, editor = { Arbab, F. and Slusallek, P. }, address = { Budapest }, booktitle = { Proceedings of the 6th Eurographics Workshop on Programming Paradigms for Graphics }, sorting = { 768 } } @inproceedings{DH95, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { The Virtual Rendering System - A Toolkit for Object-Oriented 3D Rendering }, year = { 1995 }, pages = { 309--318 }, booktitle = { EduGraphics - CompuGraphics Combined Proceedings }, files = { user_upload/fachgebiete/doellner/publications/1995/DH95/the-virtual-rendering-system.pdf }, sorting = { 3072 } } @inproceedings{DH95a, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Modellierung virtueller Umgebungen durch Spezifikation von Geometrie- und Verhaltensgraphen }, year = { 1995 }, pages = { 37--48 }, editor = { D. W. Fellner }, publisher = { Infix Verlag }, booktitle = { Modeling Virtual Worlds and Distributed Graphics MVD '95 }, sorting = { 3328 } } @mastersthesis{Tra07, author = { Trapp, Matthias }, title = { Analysis and Exploration of Virtual 3D-Citymodels using 3D Information Lenses }, year = { 2007 }, month = { 2 }, abstract = { This thesis addresses real-time rendering techniques for 3D Information-Lenses, which are based on the focus \& context metaphor. It analyzes, conceives, implements and reviews its applicability to objects and structures of virtual 3D city models. In contrast to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. 
However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis of this field. Programmable hardware enables the implementation of new lens-techniques, which allows the augmentation of the perceptive and cognitive quality of the visualization, compared to classical perspective projections. A set of 3D information-lenses is integrated into a 3D scene graph system: 1.) Occlusion Lenses modify the appearance of virtual 3D city-model objects in order to resolve their occlusion and consequently facilitate the navigation. 2.) Best-View Lenses display city-model objects in a priority-based manner and mediate their meta-information. Thus, they support exploration and navigation of virtual 3D-city-models. 3.) Color and deformation lenses modify the appearance and geometry of 3D city-models to facilitate their perception. The present techniques for 3D information lenses and the application to virtual 3D city models clarify their potential for interactive visualization and form a base for further development. }, url = { http://opus.kobv.de/ubp/volltexte/2008/1393/ }, address = { Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH, Prof.-Dr.-Helmert-Str. 2-3, D-14482 Potsdam }, school = { Hasso Plattner Institut, University Potsdam }, files = { user_upload/fachgebiete/doellner/publications/2007/Tra07/diplom.pdf }, link1 = { (PDF) http://opus.kobv.de/ubp/volltexte/2008/1393/pdf/trapp_matthias.pdf }, sorting = { 5376 } } @mastersthesis{Gla07, author = { Glander, Tassilo }, title = { Automated Generalization of 3D Building Models }, year = { 2007 }, keywords = { city models 3d automated generalization aggregation }, address = { Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH, Prof.-Dr.-Helmert-Str. 
2-3, D-14482 Potsdam }, school = { HPI, Universit{\"a}t Potsdam }, link1 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-77170-Automated-Combination-Real-Time-Shader-Programs-Eurographics-2007-matthias-trapp-Education-ppt-powerpoint/ }, sorting = { 5120 } } @phdthesis{S2016, author = { Semmo, Amir }, title = { Design and Implementation of Non-Photorealistic Rendering Techniques for 3D Geospatial Data }, year = { 2016 }, month = { 11 }, abstract = {
Geospatial data has become a natural part of a growing number of information systems and services in the economy, society, and people's personal lives. In particular, virtual 3D city and landscape models constitute valuable information sources within a wide variety of applications such as urban planning, navigation, tourist information, and disaster management. Today, these models are often visualized in detail to provide realistic imagery. However, a photorealistic rendering does not automatically lead to high image quality, with respect to an effective information transfer, which requires important or prioritized information to be interactively highlighted in a context-dependent manner. Approaches in non-photorealistic renderings particularly consider a user's task and camera perspective when attempting optimal expression, recognition, and communication of important or prioritized information. However, the design and implementation of non-photorealistic rendering techniques for 3D geospatial data pose a number of challenges, especially when inherently complex geometry, appearance, and thematic data must be processed interactively. Hence, a promising technical foundation is established by the programmable and parallel computing architecture of graphics processing units. This thesis proposes non-photorealistic rendering techniques that enable both the computation and selection of the abstraction level of 3D geospatial model contents according to user interaction and dynamically changing thematic information. To achieve this goal, the techniques integrate with hardware-accelerated rendering pipelines using shader technologies of graphics processing units for real-time image synthesis. The techniques employ principles of artistic rendering, cartographic generalization, and 3D semiotics—unlike photorealistic rendering—to synthesize illustrative renditions of geospatial feature type entities such as water surfaces, buildings, and infrastructure networks. 
In addition, this thesis contributes a generic system that enables to integrate different graphic styles—photorealistic and non-photorealistic—and provide their seamless transition according to user tasks, camera view, and image resolution. Evaluations of the proposed techniques have demonstrated their significance to the field of geospatial information visualization including topics such as spatial perception, cognition, and mapping. In addition, the applications in illustrative and focus+context visualization have reflected their potential impact on optimizing the information transfer regarding factors such as cognitive load, integration of non-realistic information, visualization of uncertainty, and visualization on small displays.
}, school = { University of Potsdam }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/S2016/asemmo-thesis_compressed.pdf }, link1 = { Institutional Repository of the University of Potsdam (http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99525) }, sorting = { 256 } } @phdthesis{T2013, author = { Trapp, Matthias }, title = { Interactive Rendering Techniques for Focus+Context Visualization of 3D Geovirtual Environments }, year = { 2013 }, month = { 7 }, abstract = { This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments (see Figure). In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. 
The contribution of this work comprises the following five real-time rendering methods: The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in an single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage the available screen space, and reduces the overload of image contents. The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. 
The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for design and development of new applications and systems in the field of 3D geovirtual environments. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, address = { Hasso-Plattner-Institut für Softwaresystemtechnik GmbH, Prof.-Dr.-Helmert-Str. 2-3, D-14482 Potsdam }, school = { Hasso Plattner Institut, University Potsdam }, sorting = { 256 } } @phdthesis{BOH11, author = { Johannes Bohnet }, title = { Visualization of Execution Traces and its Application to Software Maintenance }, year = { 2011 }, abstract = { Maintaining complex software systems tends to be costly because developers spend a significant part of their time with trying to understand the system’s structure and behavior. Among many reasons, program understanding is time consuming because the system’s structure and its internal behavior are not intuitively realizable and can only be partially inspected. The visualization of execution traces represents an approach to help developers to understand complex systems. Practically, execution trace visualization captures the sequence of function calls over time during system execution, analyzes and abstracts that data, and derives visual representations that permit developers to analyze the system’s structure and behavior. The goal of this thesis is to develop a trace visualization concept and tool that can handle the computational and cognitive scalability issues that trace visualization encounters due to the large amount of data that is typically produced when logging runtime processes. 
The thesis’ concept includes the following building blocks: (1) A concept for generating traces of C/C++ software systems in a scalable way; (2) a concept for trace reduction that automatically identifies recursive boundaries within the trace and, by this, supports developers in exploring a trace using a top-down approach; (3) a framework for trace visualization techniques that provides solutions to the question as to how core techniques for viewing trace data can be implemented such that developers are supported in performing top-down and bottom-up comprehension strategies; and (4) a concept for combining trace visualization with 3rd party tools and systems for reverse engineering. As a further contribution, this thesis validates the proposed concept by means of an implementation, performance measurements, and case studies. The implementation of the concept is provided as a framework for creating trace visualization tools. To ensure scalability of the concept, performance measurements were taken while applying the tool to large C/C++ software systems. Furthermore, the concept and tool has been experimentally applied to industrially developed software systems to solve particular maintenance problems in real-world scenarios. 
}, affiliation = { University of Potsdam }, school = { Hasso-Plattner-Institut, University of Potsdam }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2011/BOH11/2011-phd-dissertation-bohnet.pdf }, sorting = { 4 }, priority = { 1 } } @phdthesis{Lo10, author = { Haik Lorenz }, title = { Texturierung und Visualisierung virtueller 3D-Stadtmodelle }, year = { 2010 }, month = { 11 }, affiliation = { University of Potsdam }, school = { HPI, Universit{\"a}t Potsdam }, project = { NFG }, sorting = { 8 } } @phdthesis{Ma09, author = { Stefan Maass }, title = { Techniken zur automatisierten Annotation interaktiver geovirtueller 3D-Umgebungen }, year = { 2009 }, month = { 1 }, affiliation = { University of Potsdam }, school = { HPI, Universit{\"a}t Potsdam }, project = { NFG }, sorting = { 512 } } @phdthesis{Job08b, author = { Jobst, Markus }, title = { Ein semiotisches Modell f{\"u}r die kartografische Kommunikation mit 3D }, year = { 2008 }, school = { Vienna University of Technology }, sorting = { 7936 } } @phdthesis{Nie06, author = { Nienhaus, Marc }, title = { Real-Time Non-Photorealistic Rendering Techniques for Illustrating 3D Scenes and their Dynamics }, year = { 2006 }, abstract = { This thesis addresses real-time non-photorealistic rendering techniques and their applications in interactive visualization. Real-time rendering has emerged as an important discipline within computer graphics developing a broad variety of rendering and optimization techniques along with dramatic advances in computer graphics hardware. While many applications of real-time rendering techniques concentrate on achieving photorealistic imagery, non-photorealistic computer graphics is investigating concepts and techniques that deliberately abstract from reality using expressive, stylized, or illustrative rendering; major goals include visual clarity, attractiveness, comprehensibility, and perceptibility in depictions. 
Non-photorealistic rendering techniques often rely on the concepts and principles found in traditional illustrations, graphics design, and art. The contributions of this thesis include three general-purpose real-time non-photorealistic rendering techniques: The edge-enhancement rendering technique accentuates visually important edges of 3D models facilitating the effective communication of their shape. The technique takes an image-space approach for edge detection and encodes the resulting edge intensities as texture, called edge map, to enhance 3D models on a per-object basis. The blueprint rendering technique extends the edge-enhancement technique to the 3D models’ occluded parts to accentuate their visible as well as their occluded visually important edges. Vivid and expressive depictions of complex aggregate objects become possible that facilitate the visual perception of spatial relationships and let viewers obtain insights into the models. The sketchy drawing rendering technique stylizes visually important edges of 3D models. Depicting 3D models in a sketchy manner allows us to express vagueness and is vitally important for communicating ideas and for presenting a preliminary, incomplete state. Two applications based on these real-time non-photorealistic rendering techniques in the fields of visualization demonstrate their ability to build compelling, interactive visual interfaces: Illustrative 3D city models apply non-photorealism to represent virtual spatial 3D environments together with associated thematic information. The abstracted, stylized depiction emphasizes components of 3D city models and thereby eases recognition, facilitates navigation, exploration, and analysis of spatial information. Illustrative CSG models apply non-photorealism to image-based CSG rendering. They enable us to visualize the design and assembly of complex CSG models in a comprehensible fashion. It also simplifies the interactive construction of CSG models. 
Finally, the thesis investigates an automated approach to depict dynamics as a complementary, important dimension in information contents by means of non-photorealistic rendering: The smart depiction system automatically generates compelling images of a 3D scene’s related dynamics following the traditional design principles found in comic books and storyboards. The system symbolizes past, ongoing, and future activities and events taking place in and related to 3D scenes. The non-photorealistic rendering techniques and exemplary applications presented in this thesis demonstrate that non-photorealistic rendering serves as a fundamental technology for expressive and effective visual communication and facilitates the implementation of user interfaces based on illustrating 3D scenes and their related dynamics in an informative and comprehensible way. }, keywords = { real-time rendering, non-photorealistic rendering, depicting dynamics }, school = { HPI, Universit{\"a}t Potsdam }, files = { user_upload/fachgebiete/doellner/publications/2006/Nie06/nienhausDissertation_small.pdf }, sorting = { 2560 } } @phdthesis{Buc06, author = { Buchholz, Henrik }, title = { Real-Time Visualization of 3D City Models }, year = { 2006 }, abstract = { An increasing number of applications requires user interfaces that facilitate the handling of large geodata sets. Using virtual 3D city models, complex geospatial information can be communicated visually in an intuitive way. Therefore, real-time visualization of virtual 3D city models represents a key functionality for interactive exploration, presentation, analysis, and manipulation of geospatial data. This thesis concentrates on the development and implementation of concepts and techniques for real-time city model visualization. It discusses rendering algorithms as well as complementary modeling concepts and interaction techniques. 
Particularly, the work introduces a new real-time rendering technique to handle city models of high complexity concerning texture size and number of textures. Such models are difficult to handle by current technology, primarily due to two problems: 1) Limited texture memory: The amount of simultaneously usable texture data is limited by the memory of the graphics hardware. 2) Limited number of textures: Using several thousand different textures simultaneously causes significant performance problems due to texture switch operations during rendering. The multiresolution texture atlases approach, introduced in this thesis, overcomes both problems. During rendering, it permanently maintains a small set of textures that are sufficient for the current view and the screen resolution available. The efficiency of multiresolution texture atlases is evaluated in performance tests. To summarize, the results demonstrate that the following goals have been achieved: a) Real-time rendering becomes possible for 3D scenes whose amount of texture data exceeds the main memory capacity. b) Overhead due to texture switches is kept permanently low, so that the number of different textures has no significant effect on the rendering frame rate. Furthermore, this thesis introduces two new approaches for real-time city model visualization that use textures as core visualization elements: 1) An approach for visualization of thematic information. 2) An approach for illustrative visualization of 3D city models. Both techniques demonstrate that multiresolution texture atlases provide a basic functionality for the development of new applications and systems in the domain of city model visualization. 
}, school = { HPI, Universit{\"a}t Potsdam }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2006/Buc06/Dissertation_Henrik_Buchholz.pdf }, sorting = { 2816 } } @phdthesis{Kir05, author = { Kirsch, Florian }, title = { Entwurf und Implementierung eines computergraphischen Systems zur Integration komplexer, echtzeitf{\"a}higer 3D-Renderingverfahren }, year = { 2005 }, abstract = { This thesis is about real-time rendering algorithms that can render 3D-geometry with quality and design features beyond standard display. Examples include algorithms to render shadows, reflections, or transparency. Integrating these algorithms into 3D-applications using today’s rendering libraries for real-time computer graphics is exceedingly difficult: On the one hand, the rendering algorithms are technically and algorithmically complicated for their own, on the other hand, combining several algorithms causes resource conflicts and side effects that are very diffi-cult to handle. Scene graph libraries, which intend to provide a software layer to abstract from computer graphics hardware, currently offer no mechanisms for using these rendering algo-rithms, either. The objective of this thesis is to design and to implement a software architecture for a scene graph library that models real-time rendering algorithms as software components allowing an effective usage of these algorithms for 3D-application development within the scene graph li-brary. An application developer using the scene graph library controls these components with elements in a scene description that describe the effect of a rendering algorithm for some geo-metry in the scene graph, but that do not contain hints about the actual implementation of the rendering algorithm. This allows for deploying rendering algorithms in 3D-applications even for application developers that do not have detailed knowledge about them. 
In this way, the com-plexity of development of rendering algorithms can be drastically reduced. In particular, the thesis focuses on the feasibility of combining several rendering algorithms within a scene at the same time. This requires to classify rendering algorithms into different categories, which are, each, evaluated using different approaches. In this way, components for different rendering algorithms can collaborate and adjust their usage of common graphics re-sources. The possibility of combining different rendering algorithms can be limited in several ways: The graphical result of the combination can be undefined, or fundamental technical restrictions can render it impossible to use two rendering algorithms at the same time. The software architecture described in this work is not able to remove these limitations, but it allows to combine a lot of different rendering algorithms that, until now, could not be combined due to the high complexi-ties of the required implementation. The capability of collaboration, however, depends on the kind of rendering algorithm: For instance, algorithms for rendering transparent geometry can be combined with other algorithms only with a complete redesign of the algorithm. Therefore, components in the scene graph library for displaying transparency can be combined with com-ponents for other rendering algorithms in a limited way only. The system developed in this work integrates and combines algorithms for displaying bump mapping, several variants of shadow and reflection algorithms, and image-based CSG algo-rithms. Hence, major rendering algorithms are available for the first time in a scene graph li-brary as components with high abstraction level. Despite the required additional indirections and abstraction layers, the system, in principle, allows for using and combining the rendering algorithms in real-time. 
}, keywords = { real-time rendering, multi-pass rendering, imagebased CSG }, school = { HPI, Universit{\"a}t Potsdam }, files = { user_upload/fachgebiete/doellner/publications/2005/Kir05/kirsch - dissertation.pdf }, sorting = { 5376 } } @phdthesis{Ker02, author = { Kersting, Oliver }, title = { Interaktive, dynamische 3D-Karten zur Kommunikation raumbezogener Informationen }, year = { 2002 }, abstract = { Raumbezogene Informationen stehen im Mittelpunkt vieler kommerzieller und wissenschaftli-cher Anwendungsfelder, wie zum Beispiel dem Bereich der mobilen Kommunikation, des Transports und Verkehrs, der Energieversorgung, der Geographie oder der Kartographie. Die technischen Fortschritte in der Datenerfassung, wie zum Beispiel durch Satellitenbeobachtung oder Laser-Scanning, erlauben die Sammlung immer größerer Mengen an raumbezogenen Da-ten in zunehmend hoher Qualität und Geschwindigkeit. Um diese Flut an raumbezogenen Daten zu bewältigen und aus ihnen Informationen zu gewin-nen bedarf es besonderer Verfahren und Strategien zur ihrer Verarbeitung und Visualisierung. Mit Visualisierung wird versucht, Einsicht in Daten, in ihre Struktur und in ihre Zusammenhän-ge zu erhalten. Hierbei dient Visualisierung als Instrument zur Präsentation, Exploration und Analyse von meist hochdimensionalen Datenräumen. Die computergestützte Visualisierung zieht in besonderem Maße von dem rapiden technischen Fortschritt der Computergraphik-Hardware Nutzen. Jedoch müssen für einzelne Anwendungs-felder, wie z. B. der Geovisualisierung, Verfahren, Werkzeuge und Systeme entwickelt werden, die den spezifischen Merkmalen des jeweiligen Gebiets Rechnung tragen, um dieses Potential voll auszuschöpfen. Als adäquate Medien zur Kommunikation räumlicher Informationen dienen seit Jahrhunderten kartographische Darstellungen, allen voran die Karte. 
Sie hat sich in den vergangenen Jahren vom statischen Medium hin zum interaktiven, dynamischen Medium entwickelt, ebenfalls durch den rasanten technischen Fortschritt, wie z. B. im Bereich der Geoinformationssysteme. Durch die Möglichkeit Informationsdichten und Darstellungsformen dynamisch an die jeweiligen Nut-zergruppen und Aufgabenanforderungen anzupassen, ergeben sich neue Formen der Kommuni-kation raumbezogener Information. Häufig werden dabei virtuelle, dreidimensionale Umgebun-gen zur Darstellung räumlicher Information genutzt, die in vielen Anwendungsfällen dem Men-schen vertrauter sind als orthographisch zweidimensionale Darstellungen, weil sie der natürli-chen Sehgewohnheit des Menschen näher kommen und bezüglich Raum und Zeit größere Frei-heitsgrade aufweisen. In dieser Arbeit werden Konzepte für 3D-Karten sowie Entwurf und Implementierung eines 3D-Kartensystems vorgestellt. 3D-Karten repräsentieren ein interaktives, dynamisches Medium zur Kommunikation raumbezogener Information, das auf interaktive dreidimensionaler Visualisie-rung beruht. Konkret beschreibt diese Arbeit das zugrundeliegende Softwaresystem, ausgewähl-te Visualisierungsstrategien und -verfahren sowie ausgewählte Anwendungsbeispiele für 3D-Karten. 
}, keywords = { Modellierung von 3D-Karten, Visualisierung, Rasterdaten, Vektordaten, digitales Gel{\"a}ndemodell }, school = { Universit{\"a}t Potsdam }, files = { user_upload/fachgebiete/doellner/publications/2002/Ker02/kersting - dissertation.pdf }, sorting = { 256 } } @phdthesis{Do96, author = { D{\"o}llner, J{\"u}rgen }, title = { Object-Oriented 3D Modelling, Animation and Interaction }, year = { 1996 }, school = { Westf{\"a}lische Wilhelms-Universit{\"a}t M{\"u}nster }, institution = { Angewandte Mathematik und Informatik, Universit{\"a}t M{\"u}nster }, sorting = { 96 } } @techreport{K2011, author = { Klimke, Jan }, title = { Towards 3D Interaction Services for 3D Geovirtual Environments }, year = { 2011 }, abstract = { Service-oriented architectures provide a flexible approach for 3D geodata access, processing, management and visualization. Services for 3D visualization allow for reliable, high quality visualization on client platforms that were otherwise not capable of storing, mapping, or rendering of 3D geodata. This extends the range of applications using 3D visualization of such data. While approaches for interactive visualization on lightweight clients exist, especially camera interaction remains a challenging task. Thin client applications do not have the information necessary for user interaction that provides, e.g., collision detection with scene object or assisting 3D camera control. This report provides an overview over the current status of my work in the area of interactive, service-based systems for 3D geovisualization. It outlines progress made in creating useful camera services delivering camera specifications derived from 3D geometry and semantic city model data. Further, my current research in progress is outlined. 
}, project = { HPI }, sorting = { 8 } } @techreport{DKN05, author = { D{\"o}llner, J{\"u}rgen and Kirsch, Florian and Nienhaus, Marc }, title = { Visualizing Design and Spatial Assembly of Interactive CSG }, year = { 2005 }, number = { 7 }, institution = { Hasso-Plattner-Institut an der Universit{\"a}t Potsdam }, sorting = { 5120 } } @techreport{DVH96, author = { D{\"o}llner, J{\"u}rgen and Voigtmann, A. and Hinrichs, Klaus }, title = { Guiding Principles for Programming in C++ }, year = { 1996 }, number = { 17/96-I }, institution = { Angewandte Mathematik und Informatik, Universit{\"a}t M{\"u}nster }, sorting = { 64 } } @techreport{DH95b, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { Geometric, Chronological, and Behavioral Modeling }, year = { 1995 }, number = { 18/95-I }, institution = { Angewandte Mathematik und Informatik, Universit{\"a}t M{\"u}nster }, files = { user_upload/fachgebiete/doellner/publications/1995/DH95b/modeling_dh.pdf }, sorting = { 3584 } } @techreport{DH95c, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { The Virtual Rendering System - A Toolkit for Object-Oriented 3D Graphics }, year = { 1995 }, number = { 19/95-I }, abstract = { 3D applications are built on top of procedural low-level graphics packages which are difficult to learn and to use because of their inherent complexity and their renderer oriented design. We present a fine-grained object oriented model which views 3D graphics from the developer’s perspective. Our approach is based on a logical decomposition of the elements of 3D graphics into three major classes: Geometric primitives define shapes and their geometry. Rendering attributes specify quality and appearance of primitives and of the rendering process. Virtual rendering devices process attributes and primitives through a set of generic rendering commands for different types of rendering techniques and packages. 
Virtual rendering devices encapsulate the functionality of most of today’s graphics packages making them exchangeable even at runtime without the need to recode the application. We have implemented our concepts in VRS, the Virtual Rendering System, as a portable C++ toolkit. Currently we have integrated the standard graphics packages OpenGL, PEX, XGL, and Radiance. }, institution = { Angewandte Mathematik und Informatik, Universit{\"a}t M{\"u}nster }, files = { user_upload/fachgebiete/doellner/publications/1995/DH95c/the-virtual-rendering-system.pdf }, sorting = { 3840 } } @unpublished{LD06a, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Towards Automating the Generation of Facade Textures of Virtual City Models }, year = { 2006 }, abstract = { This paper is concerned with concepts and techniques that support the automated generation of facade textures. In contrast to terrain textures, which can be derived automatically, capturing and processing facade textures still represent costly and time intensive tasks. In this paper we introduce a number of basic tools that allow us to work with aerial imagery captured by the High Resolution Stereo Camera (HRSC). In addition to the nadir view of standard aerial photography an HRSC image set contains multiple tilted views, in which facades are depicted more prominently. We develop a camera model for OpenGL that approximates the unique imaging sensor used for HRSC. With our tools, we can visually explore original HRSC imagery and synthesize idealized HRSC imagery based on virtual 3D city models. The visual exploration of HRSC images allows us to select regions from real-world images that have special properties useful for algorithm development, whereas the synthesis is used to generate test data and to run simulations. 
}, note = { ISPRS Commission II, WG II/5 Workshop, Vienna }, files = { user_upload/fachgebiete/doellner/publications/2006/LD06a/Towards Automating the Generation of Facade Textures of Virtual City Models.pdf }, sorting = { 2048 } } @unpublished{LD06, author = { Lorenz, Haik and D{\"o}llner, J{\"u}rgen }, title = { Facade Texture Quality Estimation for Aerial Photography in 3D City Models }, year = { 2006 }, abstract = { This contribution presents concepts and methods to estimate the quality of facade textures taken from a set of overlapping aerial images. It aims at providing a key component for a workflow to derive appearance elements such as facade textures for large-scale virtual 3D city models in an automated way. We describe a technique to project aerial imagery generated by the High Resolution Stereo Camera (HRSC) onto the 3D geometry of a virtual city model. This technique can be applied in real-time using 3D graphics hardware. It is based on rectified images that are extracted for each facade at a fixed spatial resolution along with a quality map encoding the actual effective spatial resolution of the projected aerial image. We demonstrate the potential of this combination of facade images and quality maps for applications such as facade coverage determination. }, note = { Meeting of the ICA Commission on Visualization and Virtual Environments, Vancouver, WA }, files = { user_upload/fachgebiete/doellner/publications/2006/LD06/Facade Texture Quality Estimation.pdf }, sorting = { 2304 } } @misc{HHD10, author = { Hildebrandt, Dieter and Hagedorn, Benjamin and D{\"o}llner, J{\"u}rgen }, title = { Image-Based, Interactive Visualization of Complex 3D Geovirtual Environments on Lightweight Devices }, year = { 2010 }, abstract = { In this paper, we present strategies for service-oriented, standards and image-based 3D geovisualization that have the potential to provide interactive visualization of complex 3D geovirtual environments (3DGeoVE) on lightweight devices. 
In our approach, interactive geovisualization clients retrieve sets of 2D images of projective views of 3DGeoVEs generated by a 3D rendering service. As the key advantage of the image-based approach, the complexity that a client is exposed to for displaying a visual representation is reduced to a constant factor primarily depending on the image resolution. To provide users with a high degree of interactivity, we propose strategies that are based on additional service-side functionality and on exploiting multiple layers of information encoded into the images for the local reconstruction of visual representations of the remote 3DGeoVE. The use of service-orientation and standards facilitates designing distributed 3D geovisualization systems that are open, interoperable and can easily be adapted to changing requirements. We demonstrate the validity of the proposed strategies by presenting several image-based 3D clients for the case of virtual 3D city models. }, booktitle = { Proceedings of the 7th International Symposium on LBS \& TeleCartography }, sorting = { 1536 }, state = { 3 } } @misc{Tra09, author = { Trapp, Matthias }, title = { Interaktive Visualisierung des R{\"o}mischen K{\"o}lns }, year = { 2009 }, note = { german }, project = { NFG }, files = { user_upload/fachgebiete/doellner/publications/2009/Tra09/HPI Magazin.pdf }, sorting = { 6400 } } @misc{Job08, author = { Jobst, Markus }, title = { Geo-Kommunikation mit 3D? }, year = { 2008 }, abstract = { Die fortschreitende Entwicklung von 3D Geo-Anwendungen für den öffentlichen Einsatz verbreitert das eingesetzte Spektrum der Kartographie. Vielfach dienen virtuelle 3D Umgebungen der „besseren“ Anschaulichkeit und werden für ein leichteres Raumverständnis der dritten Dimension eingesetzt. Diese Schnittstelle zur Öffentlichkeit erfordert Anpassungen des grafischen Inhaltes sowohl an den Wissenstand der Betrachter als auch an die Eigenschaften der verwendeten Geo-Medien. 
Somit wird man als Kartograph direkt mit einer zentralen Fragestellung konfrontiert: Kann mit dieser Präsentationsart erfolgreiche Geo-Kommunikation betrieben werden? Wenn ja, welche Adaptierungen werden auf syntaktischer Ebene sinnvoll oder gar notwendig? Die Argumentation für 3D Visualisierungsmethodik stützt sich auf aktuelle Erkenntnisse der Kommunikationsforschung, Lerntheorien, Wissensakquisition und menschlicher Verarbeitungsprozesse. Es sollen für die semiotischen Modellierungsentwürfe die Grundlagen perzeptionsgerechter BildschirmKartographie und medienspezifische Eigenschaften herangezogen werden. Die Verwendung von dynamischen dreidimensionalen Kartenelementen bzw. Kartendarstellungen bietet bei der räumlichen Wissensvermittlung die Möglichkeit, auf eine besonders breite Wissensbasis bei den Benutzern, weil diese eng mit der menschlichen Entwicklung gekoppelt ist, zurückzugreifen. Damit können die mentalen Weltmodelle/das Weltwissen der Benutzer direkt aktiviert, benutzt und ergänzt werden. Die traditionellen Herstellungsmethoden der Kartographie sind andererseits eng mit den benutzten Medien verbunden. Dies bedeutet, dass sowohl die Kartographische Semiotik als auch die darstellbare Informationstiefe dem medialen Auflösungsverhalten folgen muss, um eine generelle unmissverständliche Wahrnehmung zu ermöglichen. Für zweidimensionale Bildschirmkarten wurden einige dieser Beziehungen zwischen dem technischen Medium Bildschirm, der Informationstiefe und Perzeption übernommen. Für die multimediale 3D Kartographie ist auch eine Anpassung an das Übertragungsmedium notwendig. Allerdings werden neben weiteren technischen und geometrischen Abhängigkeiten auch psychologische Parameter Einfluss auf die Wahrnehmbarkeit der räumlich basierten Information nehmen. Dieser Beitrag diskutiert den aktuellen Stellenwert von virtuellen 3D Umgebungen für eine Kartographische Kommunikation. 
Die aktuellen Präsentationsformen von virtuellen 3D Umgebungen sind für den Autor noch nicht hinreichend hinsichtlich der syntaktischen Anforderungen, der Geo-Medientechnik und eindeutigen Wahrnehmbarkeit, adaptiert. Erst wenn das aktuelle technische Potential für die Umsetzung der effektiven und expressiven Informationsvermittlung eingesetzt wird, können Nutzbarkeitsuntersuchungen die theoretische Arbeit vervollständigen und neue Perspektiven für die Kartographie aufzeigen. }, note = { Einladung der DEUTSCHE GESELLSCHAFT FÜR KARTOGRAPHIE e.V. Sektion Berlin-Brandenburg }, sorting = { 64 } } @misc{Job08a, author = { Jobst, Markus }, title = { Cartographic Visualization Services (CVS) for Service-based Geo-Information Systems }, year = { 2008 }, note = { Future Trends in SOC 2008, The Annual Symposium of the HPI Research School }, sorting = { 96 } } @misc{GD08, author = { Glander, Tassilo and D{\"o}llner, J{\"u}rgen }, title = { Automated Cell-Based Generalization of Virtual 3D City Models with Dynamic Landmark Highlighting }, year = { 2008 }, note = { Online Proceedings of 11th ICA Workshop on Generalisation and Multiple Representations }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2008/GD08/paper_ICA_workshop.pdf }, link1 = { http://aci.ign.fr/montpellier2008/papers/29_Glander_Dollner.pdf }, sorting = { 7680 } } @misc{NBKD05, author = { Nienhaus, Marc and Buchholz, Henrik and Kirsch, Florian and D{\"o}llner, J{\"u}rgen }, title = { Non-Photorealistic Visualization of Berlin }, year = { 2005 }, sorting = { 4608 } } @misc{BD05a, author = { Buchholz, Henrik and D{\"o}llner, J{\"u}rgen }, title = { Visual Data Mining in Large-Scale 3D City Models }, year = { 2005 }, abstract = { This paper presents an approach towards visual data mining in large-scale virtual 3D city models. 
The increasing availability of massive thematic data related to urban areas such as socio-demographic data, traffic data, or real-estate data, raises the question how to get insight and how to effectively visualize contained information. In our approach, we extend a real-time 3D city model system by features that interactively map thematic data to specified graphics variables of the city model’s geometry and appearance. In particular, a rendering technique is explained that can efficiently represent and reconfigure the scene graph of a 3D city model even for large-scale models. The resulting dynamic 3D city models serve as general geovisualization tools to effectively analyze, explore, and present geometric and related thematic information of urban areas. Sample applications include city information systems, urban planning and management systems, and navigation systems. }, files = { user_upload/fachgebiete/doellner/publications/2005/BD05a/Gisplanet_buchholz_doellner.pdf }, sorting = { 4864 } } @misc{DH95d, author = { D{\"o}llner, J{\"u}rgen and Hinrichs, Klaus }, title = { An Object-Oriented Framework for 3D Modeling, Animation and Interaction }, year = { 1995 }, note = { Abstracts of the Workshop Object-Oriented Computing in the Natural Sciences (OOCNS '95), IMAG, Grenoble }, sorting = { 4096 } } @other{L13, author = { Limberger, Daniel }, title = { Photorealistic Rendering of Day and Night Sky Phenomena in Interactive 3D Geovirtual Environments }, year = { 2013 }, month = { 2 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/L13/Daniel_Limberger_Poster.pdf }, sorting = { 2560 } } @other{RKD10, author = { Richardt, Christian and Kyprianidis, Jan Eric and Dodgson, Neil A. 
}, title = { Stereo Coherence in Watercolour Rendering }, year = { 2010 }, month = { 6 }, abstract = { We investigate stereo coherence – or consistency of stereoscopic views – in non-photorealistic rendering (NPR) by example. We designed and carried out a pilot user study that compared stereoscopic animations created using two different watercolour rendering styles. The rendering styles use image-space and object-space noise respectively. A clear majority of participants preferred the object-based technique and found it more comfortable to watch than the image-space technique which suffers from the ‘shower door effect’. Based on these preliminary results, we conclude that stereo coherence is indeed a desirable property for non-photorealistic rendering techniques. }, booktitle = { Poster at Symposium on Computational Aesthetics (CAe) and Symposium on Non-Photorealistic Animation and Rendering (NPAR) }, sorting = { 1792 } } @other{KSKD10a, author = { Kyprianidis, Jan Eric and Semmo, Amir and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Anisotropic Kuwahara Filtering with Polynomial Weighting Functions }, year = { 2010 }, month = { 6 }, abstract = { In this work we present new weighting functions for the anisotropic Kuwahara filter. The anisotropic Kuwahara filter is an edge-preserving filter that is especially useful for creating stylized abstractions from images or videos. It is based on a generalization of the Kuwahara filter that is adapted to the local shape of features. For the smoothing process, the anisotropic Kuwahara filter uses weighting functions that use convolution in their definition. For an efficient implementation, these weighting functions are therefore usually sampled into a texture map. By contrast, our new weighting functions do not require convolution and can be efficiently computed directly during the filtering in real-time. 
We show that our approach creates output of similar quality as the original anisotropic Kuwahara filter and present an evaluation scheme to compute the new weighting functions efficiently by using rotational symmetries. }, booktitle = { NPAR Poster Session }, project = { gpuakf }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KSKD10a/jkyprian-npar2010.pdf }, sorting = { 2304 } } @other{KKD09a, author = { Kyprianidis, Jan Eric and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Image and Video Abstraction by Anisotropic Kuwahara Filtering }, year = { 2009 }, month = { 8 }, booktitle = { 7th Symposium on Non-Photorealistic Animation and Rendering (NPAR), Poster Session }, project = { gpuakf }, files = { user_upload/fachgebiete/doellner/publications/2009/KKD09a/jkyprian-npar2009.pdf }, sorting = { 3584 } } @other{KD08a, author = { Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Abstraction by Structure Adaptive Filtering }, year = { 2008 }, abstract = { Photorealistic visualizations used in 3D applications designed for typical desktops usually show high visual information density. In [Kyprianidis and Döllner 2008] we present non-photorealistic image processing techniques to distill the perceptually important information and optimize the content for the limited screen space of small displays. Our method extends the approach of [Winnemöller et al. 2006] to use iterated bilateral filtering for abstraction and difference-of-Gaussians (DoG) for edge extraction by adapting it to the local orientation of the input. }, booktitle = { NPAR Poster Session }, project = { flowabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2008/KD08a/jkyprian-npar2008.pdf }, sorting = { 4352 } }