# This BibTeX File has been generated by # the Typo3 extension 'Sixpack-4-T3 by Sixten Boeck' # # URL: # Date: 07/28/2017 # Non-Standard BibTex fields are included. # state: 0 = published, 1 = accepted, 2 = submitted, 3 = to be published // if missing, published is assumed # extern,deleted,hidden: 0 = false, 1 = true // if missing, false is assumed # link format: Title Url // separated by a whitespace @article{SLKD2016, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Interactive Oil Paint Filtering }, journal = { Computers \& Graphics }, year = { 2016 }, volume = { 55 }, pages = { 157--171 }, abstract = {

This paper presents an interactive system for transforming images into an oil paint look. The system comprises two major stages. First, it derives dominant colors from an input image for feature-aware recolorization and quantization to conform with a global color palette. Afterwards, it employs non-linear filtering based on the smoothed structure adapted to the main feature contours of the quantized image to synthesize a paint texture in real-time. Our filtering approach leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting. To this end, our system introduces a generalized brush-based painting interface that operates within parameter spaces to locally adjust the level of abstraction of the filtering effects. Several results demonstrate the various applications of our filtering approach to different genres of photography.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SLKD2016/oilpaint-cag2016_authors_version.pdf }, doi = { 10.1016/j.cag.2015.12.001 }, sorting = { 2560 } } @article{SD2015, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Image Filtering for Level-of-Abstraction Texturing of Virtual 3D Scenes }, journal = { Computers \& Graphics }, year = { 2015 }, volume = { 52 }, pages = { 181--198 }, abstract = {

Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-process stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping and (2) for each mipmap level separately to enable a progressive level of abstraction, using (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. We demonstrate the potentials of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines, and is extensible for custom interaction techniques.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SD2015/asemmo-cag2015-authors-version.pdf }, doi = { 10.1016/j.cag.2015.02.001 }, sorting = { 2560 } } @article{PSTD2014, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective 3D Panoramas }, journal = { International Journal of Geographical Information Science (IJGIS) }, year = { 2014 }, volume = { 28 }, number = { 10 }, pages = { 2030--2051 }, abstract = {
This article presents multi-perspective 3D panoramas that focus on visualizing 3D geovirtual environments (3D GeoVEs) for navigation and exploration tasks. Their key element, a multi-perspective view, seamlessly combines what is seen from multiple viewpoints into a single image. This approach facilitates the presentation of information for virtual 3D city and landscape models, particularly by reducing occlusions, increasing screen-space utilization, and providing additional context within a single image. We complement multi-perspective views with cartographic visualization techniques to stylize features according to their semantics and highlight important or prioritized information. When combined, both techniques constitute the core implementation of interactive, multi-perspective 3D panoramas. They offer a large number of effective means for visual communication of 3D spatial information, a high degree of customization with respect to cartographic design, and manifold applications in different domains. We discuss design decisions of 3D panoramas for the exploration of and navigation in 3D GeoVEs. We also discuss a preliminary user study that indicates that 3D panoramas are a promising approach for navigation systems using 3D GeoVEs.
}, keywords = { multi-perspective visualization, panorama, focus+context visualization, 3D geovirtual environments, cartographic design }, project = { HPI;NFGII }, doi = { 10.1080/13658816.2014.922686 }, link1 = { http://dx.doi.org/10.1080/13658816.2014.922686 }, sorting = { 1792 } } @article{DEREKD2013, author = { Delikostidis, Ioannis and Engel, Juri and Retsios, Bas and van Elzakker, Corné P.J.M. and Kraak, Menno-Jan and Döllner, Jürgen }, title = { Increasing the Usability of Pedestrian Navigation Interfaces by means of Landmark Visibility Analysis }, journal = { The Journal of Navigation }, year = { 2013 }, volume = { 66 }, number = { 04 }, pages = { 523--537 }, month = { 6 }, abstract = { Communicating location-specific information to pedestrians is a challenging task which can be aided by user-friendly digital technologies. In this paper, landmark visibility analysis, as a means for developing more usable pedestrian navigation systems is discussed. Using an algorithmic framework for image-based 3D analysis, this method integrates a 3D city model with identified landmarks and produces raster visibility layers for each one. This output enables an Android phone prototype application to indicate the visibility of landmarks from the user’s actual position. Tested in the field, the method achieves sufficient accuracy for the context of use and improves navigation efficiency and effectiveness. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { pedestrian navigation, landmark visibility, user-centred design, usability testing }, publisher = { The Royal Institute of Navigation }, project = { NFGII }, issn = { 1469-7785 }, doi = { 10.1017/S0373463313000209 }, link1 = { http://journals.cambridge.org/article_S0373463313000209 }, sorting = { 3072 } } @article{TD2013, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { 2.5D Clip-Surfaces for Technical Visualization }, journal = { Journal of WSCG }, year = { 2013 }, volume = { 21 }, number = { 1 }, pages = { 89--96 }, month = { 6 }, abstract = { The concept of clipping planes is well known in computer graphics and can be used to create cut-away views. But clipping against just analytical defined planes is not always suitable for communicating every aspect of such visualization. For example, in hand-drawn technical illustrations, artists tend to communicate the difference between a cut and a model feature by using non-regular, sketchy cut lines instead of straight ones. To enable this functionality in computer graphics, this paper presents a technique for applying 2.5D clip-surfaces in real-time. Therefore, the clip plane equation is extended with an additional offset map, which can be represented by a texture map that contains height values. Clipping is then performed by varying the clip plane equation with respect to such an offset map. Further, a capping technique is proposed that enables the rendering of caps onto the clipped area to convey the impression of solid material. It avoids a re-meshing of a solid polygonal mesh after clipping is performed. Our approach is pixel precise, applicable in real-time, and takes fully advantage of graphics accelerators. 
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { clipping planes, real-time rendering, technical 3D visualization }, editor = { Václav Skala }, publisher = { Union Agency }, address = { Na Mazinach 9, CZ 322 00 Plzen, Czech Republic }, booktitle = { Proceedings of WSCG 2013: 21st International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision }, project = { NFGII }, issn = { 1213-6972 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=mBasfz37VoY }, link2 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1861946-5d-clip-surfaces-technical-visualization/ }, link3 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/TD2013/clipping.pdf }, sorting = { 1792 } } @article{RD13, author = { Richter, Rico and Döllner, Jürgen }, title = { Concepts and techniques for integration, analysis and visualization of massive 3D point clouds }, journal = { Computers, Environment and Urban Systems }, year = { 2013 }, volume = { 45 }, pages = { 114--124 }, abstract = { Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. 
Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges. }, project = { NFGII }, doi = { 10.1016/j.compenvurbsys.2013.07.004 }, link1 = { http://www.sciencedirect.com/science/article/pii/S0198971513000653 }, sorting = { 64 } } @article{RBD13, author = { Richter, Rico and Behrens, Markus and Döllner, Jürgen }, title = { Object class segmentation of massive 3D point clouds of urban areas using point cloud topology }, journal = { International Journal of Remote Sensing }, year = { 2013 }, volume = { 34 }, number = { 23 }, pages = { 8408--8424 }, abstract = { A large number of remote-sensing techniques and image-based photogrammetric approaches allow an efficient generation of massive 3D point clouds of our physical environment. 
The efficient processing, analysis, exploration, and visualization of massive 3D point clouds constitute challenging tasks for applications, systems, and workflows in disciplines such as urban planning, environmental monitoring, disaster management, and homeland security. We present an approach to segment massive 3D point clouds according to object classes of virtual urban environments including terrain, building, vegetation, water, and infrastructure. The classification relies on analysing the point cloud topology; it does not require per-point attributes or representative training data. The approach is based on an iterative multi-pass processing scheme, where each pass focuses on different topological features and considers already detected object classes from previous passes. To cope with the massive amount of data, out-of-core spatial data structures and graphics processing unit (GPU)-accelerated algorithms are utilized. Classification results are discussed based on a massive 3D point cloud with almost 5 billion points of a city. The results indicate that object-class-enriched 3D point clouds can substantially improve analysis algorithms and applications as well as enhance visualization techniques. }, project = { NFGII }, doi = { 10.1080/01431161.2013.838710 }, link1 = { http://www.tandfonline.com/doi/full/10.1080/01431161.2013.838710 }, sorting = { 32 } } @article{RKD12, author = { Richter, Rico and Kyprianidis, Jan Eric and Döllner, Jürgen }, title = { Out-of-Core GPU-based Change Detection in Massive 3D Point Clouds }, journal = { Transactions in GIS }, year = { 2013 }, volume = { 17 }, number = { 5 }, pages = { 724--741 }, abstract = { If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. 
In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results. Our approach enables conclusions to be drawn about temporal changes in large highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications. }, project = { NFGII }, doi = { 10.1111/j.1467-9671.2012.01362.x }, link1 = { http://onlinelibrary.wiley.com/doi/10.1111/j.1467-9671.2012.01362.x/abstract }, sorting = { 256 } } @article{ED12_2, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Immersive Visualization of Virtual 3D City Models and its Applications in E-Planning }, journal = { International Journal of E-Planning Research (IJEPR) }, year = { 2012 }, volume = { 1 }, number = { 4 }, pages = { 17--34 }, abstract = { Immersive visualization offers an intuitive access to and an effective way of realizing, exploring, and analyzing virtual 3D city models, which are essential tools for effective communication and management of complex urban spatial information in e-planning. In particular, immersive visualization allows for simulating planning scenarios and to receive a close-to-reality impression by both non-expert and expert stakeholders. 
This contribution is concerned with the main requirements and technical concepts of a system for visualizing virtual 3D city models in large-scale, fully immersive environments. It allows stakeholders ranging from citizens to decision-makers to explore and examine the virtual 3D city model and embedded planning models “in situ”. Fully immersive environments involve a number of specific requirements for both hardware and 3D rendering including enhanced 3D rendering techniques, an immersion-aware, autonomous, and assistive 3D camera system, and a synthetic, immersion-supporting soundscape. Based on these requirements, we have implemented a prototypical visualization system that we present in this article. The characteristics of fully immersive visualization enable a number of new applications within e-planning workflows and processes, in particular, with respect to public participation, decision support, and location marketing. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { IGI Global }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/ED12-2/engel_IJEPR_2012_draft.pdf }, sorting = { 3872 } } @article{STKD12, author = { Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Interactive Visualization of Generalized Virtual 3D City Models using Level-of-Abstraction Transitions }, journal = { Computer Graphics Forum }, year = { 2012 }, volume = { 31 }, number = { 3 }, pages = { 885--894 }, abstract = {

Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary. Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.
}, note = { Proceedings EuroVis 2012 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/STKD12/asemmo-eurovis2012.pdf }, doi = { 10.1111/j.1467-8659.2012.03081.x }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=VXqtw44KxY4 }, sorting = { 2304 } } @article{SHTD2012, author = { Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Cartography-Oriented Visualization of Virtual 3D City Models }, journal = { Photogrammetrie - Fernerkundung - Geoinformation (PFG) }, year = { 2012 }, number = { 4 }, pages = { 455--465 }, abstract = {

Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.
}, keywords = { 3D city models, cartography-oriented visualization, style description languages, real-time rendering }, publisher = { E. Schweizerbart'sche Verlagsbuchhandlung }, address = { Johannesstrasse 3A, D-70176 Stuttgart, Germany }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/SHTD2012/asemmo-PFG2012.pdf }, issn = { 1432-8364 }, doi = { 10.1127/1432-8364/2012/0131 }, sorting = { 16 } } @inbook{SBTD2017, author = { Scheibel, Willy and Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Attributed Vertex Clouds }, year = { 2017 }, month = { 3 }, abstract = {

In today's computer graphics applications, large 3D scenes are rendered which consist of polygonal geometries such as triangle meshes. Using state-of-the-art techniques, this geometry is often represented on the GPU using vertex and index buffers, as well as additional auxiliary data such as textures or uniform buffers. For polygonal meshes of arbitrary complexity, the described approach is indispensable. However, there are several types of simpler geometries (e.g., cuboids, spheres, tubes, or splats) that can be generated procedurally. We present an efficient data representation and rendering concept for such geometries, denoted as attributed vertex clouds (AVCs). Using this approach, geometry is generated on the GPU during execution of the programmable rendering pipeline. Each vertex is used as the argument for a function that procedurally generates the target geometry. This function is called a transfer function, and it is implemented using shader programs and therefore executed as part of the rendering process. This approach allows for compact geometry representation and results in reduced memory footprints in comparison to traditional representations. By shifting geometry generation to the GPU, the resulting volatile geometry can be controlled flexibly, i.e., its position, parameterization, and even the type of geometry can be modified without requiring state changes or uploading new data to the GPU. Performance measurements suggest improved rendering times and reduced memory transmission through the rendering pipeline.
}, editor = { Christopher Oat }, publisher = { Wolfgang Engel }, series = { GPU Pro }, edition = { 8 }, booktitle = { GPU Zen }, project = { HPI;NFGII;BIMAP }, institution = { Hasso Plattner Institute, University of Potsdam }, sorting = { 4 }, state = { 3 } } @inproceedings{BTD2014, author = { Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Animated Visualization of Massive Air-Traffic Trajectories }, year = { 2014 }, pages = { 172--181 }, abstract = { With increasing numbers of flights world-wide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. Interactive and dynamic 3D visualization and visual analysis of massive aircraft trajectories, i.e., analytical reasoning enabled by interactive cyber worlds, can be used to approach these challenges. To facilitate this kind of analysis, especially in the context of real-time data, interactive tools for filtering, mapping, and rendering are required. In particular, the mapping process should be configurable at run-time and support both static mappings and animations to allow users to effectively explore and realize movement dynamics. However, with growing data size and complexity, these stages of the visualization pipeline require computational efficient implementations to be capable of processing within real-time constraints. This paper presents an approach for real-time animated visualization of massive air-traffic data, that implements all stages of the visualization pipeline based on GPU techniques for efficient processing. 
It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearances, as well as (3) real-time rendering within 3D virtual environments, such as virtual 3D airport and city models. Based on this pipeline, different visualization metaphors (e.g., temporal focus+context, density maps, and overview+detail visualization) are implemented and discussed. The presented concepts and implementation can be generally used as visual analytics and data mining techniques in cyber worlds, e.g., to visualize movement data, geo-referenced networks, or other spatio-temporal data. }, keywords = { spatio-temporal visualization, trajectory visualization, 3D visualization, visual analytics, real-time rendering }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of CyberWorlds 2014 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/cw2014_draft.pdf }, isbn = { 978-1-4799-4677-8/14 }, doi = { 10.1109/CW.2014.32 }, sorting = { 1024 } } @incollection{LSHD2017, author = { Limberger, Daniel and Scheibel, Willy and Hahn, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Reducing Visual Complexity in Software Maps using Importance-based Aggregation of Nodes }, year = { 2017 }, abstract = {

Depicting massive software system data using software maps can result in visual clutter and increased cognitive load. This paper introduces an adaptive level-of-detail (LoD) technique that uses scoring for interactive aggregation on a per-node basis. The scoring approximates importance by degree-of-interest measures as well as screen and user-interaction scores. The technique adheres to established aggregation guidelines and was evaluated by means of two user studies. The first user study investigates task completion time in visual search. The second evaluates the readability of the presented nesting level contouring for aggregates. With the adaptive LoD technique, software maps allow for multi-resolution depictions of software system information. It facilitates efficient identification of important nodes and allows for additional annotation.

© The Authors 2017. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 8th International Conference on Information Visualization Theory and Applications (IVAPP 2017).
}, affiliation = { Hasso Plattner Institute, University of Potsdam }, series = { IVAPP 2017 }, booktitle = { Proceedings of the 8th International Conference on Information Visualization Theory and Applications }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2017/LSHD2017/LSHD2017.pdf }, sorting = { 1 } } @proceedings{Buschmann2012a, author = { Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Challenges and Approaches for the Visualization of Movement Trajectories in 3D Geovirtual Environments }, year = { 2012 }, abstract = { The visualization of trajectories and their attributes represents an essential functionality for spatio-temporal data visualization and analysis. Many visualization methods, however, focus mainly on sparse 2D movements or consider only the 2D components of movements. This paper is concerned with true 3D movement data, i.e., movements that take place in the three-dimensional space and which characteristics significantly depend an all dimensions. In this case, spatio-temporal visualization approaches need to map all three spatial dimensions together with required mappings for associated attributes. We describe visualization approaches for true 3D movement data and evaluate their application within 3D geovirtual environments. We also identify challenges and propose approaches for the interactive visualization of 3D movement data using 3D geovirtual environments as scenery. 
}, keywords = { spatio-temporal data, trajectories, interactive 3D visualization, visual analytics }, booktitle = { GIScience workshop on GeoVisual Analytics, Time to Focus on Time }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/BTD2012/geovat2012_paper.pdf }, sorting = { 1024 } } @inproceedings{SDTKDP2016, author = { Semmo, Amir and D{\"u}rschmid, Tobias and Trapp, Matthias and Klingbeil, Mandy and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Image Filtering with Multiple Levels-of-Control on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = {
With the continuous development of mobile graphics hardware, interactive high-quality image stylization based on nonlinear filtering is becoming feasible and increasingly used in casual creativity apps. However, these apps often only serve high-level controls to parameterize image filters and generally lack support for low-level (artistic) control, thus automating art creation rather than assisting it. This work presents a GPU-based framework that enables to parameterize image filters at three levels of control: (1) presets followed by (2) global parameter adjustments can be interactively refined by (3) complementary on-screen painting that operates within the filters' parameter spaces for local adjustments. The framework provides a modular XML-based effect scheme to effectively build complex image processing chains-using these interactive filters as building blocks-that can be efficiently processed on mobile devices. Thereby, global and local parameterizations are directed with higher-level algorithmic support to ease the interactive editing process, which is demonstrated by state-of-the-art stylization effects, such as oil paint filtering and watercolor rendering.
}, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDTKDP2016/asemmo-mgia2016-authors-version.pdf }, doi = { 10.1145/2999508.2999521 }, sorting = { 768 } } @inproceedings{PSDS2016, author = { Pasewaldt, Sebastian and Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Artistic Image Processing and Editing on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = { BeCasso is a mobile app that enables users to transform photos into high-quality, high-resolution non-photorealistic renditions, such as oil and watercolor paintings, cartoons, and colored pencil drawings, which are inspired by real-world paintings or drawing techniques. In contrast to neuronal network and physically-based approaches, the app employs state-of-the-art nonlinear image filtering. For example, oil paint and cartoon effects are based on smoothed structure information to interactively synthesize renderings with soft color transitions. BeCasso empowers users to easily create aesthetic renderings by implementing a two-fold strategy: First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Thereby, users can obtain initial renditions that may be fine-tuned afterwards. Second, it enables local style adjustments: using on-screen painting metaphors, users are able to locally adjust different stylization features, e.g., to vary the level of abstraction, pen, brush and stroke direction or the contour lines. In this way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications (Demo) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/PSDS2016/mgia-demo2016_authors_version.pdf }, doi = { 10.1145/2999508.2999518 }, sorting = { 512 }, state = { 1 } } @inproceedings{SDS2016, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Image Stylization by Interactive Oil Paint Filtering on Mobile Devices }, year = { 2016 }, month = { 7 }, abstract = { BeCasso is a mobile app that enables users to transform photos into an oil paint look that is inspired by traditional painting elements. In contrast to stroke-based approaches, the app uses state-of-the-art nonlinear image filtering techniques based on smoothed structure information to interactively synthesize oil paint renderings with soft color transitions. BeCasso empowers users to easily create aesthetic oil paint renderings by implementing a two-fold strategy. First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Second, it introduces a novel interaction approach that operates within the parameter spaces of the stylization effect to facilitate creative control over the visual output: on-screen painting enables users to locally adjust the appearance in image regions, e.g., to vary the level of abstraction, brush and stroke direction. This way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Appy Hour }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDS2016/asemmo-siggraph2016-appyhour.pdf }, doi = { 10.1145/2936744.2936750 }, sorting = { 1792 } } @inproceedings{LFHTD, author = { Limberger, Daniel and Fiedler, Carolin and Hahn, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluation of Sketchiness as a Visual Variable for 2.5D Treemaps }, year = { 2016 }, month = { 5 }, abstract = {

Interactive 2.5D treemaps serve as an effective tool for the visualization of attributed hierarchies, enabling exploration of non-spatial, multi-variate, hierarchical data. In this paper the suitability of sketchiness as a visual variable, e.g., for uncertainty, is evaluated. Therefore, a design space for sketchy rendering in 2.5D and integration details for real-time applications are presented. The results of three user studies indicate, that sketchiness is a promising candidate for a visual variable that can be used independently and in addition to others, e.g., color and height.

© The Authors 2016. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 20th International Conference on Information Visualization (IV'16).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Analytics, 2.5D Treemaps, Sketchiness, Visual Variables, Uncertainty }, booktitle = { Proceedings of the 20th International Conference on Information Visualization (IV'16) }, project = { HPI;NFGII }, sorting = { 2048 } } @inproceedings{STDDP2016, author = { Semmo, Amir and Trapp, Matthias and D{\"u}rschmid, Tobias and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Multi-scale Oil Paint Filtering on Mobile Devices }, year = { 2016 }, abstract = {
This work presents an interactive mobile implementation of a filter that transforms images into an oil paint look. At this, a multi-scale approach that processes image pyramids is introduced that uses flow-based joint bilateral upsampling to achieve deliberate levels of abstraction at multiple scales and interactive frame rates. The approach facilitates the implementation of interactive tools that adjust the appearance of filtering effects at run-time, which is demonstrated by an on-screen painting interface for per-pixel parameterization that fosters the casual creativity of non-artists.
}, booktitle = { Proceedings ACM SIGGRAPH Posters }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STDDP2016/asemmo-siggraph2016-poster.pdf }, doi = { 10.1145/2945078.2945120 }, sorting = { 1536 } } @inproceedings{STPD2016, author = { Semmo, Amir and Trapp, Matthias and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Oil Paint Filtering On Mobile Devices }, year = { 2016 }, abstract = {
Image stylization enjoys a growing popularity on mobile devices to foster casual creativity. However, the implementation and provision of high-quality image filters for artistic rendering is still faced by the inherent limitations of mobile graphics hardware such as computing power and memory resources. This work presents a mobile implementation of a filter that transforms images into an oil paint look, thereby highlighting concepts and techniques on how to perform multi-stage nonlinear image filtering on mobile devices. The proposed implementation is based on OpenGL ES and the OpenGL ES shading language, and supports on-screen painting to interactively adjust the appearance in local image regions, e.g., to vary the level of abstraction, brush, and stroke direction. Evaluations of the implementation indicate interactive performance and results that are of similar aesthetic quality to its original desktop variant.
}, booktitle = { Expressive 2016 - Posters, Artworks, and Bridging Papers }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STPD2016/asemmo-exressive2016-poster.pdf }, doi = { 10.2312/exp.20161255 }, sorting = { 1280 } } @inproceedings{LSLD2016, author = { Limberger, Daniel and Scheibel, Willy and Lemme, Stefan and D{\"o}llner, J{\"u}rgen }, title = { Dynamic 2.5D Treemaps using Declarative 3D on the Web }, year = { 2016 }, pages = { 33--36 }, abstract = { The 2.5D treemap represents a general purpose visualization technique to map multi-variate hierarchical data in a scalable, interactive, and consistent way used in a number of application fields. In this paper, we explore the capabilities of Declarative 3D for the web-based implementation of 2.5D treemap clients. Particularly, we investigate how X3DOM and XML3D can be used to implement clients with equivalent features that interactively display 2.5D treemaps with dynamic mapping of attributes. We also show a first step towards a glTF-based implementation. These approaches are benchmarked focusing on their interaction capabilities with respect to rendering and speed of dynamic data mapping. We discuss the results for our representative example of a complex 3D interactive visualization technique and summarize recommendations for improvements towards operational web clients. 
}, keywords = { 2.5D treemap, Dec3D, X3DOM, XML3D, glTF }, booktitle = { Proceedings of the 21st International Conference on Web3D Technology }, project = { HPI;NFGII }, isbn = { 978-1-4503-4428-9 }, doi = { 10.1145/2945292.2945313 }, sorting = { 1024 } } @inproceedings{DRD2016, author = { Discher, Sören and Richter, Rico and Döllner, Jürgen }, title = { Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds }, year = { 2016 }, booktitle = { Advances in 3D Geoinformation }, project = { NFGII }, sorting = { 1408 } } @inproceedings{STD2016, author = { Scheibel, Willy and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Interactive Revision Exploration using Small Multiples of Software Maps }, year = { 2016 }, pages = { 131-138 }, abstract = { To explore and to compare different revisions of complex software systems is a challenging task as it requires to constantly switch between different revisions and the corresponding information visualization. This paper proposes to combine the concept of small multiples and focus+context techniques for software maps to facilitate the comparison of multiple software map themes and revisions simultaneously on a single screen. This approach reduces the amount of switches and helps to preserve the mental map of the user. Given a software project the small multiples are based on a common dataset but are specialized by specific revisions and themes. The small multiples are arranged in a matrix where rows and columns represents different themes and revisions, respectively. To ensure scalability of the visualization technique we also discuss two rendering pipelines to ensure interactive frame-rates. The capabilities of the proposed visualization technique are demonstrated in a collaborative exploration setting using a high-resolution, multi-touch display. 
}, affiliation = { Hasso Plattner Institute, University of Potsdam }, keywords = { Software visualization, visual analytics, software maps, small multiples, interactive visualization techniques }, series = { IVAPP 2016 }, booktitle = { 7th International Conference on Information Visualization Theory and Applications }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples_ivapp2016-short.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2016/STD2016/smallmultiples-poster-landscape.pdf }, sorting = { 2816 } } @inproceedings{HTWD15, author = { Hahn, Sebastian and Trapp, Matthias and Wuttke, Nikolai and D{\"o}llner, J{\"u}rgen }, title = { ThreadCity: Combined Visualization of Structure and Activity for the Exploration of Multi-threaded Software Systems }, year = { 2015 }, month = { 7 }, abstract = {

This paper presents a novel visualization technique for the interactive exploration of multi-threaded software systems. It combines the visualization of static system structure based on the EvoStreets approach with an additional traffic metaphor to communicate the runtime characteristics of multiple threads simultaneously. To improve visual scalability with respect to the visualization of complex software systems, we further present an effective level-of-detail visualization based on hierarchical aggregation of system components by taking viewing parameters into account. We demonstrate our technique by means of a prototypical implementation and compare our result with existing visualization techniques.

© The Authors 2015. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the 19th International Conference on Information Visualization (IV'15).
}, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Visual Software Analytics, Trace-Visualization, Multi-threaded Software Systems }, booktitle = { Proceedings of the 19th International Conference on Information Visualization (IV'15) }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/HTWD2015/ThreadCity.pdf }, sorting = { 2048 } } @inproceedings{SLKD15, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Oil Paint Filtering using Color Palettes }, year = { 2015 }, pages = { 149--158 }, month = { 6 }, abstract = {
This paper presents an approach for transforming images into an oil paint look. To this end, a color quantization scheme is proposed that performs feature-aware recolorization using the dominant colors of the input image. In addition, an approach for real-time computation of paint textures is presented that builds on the smoothed structure adapted to the main feature contours of the quantized image. Our stylization technique leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting.

© The Authors 2015. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'15).
}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SLKD2015/asemmo-cae2015-authors-version.pdf }, doi = { 10.2312/exp.20151188 }, sorting = { 2304 } } @inproceedings{RDD2014, author = { Richter, Rico and Discher, Sören and Döllner, Jürgen }, title = { Out-of-Core Visualization of Classified 3D Point Clouds }, year = { 2015 }, pages = { 227-242 }, abstract = { 3D point clouds represent an essential category of geodata used in a variety of geoinformation applications and systems. We present a novel, interactive out-of-core rendering technique for massive 3D point clouds based on a layered, multi-resolution kd-tree, whereby point-based rendering techniques are selected according to each point's classification (e.g., vegetation, buildings, terrain). The classification-dependent rendering leads to an improved visual representation, enhances recognition of objects within 3D point cloud depictions, and facilitates visual filtering and highlighting. To interactively explore objects, structures, and relations represented by 3D point clouds, our technique provides efficient means for an instantaneous, ad-hoc visualization compared to approaches that visualize 3D point clouds by deriving mesh-based 3D models. We have evaluated our approach for massive laser scan datasets of urban areas. The results show the scalability of the technique and how different configurations allow for designing task and domain-specific analysis and inspection tools.
© The Authors 2014. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in 3D Geoinformation Science: The Selected Papers of the 3D GeoInfo 2014 by Springer International Publishing. http://dx.doi.org/10.1007/978-3-319-12181-9.
}, keywords = { 3D point clouds, LiDAR, visualization, point-based rendering }, publisher = { Cham: Springer International Publishing }, booktitle = { 3D Geoinformation Science: The Selected Papers of the 3D GeoInfo 2014 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/3DGeoInfo2014/richter_discher_doeelner_3dgeoinfo2014_draft.pdf }, isbn = { 978-3-319-12180-2 }, doi = { 10.1007/978-3-319-12181-9 }, sorting = { 768 } } @inproceedings{TSD2015, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering and Stylization of Transportation Networks Using Distance Fields }, year = { 2015 }, pages = { 207-219 }, abstract = {
Transportation networks, such as streets, railroads or metro systems, constitute primary elements in cartography for reckoning and navigation. In recent years, they have become an increasingly important part of 3D virtual environments for the interactive analysis and communication of complex hierarchical information, for example in routing, logistics optimization, and disaster management. A variety of rendering techniques have been proposed that deal with integrating transportation networks within these environments, but have so far neglected the many challenges of an interactive design process to adapt their spatial and thematic granularity (i.e., level-of-detail and level-of-abstraction) according to a user's context. This paper presents an efficient real-time rendering technique for the view-dependent rendering of geometrically complex transportation networks within 3D virtual environments. Our technique is based on distance fields using deferred texturing that shifts the design process to the shading stage for real-time stylization. We demonstrate and discuss our approach by means of street networks using cartographic design principles for context-aware stylization, including view-dependent scaling for clutter reduction, contour-lining to provide figure-ground, handling of street crossings via shading-based blending, and task-dependent colorization. Finally, we present potential usage scenarios and applications together with a performance evaluation of our implementation.
}, keywords = { transportation networks, 3D visualization, image-based rendering, distance fields, shading, map design }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TSD2015/streets.pdf }, sorting = { 3072 } } @inproceedings{TD2015, author = { Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Geometry Batching Using Texture-Arrays }, year = { 2015 }, pages = { 239-246 }, abstract = {
High-quality rendering of 3D virtual environments typically depends on high-quality 3D models with significant geometric complexity and texture data. One major bottleneck for real-time image-synthesis represents the number of state changes, which a specific rendering API has to perform. To improve performance, batching can be used to group and sort geometric primitives into batches to reduce the number of required state changes, whereas the size of the batches determines the number of required draw-calls, and therefore, is critical for rendering performance. For example, in the case of texture atlases, which provide an approach for efficient texture management, the batch size is limited by the efficiency of the texture-packing algorithm and the texture resolution itself. This paper presents a pre-processing approach and rendering technique that overcomes these limitations by further grouping textures or texture atlases and thus enables the creation of larger geometry batches. It is based on texture arrays in combination with an additional indexing schema that is evaluated at run-time using shader programs. This type of texture management is especially suitable for real-time rendering of large-scale texture-rich 3D virtual environments, such as virtual city and landscape models.
}, keywords = { Batching, Texture-array Processing, Real-time Rendering. }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TD2015/TextureStacks.pdf }, sorting = { 3328 } } @inproceedings{MTD2015, author = { Meier, Benjamin-Heinz and Trapp, Matthias and Döllner, Jürgen }, title = { VideoMR: A Map and Reduce Framework for Real-time Video Processing }, year = { 2015 }, abstract = { This paper presents VideoMR: a novel map and reduce framework for real-time video processing on graphic processing units (GPUs). Using the advantages of implicit parallelism and bounded memory allocation, our approach enables developers to focus on implementing video operations without taking care of GPU memory handling or the details of code parallelization. Therefore, a new concept for map and reduce is introduced, redefining both operations to fit to the specific requirements of video processing. A prototypical implementation using OpenGL facilitates various operating platforms, including mobile development, and will be widely interoperable with other state-of-the-art video processing frameworks. 
}, url = { file:195536 }, booktitle = { International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/MTD2015/wscgVideoMR.pdf }, sorting = { 1792 } } @inproceedings{BTD2015, author = { Buschmann, Stefan and Trapp, Matthias and Döllner, Jürgen }, title = { Real-Time Visualization of Massive Movement Data in Digital Landscapes }, year = { 2015 }, pages = { 213-220 }, abstract = { Due to continuing advances in sensor technology and increasing availability of digital infrastructure that allows for acquisition, transfer, and storage of big data sets, large amounts of movement data (e.g., road, naval, or air-traffic) become available. In the near future, movement data such as traffic data may even be available in real-time. In a growing number of application fields (e.g., landscape planning and design, urban development, and infrastructure planning), movement data enables new analysis and simulation applications. In this paper, we present an interactive technique for visualizing massive 3D movement trajectories. It is based on mapping massive movement data to graphics primitives and their visual variables in real-time, supporting a number of visualization schemes such as sphere, line, or tube-based trajectories, including animations of direction and speed. This generic technique enhances the functionality of VR and interactive 3D systems using virtual environments such as digital landscape models, city models, or virtual globes by adding support for this important category of spatio-temporal data. 
}, booktitle = { 16th Conference on Digital Landscape Architecture (DLA 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/BTD2015/dla2015-draft.pdf }, sorting = { 1536 } } @inproceedings{ORD2015, author = { Oehlke, Christoph and Richter, Rico and Döllner, Jürgen }, title = { Automatic Detection and Large-Scale Visualization of Trees for Digital Landscapes and City Models based on 3D Point Clouds }, year = { 2015 }, pages = { 151-160 }, booktitle = { 16th Conference on Digital Landscape Architecture (DLA 2015) }, project = { NFGII }, sorting = { 1280 } } @inproceedings{WTLD2015, author = { Würfel, Hannes and Trapp, Matthias and Limberger, Daniel and Döllner, Jürgen }, title = { Natural Phenomena as Metaphors for Visualization of Trend Data in Interactive Software Maps }, year = { 2015 }, abstract = { Software maps are a commonly used tool for code quality monitoring in software-development projects and decision making processes. While providing an important visualization technique for the hierarchical system structure of a single software revision, they lack capabilities with respect to the visualization of changes over multiple revisions. This paper presents a novel technique for visualizing the evolution of the software system structure based on software metric trends. These trend maps extend software maps by using real-time rendering techniques for natural phenomena yielding additional visual variables that can be effectively used for the communication of changes. Therefore, trend data is automatically computed by hierarchically aggregating software metrics. We demonstrate and discuss the presented technique using two real world data sets of complex software systems. 
}, url = { file:195534 }, booktitle = { Computer Graphics and Visual Computing (CGVC) }, organization = { The Eurographics Association }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/WTLD2015/natural-metaphors-cgvc2015-final.pdf }, doi = { 10.2312/cgvc.20151246 }, sorting = { 768 } } @inproceedings{SD2014_3, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { An Interaction Framework for Level-of-Abstraction Visualization of 3D Geovirtual Environments }, year = { 2014 }, pages = { 43--49 }, month = { 11 }, abstract = {
3D geovirtual environments constitute effective media for the analysis and communication of complex geospatial data. Today, these environments are often visualized using static graphical variants (e.g., 2D maps, 3D photorealistic) from which a user is able to choose from. To serve the different interests of users in specific information, however, the spatial and thematic granularity at which model contents are represented (i.e., level of abstraction) should be dynamically adapted to the user's context, which requires specialized interaction techniques for parameterization. In this work, we present a framework that enables interaction interfaces to parameterize the level-of-abstraction visualization according to spatial, semantic, and thematic data. The framework is implemented in a visualization system that provides image-based rendering techniques for context-aware abstraction and highlighting. Using touch and natural language interfaces, we demonstrate its versatile application to geospatial tasks, including exploration, navigation, and orientation.
}, booktitle = { Proceedings 2nd ACM SIGSPATIAL Workshop on MapInteraction }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_3/asemmo-mapinteract2014-authors-version.pdf }, doi = { 10.1145/2677068.2677072 }, sorting = { 256 } } @inproceedings{DRST2014, author = { D{\"u}bel, Steve and R{\"o}hlig, Martin and Schumann, Heidrun and Trapp, Matthias }, title = { 2D and 3D Presentation of Spatial Data: A Systematic Review }, year = { 2014 }, month = { 11 }, abstract = { The question whether to use 2D or 3D for data visualization is generally difficult to decide. Two-dimensional and three-dimensional visualization techniques exhibit different advantages and disadvantages related to various perceptual and technical aspects such as occlusion, clutter, distortion, or scalability. To facilitate problem understanding and comparison of existing visualization techniques with regard to these aspects, this report introduces a systematization based on presentation characteristics. It enables a categorization with respect to combinations of static 2D and 3D presentations of attributes and their spatial reference. Further, it complements existing systematizations of data in an effort to formalize a common terminology and theoretical framework for this problem domain. We demonstrate our approach by reviewing different visualization techniques of spatial data according to the presented systematization. }, url = { file:195535 }, booktitle = { IEEE VIS International Workshop on 3DVis }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/DRST2014/survey2d3d.pdf }, sorting = { 64 } } @inproceedings{SD14, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Image Filtering for Interactive Level-of-Abstraction Visualization of 3D Scenes }, year = { 2014 }, pages = { 5--14 }, month = { 8 }, abstract = {

Texture mapping is a key technology in computer graphics for visual design of rendered 3D scenes. An effective information transfer of surface properties, encoded by textures, however, depends significantly on how important information is highlighted and cognitively processed by the user in an application context. Edge-preserving image filtering is a promising approach to address this concern while preserving global salient structures. Much research has focused on applying image filters in a post-process stage to foster an artistically stylized rendering, but these approaches are generally not able to preserve depth cues important for 3D visualization (e.g., texture gradient). To this end, filtering that processes texture data coherently with respect to linear perspective and spatial relationships is required. In this work, we present a system that enables to process textured 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping, and (2) for each mipmap level separately to enable a progressive level of abstraction. We demonstrate the potentials of our methods on several applications, including illustrative visualization, focus+context visualization, geometric detail removal, and depth of field. Our system supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering.

© The Authors 2014. This is the authors' version of the work. It is posted here for your personal use. Not for redistribution. The definitive version will be published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'14). http://dx.doi.org/10.1145/2630099.2630101.
}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-authors-version.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-additional_material.pdf }, doi = { 10.1145/2630099.2630101 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=Vqh3OQbWPpI }, sorting = { 1536 } } @inproceedings{STMD2014, author = { Hahn, Sebastian and Tr{\"u}mper, Jonas and Moritz, Dominik and D{\"o}llner, J{\"u}rgen }, title = { Visualization of Varying Hierarchies by Stable Layout of Voronoi Treemaps }, year = { 2014 }, pages = { 50--58 }, month = jan, internal-note = { NOTE(review): citation key STMD2014 does not match the author list (Hahn, Truemper, Moritz, Doellner) -- key kept to avoid breaking existing citations; verify }, abstract = { Space-restricted techniques for visualizing hierarchies generally achieve high scalability and readability (e.g., tree maps, bundle views, sunburst). However, the visualization layout directly depends on the hierarchy, that is, small changes to the hierarchy can cause wide-ranging changes to the layout. For this reason, it is difficult to use these techniques to compare similar variants of a hierarchy because users are confronted with layouts that do not expose the expected similarity. Voronoi treemaps appear to be promising candidates to overcome this limitation. However, existing Voronoi treemap algorithms do not provide deterministic layouts or assume a fixed hierarchy. In this paper we present an extended layout algorithm for Voronoi treemaps that provides a high degree of layout similarity for varying hierarchies, such as software-system hierarchies. The implementation uses a deterministic initial-distribution approach that reduces the variation in node positioning even if changes in the underlying hierarchy data occur. 
Compared to existing layout algorithms, our algorithm achieves lower error rates with respect to node areas in the case of weighted Voronoi diagrams, which we show in a comparative study. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { Hierarchical Visualization, Voronoi Treemaps, Stable Layout, Changing Hierarchies }, publisher = { SCITEPRESS -- Science and Technology Publications }, booktitle = { Proceedings of the 5th International Conference on Information Visualization Theory and Applications (IVAPP 2014) }, project = { HPI;NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/STMD2014/Visualization_of_Varying_Hierarchies_by_Stable_Layout_of_Voronoi_Treemaps.pdf }, sorting = { 2816 } } @inproceedings{BTLD2014, author = { Buschmann, Stefan and Trapp, Matthias and L{\"u}hne, Patrick and D{\"o}llner, J{\"u}rgen }, title = { Hardware-Accelerated Attribute Mapping for Interactive Visualization of Complex 3D Trajectories }, year = { 2014 }, pages = { 355--363 }, month = jan, abstract = { The visualization of 3D trajectories of moving objects and related attributes in 3D virtual environments represents a fundamental functionality in various visualization domains. Interactive rendering and visual analytics of such attributed trajectories involves both conceptual questions as well as technical challenges. Specifically, the mapping of trajectory attributes to rendering primitives and appearance represents a challenging task in the case of large data sets of high geometric complexity. There are various visualization approaches and rendering techniques considering specific aspects of these mappings to facilitate visualization and analysis of this kind of data. To solve the underlying general mapping problem efficiently, we developed an approach that uses and combines diverse types of visualizations, rather than being tailored to a specific use case. 
This paper describes an interactive rendering system for the visualization of 3D trajectories that enables the combinations of different mappings as well as their dynamic configuration at runtime. A fully hardware-accelerated implementation enables the processing of large sets of attributed 3D trajectories in real-time. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D Attributed Trajectories, Real-time Rendering, Attribute Mapping }, publisher = { SCITEPRESS -- Science and Technology Publications }, booktitle = { Proceedings of the 5th International Conference on Information Visualization Theory and Applications (IVAPP 2014) }, project = { NFGII }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2014/BTLD2014/appearance-mapping.pdf }, sorting = { 2560 } } @inproceedings{SD2014_2, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Oil Paint Filtering Using Color Palettes For Colorization }, year = { 2014 }, abstract = {
We present a novel technique for oil paint filtering that uses color palettes for colorization. First, dominant feature-aware colors are derived from the input image via entropy-based metrics. Seed pixels are then determined and propagated to the remaining pixels by adopting the optimization framework of Levin et al. [2004] for feature-aware colorization. Finally, the quantized output is combined with flow-based highlights and contour lines to simulate paint texture. Our technique leads to homogeneous outputs in the color domain and enables interactive control over color definitions.
}, booktitle = { Expressive Poster Session }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_2/expressive_2014_semmo.pdf }, sorting = { 512 } } @inproceedings{LD2014, author = { Limberger, Daniel and D{\"o}llner, J{\"u}rgen }, title = { Painting Per-Pixel Parametrization for Interactive Image Filtering }, year = { 2014 }, abstract = {
We present a photo-editing method that enables per-pixel parameter manipulation of image filtering by means of interactive painting. Predefined as well as custom image filters are exposed to the user, as a parametrizable composition of image operations. Brushes, as sequences of actions mapping user inputs (in terms of brush shape, flow, pressure, etc.) to arbitrary functions or convolution operators, are used to draw within the parameter space. Our tool demonstrates that interactive painting can be used to, e.g., locally tweak inadequate parametrization and, furthermore, provides a blueprint for an open, collaborative photo-editing platform.
}, booktitle = { Expressive Poster Session }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/LD2014/expressive_2014_limberger.pdf }, sorting = { 128 } } @inproceedings{DRD2014, author = { Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Konzepte für eine Service-basierte Systemarchitektur zur Integration, Prozessierung und Analyse von massiven 3D-Punktwolken }, year = { 2014 }, abstract = { Die Nutzung von hoch aufgelösten, räumlich überlappenden und multitemporalen 3D-Punktwolken im Kontext von Geoinformationssystemen stellt hohe Anforderungen an die Leistungsfähigkeit der zugrundeliegenden Software- und Hardwaresysteme. Um angesichts eines weiter zunehmenden Datenaufkommens ein effizientes und wirtschaftliches Arbeiten mit solchen Daten zu ermöglichen, schlagen wir die Nutzung einer service-basierten Software- und Geodateninfrastruktur vor, die eine Erfassung, Aktualisierung und Bereitstellung von 3D-Punktwolken im Sinne eines kontinuierlichen Prozesses ermöglicht. In diesem Beitrag erläutern wir die grundlegenden Anforderungen und den konzeptionellen Aufbau einer entsprechenden Infrastruktur, die unter anderem die bedarfsgerechte Bereitstellung ausgewählter Bereiche einer 3D Punktwolke anhand von semantischen oder temporalen Attributen unterstützt. }, booktitle = { Tagungsbände der 34. Wissenschaftlich-Technischen Jahrestagung der DGPF }, project = { NFGII }, sorting = { 2304 } } @inproceedings{KTD2013, author = { Karran, Benjamin and Tr{\"u}mper, Jonas and D{\"o}llner, J{\"u}rgen }, title = { SyncTrace: Visual Thread-Interplay Analysis }, year = { 2013 }, pages = { 10 }, month = sep, abstract = { In software comprehension, program traces are important to gain insight into certain aspects of concurrent runtime behavior, e.g., thread-interplay. 
Here, key tasks are finding usages of blocking operations, such as synchronization and I/O operations, assessing temporal order of such operations, and analyzing their effects. This is a hard task for large and complex program traces due to their size and number of threads involved. In this paper, we present SyncTrace, a new visualization technique based on (bended) activity diagrams and edge bundles that allows for parallel analysis of multiple threads and their inter-thread correspondences. We demonstrate how the technique, implemented as a tool, can be applied on real-world trace datasets to support understanding concurrent behavior. }, keywords = { trace analysis, software visualization, program comprehension, software maintenance, visualization }, publisher = { IEEE Computer Society }, booktitle = { Proceedings (electronic) of the 1st Working Conference on Software Visualization (VISSOFT) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/KTD2013/synctrace_preprint.pdf }, doi = { 10.1109/VISSOFT.2013.6650534 }, link1 = { Video (YouTube) http://youtu.be/rTQlyVMre_w }, sorting = { 1024 } } @inproceedings{LTSD13, author = { Lux, Roland and Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Projective Texturing for Non-Photorealistic Shading of Technical 3D Models }, year = { 2013 }, pages = { 101--108 }, month = sep, abstract = { This paper presents a novel interactive rendering technique for creating and editing shadings for man-made objects in technical 3D visualizations. In contrast to shading approaches that use intensities computed based on surface normals (e.g., Phong, Gooch, Toon shading), the presented approach uses one-dimensional gradient textures, which can be parametrized and interactively manipulated based on per-object bounding volume approximations. 
The fully hardware-accelerated rendering technique is based on projective texture mapping and customizable intensity transfer functions. A provided performance evaluation shows comparable results to traditional normal-based shading approaches. The work also introduces simple direct-manipulation metaphors that enable interactive user control of the gradient texture alignment and intensity transfer functions. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Czanner, Silvester and Tang, Wen }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { NFGII }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=PmBTK8TbpPA }, sorting = { 768 } } @inproceedings{PTD2013, author = { Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective Detail+Overview Visualization for 3D Building Exploration }, year = { 2013 }, pages = { 57--64 }, month = sep, abstract = { This paper presents a multi-perspective rendering technique that enables detail+overview visualization and interactive exploration of virtual 3D building models. Virtual 3D building models, as main elements of virtual 3D city models, are used in a growing number of application domains, such as geoanalysis, disaster management and architectural planning. Visualization systems for such building models often rely on perspective or orthogonal projections using a single viewpoint. Therefore, the exploration of a complete model requires a user to change the viewpoint multiple times and to memorize the content of each view to obtain a comprehensive mental model. Since this is usually a time-consuming task, which implies context switching, current visualization systems use multiple viewports to simultaneously depict an object from different perspectives. 
Our approach extends the idea of multiple viewports by combining two linked views for the interactive exploration of virtual 3D building models and their facades. In contrast to traditional approaches, we automatically generate a multi-perspective view that simultaneously depicts all facades of the building in one overview image. This facilitates the process of obtaining overviews and supports fast and direct navigation to various points-of-interest. We describe the concept and implementations of our Multiple-Center-of-Projection camera model for real-time multi-perspective image synthesis. Further, we provide insights into different interaction techniques for linked multi-perspective views and outline approaches of future work. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Czanner, Silvester and Tang, Wen }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { HPI; NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/PTD2013/PTD2013.pdf }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=Ywo4gpx0rE8&feature=share&list=UURf7yK_n8IfSBtpWh8uP0mA }, sorting = { 512 } } @inproceedings{ESTD2013, author = { Engel, Juri and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluating the Perceptual Impact of Rendering Techniques on Thematic Color Mappings in 3D Virtual Environments }, year = { 2013 }, pages = { 25--32 }, month = sep, abstract = {

Using colors for thematic mapping is a fundamental approach in visualization, and has become essential for 3D virtual environments to effectively communicate multidimensional, thematic information. Preserving depth cues within these environments to emphasize spatial relations between geospatial features remains an important issue. A variety of rendering techniques have been developed to preserve depth cues in 3D information visualization, including shading, global illumination, and image stylization. However, these techniques alter color values, which may lead to ambiguity in a color mapping and loss of information. Depending on the applied rendering techniques and color mapping, this loss should be reduced while still preserving depth cues when communicating thematic information. This paper presents the results of a quantitative and qualitative user study that evaluates the impact of rendering techniques on information and spatial perception when using visualization of thematic data in 3D virtual environments. We report the results of this study with respect to four perception-related tasks, showing significant differences in error rate and task completion time for different rendering techniques and color mappings.
}, editor = { Bronstein, Michael and Favre, Jean and Hormann, Kai }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 18th International Workshop on Vision, Modeling and Visualization (VMV 2013) }, project = { NFGII }, doi = { 10.2312/PE.VMV.VMV13.025-032 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/jengel-vmv2013-authors-version-hq.pdf }, link2 = { User Study Raw Data (ZIP) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/user_study_raw_data_txt.zip }, sorting = { 128 } } @inproceedings{SKTD13, author = { Semmo, Amir and Kyprianidis, Jan Eric and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Rendering of Water Surfaces with Cartography-Oriented Design }, year = { 2013 }, pages = { 5--14 }, month = jul, abstract = {

More than 70% of the Earth's surface is covered by oceans, seas, and lakes, making water surfaces one of the primary elements in geospatial visualization. Traditional approaches in computer graphics simulate and animate water surfaces in the most realistic ways. However, to improve orientation, navigation, and analysis tasks within 3D virtual environments, these surfaces need to be carefully designed to enhance shape perception and land-water distinction. We present an interactive system that renders water surfaces with cartography-oriented design using the conventions of mapmakers. Our approach is based on the observation that hand-drawn maps utilize and align texture features to shorelines with non-linear distance to improve figure-ground perception and express motion. To obtain local orientation and principal curvature directions, first, our system computes distance and feature-aligned distance maps. Given these maps, waterlining, water stippling, contour-hatching, and labeling are applied in real-time with spatial and temporal coherence. The presented methods can be useful for map exploration, landscaping, urban planning, and disaster management, which is demonstrated by various real-world virtual 3D city and landscape models.

© ACM, 2013. This is the authors' version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in Proceedings of the International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe'13). http://dx.doi.org/10.1145/2487276.2487277.
}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-authors-version-hq.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-slides.pdf }, doi = { 10.1145/2487276.2487277 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=DFjjcMRWWoE }, sorting = { 1536 } } @inproceedings{TDT2013, author = { Tr{\"u}mper, Jonas and D{\"o}llner, J{\"u}rgen and Telea, Alexandru }, title = { Multiscale Visual Comparison of Execution Traces }, year = { 2013 }, pages = { 53--62 }, month = may, abstract = { Understanding the execution of programs by means of program traces is a key strategy in software comprehension. An important task in this context is comparing two traces in order to find similarities and differences in terms of executed code, execution order, and execution duration. For large and complex program traces, this is a difficult task due to the cardinality of the trace data. In this paper, we propose a new visualization method based on icicle plots and edge bundles. We address visual scalability by several multiscale visualization metaphors, which help users navigating from the main differences between two traces to intermediate structural-difference levels, and, finally fine-grained function call levels. We show how our approach, implemented in a tool called TraceDiff, is applicable in several scenarios for trace difference comprehension on real-world trace datasets. 
}, keywords = { trace analysis, software visualization, program comprehension, software maintenance, visualization }, publisher = { IEEE Computer Society }, booktitle = { Proceedings of the International Conference on Program Comprehension }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/TDT2013/truemper2013_tracediff_preprint.pdf }, link1 = { Video (YouTube) http://youtu.be/9x2mOsf_fjU }, sorting = { 2304 } } @inproceedings{LWTD2013, author = { Limberger, Daniel and Wasty, Benjamin and Tr{\"u}mper, Jonas and D{\"o}llner, J{\"u}rgen }, title = { Interactive Software Maps for Web-Based Source Code Analysis }, year = { 2013 }, pages = { 8 }, month = may, abstract = { Software maps -- linking rectangular 3D-Treemaps, software system structure, and performance indicators -- are commonly used to support informed decision making in software-engineering processes. A key aspect for this decision making is that software maps provide the structural context required for correct interpretation of these performance indicators. In parallel, source code repositories and collaboration platforms are an integral part of today's software-engineering tool set, but cannot properly incorporate software maps since implementations are only available as stand-alone applications. Hence, software maps are 'disconnected' from the main body of this tool set, rendering their use and provisioning overly complicated, which is one of the main reasons against regular use. We thus present a web-based rendering system for software maps that achieves both fast client-side page load time and interactive frame rates even with large software maps. We significantly reduce page load time by efficiently encoding hierarchy and geometry data for the net transport. Apart from that, appropriate interaction, layouting, and labeling techniques as well as common image enhancements aid evaluation of project-related quality aspects. 
Metrics provisioning can further be implemented by predefined attribute mappings to simplify communication of project specific quality aspects. The system is integrated into dashboards to demonstrate how our web-based approach makes software maps more accessible to many different stakeholders in software-engineering projects. }, publisher = { ACM }, booktitle = { Proceedings of the International Web3D Conference }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/LWTD2013/web3d2013-treemaps-limberger.pdf }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=AaHJRVQ3Z1E }, sorting = { 2048 } } @inproceedings{TSD2013, author = { Trapp, Matthias and Hahn, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering of Complex 3D-Treemaps }, year = { 2013 }, pages = { 165--175 }, month = feb, abstract = { 3D-Treemaps are an important visualization technique for hierarchical views. In contrast to 2D-Treemaps, height can be used to map one additional attribute of the data items. Using the Treemap technique in combination with large datasets (more than 500k) a fast rendering and interaction techniques that are beyond collapsing/uncollapsing nodes is still one of the main challenges. This paper presents a novel rendering technique that enables the image synthesis of geometrical complex 3D-Treemaps in real-time. The fully hardware accelerated approach is based on shape generation using geometry shaders. This approach offers increased rendering performance and low update latency compared to existing techniques and through it enables new real-time interaction techniques to large datasets. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, keywords = { 3D-treemaps, real-time rendering, performance evaluation }, editor = { Coquillart, Sabine and Andujar, Carlos and
Laramee, Robert S. and Kerren, Andreas and Braz, Jos{\'e} }, publisher = { SCITEPRESS -- Science and Technology Publications }, booktitle = { Proceedings of the 8th International Conference on Computer Graphics Theory and Applications (GRAPP 2013) }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/TSD2013/TreeMap.pdf }, isbn = { 978-989-8565-46-4 }, link1 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-1702377-rendering-complex-3d-tree-maps-grapp-2013/ }, sorting = { 2816 } } @inproceedings{Pasewaldt2012a, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Towards Comprehensible Digital 3D Maps }, year = { 2012 }, pages = { 261--276 }, month = nov, abstract = { Digital mapping services have become fundamental tools in economy and society to provide domain experts and non-experts with customized, multi-layered map contents. In particular because of the continuous advancements in the acquisition, provision, and visualization of virtual 3D city and landscape models, 3D mapping services, today, represent key components to a growing number of applications, like car navigation, education, or disaster management. However, current systems and applications providing digital 3D maps are faced by drawbacks and limitations, such as occlusion, visual clutter, or insufficient use of screen space, that impact an effective comprehension of geoinformation. To this end, cartographers and computer graphics engineers developed design guidelines, rendering and visualization techniques that aim to increase the effectiveness and expressiveness of digital 3D maps, but whose seamless combination has yet to be achieved. This work discusses potentials of digital 3D maps that are based on combining cartography-oriented rendering techniques and multi-perspective views. 
For this purpose, a classification of cartographic design principles, visualization techniques, as well as suitable combinations are identified that aid comprehension of digital 3D maps. According to this classification, a prototypical implementation demonstrates the benefits of multi-perspective and non-photorealistic rendering techniques for visualization of 3D map contents. In particular, it enables (1) a seamless combination of cartography-oriented and photorealistic graphic styles while (2) increasing screen-space utilization, and (3) simultaneously directing a viewer’s gaze to important or prioritized information. }, editor = { Jobst, Markus }, publisher = { Jobstmedia Management Verlag, Wien }, chapter = { 4 }, booktitle = { Service-Oriented Mapping 2012 (SOMAP2012) }, organization = { International Cartographic Association }, project = { NFGII;HPI }, language = { English }, isbn = { 3-9502039-2-3 }, link1 = { Slides http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/somap2012_pasewaldt_towards_comprehensible_3D_maps.pdf }, link2 = { Paper http://www.hpi.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/PSTD_2012_SOMAP.pdf }, sorting = { 32 } } @inproceedings{MED12, author = { Limberger, Daniel and Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Single-Pass Rendering of Day and Night Sky Phenomena }, year = { 2012 }, pages = { 55--62 }, month = nov, abstract = { This paper presents astronomical based rendering of skies as seen from low altitudes on earth, in respect to location, date, and time. The technique allows to compose an atmosphere with sun, multiple cloud layers, moon, bright stars, and Milky Way, into a holistic sky with unprecedented high level of detail and diversity. GPU generated, viewpoint-aligned billboards are used to render stars with approximated color, brightness, and scintillations. 
A similar approach is used to synthesize the moon considering lunar phase, earthshine, shading, and lunar eclipses. Atmosphere and clouds are rendered using existing methods adapted to our needs. Rendering is done in a single pass supporting interactive day-night cycles with low performance impact, and allows for easy integration in existing rendering systems. Results of our approach are compared to related renderings and photos, and the performance impact is discussed. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { Eurographics Association }, booktitle = { Proceedings of the Vision, Modeling, and Visualization Workshop 2012 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/MED12/paper_1056_cr.pdf }, link1 = { https://code.google.com/p/osghimmel/ }, sorting = { 3968 } } @inproceedings{TTD2012, author = { Tr{\"u}mper, Jonas and Telea, Alexandru and D{\"o}llner, J{\"u}rgen }, title = { ViewFusion: Correlating Structure and Activity Views for Execution Traces }, year = { 2012 }, pages = { 45--52 }, month = sep, abstract = { Visualization of data on structure and related temporal activity supports the analysis of correlations between the two types of data. This is typically done by linked views. This has shortcomings with respect to efficient space usage and makes mapping the effect of user input into one view into the other view difficult. We propose here a novel, space-efficient technique that `fuses' the two information spaces -- structure and activity -- in one view. We base our technique on the idea that user interaction should be simple, yet easy to understand and follow. We apply our technique, implemented in a prototype tool, for the understanding of software engineering datasets, namely static structure and execution traces of the Chromium web browser. 
}, keywords = { visualization, linking, correlation, software, execution traces, program comprehension, view, fusion, information space, best student application-paper }, publisher = { European Association for Computer Graphics }, note = { Best Application-Paper }, booktitle = { Proceedings of the 10th Theory and Practice of Computer Graphics Conference }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/TTD2012/truemper_tpcg2012_preprint.pdf }, doi = { 10.2312/LocalChapterEvents/TPCG/TPCG12/045-052 }, link1 = { Definitive version http://diglib.eg.org/EG/DL/LocalChapterEvents/TPCG/TPCG12/045-052.pdf.abstract.pdf;internal&action=action.digitallibrary.ShowPaperAbstract }, link2 = { Video (Youtube) http://youtu.be/czhXPtt-Eoo }, link3 = { Project page at University of Groningen, NL http://www.cs.rug.nl/svcg/SoftVis/ViewFusion }, sorting = { 512 } } @inproceedings{EPTD12, author = { Engel, Juri and Pasewaldt, Sebastian and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { An Immersive Visualization System for Virtual 3D City Models }, year = { 2012 }, month = jun, abstract = { Virtual 3D city models are essential visualization tools for effective communication of complex urban spatial information. Immersive visualization of virtual 3D city models offers an intuitive access to and an effective way of realization of urban spatial information, enabling new collaborative applications and decision-support systems. This paper discusses techniques for and usage of fully immersive environments for visualizing virtual 3D city models by advanced 3D rendering techniques. Fully immersive environments imply a number of specific requirements for both hardware and software, which are discussed in detail. Further, we identify and outline conceptual and technical challenges as well as possible solution approaches by visualization system prototypes for large-scale, fully immersive environments. 
We evaluate the presented concepts using two application examples and discuss the results. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { IEEE GRSS }, booktitle = { 20th International Conference on Geoinformatics (GEOINFORMATICS), 2012 }, project = { NFGII;HPI }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/EPTD12/EPTD12_draft.pdf }, sorting = { 3840 } } @inproceedings{ED12, author = { Engel, Juri and D{\"o}llner, J{\"u}rgen }, title = { Immersive Visualisierung von virtuellen 3D-Stadtmodellen und ihr Einsatz in der Stadtplanung }, year = { 2012 }, volume = { 21 }, pages = { 165--172 }, abstract = { Virtuelle 3D-Stadtmodelle ermöglichen die effektive Kommunikation komplexer stadträumlicher Informationen. Immersive Visualisierung von virtuellen 3D-Stadtmodellen bietet einen intuitiven Zugang zu diesen Informationen und eröffnet neue Anwendungsfelder in der Stadtplanung, z. B. bei der Entscheidungsfindung, dem Marketing und der Öffentlichkeitspräsentation von Projekten, Vorgängen oder Konzepten. Immersive Visualisierung impliziert zahlreiche Anforderungen an das Softwaresystem. In diesem Beitrag untersuchen wir die softwaretechnischen Herausforderungen bei der Entwicklung eines solchen Systems und zeigen anhand eines Prototyps zur immersiven Visualisierung von virtuellen 3D-Stadtmodellen wie man diese Herausforderungen bewältigen kann. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, publisher = { DGPF }, booktitle = { Publikationen der Deutschen Gesellschaft für Photogrammetrie, Fernerkundung und Geoinformation }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/ED12/jengel_dgpf2012_draft.pdf }, issn = { 0942-2870 }, sorting = { 3904 } } @inproceedings{RD12, author = { Richter, Rico and D{\"o}llner, J{\"u}rgen }, title = { Semantische Klassifizierung von 3D-Punktwolken für Stadtgebiete }, note = { 121. 
DVW-Seminar Terrestrisches-Laser-Scanning 2012 }, year = { 2012 }, pages = { 127--134 }, abstract = { 3D-Scanner-Technologien und bildbasierte Verfahren ermöglichen die flächendeckende Erfassung von Städten und Metropolregionen in Form von georeferenzierten 3D-Punktwolken. Diese Geobasisdaten stellen eine diskrete Oberflächenrepräsentation dar und verfügen über punktbezogene Metadaten, die bei der Erfassung generiert werden (z.B. Farbe, Intensität). Zentrale Herausforderungen für Systeme und Anwendungen, die mit diesen Daten arbeiten, sind das massive Datenaufkommen und die daraus resultierenden Verarbeitungszeiten. Die zielgerichtete und anwendungsspezifische Prozessierung von Teilmengen ist in der Regel nicht möglich, da die 3D-Punktwolken keine Informationen über zu Grunde liegenden Objektklassen (z.B. Bebauung, Vegetation, Gelände) beinhalten. In diesem Beitrag werden Konzepte und Techniken vorgestellt, die eine Klassifizierung von 3D-Punktwolken für ein Stadtgebiet ermöglichen. Durch die Anwendung von Out-of-Core Techniken können 3D Punktwolke mit mehreren Milliarden Punkten klassifiziert und in Objektklassen unterteilt werden. Analyse- und Visualisierungswerkzeuge können mit diesen zusätzlichen Informationen die benötigten Daten reduzieren, Verarbeitungszeiten verringern, Algorithmen optimieren sowie 3D Punktwolken selbst effektiver visualisieren. }, publisher = { Wißner-Verlag }, edition = { 1 }, booktitle = { Terrestrisches Laserscanning 2012 (TLS 2012) }, project = { NFGII }, isbn = { 978-3-89639-899-4 }, sorting = { 4 } }