# This BibTeX File has been generated by
# the Typo3 extension 'Sixpack-4-T3 by Sixten Boeck'
#
# URL:
# Date: 05/27/2017
# Non-Standard BibTeX fields are included.
# state: 0 = published, 1 = accepted, 2 = submitted, 3 = to be published // if missing, published is assumed
# extern,deleted,hidden: 0 = false, 1 = true // if missing, false is assumed
# link format: Title Url // separated by a whitespace

@article{SLKD2016, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Interactive Oil Paint Filtering }, journal = { Computers \& Graphics }, year = { 2016 }, volume = { 55 }, pages = { 157--171 }, abstract = {

This paper presents an interactive system for transforming images into an oil paint look. The system comprises two major stages. First, it derives dominant colors from an input image for feature-aware recolorization and quantization to conform with a global color palette. Afterwards, it employs non-linear filtering based on the smoothed structure adapted to the main feature contours of the quantized image to synthesize a paint texture in real-time. Our filtering approach leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting. To this end, our system introduces a generalized brush-based painting interface that operates within parameter spaces to locally adjust the level of abstraction of the filtering effects. Several results demonstrate the various applications of our filtering approach to different genres of photography.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SLKD2016/oilpaint-cag2016_authors_version.pdf }, doi = { 10.1016/j.cag.2015.12.001 }, sorting = { 2560 } } @article{SD2015, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Image Filtering for Level-of-Abstraction Texturing of Virtual 3D Scenes }, journal = { Computers \& Graphics }, year = { 2015 }, volume = { 52 }, pages = { 181--198 }, abstract = {

Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-process stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping and (2) for each mipmap level separately to enable a progressive level of abstraction, using (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. We demonstrate the potentials of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines, and is extensible for custom interaction techniques.
}, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SD2015/asemmo-cag2015-authors-version.pdf }, doi = { 10.1016/j.cag.2015.02.001 }, sorting = { 2560 } } @article{STJD2015, author = { Semmo, Amir and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen }, title = { Cartography-Oriented Design of 3D Geospatial Information Visualization - Overview and Techniques }, journal = { The Cartographic Journal }, year = { 2015 }, volume = { 52 }, number = { 2 }, pages = { 95--106 }, abstract = {

In economy, society, and personal life, map-based, interactive geospatial visualization is becoming a natural element of a growing number of applications and systems. The visualization of 3D geospatial information, however, raises the question of how to represent the information in an effective way. Considerable research has been done in technology-driven directions in the fields of cartography and computer graphics (e.g., design principles, visualization techniques). Here, non-photorealistic rendering represents a promising visualization category, situated between both fields, that offers a large number of degrees of freedom for the cartography-oriented visual design of complex 2D and 3D geospatial information for a given application context. Still today, however, specifications and techniques for mapping cartographic design principles to the state-of-the-art rendering pipeline of 3D computer graphics remain to be explored. This paper revisits cartographic design principles for 3D geospatial visualization and introduces an extended 3D semiotic model that complies with the general, interactive visualization pipeline. Based on this model, we propose non-photorealistic rendering techniques to interactively synthesize cartographic renditions of basic feature types, such as terrain, water, and buildings. In particular, this includes a novel iconification concept to seamlessly interpolate between photorealistic and cartographic representations of 3D landmarks. Our work concludes with a discussion of open challenges in this field of research, including topics such as user interaction and evaluation.
}, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/STJD2015/icc2015_semmo_authors_version.pdf }, doi = { 10.1080/00087041.2015.1119462 }, sorting = { 2816 } } @article{PSTD2014, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Multi-Perspective 3D Panoramas }, journal = { International Journal of Geographical Information Science (IJGIS) }, year = { 2014 }, volume = { 28 }, number = { 10 }, pages = { 2030-2051 }, abstract = {
This article presents multi-perspective 3D panoramas that focus on visualizing 3D geovirtual environments (3D GeoVEs) for navigation and exploration tasks. Their key element, a multi-perspective view, seamlessly combines what is seen from multiple viewpoints into a single image. This approach facilitates the presentation of information for virtual 3D city and landscape models, particularly by reducing occlusions, increasing screen-space utilization, and providing additional context within a single image. We complement multi-perspective views with cartographic visualization techniques to stylize features according to their semantics and highlight important or prioritized information. When combined, both techniques constitute the core implementation of interactive, multi-perspective 3D panoramas. They offer a large number of effective means for visual communication of 3D spatial information, a high degree of customization with respect to cartographic design, and manifold applications in different domains. We discuss design decisions of 3D panoramas for the exploration of and navigation in 3D GeoVEs. We also discuss a preliminary user study that indicates that 3D panoramas are a promising approach for navigation systems using 3D GeoVEs.
}, keywords = { multi-perspective visualization, panorama, focus+context visualization, 3D geovirtual environments, cartographic design }, project = { HPI;NFGII }, doi = { 10.1080/13658816.2014.922686 }, link1 = { http://dx.doi.org/10.1080/13658816.2014.922686 }, sorting = { 1792 } } @article{S2014, author = { Semmo, Amir }, title = { Nichtfotorealistische Visualisierung virtueller 3D-Stadtmodelle }, journal = { HPImgzn }, year = { 2014 }, number = { 15 }, pages = { 32-35 }, abstract = { Die nichtfotorealistische Bildsynthese stellt ein umfangreiches, innovatives Repertoire zur grafischen Gestaltung bereit, die eine wirkungsvolle Visualisierung komplexer raumbezogener Informationen ermöglicht. Der Fachbereich für computergrafische Systeme beschäftigt sich u.a. mit dem Design, der Implementierung und Evaluierung von nichtfotorealistischen Visualisierungstechniken für virtuelle 3D-Umgebungen – ein Forschungsbericht von Amir Semmo. }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/S2014/asemmo-hpimgzn2014.pdf }, sorting = { 1280 } } @article{TSPHDEH12, author = { Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and Döllner, Jürgen and Eichhorn, Michael and Heinzelmann, Michael }, title = { Colonia 3D - Communication of Virtual 3D Reconstructions in Public Spaces }, journal = { International Journal of Heritage in the Digital Era (IJHDE) }, year = { 2012 }, volume = { 1 }, number = { 1 }, pages = { 45-74 }, month = { 1 }, abstract = { The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artifacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Cover Image }, editor = { Marinos Ioannides }, publisher = { Multi-Science Publishing }, issn = { 2047-4970 }, doi = { 10.1260/2047-4970.1.1.45 }, link1 = { Paper (HQ) http://multi-science.metapress.com/content/b4wn417605744380/fulltext.pdf }, sorting = { 4608 } } @article{STKD12, author = { Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Interactive Visualization of Generalized Virtual 3D City Models using Level-of-Abstraction Transitions }, journal = { Computer Graphics Forum }, year = { 2012 }, volume = { 31 }, number = { 3 }, pages = { 885--894 }, abstract = {

Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary. Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.
}, note = { Proceedings EuroVis 2012 }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/STKD12/asemmo-eurovis2012.pdf }, doi = { 10.1111/j.1467-8659.2012.03081.x }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=VXqtw44KxY4 }, sorting = { 2304 } } @article{SHTD2012, author = { Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Concepts for Cartography-Oriented Visualization of Virtual 3D City Models }, journal = { Photogrammetrie - Fernerkundung - Geoinformation (PFG) }, year = { 2012 }, number = { 4 }, pages = { 455-465 }, abstract = {

Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.
}, keywords = { 3D city models, cartography-oriented visualization, style description languages, real-time rendering }, publisher = { E. Schweizerbart'sche Verlagsbuchhandlung }, address = { Johannesstrasse 3A, D-70176 Stuttgart, Germany }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2012/SHTD2012/asemmo-PFG2012.pdf }, issn = { 1432-8364 }, doi = { 10.1127/1432-8364/2012/0131 }, sorting = { 16 } } @incollection{SKD10, author = { Semmo, Amir and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Automated Image-Based Abstraction of Aerial Images }, year = { 2010 }, pages = { 359-378 }, month = { 5 }, abstract = {

Aerial images represent a fundamental type of geodata with a broad range of applications in GIS and geovisualization. The perception and cognitive processing of aerial images by humans, however, is still faced with the specific limitations of photorealistic depictions, such as low-contrast areas, unsharp object borders, and visual noise. In this paper, we present a novel technique to automatically abstract aerial images that enhances visual clarity and generalizes the contents of aerial images to improve their perception and recognition. The technique applies non-photorealistic image processing by smoothing local image regions with low contrast and emphasizing edges in image regions with high contrast. To handle the abstraction of large images, we introduce an image tiling procedure that is optimized for post-processing images on GPUs and avoids visible artifacts across junctions. This is technically achieved by filtering additional connection tiles that overlap the main tiles of the input image. The technique also allows the generation of different levels of abstraction for aerial images by computing a mipmap pyramid, where each of the mipmap levels is filtered with adapted abstraction parameters. These mipmaps can then be used to perform level-of-detail rendering of abstracted aerial images. Finally, the paper contributes a study to aerial image abstraction by analyzing the results of the abstraction process on distinctive visible elements in common aerial image types. In particular, we have identified a high abstraction potential in landscape images and a higher benefit from edge enhancement in urban environments.
}, editor = { Painho, Marco and Santos, Maribel Yasmina and Pundt, Hardy }, publisher = { Springer }, series = { Lecture Notes in Geoinformation and Cartography }, booktitle = { Geospatial Thinking }, project = { flowabs }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/SKD10/asemmo-agile2010.pdf }, doi = { 10.1007/978-3-642-12326-9_19 }, sorting = { 4352 } } @inproceedings{SDTKDP2016, author = { Semmo, Amir and D{\"u}rschmid, Tobias and Trapp, Matthias and Klingbeil, Mandy and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Image Filtering with Multiple Levels-of-Control on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = {
With the continuous development of mobile graphics hardware, interactive high-quality image stylization based on nonlinear filtering is becoming feasible and increasingly used in casual creativity apps. However, these apps often only provide high-level controls to parameterize image filters and generally lack support for low-level (artistic) control, thus automating art creation rather than assisting it. This work presents a GPU-based framework that enables parameterizing image filters at three levels of control: (1) presets followed by (2) global parameter adjustments can be interactively refined by (3) complementary on-screen painting that operates within the filters' parameter spaces for local adjustments. The framework provides a modular XML-based effect scheme to effectively build complex image processing chains, using these interactive filters as building blocks, that can be efficiently processed on mobile devices. Thereby, global and local parameterizations are directed with higher-level algorithmic support to ease the interactive editing process, which is demonstrated by state-of-the-art stylization effects, such as oil paint filtering and watercolor rendering.
}, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDTKDP2016/asemmo-mgia2016-authors-version.pdf }, doi = { 10.1145/2999508.2999521 }, sorting = { 768 } } @inproceedings{PSDS2016, author = { Pasewaldt, Sebastian and Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Artistic Image Processing and Editing on Mobile Devices }, year = { 2016 }, month = { 12 }, abstract = { BeCasso is a mobile app that enables users to transform photos into high-quality, high-resolution non-photorealistic renditions, such as oil and watercolor paintings, cartoons, and colored pencil drawings, which are inspired by real-world paintings or drawing techniques. In contrast to neural network and physically-based approaches, the app employs state-of-the-art nonlinear image filtering. For example, oil paint and cartoon effects are based on smoothed structure information to interactively synthesize renderings with soft color transitions. BeCasso empowers users to easily create aesthetic renderings by implementing a two-fold strategy: First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Thereby, users can obtain initial renditions that may be fine-tuned afterwards. Second, it enables local style adjustments: using on-screen painting metaphors, users are able to locally adjust different stylization features, e.g., to vary the level of abstraction, pen, brush and stroke direction or the contour lines. In this way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications (Demo) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/PSDS2016/mgia-demo2016_authors_version.pdf }, doi = { 10.1145/2999508.2999518 }, sorting = { 512 }, state = { 1 } } @inproceedings{SDS2016, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen and Schlegel, Frank }, title = { BeCasso: Image Stylization by Interactive Oil Paint Filtering on Mobile Devices }, year = { 2016 }, month = { 7 }, abstract = { BeCasso is a mobile app that enables users to transform photos into an oil paint look that is inspired by traditional painting elements. In contrast to stroke-based approaches, the app uses state-of-the-art nonlinear image filtering techniques based on smoothed structure information to interactively synthesize oil paint renderings with soft color transitions. BeCasso empowers users to easily create aesthetic oil paint renderings by implementing a two-fold strategy. First, it provides parameter presets that may serve as a starting point for a custom stylization based on global parameter adjustments. Second, it introduces a novel interaction approach that operates within the parameter spaces of the stylization effect to facilitate creative control over the visual output: on-screen painting enables users to locally adjust the appearance in image regions, e.g., to vary the level of abstraction, brush and stroke direction. This way, the app provides tools for both higher-level interaction and low-level control [Isenberg 2016] to serve the different needs of non-experts and digital artists.

References:
Isenberg, T. 2016. Interactive NPAR: What Type of Tools Should We Create? In Proc. NPAR, The Eurographics Association, Goslar, Germany, 89–96 }, affiliation = { Hasso-Plattner-Institut, University of Potsdam, Germany }, booktitle = { Proceedings ACM SIGGRAPH Appy Hour }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/SDS2016/asemmo-siggraph2016-appyhour.pdf }, doi = { 10.1145/2936744.2936750 }, sorting = { 1792 } } @inproceedings{STDDP2016, author = { Semmo, Amir and Trapp, Matthias and D{\"u}rschmid, Tobias and D{\"o}llner, J{\"u}rgen and Pasewaldt, Sebastian }, title = { Interactive Multi-scale Oil Paint Filtering on Mobile Devices }, year = { 2016 }, abstract = {
This work presents an interactive mobile implementation of a filter that transforms images into an oil paint look. To this end, a multi-scale approach is introduced that processes image pyramids and uses flow-based joint bilateral upsampling to achieve deliberate levels of abstraction at multiple scales and interactive frame rates. The approach facilitates the implementation of interactive tools that adjust the appearance of filtering effects at run-time, which is demonstrated by an on-screen painting interface for per-pixel parameterization that fosters the casual creativity of non-artists.
}, booktitle = { Proceedings ACM SIGGRAPH Posters }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STDDP2016/asemmo-siggraph2016-poster.pdf }, doi = { 10.1145/2945078.2945120 }, sorting = { 1536 } } @inproceedings{STPD2016, author = { Semmo, Amir and Trapp, Matthias and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen }, title = { Interactive Oil Paint Filtering On Mobile Devices }, year = { 2016 }, abstract = {
Image stylization enjoys a growing popularity on mobile devices to foster casual creativity. However, the implementation and provision of high-quality image filters for artistic rendering are still faced with the inherent limitations of mobile graphics hardware, such as computing power and memory resources. This work presents a mobile implementation of a filter that transforms images into an oil paint look, thereby highlighting concepts and techniques on how to perform multi-stage nonlinear image filtering on mobile devices. The proposed implementation is based on OpenGL ES and the OpenGL ES shading language, and supports on-screen painting to interactively adjust the appearance in local image regions, e.g., to vary the level of abstraction, brush, and stroke direction. Evaluations of the implementation indicate interactive performance and results of similar aesthetic quality to the original desktop variant.
}, booktitle = { Expressive 2016 - Posters, Artworks, and Bridging Papers }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2016/STPD2016/asemmo-exressive2016-poster.pdf }, doi = { 10.2312/exp.20161255 }, sorting = { 1280 } } @inproceedings{SLKD15, author = { Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen }, title = { Image Stylization by Oil Paint Filtering using Color Palettes }, year = { 2015 }, pages = { 149--158 }, month = { 6 }, abstract = {
This paper presents an approach for transforming images into an oil paint look. To this end, a color quantization scheme is proposed that performs feature-aware recolorization using the dominant colors of the input image. In addition, an approach for real-time computation of paint textures is presented that builds on the smoothed structure adapted to the main feature contours of the quantized image. Our stylization technique leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting.

}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/SLKD2015/asemmo-cae2015-authors-version.pdf }, doi = { 10.2312/exp.20151188 }, sorting = { 2304 } } @inproceedings{TSD2015, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Rendering and Stylization of Transportation Networks Using Distance Fields }, year = { 2015 }, pages = { 207-219 }, abstract = {
Transportation networks, such as streets, railroads or metro systems, constitute primary elements in cartography for reckoning and navigation. In recent years, they have become an increasingly important part of 3D virtual environments for the interactive analysis and communication of complex hierarchical information, for example in routing, logistics optimization, and disaster management. A variety of rendering techniques have been proposed that deal with integrating transportation networks within these environments, but have so far neglected the many challenges of an interactive design process to adapt their spatial and thematic granularity (i.e., level-of-detail and level-of-abstraction) according to a user's context. This paper presents an efficient real-time rendering technique for the view-dependent rendering of geometrically complex transportation networks within 3D virtual environments. Our technique is based on distance fields using deferred texturing that shifts the design process to the shading stage for real-time stylization. We demonstrate and discuss our approach by means of street networks using cartographic design principles for context-aware stylization, including view-dependent scaling for clutter reduction, contour-lining to provide figure-ground, handling of street crossings via shading-based blending, and task-dependent colorization. Finally, we present potential usage scenarios and applications together with a performance evaluation of our implementation.
}, keywords = { transportation networks, 3D visualization, image-based rendering, distance fields, shading, map design }, booktitle = { Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP 2015) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2015/TSD2015/streets.pdf }, sorting = { 3072 } } @inproceedings{SD2014_3, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { An Interaction Framework for Level-of-Abstraction Visualization of 3D Geovirtual Environments }, year = { 2014 }, pages = { 43--49 }, month = { 11 }, abstract = {
3D geovirtual environments constitute effective media for the analysis and communication of complex geospatial data. Today, these environments are often visualized using static graphical variants (e.g., 2D maps, 3D photorealistic) from which a user is able to choose. To serve the different interests of users in specific information, however, the spatial and thematic granularity at which model contents are represented (i.e., level of abstraction) should be dynamically adapted to the user's context, which requires specialized interaction techniques for parameterization. In this work, we present a framework that enables interaction interfaces to parameterize the level-of-abstraction visualization according to spatial, semantic, and thematic data. The framework is implemented in a visualization system that provides image-based rendering techniques for context-aware abstraction and highlighting. Using touch and natural language interfaces, we demonstrate its versatile application to geospatial tasks, including exploration, navigation, and orientation.
}, booktitle = { Proceedings 2nd ACM SIGSPATIAL Workshop on MapInteraction }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_3/asemmo-mapinteract2014-authors-version.pdf }, doi = { 10.1145/2677068.2677072 }, sorting = { 256 } } @inproceedings{SD14, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Image Filtering for Interactive Level-of-Abstraction Visualization of 3D Scenes }, year = { 2014 }, pages = { 5--14 }, month = { 8 }, abstract = {

Texture mapping is a key technology in computer graphics for visual design of rendered 3D scenes. An effective information transfer of surface properties, encoded by textures, however, depends significantly on how important information is highlighted and cognitively processed by the user in an application context. Edge-preserving image filtering is a promising approach to address this concern while preserving global salient structures. Much research has focused on applying image filters in a post-process stage to foster an artistically stylized rendering, but these approaches are generally not able to preserve depth cues important for 3D visualization (e.g., texture gradient). To this end, filtering that processes texture data coherently with respect to linear perspective and spatial relationships is required. In this work, we present a system that enables to process textured 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping, and (2) for each mipmap level separately to enable a progressive level of abstraction. We demonstrate the potentials of our methods on several applications, including illustrative visualization, focus+context visualization, geometric detail removal, and depth of field. Our system supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering.

}, booktitle = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-authors-version.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014/asemmo-cae2014-additional_material.pdf }, doi = { 10.1145/2630099.2630101 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=Vqh3OQbWPpI }, sorting = { 1536 } } @inproceedings{SD2014_2, author = { Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Oil Paint Filtering Using Color Palettes For Colorization }, year = { 2014 }, abstract = {
We present a novel technique for oil paint filtering that uses color palettes for colorization. First, dominant feature-aware colors are derived from the input image via entropy-based metrics. Seed pixels are then determined and propagated to the remaining pixels by adopting the optimization framework of Levin et al. [2004] for feature-aware colorization. Finally, the quantized output is combined with flow-based highlights and contour lines to simulate paint texture. Our technique leads to homogeneous outputs in the color domain and enables interactive control over color definitions.
}, booktitle = { Expressive Poster Session }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2014/SD2014_2/expressive_2014_semmo.pdf }, sorting = { 512 } } @inproceedings{LTSD13, author = { Lux, Roland and Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Interactive Projective Texturing for Non-Photorealistic Shading of Technical 3D Models }, year = { 2013 }, pages = { 101--108 }, month = { 9 }, abstract = { This paper presents a novel interactive rendering technique for creating and editing shadings for man-made objects in technical 3D visualizations. In contrast to shading approaches that use intensities computed based on surface normals (e.g., Phong, Gooch, Toon shading), the presented approach uses one-dimensional gradient textures, which can be parametrized and interactively manipulated based on per-object bounding volume approximations. The fully hardware-accelerated rendering technique is based on projective texture mapping and customizable intensity transfer functions. A provided performance evaluation shows comparable results to traditional normal-based shading approaches. The work also introduces simple direct-manipulation metaphors that enable interactive user control of the gradient texture alignment and intensity transfer functions. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, editor = { Silvester Czanner, Wen Tang }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 11th Theory and Practice of Computer Graphics 2013 Conference (TP.CG.2013) }, project = { NFGII }, isbn = { 978-3-905673-98-2 }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=PmBTK8TbpPA }, sorting = { 768 } } @inproceedings{ESTD2013, author = { Engel, Juri and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Evaluating the Perceptual Impact of Rendering Techniques on Thematic Color Mappings in 3D Virtual Environments }, year = { 2013 }, pages = { 25-32 }, month = { 9 }, abstract = {

Using colors for thematic mapping is a fundamental approach in visualization, and has become essential for 3D virtual environments to effectively communicate multidimensional, thematic information. Preserving depth cues within these environments to emphasize spatial relations between geospatial features remains an important issue. A variety of rendering techniques have been developed to preserve depth cues in 3D information visualization, including shading, global illumination, and image stylization. However, these techniques alter color values, which may lead to ambiguity in a color mapping and loss of information. Depending on the applied rendering techniques and color mapping, this loss should be reduced while still preserving depth cues when communicating thematic information. This paper presents the results of a quantitative and qualitative user study that evaluates the impact of rendering techniques on information and spatial perception when using visualizations of thematic data in 3D virtual environments. We report the results of this study with respect to four perception-related tasks, showing significant differences in error rate and task completion time for different rendering techniques and color mappings.
}, editor = { Michael Bronstein, Jean Favre, and Kai Hormann }, publisher = { The Eurographics Association }, booktitle = { Proceedings of 18th International Workshop on Vision, Modeling and Visualization (VMV 2013) }, project = { NFGII }, doi = { 10.2312/PE.VMV.VMV13.025-032 }, link1 = { Paper (PDF) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/jengel-vmv2013-authors-version-hq.pdf }, link2 = { User Study Raw Data (ZIP) http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2013/ESTD2013/user_study_raw_data_txt.zip }, sorting = { 128 } } @inproceedings{SKTD13, author = { Semmo, Amir and Kyprianidis, Jan Eric and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Real-Time Rendering of Water Surfaces with Cartography-Oriented Design }, year = { 2013 }, pages = { 5--14 }, month = { 7 }, abstract = {

More than 70% of the Earth's surface is covered by oceans, seas, and lakes, making water surfaces one of the primary elements in geospatial visualization. Traditional approaches in computer graphics simulate and animate water surfaces in the most realistic ways. However, to improve orientation, navigation, and analysis tasks within 3D virtual environments, these surfaces need to be carefully designed to enhance shape perception and land-water distinction. We present an interactive system that renders water surfaces with cartography-oriented design using the conventions of mapmakers. Our approach is based on the observation that hand-drawn maps utilize and align texture features to shorelines with non-linear distance to improve figure-ground perception and express motion. To obtain local orientation and principal curvature directions, first, our system computes distance and feature-aligned distance maps. Given these maps, waterlining, water stippling, contour-hatching, and labeling are applied in real-time with spatial and temporal coherence. The presented methods can be useful for map exploration, landscaping, urban planning, and disaster management, which is demonstrated by various real-world virtual 3D city and landscape models.

}, series = { Proceedings International Symposium on Computational Aesthetics in Graphics, Visualization, and Imaging (CAe) }, project = { NFGII }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-authors-version-hq.pdf,fileadmin/user_upload/fachgebiete/doellner/publications/2013/SKTD13/asemmo-cae2013-slides.pdf }, doi = { 10.1145/2487276.2487277 }, link1 = { Video (Youtube) http://www.youtube.com/watch?v=DFjjcMRWWoE }, sorting = { 1536 } } @inproceedings{Pasewaldt2012a, author = { Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Towards Comprehensible Digital 3D Maps }, year = { 2012 }, pages = { 261-276 }, month = { 11 }, abstract = { Digital mapping services have become fundamental tools in economy and society to provide domain experts and non-experts with customized, multi-layered map contents. In particular because of the continuous advancements in the acquisition, provision, and visualization of virtual 3D city and landscape models, 3D mapping services, today, represent key components to a growing number of applications, like car navigation, education, or disaster management. However, current systems and applications providing digital 3D maps are faced by drawbacks and limitations, such as occlusion, visual clutter, or insufficient use of screen space, that impact an effective comprehension of geoinformation. To this end, cartographers and computer graphics engineers developed design guidelines, rendering and visualization techniques that aim to increase the effectiveness and expressiveness of digital 3D maps, but whose seamless combination has yet to be achieved. This work discusses potentials of digital 3D maps that are based on combining cartography-oriented rendering techniques and multi-perspective views. For this purpose, a classification of cartographic design principles, visualization techniques, as well as suitable combinations are identified that aid comprehension of digital 3D maps. According to this classification, a prototypical implementation demonstrates the benefits of multi-perspective and non-photorealistic rendering techniques for visualization of 3D map contents. In particular, it enables (1) a seamless combination of cartography-oriented and photorealistic graphic styles while (2) increasing screen-space utilization, and (3) simultaneously directing a viewer’s gaze to important or prioritized information. }, editor = { Markus Jobst }, publisher = { Jobstmedia Management Verlag, Wien }, chapter = { 4 }, booktitle = { Service-Oriented Mapping 2012 (SOMAP2012) }, organization = { Internation Cartographic Association }, project = { NFGII;HPI }, language = { English }, isbn = { 3-9502039-2-3 }, link1 = { Slides http://www.hpi.uni-potsdam.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/somap2012_pasewaldt_towards_comprehensible_3D_maps.pdf }, link2 = { Paper http://www.hpi.de/fileadmin/user_upload/fachgebiete/doellner/publications/2012/PSTD2012/PSTD_2012_SOMAP.pdf }, sorting = { 32 } } @inproceedings{TSD11, author = { Trapp, Matthias and Semmo, Amir and D{\"o}llner, J{\"u}rgen }, title = { Colonia3D }, journal = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, year = { 2011 }, pages = { 201-212 }, month = { 5 }, abstract = { Dieser Beitrag stellt die Ergebnisse des interdisziplinären Projektes Colonia3D - Visualisierung des Römischen Kölns vor. 
Die digitale 3D Rekonstruktion des antiken Köln ist das Ergebnis eines gemeinsamen Forschungsprojekts des Archäologischen Instituts der Universität zu Köln, der Köln International School of Design (KISD) der Fachhochschule Köln, des Hasso-Plattner Instituts an der Universität Potsdam und des Römisch Germanischen Museums (RGM) Köln. Der Beitrag präsentiert die wesentlichen Konzepte dieses interaktiven, auf Museen ausgerichteten 3D-Informationssystems, beschreibt verschiedene Präsentationsmodi und deren technische Umsetzung. Er diskutiert Vorgehensweisen und Interaktionskonzepte, die den Benutzer während der Erkundung und Bewegung im virtuellen 3D-Stadtmodell unterstützen. Weiter werden die Techniken für den Austausch, die Aufbereitung und die Optimierung komplexer 3D-Datensätze beschrieben sowie Potenziale für digitale Museen und Ausstellungen skizziert. Der vorgestellte Ansatz stellt insbesondere eine IT-Lösung für einen vereinfachten, räumlich-kontextintegrierten informellen Wissenszugang zu archäologischer Fachinformation dar. }, publisher = { Werner H{\"u}lsbusch Verlag }, booktitle = { Tagungsband der 9. Konferenz Kultur und Informatik - Multimediale Systeme }, project = { NFG }, sorting = { 1792 } } @inproceedings{STD11, author = { Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen }, title = { Ansätze zur kartographischen Gestaltung von 3D-Stadtmodellen }, journal = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, year = { 2011 }, volume = { 20 }, pages = { 473-482 }, month = { 4 }, abstract = { Interaktive virtuelle 3D-Stadtmodelle haben sich zu einem bewährten Medium für die effektive und effiziente Kommunikation von Geoinformation entwickelt. Sie präsentieren eine spezialisierte Form geovirtueller Umgebungen und sind gekennzeichnet durch ein zugrunde liegendes 3D-Geländemodell, einer darin befindlichen 3D-Bebauung sowie des dazu komplementären Straßen-, Grünflächen- und Naturraumes. 3D-Stadtmodell-Systeme ermöglichen es dem Nutzer, sich im Modell interaktiv zu bewegen und sie stellen die Grundfunktionen für die Exploration, Analyse, Präsentation und das Editieren der raumbezogenen Information bereit. Besonders im Gebiet der kartenähnlichen und kartenverwandten 3D-Darstellungen stellen u.a. automatische Verfahren und Techniken zur Stilisierung und Abstraktion von Objekten eines 3D Stadtmodell ein Hauptproblem für die interaktive 3D-Bildsynthese dar. Hier spielt insbesondere die Abstraktion und Illustration potentiell wichtiger Information und somit die Reduzierung der kognitiven Belastung des Nutzers eine tragende Rolle. Diesbezüglich sind Verfahren und Techniken zur nicht-photorealistischen Bildsynthese ein bewährtes Mittel der Computergrafik, deren direkte Anwendung auf ein komplettes 3D-Stadtmodell jedoch häufig monotone sowie gestalterisch und kartographisch stark eingeschränkte Resultate liefert. Eine effiziente und kontextsensitive Kommunikation von 3D-Geoinformation bedarf jedoch der Kopplung von Objektsemantik und Abstraktionsverfahren. Diese Arbeit präsentiert ein Konzept und dessen Umsetzung, das die Auswahl und Parametrisierung von nicht-photorealistischen Darstellungstechniken auf Basis von Objektsemantiken erlaubt (Abbildung 1). Dies ermöglicht die Zuweisung unterschiedlicher automatischer Abstraktionstechniken zu Objekten und Objektgruppen. Der vorgestellte Ansatz ist echtzeitfähig und erlaubt eine interaktive Klassifikation von Objekten und Features zur Laufzeit, wodurch sich u.a. 
Szenarien zur interaktiven Exploration von thematisch-stilisierten Features bzw. feature-bezogenen Daten visualisieren lassen. Dieser Ansatz eröffnet Möglichkeiten für eine gezielte und systematische kartographische Gestaltung von 3D-Stadtmodellen sowie deren echtzeitfähige Implementierung durch entsprechende 3D-Visualisierungsdienste. }, publisher = { Landesvermessung und Geobasisinformation Brandenburg }, series = { Publikationen der Deutschen Gesellschaft f{\"u}r Photogrammetrie, Fernerkundung und Geoinformation e.V. }, booktitle = { 31. Wissenschaftlich-Technische Jahrestagung der DGPF }, project = { NFG }, sorting = { 2048 } } @inproceedings{TSPHDEH10, author = { Trapp, Matthias and Semmo, Amir and Pokorski, Rafael and Herrmann, Claus-Daniel and Döllner, Jürgen and Eichhorn, Michael and Heinzelmann, Michael }, title = { Communication of Digital Cultural Heritage in Public Spaces by the Example of Roman Cologne }, year = { 2010 }, pages = { 262-276 }, month = { 11 }, abstract = { The communication of cultural heritage in public spaces such as museums or exhibitions, gain more and more importance during the last years. The possibilities of interactive 3D applications open a new degree of freedom beyond the mere presentation of static visualizations, such as pre-produced video or image data. A user is now able to directly interact with 3D virtual environments that enable the depiction and exploration of digital cultural heritage artefacts in real-time. However, such technology requires concepts and strategies for guiding a user throughout these scenarios, since varying levels of experiences within interactive media can be assumed. This paper presents a concept as well as implementation for communication of digital cultural heritage in public spaces, by example of the project Roman Cologne. It describes the results achieved by an interdisciplinary team of archaeologists, designers, and computer graphics engineers with the aim to virtually reconstruct an interactive high-detail 3D city model of Roman Cologne. }, affiliation = { Hasso-Plattner-Institut, University of Potsdam }, note = { Best-Paper-Award }, editor = { M. Ioannides }, publisher = { Springer-Verlag Berlin Heidelberg }, series = { Lecture Notes in Computer Science (LNCS) }, booktitle = { Digital Heritage, Proceedings of 3rd EuroMed Conference }, project = { NFG }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/TSPHDEH10/EuroMed2010Coloniad3D_CRC_HQ.pdf }, issn = { 0302-9743 }, link1 = { Paper (Google Books) http://books.google.de/books?id=lLGWMJc_s24C&lpg=PA262&ots=gFwW_7fmJI&dq=Communication%20of%20Digital%20Cultural%20Heritage%20in%20Public%20Spaces%20by%20the%20Example%20of%20Roman%20Cologne&pg=PA250#v=onepage&q&f=false }, link2 = { Video (Youtube) http://www.youtube.com/watch?v=HoC_mmy51CE }, link3 = { Slides (AuthorStream) http://www.authorstream.com/Presentation/autopilot-645625-colonia3d/ }, sorting = { 1024 } } @inproceedings{KSKD10b, author = { Kyprianidis, Jan Eric and Semmo, Amir and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Anisotropic Kuwahara Filtering with Polynomial Weighting Functions }, year = { 2010 }, pages = { 25--30 }, month = { 9 }, abstract = { In this work we present new weighting functions for the anisotropic Kuwahara filter. The anisotropic Kuwahara filter is an edge-preserving filter that is especially useful for creating stylized abstractions from images or videos. 
It is based on a generalization of the Kuwahara filter that is adapted to the local shape of features. For the smoothing process, the anisotropic Kuwahara filter uses weighting functions that use convolution in their definition. For an efficient implementation, these weighting functions are therefore usually sampled into a texture map. By contrast, our new weighting functions do not require convolution and can be efficiently computed directly during the filtering in real-time. We show that our approach creates output of similar quality as the original anisotropic Kuwahara filter and present an evaluation scheme to compute the new weighting functions efficiently by using rotational symmetries. }, booktitle = { Proc. EG UK Theory and Practice of Computer Graphics }, project = { gpuakf }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KSKD10b/jkyprian-tpcg2010.pdf }, sorting = { 2048 } } @inproceedings{WSASH, author = { Hosain Wasty, Benjamin and Semmo, Amir and Appeltauer, Malte and Steinert, Bastian and Hirschfeld, Robert }, title = { ContextLua: Dynamic Behavioral Variations in Computer Games }, year = { 2010 }, pages = { 5:1--5:6 }, abstract = { Behavioral variations are central to modern computer games as they are making the gameplay a more interesting user experience. However, these variations significantly add to the implementation complexity. We discuss the domain of computer games with respect to dynamic behavioral variations and argue that context-oriented programming is of special interest for this domain. This motivates our extension to the dynamic scripting language Lua, which is frequently used in the development of computer games. Our newly provided programming constructs allow game developers to use layers for defining and activating variations of the basic gameplay. }, booktitle = { Proceedings of the 2nd International Workshop on Context-Oriented Programming }, doi = { 10.1145/1930021.1930026 }, sorting = { 3712 } } @other{KSKD10a, author = { Kyprianidis, Jan Eric and Semmo, Amir and Kang, Henry and D{\"o}llner, J{\"u}rgen }, title = { Anisotropic Kuwahara Filtering with Polynomial Weighting Functions }, year = { 2010 }, month = { 6 }, abstract = { In this work we present new weighting functions for the anisotropic Kuwahara filter. The anisotropic Kuwahara filter is an edge-preserving filter that is especially useful for creating stylized abstractions from images or videos. It is based on a generalization of the Kuwahara filter that is adapted to the local shape of features. For the smoothing process, the anisotropic Kuwahara filter uses weighting functions that use convolution in their definition. For an efficient implementation, these weighting functions are therefore usually sampled into a texture map. By contrast, our new weighting functions do not require convolution and can be efficiently computed directly during the filtering in real-time. We show that our approach creates output of similar quality as the original anisotropic Kuwahara filter and present an evaluation scheme to compute the new weighting functions efficiently by using rotational symmetries. }, booktitle = { NPAR Poster Session }, project = { gpuakf }, files = { fileadmin/user_upload/fachgebiete/doellner/publications/2010/KSKD10a/jkyprian-npar2010.pdf }, sorting = { 2304 } }