@article{CGF31-2:219-228:2012,
journal = {Computer Graphics Forum},
title = {{How Not to Be Seen -- Object Removal from Videos of Crowded
Scenes}},
author = {Miguel Granados and James Tompkin and Kwang In Kim and Oliver Grau
and Jan Kautz and Christian Theobalt },
pages = {219-228},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp219-228.pdf},
DOI = {10.1111/j.1467-8659.2012.03000.x},
abstract = {Removing dynamic objects from videos is an extremely challenging
problem that even visual effects professionals often solve with
time-consuming manual frame-by-frame editing. We propose a new approach to
video completion that can deal with complex scenes containing dynamic
background and non-periodical moving objects. We build upon the idea that the
spatio-temporal hole left by a removed object can be filled with data
available in other regions of the video where the occluded objects were
visible. Video completion is performed by solving a large combinatorial
problem that searches for an optimal pattern of pixel offsets from occluded
to unoccluded regions. Our contribution includes an energy functional that
generalizes well over different scenes with stable parameters, and that has
the desirable convergence properties for a graph-cut-based optimization. We
provide an interface to guide the completion process that both reduces
computation time and allows for efficient correction of small errors in the
result. We demonstrate that our approach can effectively complete complex,
high-resolution occlusions that are more difficult than those handled by
existing methods.}
}
@article{CGF31-2:229-236:2012,
journal = {Computer Graphics Forum},
title = {{Robust Image Retargeting via Axis-Aligned Deformation}},
author = {Daniele Panozzo and Ofir Weber and Olga Sorkine },
pages = {229-236},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp229-236.pdf},
DOI = {10.1111/j.1467-8659.2012.03001.x},
abstract = {We propose the space of axis-aligned deformations as the
meaningful space for content-aware image retargeting. Such deformations
exclude local rotations, avoiding harmful visual distortions, and they are
parameterized in 1D. We show that standard warping energies for image
retargeting can be minimized in the space of axis-aligned deformations while
guaranteeing that bijectivity constraints are satisfied, leading to
high-quality, smooth and robust retargeting results. Thanks to the 1D
parameterization, our method only requires solving a small quadratic program,
which can be done within a few milliseconds on the CPU with no precomputation
overhead. We demonstrate how the image size and the saliency map can be
changed in real time with our approach, and present results on various input
images, including the RETARGETME benchmark. We compare our results with
six other algorithms in a user study to demonstrate that the space of
axis-aligned deformations is suitable for the problem at hand.}
}
@article{CGF31-2:237-246:2012,
journal = {Computer Graphics Forum},
title = {{Iterative Image Warping}},
author = {Huw Bowles and Kenny Mitchell and Robert W. Sumner and Jeremy Moore
and Markus Gross },
pages = {237-246},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp237-246.pdf},
DOI = {10.1111/j.1467-8659.2012.03002.x},
abstract = {Animated image sequences often exhibit a large amount of
inter-frame coherence which standard rendering algorithms and pipelines are
ill-equipped to exploit, limiting their efficiency. To address this
inefficiency we transfer rendering results across frames using a novel image
warping algorithm based on fixed point iteration. We analyze the behavior of
the iteration and describe two alternative algorithms designed to suit
different performance requirements. Further, to demonstrate the versatility
of our approach we apply it to a number of spatio-temporal rendering problems
including 30-to-60 Hz frame upsampling, stereoscopic 3D conversion, defocus
and motion blur. Finally we compare our approach against existing image
warping methods and demonstrate a significant performance improvement.}
}
@article{CGF31-2:247-256:2012,
journal = {Computer Graphics Forum},
title = {{Coherent Spatiotemporal Filtering, Upsampling and Rendering of RGBZ
Videos}},
author = {Christian Richardt and Carsten Stoll and Neil A. Dodgson and
Hans-Peter Seidel and Christian Theobalt },
pages = {247-256},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp247-256.pdf},
DOI = {10.1111/j.1467-8659.2012.03003.x},
abstract = {Sophisticated video processing effects require both image and
geometry information. We explore the possibility of augmenting a video camera
with a recent infrared time-of-flight depth camera to capture
high-resolution RGB and low-resolution, noisy depth at video frame rates. To
turn such a setup into a practical RGBZ video camera, we develop efficient
data filtering techniques that are tailored to the noise characteristics of
IR depth cameras. We first remove typical artefacts in the RGBZ data and then
apply an efficient spatiotemporal denoising and upsampling scheme. This
allows us to record temporally coherent RGBZ videos at interactive frame
rates and to use them to render a variety of effects in unprecedented
quality. We show effects such as video relighting, geometry-based abstraction
and stylisation, background segmentation and rendering in stereoscopic
3D.}
}
@article{CGF31-2:257-264:2012,
journal = {Computer Graphics Forum},
title = {{Creating Picture Legends for Group Photos}},
author = {Junhong Gao and Seon Joo Kim and Michael S. Brown },
pages = {257-264},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp257-264.pdf},
DOI = {10.1111/j.1467-8659.2012.03004.x},
abstract = {Group photos are one of the most common types of digital
images found in personal image collections and on social networks. One
typical post-processing task for group photos is to produce a key or legend
to identify the people in the photo. This is most often done using simple
bounding boxes. A more professional approach is to create a picture legend
that uses either a full or partial silhouette to identify the individuals.
This paper introduces an efficient method for producing picture legends for
group photos. Our approach combines face detection with human shape priors
into an interactive selection framework to allow users to quickly segment the
individuals in a group photo. Our results are better than those obtained by
general selection tools and can be produced in a fraction of the time.}
}
@article{CGF31-2:265-274:2012,
journal = {Computer Graphics Forum},
title = {{Data-Driven Object Manipulation in Images}},
author = {Chen Goldberg and Tao Chen and Fang-Lue Zhang and Ariel Shamir and
Shi-Min Hu },
pages = {265-274},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp265-274.pdf},
DOI = {10.1111/j.1467-8659.2012.03005.x},
abstract = {We present a framework for interactively manipulating objects in
a photograph using related objects obtained from internet images. Given an
image, the user selects an object to modify, and provides keywords to
describe it. Objects with a similar shape are retrieved and segmented from
online images matching the keywords, and deformed to correspond with the
selected object. By matching the candidate object and adjusting manipulation
parameters, our method appropriately modifies candidate objects and
composites them into the scene. Supported manipulations include transferring
texture, color and shape from the matched object to the target in a seamless
manner. We demonstrate the versatility of our framework using several inputs
of varying complexity, for object completion, augmentation, replacement and
revealing. Our results are evaluated using a user study.}
}
@article{CGF31-2:275-284:2012,
journal = {Computer Graphics Forum},
title = {{Real-Time Disparity Map-Based Pictorial Depth Cue
Enhancement}},
author = {Christoph Rößing and Johannes Hanika and Hendrik Lensch
},
pages = {275-284},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp275-284.pdf},
DOI = {10.1111/j.1467-8659.2012.03006.x},
abstract = {The availability of stereoscopic image material is increasing
rapidly. In contrast to the generation of distance information, displaying it
is still a challenging task. To overcome the need for special 3D display
hardware, we present a novel real-time video processing framework based on
edge-avoiding à trous wavelets. The framework adds and
emphasizes monocular depth cues corresponding to the depth information of a
supplemental disparity map. This creates a compelling depth sensation on 2D
display devices. The framework enhances multiple depth cues in parallel, such
as depth of field, local contrast, ambient occlusion and saturation. At the
same time, it improves the disparity map quality. Depth cues control how a
human explores an image, since the perception of distance is coupled to
visual attention. The presented work demonstrates the effectiveness of the
proposed framework in guiding the viewer, without destroying the image
content, by evaluating the performance in search-and-find tasks. A user study
analyzes the connection between faster response times and the boosting of
particular monocular depth cues.}
}
@article{CGF31-2:285-293:2012,
journal = {Computer Graphics Forum},
title = {{Interactive Multi-perspective Imagery from Photos and Videos}},
author = {Henrik Lieng and James Tompkin and Jan Kautz },
pages = {285-293},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp285-293.pdf},
DOI = {10.1111/j.1467-8659.2012.03007.x},
abstract = {Photographs usually show a scene from a single perspective.
However, as commonly seen in art, scenes and objects can be visualized from
multiple perspectives. Making such images manually is time consuming and
tedious. We propose a novel system for designing multi-perspective images
and videos. First, the images in the input sequence are aligned using
structure from motion. This enables us to track feature points across the
sequence. Second, the user chooses portal polygons in a target image into
which different perspectives are to be embedded. The corresponding image
regions from the other images are then copied into these portals. Due to
the feature tracking and automatic warping, this approach is considerably
faster than current tools. We explore a wide range of artistic applications
using our system with image and video data, such as looking around corners
and up and down staircases, recursive multi-perspective imaging, cubism and
panoramas.}
}
@article{CGF31-2:295-303:2012,
journal = {Computer Graphics Forum},
title = {{Light-Field Retargeting}},
author = {Clemens Birklbauer and Oliver Bimber },
pages = {295-303},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp295-303.pdf},
DOI = {10.1111/j.1467-8659.2012.03008.x},
abstract = {We present a first approach to light-field retargeting using
z-stack seam carving, which allows light-field compression and extension
while retaining angular consistency. Our algorithm first converts an input
light field into a set of perspective-sheared focal stacks. It then applies
3D deconvolution to convert the focal stacks into z-stacks, and seam-carves
the z-stack of the center perspective. The computed seams of the center
perspective are sheared and applied to the z-stacks of all off-center
perspectives. Finally, the carved z-stacks are converted back into the
perspective images of the output light field. To our knowledge, this is
the first approach to light-field retargeting. Unlike existing stereo-pair
retargeting or 3D retargeting techniques, it does not require depth
information.}
}
@article{CGF31-2:305-314:2012,
journal = {Computer Graphics Forum},
title = {{Unstructured Light Fields}},
author = {Abe Davis and Marc Levoy and Fredo Durand },
pages = {305-314},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp305-314.pdf},
DOI = {10.1111/j.1467-8659.2012.03009.x},
abstract = {We present a system for interactively acquiring and rendering
light fields using a hand-held commodity camera. The main challenge we
address is assisting a user in achieving good coverage of the 4D domain
despite the challenges of hand-held acquisition. We define coverage by
bounding reprojection error between viewpoints, which accounts for all 4
dimensions of the light field. We use this criterion together with a recent
Simultaneous Localization and Mapping technique to compute a coverage map on
the space of viewpoints. We provide users with real-time feedback and direct
them toward under-sampled parts of the light field. Our system is lightweight
and has allowed us to capture hundreds of light fields. We further present a
new rendering algorithm that is tailored to the unstructured yet dense data
we capture. Our method can achieve piecewise-bicubic reconstruction using a
triangulation of the captured viewpoints and subdivision rules applied to
reconstruction weights.}
}
@article{CGF31-2:315-324:2012,
journal = {Computer Graphics Forum},
title = {{Data Driven Surface Reflectance from Sparse and Irregular
Samples}},
author = {Roland Ruiters and Christopher Schwartz and Reinhard Klein },
pages = {315-324},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp315-324.pdf},
DOI = {10.1111/j.1467-8659.2012.03010.x},
abstract = {In recent years, measuring surface reflectance has become an
established method for high quality renderings. In this context,
non-parametric representations in particular have received a lot of attention,
as they allow for a very accurate representation of complex reflectance
behavior. However, the
acquisition of this data is a challenging task especially if complex object
geometry is involved. Capturing images of the object under varying
illumination and view conditions results in irregular angular samplings of
the reflectance function with a limited angular resolution. Classical
data-driven techniques, like tensor factorization, are not well suited for
such data sets as they require a resampling of the high dimensional
measurement data to a regular grid. This grid has to be at a much higher
angular resolution to avoid resampling artifacts, which in turn would lead to
data sets of enormous size. To overcome these problems we introduce a novel,
compact data-driven representation of reflectance functions based on a sum of
separable functions which are fitted directly to the irregular set of data
without any further resampling. The representation allows for efficient
rendering and is also well suited for GPU applications. By exploiting spatial
coherence of the reflectance function over the object, a very precise
reconstruction even of specular materials becomes possible with a
sparse input sampling. This would be impossible using standard data
interpolation techniques. Since our algorithm exclusively operates on the
compressed representation, it is both efficient in terms of memory use and
computational complexity, depending only sub-linearly on the size of the
fully tabulated data. The quality of the reflectance function is evaluated on
synthetic data sets as ground truth as well as on real world
measurements.}
}
@article{CGF31-2:325-333:2012,
journal = {Computer Graphics Forum},
title = {{Novel-View Synthesis of Outdoor Sport Events Using an Adaptive
View-Dependent Geometry}},
author = {Marcel Germann and Tiberiu Popa and Richard Keiser and Remo Ziegler
and Markus Gross },
pages = {325-333},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp325-333.pdf},
DOI = {10.1111/j.1467-8659.2012.03011.x},
abstract = {We propose a novel fully automatic method for novel-viewpoint
synthesis. Our method robustly handles multi-camera setups featuring
wide baselines in an uncontrolled environment. In a first step, robust and
sparse point correspondences are found based on an extension of the Daisy
features [TLF10]. These correspondences together with back-projection errors
are used to drive a novel adaptive coarse-to-fine reconstruction method,
which approximates detailed geometry while avoiding an extreme triangle
count. To render the scene from arbitrary viewpoints we use a view-dependent
blending of color information in combination with a view-dependent geometry
morph. The view-dependent geometry compensates for misalignments caused by
calibration errors. We demonstrate that our method works well under arbitrary
lighting conditions with as few as two cameras featuring wide baselines.
The footage taken from real sports broadcast events contains fine geometric
structures, which result in convincing novel-viewpoint renderings despite the
low resolution of the images.}
}
@article{CGF31-2:335-344:2012,
journal = {Computer Graphics Forum},
title = {{Analytic Anti-Aliasing of Linear Functions on Polytopes}},
author = {Thomas Auzinger and Michael Guthe and Stefan Jeschke },
pages = {335-344},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp335-344.pdf},
DOI = {10.1111/j.1467-8659.2012.03012.x},
abstract = {This paper presents an analytic formulation for anti-aliased
sampling of 2D polygons and 3D polyhedra. Our framework allows the exact
evaluation of the convolution integral with a linear function defined on the
polytopes. The filter is a spherically symmetric polynomial of any order,
supporting approximations to refined variants such as the Mitchell-Netravali
filter family. This enables high-quality rasterization of triangles and
tetrahedra with linearly interpolated vertex values to regular and
non-regular grids. A closed form solution of the convolution is presented and
an efficient implementation on the GPU using DirectX and CUDA C is
described.}
}
@article{CGF31-2:345-353:2012,
journal = {Computer Graphics Forum},
title = {{SimpleFlow: A Non-iterative, Sublinear Optical Flow
Algorithm}},
author = {Michael Tao and Jiamin Bai and Pushmeet Kohli and Sylvain Paris
},
pages = {345-353},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp345-353.pdf},
DOI = {10.1111/j.1467-8659.2012.03013.x},
abstract = {Optical flow is a critical component of video editing
applications, e.g. for tasks such as object tracking, segmentation, and
selection. In this paper, we propose an optical flow algorithm called
SimpleFlow whose running times increase sublinearly in the number of pixels.
Central to our approach is a probabilistic representation of the motion flow
that is computed using only local evidence and without resorting to global
optimization. To estimate the flow in image regions where the motion is
smooth, we use a sparse set of samples only, thereby avoiding the expensive
computation inherent in traditional dense algorithms. We show that our
results can be used as is for a variety of video editing tasks. For
applications where accuracy is paramount, we use our result to bootstrap a
global optimization. This significantly reduces the running times of such
methods without sacrificing accuracy. We also demonstrate that the SimpleFlow
algorithm can process HD and 4K footage in reasonable times.}
}
@article{CGF31-2:355-364:2012,
journal = {Computer Graphics Forum},
title = {{SMAA: Enhanced Subpixel Morphological Antialiasing}},
author = {Jorge Jimenez and Jose I. Echevarria and Tiago Sousa and Diego
Gutierrez },
pages = {355-364},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp355-364.pdf},
DOI = {10.1111/j.1467-8659.2012.03014.x},
abstract = {We present a new image-based, post-processing antialiasing
technique, which offers practical solutions to the common, open problems of
existing filter-based real-time antialiasing algorithms. Some of the new
features include local contrast analysis for more reliable edge detection,
and a simple and effective way to handle sharp geometric features and
diagonal lines. This, along with our accelerated and accurate pattern
classification, allows for a better reconstruction of silhouettes. Our method
shows for the first time how to combine morphological antialiasing (MLAA)
with additional multi/supersampling strategies (MSAA, SSAA) for accurate
subpixel features, and how to couple it with temporal reprojection; always
preserving the sharpness of the image. All these solutions combine
synergistically, making for a very robust technique that yields results of
better overall quality than previous approaches, converging more closely to
MSAA/SSAA references while maintaining extremely fast execution times.
Additionally, we
propose different presets to better fit the available resources or particular
needs of each scenario.}
}
@article{CGF31-2:365-372:2012,
journal = {Computer Graphics Forum},
title = {{Black is Green: Adaptive Color Transformation For Reduced Ink
Usage}},
author = {Lior Shapira and Boris Oicherman },
pages = {365-372},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp365-372.pdf},
DOI = {10.1111/j.1467-8659.2012.03015.x},
abstract = {The vast majority of color transformations applied to an image in
the digital press industry are static and precalculated. In order to achieve
the best quality on a wide variety of different images, these
transformations tend to be highly conservative with respect to the use of
black ink. This results in excessive use of inks, which has a negative
economic and environmental impact. We present a method for dynamic
computation of color transformation based on image content, with the aim to
reduce ink usage. We analyze the image, and predict areas in which quality
artifacts that may result from such a reduction will be masked by the image
content. These areas include detailed textures, noisy areas and structure. We
then replace the image CMYK values by a new combination with increased black.
Our algorithm ensures negligible color shifts in the resulting image, and no
visible reduction in quality. We achieve an average of over 10% ink
savings.}
}
@article{CGF31-2:373-382:2012,
journal = {Computer Graphics Forum},
title = {{Real-time Realistic Rendering and Lighting of Forests}},
author = {Eric Bruneton and Fabrice Neyret },
pages = {373-382},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp373-382.pdf},
DOI = {10.1111/j.1467-8659.2012.03016.x},
abstract = {Realistic real-time rendering and lighting of forests is an
important aspect for simulators and video games. This is a difficult problem,
due to the massive amount of geometry: aerial forest views display millions
of trees on a wide range of distances, from the camera to the horizon. Light
interactions, whose effects are visible at all scales, are also a problem:
sun and sky dome contributions, shadows between trees, inside trees, on the
ground, and view-light masking correlations. In this paper we present a
method to render very large forest scenes in real time, with realistic
lighting at all scales, and without popping or aliasing. Our method is based
on two new forest representations, z-fields and shader-maps, with a seamless
transition between them. Our first model builds on light fields and height
fields to represent and render the nearest trees individually, accounting for
all lighting effects. Our second model is a location-, view- and
light-dependent shader mapped on the terrain, accounting for the accumulated
subpixel
effects. Qualitative comparisons with photos show that our method produces
realistic results.}
}
@article{CGF31-2:383-392:2012,
journal = {Computer Graphics Forum},
title = {{Interactive Editing of GigaSample Terrain Fields}},
author = {Marc Treib and Florian Reichl and Stefan Auer and Rüdiger
Westermann },
pages = {383-392},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp383-392.pdf},
DOI = {10.1111/j.1467-8659.2012.03017.x},
abstract = {Previous terrain rendering approaches have addressed the aspect
of data compression and fast decoding for rendering, but applications where
the terrain is repeatedly modified and needs to be buffered on disk have not
been considered so far. Such applications require both decoding and encoding
to be faster than disk transfer. We present a novel approach for editing
gigasample terrain fields at interactive rates and high quality. To achieve
high decoding and encoding throughput, we employ a compression scheme for
height and pixel maps based on a sparse wavelet representation. On recent
GPUs it can encode and decode up to 270 and 730 MPix/s of color data,
respectively, at compression rates and quality superior to JPEG, and it
achieves more than twice these rates for lossless height field compression.
The construction and rendering of a height field triangulation is avoided by
using GPU ray-casting directly on the regular grid underlying the compression
scheme. We show the efficiency of our method for interactive editing and
continuous level-of-detail rendering of terrain fields comprising several
hundred gigasamples.}
}
@article{CGF31-2:393-402:2012,
journal = {Computer Graphics Forum},
title = {{A GPU-based Approach for Massive Model Rendering with
Frame-to-Frame Coherence}},
author = {Chao Peng and Yong Cao },
pages = {393-402},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp393-402.pdf},
DOI = {10.1111/j.1467-8659.2012.03018.x},
abstract = {Rendering massive 3D models in real-time has long been recognized
as a very challenging problem because of the limited computational power and
memory space available in a workstation. Most existing rendering techniques,
especially level of detail (LOD) processing, have suffered from their
sequential execution nature. We present a GPU-based approach which enables
interactive rendering of large 3D models with hundreds of millions of
triangles. Our work contributes to massive-model rendering research in two
ways. First, we present a simple and efficient mesh simplification algorithm
tailored to the GPU architecture. Second, we propose a novel GPU out-of-core
approach
that adopts a frame-to-frame coherence scheme in order to minimize the high
communication cost between CPU and GPU. Our results show that the parallel
algorithm of mesh simplification and the GPU out-of-core approach
significantly improve the overall rendering performance.}
}
@article{CGF31-2:403-412:2012,
journal = {Computer Graphics Forum},
title = {{Rasterized Bounding Volume Hierarchies}},
author = {Jan Novák and Carsten Dachsbacher },
pages = {403-412},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp403-412.pdf},
DOI = {10.1111/j.1467-8659.2012.03019.x},
abstract = {We present the rasterized bounding volume hierarchy (RBVH), a
compact data structure that accelerates approximate ray casting of complex
meshes and provides adjustable level of detail. During construction, we
identify subtrees of BVHs containing surfaces that can be represented by
height fields. For these subtrees the conventional ray-surface intersection,
which possibly involves a large number of triangles, is replaced by a simple
ray marching procedure to find the intersection with the surface. We describe
GPU algorithms for construction, ray casting, and data querying of the RBVH
that achieve comparable or higher performance than state of the art
acceleration structures for triangle meshes. Moreover, RBVHs provide an
inherent surface parameterization for storing data on the surfaces and
natively handle triangle and point-based surface representations. We also
show that RBVHs support adaptive level-of-detail and can be combined with
traditional BVHs to handle complex scenes.}
}
@article{CGF31-2:413-420:2012,
journal = {Computer Graphics Forum},
title = {{Procedural Texture Preview}},
author = {Anass Lasram and Sylvain Lefebvre and Cyrille Damez },
pages = {413-420},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp413-420.pdf},
DOI = {10.1111/j.1467-8659.2012.03020.x},
abstract = {Procedural textures usually require spending time testing
parameters to explore the diversity of appearances they produce. This paper
introduces the
idea of a procedural texture preview: A single static image summarizing in a
limited pixel space the appearances produced by a given procedure. Unlike
grids of thumbnails our previews present a continuous image of appearances,
analogous to a map. The main challenge is to ensure that most appearances are
visible, are allocated a similar pixel area, and are ordered in a smooth
manner throughout the preview. To reach this goal, we introduce a new layout
algorithm accounting simultaneously for these criteria. After computing a
layout of appearances, we rely on by-example texture synthesis to produce the
final preview. We demonstrate our approach on a database of production-level
procedural textures.}
}
@article{CGF31-2:421-430:2012,
journal = {Computer Graphics Forum},
title = {{Mesh Colorization}},
author = {George Leifman and Ayellet Tal },
pages = {421-430},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp421-430.pdf},
DOI = {10.1111/j.1467-8659.2012.03021.x},
abstract = {This paper proposes a novel algorithm for colorization of meshes.
This is important for applications in which the model needs to be colored by
just a handful of colors or when no relevant image exists for texturing the
model. For instance, archaeologists argue that the great Roman or Greek
statues were full of color in the days of their creation, and traces of the
original colors can be found. In this case, our system lets the user scribble
some desired colors in various regions of the mesh. Colorization is then
formulated as a constrained quadratic optimization problem, which can be
readily solved. Special care is taken to avoid color bleeding between
regions, through the definition of a new direction field on meshes.}
}
@article{CGF31-2:431-438:2012,
journal = {Computer Graphics Forum},
title = {{3D Material Style Transfer}},
author = {Chuong H. Nguyen and Tobias Ritschel and Karol Myszkowski and Elmar
Eisemann and Hans-Peter Seidel },
pages = {431-438},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp431-438.pdf},
DOI = {10.1111/j.1467-8659.2012.03022.x},
abstract = {This work proposes a technique to transfer the material style or
mood from a guide source such as an image or video onto a target 3D scene. It
formulates the problem as a combinatorial optimization of assigning discrete
materials extracted from the guide source to discrete objects in the target
3D scene. The assignment is optimized to fulfill multiple goals: overall
image mood based on several image statistics; spatial material organization
and grouping as well as geometric similarity between objects that were
assigned to similar materials. To be able to use common uncalibrated
images and videos with unknown geometry and lighting as guides, a material
estimation derives perceptually plausible reflectance, specularity,
glossiness, and texture. Finally, results produced by our method are compared
to manual material assignments in a perceptual study.}
}
@article{CGF31-2:439-448:2012,
journal = {Computer Graphics Forum},
title = {{Repetition Maximization based Texture Rectification}},
author = {Dror Aiger and Daniel Cohen-Or and Niloy J. Mitra },
pages = {439-448},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp439-448.pdf},
DOI = {10.1111/j.1467-8659.2012.03023.x},
abstract = {Many photographs are taken in perspective. Techniques for
rectifying the resulting perspective distortions typically rely on the existence
of parallel lines in the scene. In scenarios where such parallel lines are
hard to automatically extract or manually annotate, the unwarping process
remains a challenge. In this paper, we introduce an automatic algorithm for
rectifying images containing textures of repeated elements lying on an
unknown plane. We unwarp the input by maximizing image self-similarity
over the space of homography transformations. We map a set of detected
regional descriptors to surfaces in a transformation space, compute the
intersection points among triplets of such surfaces, and then use consensus
among the projected intersection points to extract the correcting transform.
Our algorithm is global, robust, and does not require explicit or accurate
detection of similar elements. We evaluate our method on a variety of
challenging textures and images. The rectified outputs are directly useful
for various tasks including texture synthesis, image completion, etc.}
}
@article{CGF31-2:449-458:2012,
journal = {Computer Graphics Forum},
title = {{Practical Spectral Photography}},
author = {Ralf Habel and Michael Kudenov and Michael Wimmer },
pages = {449-458},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp449-458.pdf},
DOI = {10.1111/j.1467-8659.2012.03024.x},
abstract = {We introduce a low-cost and compact spectral imaging camera
design based on unmodified consumer cameras and a custom camera objective.
The device can be used in a high-resolution configuration that measures the
spectrum of a column of an imaged scene with up to 0.8 nm spectral
resolution, rivalling commercial non-imaging spectrometers, and a
mid-resolution hyperspectral mode that allows the spectral measurement of a
whole image, with up to 5 nm spectral resolution and 120x120 spatial
resolution. We develop the necessary calibration methods based on
halogen/fluorescent lamps and laser pointers to acquire all necessary
information about the optical system. We also derive the mathematical methods
to interpret and reconstruct spectra directly from the Bayer array images
of a standard RGGB camera. This objective design introduces accurate spectral
remote sensing to computational photography, with numerous applications in
color theory, colorimetry, vision and rendering, making the acquisition of a
spectral image as simple as taking a high-dynamic-range image.}
}
@article{CGF31-2:459-468:2012,
journal = {Computer Graphics Forum},
title = {{Coding Depth through Mask Structure}},
author = {Horacio E. Fortunato and Manuel M. Oliveira },
pages = {459-468},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp459-468.pdf},
DOI = {10.1111/j.1467-8659.2012.03025.x},
abstract = {We present a coded-aperture method based on a family of masks
obtained as the convolution of one "hole" with a structural component
consisting of an arrangement of Dirac delta functions. We call the
arrangement of delta functions the structural component of the mask, and use
it to efficiently encode scene distance information. We illustrate the
potential of our approach by analyzing a family of masks defined by a
circular hole component and a structural component consisting of a linear
combination of three Dirac deltas. We show that the structural component
transitions from well conditioned to ill conditioned as the relative weight
of the central peak varies with respect to the lateral ones. For the
well-conditioned structural components, deconvolution is efficiently
performed by inverse filtering, allowing for fast estimation of scene depth.
We demonstrate the effectiveness of our approach by constructing a mask for
distance coding and using it to recover pairs of distance maps and
structurally-deconvolved images from single photographs. For this
application, we obtain a significant speedup and extended range and depth
resolution compared to previous techniques.}
}
@article{CGF31-2:469-478:2012,
journal = {Computer Graphics Forum},
title = {{Soft Stacking}},
author = {James McCann and Nancy S. Pollard },
pages = {469-478},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp469-478.pdf},
DOI = {10.1111/j.1467-8659.2012.03026.x},
abstract = {In this paper, we present a continuous approach to ordering 2D
images when compositing. Previous methods for stacking image layers
require them to appear in a single (though possibly different) order at every
point in the image. Our soft stacking approach removes this restriction -
allowing layers to stack as if they were volumes of fog, appearing partially
in front of and partially in back of other layers within the same pixel, and
moving smoothly through other layers across the image. Our approach involves
augmenting each pixel with stacking coefficients - a necessary and sufficient
representation for sub-pixel stacking complexity. These stacking coefficients
arise naturally when considering sub-pixel stacking complexity, much as
continuous (alpha) transparency arises when considering sub-pixel coverage
complexity. While the number of stacking coefficients required to represent
all possible sub-pixel stacking arrangements is factorial in the number of
layers in the stack, in many practical situations only a small subset of the
stacking coefficients are nonzero. We use this sparsity as the basis of a
prototype that allows artists to interactively paint stacking adjustments
into composites. Additionally, we demonstrate how to generate
optimally-stacked images under a generalized notion of stacking
consistency.}
}
@article{CGF31-2:479-488:2012,
journal = {Computer Graphics Forum},
title = {{Metering for Exposure Stacks}},
author = {Orazio Gallo and Marius Tico and Roberto Manduchi and Natasha
Gelfand and Kari Pulli },
pages = {479-488},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp479-488.pdf},
DOI = {10.1111/j.1467-8659.2012.03027.x},
abstract = {When creating a High-Dynamic-Range (HDR) image from a sequence of
differently exposed Low-Dynamic-Range (LDR) images, the set of LDR
images is usually generated by sampling the space of exposure times with a
geometric progression and without explicitly accounting for the distribution
of irradiance values of the scene. We argue that this choice can produce
sub-optimal results both in terms of the number of acquired pictures and the
quality of the resulting HDR image. This paper presents a method to estimate
the full irradiance histogram of a scene, and a strategy to select the set of
exposures that need to be acquired. Our selection usually requires a smaller
or equal number of LDR images, yet produces higher-quality HDR images.}
}
@article{CGF31-2:489-498:2012,
journal = {Computer Graphics Forum},
title = {{Realistic Following Behaviors for Crowd Simulation}},
author = {Samuel Lemercier and Asja Jelic and Richard Kulpa and Jiale Hua and
Jérôme Fehrenbach and Pierre Degond and Cécile
Appert-Rolland and Stéphane Donikian and Julien Pettré },
pages = {489-498},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp489-498.pdf},
DOI = {10.1111/j.1467-8659.2012.03028.x},
abstract = {While walking through a crowd, a pedestrian experiences a large
number of interactions with his neighbors. The nature of these interactions
is varied, and it has been observed that macroscopic phenomena emerge from
the combination of these local interactions. Crowd models have hitherto
considered collision avoidance as the only type of interaction between
individuals; few have considered walking in groups. By contrast, our paper
focuses on interactions due to the following behaviors of pedestrians.
Following is frequently observed when people walk in corridors or when they
queue. Typical macroscopic stop-and-go waves emerge under such traffic
conditions. Our contributions are, first, an experimental study on following
behaviors, second, a numerical model for simulating such interactions, and
third, its calibration, evaluation and applications. Through an experimental
approach, we elaborate and calibrate a model from microscopic analysis of
real kinematics data collected during experiments. We carefully evaluate our
model both at the microscopic and the macroscopic levels. We also demonstrate
our approach on applications where following interactions are prominent.}
}
@article{CGF31-2:499-508:2012,
journal = {Computer Graphics Forum},
title = {{Manipulation of Flexible Objects by Geodesic Control}},
author = {He Wang and Taku Komura },
pages = {499-508},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp499-508.pdf},
DOI = {10.1111/j.1467-8659.2012.03029.x},
abstract = {We propose an effective and intuitive method for controlling
flexible models such as ropes and cloth. Automating manipulation of such
flexible objects is not an easy task due to the high dimensionality of the
objects and the low dimensionality of the control. In order to cope with this
problem, we introduce a method called Geodesic Control, which greatly helps
to manipulate flexible objects. The core idea is to decrease the degrees of
freedom of the flexible object by moving it along the geodesic line of the
object that it is interacting with. By repeatedly applying this control,
users can easily synthesize animations of twisting and knotting a piece of
rope or wrapping a cloth around an object. We show examples of ''furoshiki
wrapping'', in which an object is wrapped by a cloth by a series of maneuvers
based on Geodesic Control. As our representation can abstract such maneuvers
well, the procedure designed by a user can be re-applied for different
combinations of cloth and object. The method is applicable not only for
computer animation but also for 3D computer games and virtual reality
systems.}
}
@article{CGF31-2:509-518:2012,
journal = {Computer Graphics Forum},
title = {{Super-Clothoids}},
author = {Florence Bertails-Descoubes },
pages = {509-518},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp509-518.pdf},
DOI = {10.1111/j.1467-8659.2012.03030.x},
abstract = {Piecewise clothoids are 2D curves with continuous, piecewise
linear curvature. Due to their smoothness properties, they have been
extensively used in road design and robot path planning, as well as for the
compact representation of hand-drawn curves. In this paper we present the
Super-Clothoid model, a new mechanical model that, for the first time,
allows computing the dynamics of an elastic, inextensible piecewise
clothoid. We first show that the kinematics of this model can be computed
analytically in terms of the Fresnel integrals, and precisely evaluated when
required. Secondly, the discrete dynamics, naturally emerging from the
Lagrange equations of motion, can be robustly and efficiently computed by
performing and storing formal computations as far as possible, resorting to
numerical evaluation only when assembling the linear system to be solved at
each time step. As a result, simulations turn out to be both interactive and
stable, even for large displacements of the rod. Finally, we demonstrate the
versatility of our model by handling various boundary conditions for the rod
as well as complex external constraints such as frictional contact, and show
that our model is perfectly adapted to inverse statics. Compared to
lower-order models, the super-clothoid appears as a more natural and
aesthetic primitive for bridging the gap between 2D geometric design and
physics-based deformation.}
}
@article{CGF31-2:519-528:2012,
journal = {Computer Graphics Forum},
title = {{Data-Driven Estimation of Cloth Simulation Models}},
author = {Eder Miguel and Derek Bradley and Bernhard Thomaszewski and Bernd
Bickel and Wojciech Matusik and Miguel A. Otaduy and Steve Marschner },
pages = {519-528},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp519-528.pdf},
DOI = {10.1111/j.1467-8659.2012.03031.x},
abstract = {Progress in cloth simulation for computer animation and apparel
design has led to a multitude of deformation models, each with its own way of
relating geometry, deformation, and forces. As simulators improve,
differences between these models become more important, but it is difficult
to choose a model and a set of parameters to match a given real material
simply by looking at simulation results. This paper provides measurement and
fitting methods that allow nonlinear models to be fit to the observed
deformation of a particular cloth sample. Unlike standard textile testing,
our system measures complex 3D deformations of a sheet of cloth, not just
one-dimensional force-displacement curves, so it works under a wider range of
deformation conditions. The fitted models are then evaluated by comparison to
measured deformations with motions very different from those used for
fitting.}
}
@article{CGF31-2:529-534:2012,
journal = {Computer Graphics Forum},
title = {{A Computational Model of Afterimages}},
author = {Tobias Ritschel and Elmar Eisemann },
pages = {529-534},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp529-534.pdf},
DOI = {10.1111/j.1467-8659.2012.03053.x},
abstract = {Afterimages are optical illusions, particularly well perceived
when fixating an image for an extended period of time and then looking at a
neutral background, where an inverted copy of the original stimulus appears.
The full mechanism that produces the perceived specific colors and shapes is
complex and not entirely understood, but most of the important attributes can
be well explained by bleaching of retinal photoreceptors (retinal kinetics).
We propose a model to compute afterimages that allows us to simulate their
temporal, color and time-frequency behavior. Using this model, high dynamic
range (HDR) content can be processed to add realistic afterimages to low
dynamic range (LDR) media. In this way, our approach helps convey the
original source's luminance and contrast. It can be applied in real time on
full-HD HDR content using standard graphics hardware. Finally, our approach
is validated in a perceptual study.}
}
@article{CGF31-2:535-544:2012,
journal = {Computer Graphics Forum},
title = {{Perceptually Linear Parameter Variations}},
author = {Norbert Lindow and Daniel Baum and Hans-Christian Hege },
pages = {535-544},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp535-544.pdf},
DOI = {10.1111/j.1467-8659.2012.03054.x},
abstract = {Most visual analysis tasks require interactive adjustment of
parameter values. In general, a linear variation of a parameter, using for
instance a GUI slider, changes the visual result in a perceptually non-linear
way. This hampers interactive adjustment of parameters, especially in regions
where rapid perceptual changes occur. Selecting a good parameter value
therefore remains a time-consuming and often difficult task. We propose a
novel technique to build a non-linear function that maps a new parameter to
the original parameter. By prefixing this function to the original parameter
and using the new parameter as input, a linear relationship between input and
visual feedback is obtained. To construct the non-linear function, we measure
the variation of the visual result using image metrics. Given a suitable
perceptual image metric, perceptually linear image variations are achieved.
We demonstrate the practical utility of our approach by implementing two
common image metrics, a perceptual and a non-perceptual one, and by applying
the method to a few visual analysis tasks.}
}
@article{CGF31-2:545-554:2012,
journal = {Computer Graphics Forum},
title = {{NoRM: No-Reference Image Quality Metric for Realistic Image
Synthesis}},
author = {Robert Herzog and Martin Cadík and Tunç O. Aydin and
Kwang In Kim and Karol Myszkowski and Hans-Peter Seidel },
pages = {545-554},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp545-554.pdf},
DOI = {10.1111/j.1467-8659.2012.03055.x},
abstract = {Synthetically generating images and video frames of complex 3D
scenes using some photo-realistic rendering software is often prone to
artifacts and requires expert knowledge to tune the parameters. The manual
work required for detecting and preventing artifacts can be automated through
objective quality evaluation of synthetic images. Most practical objective
quality assessment methods of natural images rely on a ground-truth
reference, which is often not available in rendering applications. While
general purpose no-reference image quality assessment is a difficult problem,
we show in a subjective study that the performance of a dedicated
no-reference metric as presented in this paper can match the state-of-the-art
metrics that do require a reference. This level of predictive power is
achieved by exploiting information about the underlying synthetic scene (e.g.,
3D surfaces, textures) instead of merely considering color, and training our
learning framework with typical rendering artifacts. We show that our method
successfully detects various non-trivial types of artifacts such as noise and
clamping bias due to insufficient virtual point light sources, and shadow map
discretization artifacts. We also briefly discuss an inpainting method for
automatic correction of detected artifacts.}
}
@article{CGF31-2:555-564:2012,
journal = {Computer Graphics Forum},
title = {{Unsharp Masking, Countershading and Halos: Enhancements or
Artifacts?}},
author = {Matthew Trentacoste and Rafal Mantiuk and Wolfgang Heidrich and
Florian Dufrot },
pages = {555-564},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp555-564.pdf},
DOI = {10.1111/j.1467-8659.2012.03056.x},
abstract = {Countershading is a common technique for local image contrast
manipulations, and is widely used both in automatic settings, such as image
sharpening and tonemapping, as well as under artistic control, such as in
paintings and interactive image processing software. Unfortunately,
countershading is a double-edged sword: while correctly chosen parameters for
a given viewing condition can significantly improve the image sharpness or
trick the human visual system into perceiving a higher contrast than
physically present in an image, wrong parameters, or different viewing
conditions can result in objectionable halo artifacts. In this paper we
investigate the perception of countershading in the context of a novel
mask-based contrast enhancement algorithm and analyze the circumstances under
which the resulting profiles turn from image enhancement to artifact for a
range of parameters and viewing conditions. Our experimental results can be
modeled as a function of the width of the countershading profile. We employ
this empirical function in a range of applications such as image resizing,
view dependent tone mapping, and countershading analysis in photographs and
works of fine art.}
}
@article{CGF31-2:565-574:2012,
journal = {Computer Graphics Forum},
title = {{Crowd Light: Evaluating the Perceived Fidelity of Illuminated
Dynamic Scenes}},
author = {Adrian Jarabo and Tom Van Eyck and Veronica Sundstedt and Kavita
Bala and Diego Gutierrez and Carol O'Sullivan },
pages = {565-574},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp565-574.pdf},
DOI = {10.1111/j.1467-8659.2012.03057.x},
abstract = {Rendering realistic illumination effects for complex animated
scenes with many dynamic objects or characters is computationally expensive.
Yet, it is not obvious how important such accurate lighting is for the
overall perceived realism in these scenes. In this paper, we present a
methodology to evaluate the perceived fidelity of illumination in scenes with
dynamic aggregates, such as crowds, and explore several factors which may
affect this perception. We focus in particular on evaluating how a popular
spherical harmonics lighting method can be used to approximate realistic
lighting of crowds. We conduct a series of psychophysical experiments to
explore how a simple approach to approximating global illumination, using
interpolation in the temporal domain, affects the perceived fidelity of
dynamic scenes with high geometric, motion, and illumination complexity. We
show that the complexity of the geometry and temporal properties of the crowd
entities, the motion of the aggregate as a whole, the type of interpolation
(i.e., of the direct and/or indirect illumination coefficients), and the
presence or absence of colour all affect perceived fidelity. We show that
high (i.e., above 75%) levels of perceived scene fidelity can be maintained
while interpolating indirect illumination for intervals of up to 30 frames,
resulting in a greater than three-fold rendering speed-up.}
}
@article{CGF31-2:575-582:2012,
journal = {Computer Graphics Forum},
title = {{Pixel Art with Refracted Light by Rearrangeable Sticks}},
author = {Yonghao Yue and Kei Iwasaki and Bing-Yu Chen and Yoshinori Dobashi
and Tomoyuki Nishita },
pages = {575-582},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp575-582.pdf},
DOI = {10.1111/j.1467-8659.2012.03036.x},
abstract = {Pixel art is a kind of digital art that, through per-pixel
manipulation, enables production of a diverse array of artistic images. In
this paper, we present a new way for people to experience and express pixel
art. Our digital art consists of a set of sticks made of acrylate resin, each
of which refracts light from a parallel light source in certain directions.
Artistic users are able to easily rearrange these sticks and view their
digital art through the refracted light projection on any planar surface. As
we demonstrate in this paper, a user can generate various artistic images
using only a single set of sticks. We additionally envision that our pixel
art with rearrangeable sticks would have great entertainment appeal, e.g., as
an art puzzle.}
}
@article{CGF31-2:583-592:2012,
journal = {Computer Graphics Forum},
title = {{crdbrd: Shape Fabrication by Sliding Planar Slices}},
author = {Kristian Hildebrand and Bernd Bickel and Marc Alexa },
pages = {583-592},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp583-592.pdf},
DOI = {10.1111/j.1467-8659.2012.03037.x},
abstract = {We introduce an algorithm and representation for fabricating 3D
shape abstractions using mutually intersecting planar cut-outs. The planes
have prefabricated slits at their intersections and are assembled by sliding
them together. Often such abstractions are used as a sculptural art form or
in architecture and are colloquially called 'cardboard sculptures'. Based on
an analysis of construction rules, we propose an extended binary space
partitioning tree as an efficient representation of such cardboard models
which allows us to quickly evaluate the feasibility of newly added planar
elements. The complexity of insertion order quickly increases with the number
of planar elements and manual analysis becomes intractable. We provide tools
for generating cardboard sculptures with guaranteed constructibility. In
combination with a simple optimization and sampling strategy for new
elements, planar shape abstraction models can be designed by iteratively
adding elements. As an output, we obtain a fabrication plan that can be
printed or sent to a laser cutter. We demonstrate the complete process by
designing and fabricating cardboard models of various well-known 3D
shapes.}
}
@article{CGF31-2:593-602:2012,
journal = {Computer Graphics Forum},
title = {{SHADOWPIX: Multiple Images from Self Shadowing}},
author = {Amit Bermano and Ilya Baran and Marc Alexa and Wojciech Matusik
},
pages = {593-602},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp593-602.pdf},
DOI = {10.1111/j.1467-8659.2012.03038.x},
abstract = {SHADOWPIX are white surfaces that display several prescribed
images formed by the self-shadowing of the surface when lit from certain
directions. The effect is surprising and not commonly seen in the real world.
We present algorithms for constructing SHADOWPIX that allow up to four
images to be embedded in a single surface. SHADOWPIX can produce a variety
of unusual effects depending on the embedded images: moving the light can
animate or relight the object in the image, or three colored lights may be
used to produce a single colored image. SHADOWPIX are easy to manufacture
using a 3D printer and we present photographs, videos, and renderings
demonstrating these effects.}
}
@article{CGF31-2:603-610:2012,
journal = {Computer Graphics Forum},
title = {{Manufacturing Layered Attenuators for Multiple Prescribed Shadow
Images}},
author = {Ilya Baran and Philipp Keller and Derek Bradley and Stelian Coros
and Wojciech Jarosz and Derek Nowrouzezahrai and Markus Gross },
pages = {603-610},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp603-610.pdf},
DOI = {10.1111/j.1467-8659.2012.03039.x},
abstract = {We present a practical and inexpensive method for creating
physical objects that cast different color shadow images when illuminated
by prescribed lighting configurations. The input to our system is a number of
lighting configurations and corresponding desired shadow images. Our
approach computes attenuation masks, which are then printed on transparent
materials and stacked to form a single multi-layer attenuator. When
illuminated with the input lighting configurations, this multi-layer
attenuator casts the prescribed color shadow images. Alternatively, our
method can compute layers so that their permutations produce different
prescribed shadow images under fixed lighting. Each multi-layer attenuator
is quick and inexpensive to produce, can generate multiple full-color
shadows, and can be designed to respond to different types of natural or
synthetic lighting setups. We illustrate the effectiveness of our multi-layer
attenuators in simulation and in reality, with the sun as a light
source.}
}
@article{CGF31-2:611-620:2012,
journal = {Computer Graphics Forum},
title = {{Rationalization of Triangle-Based Point-Folding Structures}},
author = {Henrik Zimmer and Marcel Campen and David Bommes and Leif Kobbelt
},
pages = {611-620},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp611-620.pdf},
DOI = {10.1111/j.1467-8659.2012.03040.x},
abstract = {In mechanical engineering and architecture, structural elements
with low material consumption and high load-bearing capabilities are essential
for light-weight and even self-supporting constructions. This paper deals
with so-called point-folding elements - non-planar, pyramidal panels, usually
formed from thin metal sheets, which exploit the increased structural
capabilities emerging from folds or creases. Given a triangulated free-form
surface, a corresponding point-folding structure is a collection of pyramidal
elements based on the triangles. User-specified or material-induced
geometric constraints often imply that each individual folding element has a
different shape, leading to immense fabrication costs. We present a
rationalization method for such structures which respects the prescribed
aesthetic and production constraints and finds a minimal set of molds for the
production process, leading to drastically reduced costs. For each base
triangle we compute and parametrize the range of feasible folding elements
that satisfy the given constraints within the allowed tolerances. Then we
pose the rationalization task as a geometric intersection problem, which we
solve so as to maximize the re-use of mold dies. Major challenges arise from
the high precision requirements and the non-trivial parametrization of the
search space. We evaluate our method on a number of practical examples where
we achieve rationalization gains of more than 90%.}
}
@article{CGF31-2:621-630:2012,
journal = {Computer Graphics Forum},
title = {{Interactive Self-Organizing Windows}},
author = {Markus Steinberger and Manuela Waldner and Dieter Schmalstieg
},
pages = {621-630},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp621-630.pdf},
DOI = {10.1111/j.1467-8659.2012.03041.x},
abstract = {In this paper, we present the design and implementation of a
dynamic window management technique that changes the perception of windows as
fixed-sized rectangles. The primary goal of self-organizing windows is to
automatically display the most relevant information for a user's current
activity, which removes the burden of organizing and arranging windows from
the user. We analyze the image-based representation of each window and
identify coherent pieces of information. The windows are then automatically
moved, scaled and composed in a content-aware manner to fit the most relevant
information into the limited area of the screen. During the design process,
we consider findings from previous experiments and show how users can benefit
from our system. We also describe how the immense processing power of current
graphics processing units can be exploited to build an interactive system
that finds an optimal solution within the complex design space of all
possible window transformations in real time.}
}
@article{CGF31-2:631-640:2012,
journal = {Computer Graphics Forum},
title = {{Exploring Shape Variations by 3D-Model Decomposition and Part-based
Recombination}},
author = {Arjun Jain and Thorsten Thormählen and Tobias Ritschel and
Hans-Peter Seidel },
pages = {631-640},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp631-640.pdf},
DOI = {10.1111/j.1467-8659.2012.03042.x},
abstract = {We present a system that allows new shapes to be created by
blending between shapes taken from a database. We treat the shape as a
composition of parts; blending is performed by recombining parts from
different shapes according to constraints deduced by shape analysis. The
analysis involves shape segmentation, contact analysis, and symmetry
detection. The system can be used to rapidly instantiate new models that have
similar symmetry and adjacency structure to the database shapes, yet vary in
appearance.}
}
@article{CGF31-2:641-650:2012,
journal = {Computer Graphics Forum},
title = {{Linear Analysis of Nonlinear Constraints for Interactive Geometric
Modeling}},
author = {Martin Habbecke and Leif Kobbelt },
pages = {641-650},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp641-650.pdf},
DOI = {10.1111/j.1467-8659.2012.03043.x},
abstract = {Thanks to its flexibility and power to handle even complex
geometric relations, 3D geometric modeling with nonlinear constraints is an
attractive extension of traditional shape editing approaches. However,
existing approaches to analyze and solve constraint systems usually fail to
meet the two main challenges of an interactive 3D modeling system: For each
atomic editing operation, it is crucial to adjust as few auxiliary vertices
as possible in order to not destroy the user's earlier editing effort.
Furthermore, the whole constraint resolution pipeline is required to run in
real-time to enable a fluent, interactive workflow. To address both issues,
we propose a novel constraint analysis and solution scheme based on a key
observation: While the computation of actual vertex positions requires
nonlinear techniques, under a few simplifying assumptions the determination of
the minimal set of to-be-updated vertices can be performed on a linearization
of the constraint functions. Posing the constraint analysis phase as the
solution of an under-determined linear system with as few non-zero elements
as possible enables us to exploit an efficient strategy for the Cardinality
Minimization problem known from the field of Compressed Sensing, resulting in
an algorithm capable of handling hundreds of vertices and constraints in
real-time. We demonstrate, using the example of an image-based modeling
system for architectural models, that this approach performs very well in practical
applications.}
}
@article{CGF31-2:651-660:2012,
journal = {Computer Graphics Forum},
title = {{Multitouch Gestures for Constrained Transformation of 3D
Objects}},
author = {Oscar Kin-Chung Au and Chiew-Lan Tai and Hongbo Fu },
pages = {651-660},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp651-660.pdf},
DOI = {10.1111/j.1467-8659.2012.03044.x},
abstract = {3D transformation widgets allow constrained manipulations of 3D
objects and are commonly used in many 3D applications for fine-grained
manipulations. Since traditional transformation widgets have been mainly
designed for mouse-based systems, they are not user-friendly for multitouch
screens. There is little research on how to use the extra input bandwidth of
multitouch screens to ease constrained transformation of 3D objects. This
paper presents a small set of multitouch gestures which offers a seamless
control of manipulation constraints (i.e., axis or plane) and modes (i.e.,
translation, rotation or scaling). Our technique requires no complex
manipulation widgets, only candidate axes, which serve for visualization
rather than direct manipulation. This design not only minimizes visual
clutter but also tolerates imprecise touch-based input. To further expand our axis-based
interaction vocabulary, we introduce intuitive touch gestures for relative
manipulations, including snapping and borrowing axes of another object. A
preliminary evaluation shows that our technique is more effective than a
direct adaptation of standard transformation widgets to the tactile
paradigm.}
}
@article{CGF31-2:661-670:2012,
journal = {Computer Graphics Forum},
title = {{Interactive Coherence-Based Façade Modeling}},
author = {Przemyslaw Musialski and Michael Wimmer and Peter Wonka },
pages = {661-670},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp661-670.pdf},
DOI = {10.1111/j.1467-8659.2012.03045.x},
abstract = {We propose a novel interactive framework for modeling building
façades from images. Our method is based on the notion of
coherence-based editing which allows exploiting partial symmetries across the
façade at any level of detail. The proposed workflow mixes manual
interaction with automatic splitting and grouping operations based on
unsupervised cluster analysis. In contrast to previous work, our approach
leads to detailed 3D geometric models with up to several thousand regions per
façade. We compare our modeling scheme to others and evaluate our
approach in a user study with an experienced user and several novice
users.}
}
@article{CGF31-2:671-680:2012,
journal = {Computer Graphics Forum},
title = {{Factored Facade Acquisition using Symmetric Line
Arrangements}},
author = {Duygu Ceylan and Niloy J. Mitra and Hao Li and Thibaut Weise and
Mark Pauly },
pages = {671-680},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp671-680.pdf},
DOI = {10.1111/j.1467-8659.2012.03046.x},
abstract = {We introduce a novel framework for image-based 3D reconstruction
of urban buildings based on symmetry priors. Starting from image-level edges,
we generate a sparse and approximate set of consistent 3D lines. These lines
are then used to simultaneously detect symmetric line arrangements while
refining the estimated 3D model. Operating both on 2D image data and
intermediate 3D feature representations, we perform iterative feature
consolidation and effective outlier pruning, thus eliminating reconstruction
artifacts arising from ambiguous or wrong stereo matches. We exploit
non-local coherence of symmetric elements to generate precise model
reconstructions, even in the presence of a significant amount of outlier
image-edges arising from reflections, shadows, outlier objects, etc. We
evaluate our algorithm on several challenging test scenarios, both synthetic
and real. Beyond reconstruction, the extracted symmetry patterns are useful
for interactive and intuitive model manipulation.}
}
@article{CGF31-2:681-690:2012,
journal = {Computer Graphics Forum},
title = {{Procedural Generation of Parcels in Urban Modeling}},
author = {Carlos A. Vanegas and Tom Kelly and Basil Weber and Jan Halatsch
and Daniel G. Aliaga and Pascal Müller },
pages = {681-690},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp681-690.pdf},
DOI = {10.1111/j.1467-8659.2012.03047.x},
abstract = {We present a method for interactive procedural generation of
parcels within the urban modeling pipeline. Our approach performs a
partitioning of the interior of city blocks using user-specified subdivision
attributes and style parameters. Moreover, our method is both robust and
persistent in the sense of being able to map individual parcels from before
an edit operation to after an edit operation - this enables transferring
most, if not all, customizations despite small- to large-scale interactive
editing operations. The guidelines guarantee that the resulting subdivisions
are functionally and geometrically plausible for subsequent building modeling
and construction. Our results include visual and statistical comparisons that
demonstrate how the parcel configurations created by our method can closely
resemble those found in real-world cities of a large variety of styles. By
directly addressing the block subdivision problem, we intend to increase the
editability and realism of the urban modeling pipeline and to become a
standard in parcel generation for future urban modeling methods.}
}
@article{CGF31-2:691-700:2012,
journal = {Computer Graphics Forum},
title = {{Procedural Interpolation of Historical City Maps}},
author = {Lars Krecklau and Christopher Manthei and Leif Kobbelt },
pages = {691-700},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp691-700.pdf},
DOI = {10.1111/j.1467-8659.2012.03048.x},
abstract = {We propose a novel approach for the temporal interpolation of
city maps. The input to our algorithm is a sparse set of historical city maps
plus optional additional knowledge about construction or destruction events.
The output is a fast forward animation of the city map development where
roads and buildings are constructed and destroyed over time in order to match
the sparse historical facts and to look plausible where no precise facts are
available. A smooth transition between these real-world data points could be
interesting for educational purposes, because our system conveys an intuition
of how the city developed. The insertion of data, such as when and where a certain
building or road existed, is efficiently performed by an intuitive graphical
user interface. Our system collects all this information into a global
dependency graph of events. By propagating time intervals through the
dependency graph we can automatically derive the earliest and latest possible
dates for each event, which guarantee temporal as well as geographical
consistency (e.g. buildings can only appear along roads that have been
constructed before). During the simulation of the city development, events
are scheduled according to a score function that rates the plausibility of
the development (e.g. cities grow along major roads). Finally, the events are
properly distributed over time to control the dynamics of the city
development. Based on the city map animation we create a procedural city
model in order to render a 3D animation of the city development over
decades.}
}
@article{CGF31-2:701-710:2012,
journal = {Computer Graphics Forum},
title = {{Importance Caching for Complex Illumination}},
author = {Iliyan Georgiev and Jaroslav Krivánek and Stefan Popov and
Philipp Slusallek },
pages = {701-710},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp701-710.pdf},
DOI = {10.1111/j.1467-8659.2012.03049.x},
abstract = {Realistic rendering requires computing the global illumination in
the scene, and Monte Carlo integration is the best-known method for doing
that. The key to good performance is to carefully select the costly
integration samples, which is usually achieved via importance sampling.
Unfortunately, visibility is difficult to factor into the importance
distribution, which can greatly increase variance in highly occluded scenes
with complex illumination. In this paper, we present importance caching - a
novel approach that selects those samples with a distribution that includes
visibility, while maintaining efficiency by exploiting illumination
smoothness. At a sparse set of locations in the scene, we construct and cache
several types of probability distributions with respect to a set of virtual
point lights (VPLs), which notably include visibility. Each distribution type
is optimized for a specific lighting condition. For every shading point, we
then borrow the distributions from nearby cached locations and use them for
VPL sampling, avoiding additional bias. A novel multiple importance sampling
framework finally combines the many estimators. In highly occluded scenes,
where visibility is a major source of variance in the incident radiance, our
approach can reduce variance by more than an order of magnitude. Even in such
complex scenes we can obtain accurate and low noise previews with full global
illumination in a couple of seconds on a single mid-range CPU.}
}
@article{CGF31-2:711-718:2012,
journal = {Computer Graphics Forum},
title = {{Selective Inspection and Interactive Visualization of Light
Transport in Virtual Scenes}},
author = {Tim Reiner and Anton Kaplanyan and Marcel Reinhard and Carsten
Dachsbacher },
pages = {711-718},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp711-718.pdf},
DOI = {10.1111/j.1467-8659.2012.03050.x},
abstract = {This paper presents novel interactive visualization techniques
for inspecting the global light transport in virtual scenes. First, we
propose a simple extension to photon mapping to gather required lighting
information. We then introduce a set of five light inspection tools which
process this data to provide further insights. Corresponding visualizations
help the user to comprehend how light travels within a scene, how the
lighting affects the appearance of a surface, and how objects cause lighting
effects such as caustics. We implemented all tools for direct usage in real
production environments. Rendering is based on progressive photon mapping,
providing interactivity and immediate visual feedback. We conducted a user
study to evaluate all techniques in various application scenarios and hence
discuss their individual strengths and weaknesses. Moreover, we present
feedback from domain experts.}
}
@article{CGF31-2:719-726:2012,
journal = {Computer Graphics Forum},
title = {{Stochastic Progressive Photon Mapping for Dynamic Scenes}},
author = {Maayan Weiss and Thorsten Grosch },
pages = {719-726},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp719-726.pdf},
DOI = {10.1111/j.1467-8659.2012.03051.x},
abstract = {Stochastic Progressive Photon Mapping (SPPM) is a method to
simulate consistent global illumination. It is especially useful for
complicated light paths like caustics seen through a glass surface. Up to
now, SPPM can only be applied to a static scene, and noise-free images
require hours to compute. Our approach is to extend this method to dynamic
scenes (DSPPM) for an efficient simulation of animated objects and materials.
We identify both hit point and photon information that can be re-used for the
pixel statistics of multiple frames. In comparison to an SPPM simulation
performed for each frame, we achieve speedups of 1.96x to 9.53x in our test
scenes without compromising correctness or simulation quality.}
}
@article{CGF31-2:727-734:2012,
journal = {Computer Graphics Forum},
title = {{Real-time Rendering of Dynamic Scenes under All-frequency Lighting
using Integral Spherical Gaussian}},
author = {Kei Iwasaki and Wataru Furuya and Yoshinori Dobashi and Tomoyuki
Nishita },
pages = {727-734},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp727-734.pdf},
DOI = {10.1111/j.1467-8659.2012.03052.x},
abstract = {We propose an efficient rendering method for dynamic scenes under
all-frequency environmental lighting. To render the surfaces of objects
illuminated by distant environmental lighting, the triple product of the
lighting, the visibility function and the BRDF is integrated at each shading
point on the surfaces. Our method represents the environmental lighting and
the BRDF with a linear combination of spherical Gaussians, replacing the
integral of the triple product with the sum of the integrals of spherical
Gaussians over the visible region of the hemisphere. We propose a new form of
spherical Gaussian, the integral spherical Gaussian, that enables the fast
and accurate integration of spherical Gaussians with varying sharpness over
the visible region on the hemisphere. The integral spherical Gaussian
simplifies the integration to a sum of four pre-integrated values, which are
easily evaluated on-the-fly. With a combination of a set of spheres to
approximate object geometries and the integral spherical Gaussian, our method
can render object surfaces very efficiently. Our GPU implementation
demonstrates real-time rendering of dynamic scenes with dynamic viewpoints,
lighting, and BRDFs.}
}
@article{CGF31-2:735-744:2012,
journal = {Computer Graphics Forum},
title = {{A Continuous, Editable Representation for Deforming Mesh Sequences
with Separate Signals for Time, Pose and Shape}},
author = {Thomas J. Cashman and Kai Hormann },
pages = {735-744},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp735-744.pdf},
DOI = {10.1111/j.1467-8659.2012.03032.x},
abstract = {It is increasingly popular to represent non-rigid motion using a
deforming mesh sequence: a discrete sequence of frames, each of which is
given as a mesh with a common graph structure. Such sequences have the
flexibility to represent a wide range of mesh deformations used in practice,
but they are also highly redundant, expensive to store, and difficult to edit
in a time-coherent manner. We address these limitations with a continuous
representation that extracts redundancy in three separate phases, leading to
separate editable signals in time, pose and shape. The representation can be
applied to any deforming mesh sequence, in contrast to previous
domain-specific approaches. By modifying the three signal components, we
demonstrate time-coherent editing operations such as local repetition of part
of a sequence, frame rate conversion and deformation transfer. We also show
that our representation makes it possible to design new deforming sequences
simply by sketching a curve in a 2D pose space.}
}
@article{CGF31-2:745-754:2012,
journal = {Computer Graphics Forum},
title = {{Interaction Retrieval by Spacetime Proximity Graphs}},
author = {Jeff K. T. Tang and Jacky C. P. Chan and Howard Leung and Taku
Komura },
pages = {745-754},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp745-754.pdf},
DOI = {10.1111/j.1467-8659.2012.03033.x},
abstract = {In this paper, we propose a new method to index and retrieve
animation scenes in which multiple characters closely interact with one
another. Such a technique can be an important tool for animators when they
want to automatically extract the desired scene from a large database of
animation sequences. Existing methods for single-character movements do not
scale well for multiple characters as they do not take into account the
interaction of different body parts. In this paper, we propose a new distance
function that computes the similarity of two-character interactions using the
spatial relationship of the body parts. For each interaction, we produce a
time-varying graph structure based on the proximity of different joints, and
compute the similarity of interactions by comparing the topology and
Laplacian coordinates of the time-varying graph. Experimental results show
that the proposed method outperforms previous methods which are based on the
kinematics of individual characters. The top retrieved samples are similar
in high-level semantics while containing style variations.}
}
@article{CGF31-2:755-764:2012,
journal = {Computer Graphics Forum},
title = {{Automatically Rigging Multi-component Characters}},
author = {Gaurav Bharaj and Thorsten Thormählen and Hans-Peter Seidel
and Christian Theobalt },
pages = {755-764},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp755-764.pdf},
DOI = {10.1111/j.1467-8659.2012.03034.x},
abstract = {Rigging an arbitrary 3D character by creating an animation
skeleton is a time-consuming process even for experienced animators. In this
paper, we present an algorithm that automatically creates animation rigs for
multi-component 3D models, as they are typically found in online shape
databases. Our algorithm takes as input a multi-component model and an input
animation skeleton with associated motion data. It then creates a target
skeleton for the input model, calculates the rigid skinning weights, and a
mapping between the joints of the target skeleton and the input animation
skeleton. The automatic approach does not need additional semantic
information, such as component labels or user-provided correspondences, and
succeeds on a wide range of models where the number of components varies
significantly. It implicitly handles large differences in scale and proportion
between input and target skeletons and can deal with certain
morphological differences, e.g., if input and target have different numbers
of limbs. The output of our algorithm can be directly used in a retargeting
system to create a plausible animated character.}
}
@article{CGF31-2:765-774:2012,
journal = {Computer Graphics Forum},
title = {{Fast Grasp Synthesis for Various Shaped Objects}},
author = {Fumihito Kyota and Suguru Saito },
pages = {765-774},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp765-774.pdf},
DOI = {10.1111/j.1467-8659.2012.03035.x},
abstract = {Human-like grasp planning is difficult because a human hand has a
high number of degrees of freedom, and there are many grasping styles
depending on the shape of the object and the purpose of the grasp. We propose a fast grasp
synthesis system which enables a user to choose the desired grasping styles
from a set of grasp types in a human grasp taxonomy. Given a 3D model of an
object, our system detects graspable positions and generates grasping hand
postures for every applicable grasp type in the grasp taxonomy for each
grasping position. Hand postures are generated separately for each digit, and
hand alignment is then refined iteratively. A user can also specify the
grasping position by moving the cursor onto the object surface, as well as the grasp
type. The generated hand postures are shown as a table of thumbnail
images, and the user can select the grasping hand posture by clicking on
one of those postures. Our system enables interactive generation of various
grasping hand postures in real time.}
}
@article{CGF31-2:775-784:2012,
journal = {Computer Graphics Forum},
title = {{Centroidal Voronoi Tessellation of Line Segments and Graphs}},
author = {Lin Lu and Bruno Lévy and Wenping Wang },
pages = {775-784},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp775-784.pdf},
DOI = {10.1111/j.1467-8659.2012.03058.x},
abstract = {Centroidal Voronoi Tessellation (CVT) of points has many
applications in geometry processing, including remeshing and segmentation, to
name but a few. In this paper, we generalize the CVT concept to graphs via a
variational characterization. Given a graph and a 3D polygonal surface, our
method optimizes the placement of the vertices of the graph in such a way
that the graph segments best approximate the shape of the surface. We
formulate the computation of CVT for graphs as a continuous variational
problem, and present a simple, approximate method for solving this problem.
Our method is robust in the sense that it is independent of degeneracies in
the input mesh, such as skinny triangles, T-junctions, small gaps or multiple
connected components. We present some applications, to skeleton fitting and
to shape segmentation.}
}
@article{CGF31-2:785-794:2012,
journal = {Computer Graphics Forum},
title = {{A Simple Algorithm for Maximal Poisson-Disk Sampling in High
Dimensions}},
author = {Mohamed S. Ebeida and Scott A. Mitchell and Anjul Patney and Andrew
A. Davidson and John D. Owens },
pages = {785-794},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp785-794.pdf},
DOI = {10.1111/j.1467-8659.2012.03059.x},
abstract = {We provide a simple algorithm and data structures for
d-dimensional unbiased maximal Poisson-disk sampling. We use an order of
magnitude less memory and time than the alternatives. Our results become more
favorable as the dimension increases. This allows us to produce bigger
samplings. Domains may be non-convex with holes. The generated point cloud is
maximal up to round-off error. The serial algorithm is provably bias-free.
For an output sampling of size n in fixed dimension d, we use a linear memory
budget and empirical O(n) runtime. No known methods scale well with
dimension, due to the ''curse of dimensionality.'' The serial algorithm is
practical in dimensions up to 5, and has been demonstrated in 6d. We have
efficient GPU implementations in 2d and 3d. The algorithm proceeds through a
finite sequence of uniform grids. The grids guide the dart throwing and track
the remaining disk-free area. The top-level grid provides an efficient way to
test if a candidate dart is disk-free. Our uniform grids are like quadtrees,
except we delay splits and refine all leaves at once. Since the quadtree is
flat, it can be represented using very little memory: we just need the indices
of the active leaves and a global level. Also it is very simple to sample
from leaves with uniform probability.}
}
@article{CGF31-2:795-804:2012,
journal = {Computer Graphics Forum},
title = {{Medial Kernels}},
author = {Matthew Berger and Claudio T. Silva },
pages = {795-804},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp795-804.pdf},
DOI = {10.1111/j.1467-8659.2012.03060.x},
abstract = {We introduce the medial kernel, an association measure which
provides for a robust construction of volume-aware distances defined directly
on point clouds. The medial kernel is a similarity measure defined as the
likelihood of two points belonging to a common interior medial ball. We use
the medial kernel to construct a random walk on the point cloud, where
movement in the walk is restricted to regions containing similar medial
balls. Our distances are defined as the diffusion distances of this random
walk, assigning low distance to points belonging to similar medial regions.
These distances allow for a robust means of processing incomplete point
clouds, capable of distinguishing nearby yet separate undersampled
components, while also associating points which are far in Euclidean distance
yet mutually share an interior volume. We leverage these distances for
several applications: volumetric part segmentation, the construction of
function bases, and reconstruction-by-parts - a surface reconstruction method
which adheres to the medial kernel.}
}
@article{CGF31-2:805-814:2012,
journal = {Computer Graphics Forum},
title = {{Generalized Swept Mid-structure for Polygonal Models}},
author = {Tobias Martin and Guoning Chen and Suraj Musuvathy and Elaine Cohen
and Charles Hansen },
pages = {805-814},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp805-814.pdf},
DOI = {10.1111/j.1467-8659.2012.03061.x},
abstract = {We introduce a novel mid-structure called the generalized swept
mid-structure (GSM) of a closed polygonal shape, and a framework to compute
it. The GSM contains both curve and surface elements and has consistent
sheet-by-sheet topology, versus triangle-by-triangle topology produced by
other mid-structure methods. To obtain this structure, a harmonic function,
defined on the volume that is enclosed by the surface, is used to decompose
the volume into a set of slices. A technique for computing the 1D
mid-structures of these slices is introduced. The mid-structures of adjacent
slices are then iteratively matched through a boundary similarity computation
and triangulated to form the GSM. This structure respects the topology of the
input surface model and is a hybrid mid-structure representation. The
construction and topology of the GSM allow for local and global
simplification, used in further applications such as parameterization,
volumetric mesh generation and medical applications.}
}
@article{CGF31-2:815-824:2012,
journal = {Computer Graphics Forum},
title = {{Explicit Mesh Surfaces for Particle Based Fluids}},
author = {Jihun Yu and Chris Wojtan and Greg Turk and Chee Yap },
pages = {815-824},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp815-824.pdf},
DOI = {10.1111/j.1467-8659.2012.03062.x},
abstract = {We introduce the idea of using an explicit triangle mesh to track
the air/fluid interface in a smoothed particle hydrodynamics (SPH) simulator.
Once an initial surface mesh is created, this mesh is carried forward in time
using nearby particle velocities to advect the mesh vertices. The mesh
connectivity remains mostly unchanged across time-steps; it is only modified
locally for topology change events or for the improvement of triangle
quality. In order to ensure that the surface mesh does not diverge from the
underlying particle simulation, we periodically project the mesh surface onto
an implicit surface defined by the physics simulation. The mesh surface gives
us several advantages over previous SPH surface tracking techniques. We
demonstrate a new method for surface tension calculations that clearly
outperforms the state of the art in SPH surface tension for computer
graphics. We also demonstrate a method for tracking detailed surface
information (like colors) that is less susceptible to numerical diffusion
than competing techniques. Finally, our temporally-coherent surface mesh
allows us to simulate high-resolution surface wave dynamics without being
limited by the particle resolution of the SPH simulation.}
}
@article{CGF31-2:825-834:2012,
journal = {Computer Graphics Forum},
title = {{Advected Tangent Curves: A General Scheme for Characteristic Curves
of Flow Fields}},
author = {Tino Weinkauf and Hans-Christian Hege and Holger Theisel },
pages = {825-834},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp825-834.pdf},
DOI = {10.1111/j.1467-8659.2012.03063.x},
abstract = {We present the first general scheme to describe all four types of
characteristic curves of flow fields - stream, path, streak, and time lines -
as tangent curves of a derived vector field. Thus, all these lines can be
obtained by a simple integration of an autonomous ODE system. Our approach
draws on the principal ideas of the recently introduced tangent curve
description of streak lines. We provide the first description of time lines
as tangent curves of a derived vector field, which could previously only be
constructed in a geometric manner. Furthermore, our scheme gives rise to new
types of curves. In particular, we introduce advected stream lines as a
parameter-free variant of the time line metaphor. With our novel mathematical
description of characteristic curves, a large number of feature extraction
and analysis tools becomes available for all types of characteristic curves,
which were previously only available for stream and path lines. We will
highlight some of these possible applications including the computation of
time line curvature fields and the extraction of cores of swirling advected
stream lines.}
}
@article{CGF31-2:835-844:2012,
journal = {Computer Graphics Forum},
title = {{Computational Design of Rubber Balloons}},
author = {Mélina Skouras and Bernhard Thomaszewski and Bernd Bickel
and Markus Gross },
pages = {835-844},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp835-844.pdf},
DOI = {10.1111/j.1467-8659.2012.03064.x},
abstract = {This paper presents an automatic process for fabrication-oriented
design of custom-shaped rubber balloons. We cast computational balloon design
as an inverse problem: given a target shape, we compute an optimal balloon
that, when inflated, approximates the target as closely as possible. To solve
this problem numerically, we propose a novel physics-driven shape
optimization method, which combines physical simulation of inflatable elastic
membranes with a dedicated constrained optimization algorithm. We validate
our approach by fabricating balloons designed with our method and comparing
their inflated shapes to the results predicted by simulation. An extensive
set of manufactured sample balloons demonstrates the shape diversity that can
be achieved by our method.}
}
@article{CGF31-2:845-854:2012,
journal = {Computer Graphics Forum},
title = {{A Cell-Based Light Interaction Model for Human Blood}},
author = {Daniel Yim and Gladimir V. G. Baranoski and Brad W. Kimmel and T.
Francis Chen and Erik Miranda },
pages = {845-854},
volume= {31},
number= {2},
year = {2012},
URL = {http://diglib.eg.org/EG/CGF/volume31/issue2/v31i2pp845-854.pdf},
DOI = {10.1111/j.1467-8659.2012.03065.x},
abstract = {The development of predictive appearance models for organic
tissues is a challenging task due to the inherent complexity of these
materials. In this paper, we closely examine the biophysical processes
responsible for the appearance attributes of whole blood, one of the most
fundamental of these materials. We describe a new appearance model that
simulates the mechanisms of light propagation and absorption within the
cellular and fluid portions of this specialized tissue. The proposed model
employs a comprehensive yet flexible first-principles approach based on
the morphological, optical and biochemical properties of blood cells. This
approach allows for environment driven changes in the cells' anatomy and
orientation to be appropriately included into the light transport
simulations. The correctness and predictive capabilities of the proposed
model are quantitatively and qualitatively evaluated through comparisons of
modeled results with actual measured data and experimental observations
reported in the scientific literature. Its incorporation into rendering
systems is illustrated through images of blood samples depicting
appearance variations controlled by physiologically meaningful parameters.
Besides the contributions to the modeling of material appearance, the
research presented in this paper is also expected to have applications in a
wide range of biomedical areas, from optical diagnostics to the visualization
and noninvasive imaging of blood-perfused tissues.}
}