Index: sparse_sample/damkjer_sparse_sample.bib
===================================================================
--- sparse_sample/damkjer_sparse_sample.bib	(revision 0)
+++ sparse_sample/damkjer_sparse_sample.bib	(revision 4)
@@ -5,5 +5,5 @@
 year={2012},
 address={Bethesda, MD, USA},
-pages={1--27},
+pages={1--27}
 }
 @inproceedings{Demantke:2011,
@@ -27,4 +27,18 @@
 URL       = {http://www.eg.org/EG/DL/WS/SPBG/SPBG05/039-046.pdf},
 DOI       = {10.2312/SPBG/SPBG05/039-046}
+}
+@article{Dyn:2008,
+author={Dyn, Nira and Iske, Armin and Wendland, Holger},
+title={Meshfree Thinning of 3{D}~Point Clouds},
+journal={Foundations of Computational Mathematics},
+year={2008},
+publisher={Springer-Verlag New York, Inc.},
+address={Secaucus, NJ, USA},
+volume={8},
+number={4},
+pages={409--425},
+abstract={An efficient data reduction scheme for the simplification of a surface given by a large set X of 3D point-samples is proposed. The data reduction relies on a recursive point removal algorithm, termed thinning, which outputs a data hierarchy of point-samples for multiresolution surface approximation. The thinning algorithm works with a point removal criterion, which measures the significances of the points in their local neighbourhoods, and which removes a least significant point at each step. For any point x in the current point set Y \subset X, its significance reflects the approximation quality of a local surface reconstructed from neighbouring points in Y. The local surface reconstruction is done over an estimated tangent plane at x by using radial basis functions. The approximation quality of the surface reconstruction around x is measured by using its maximal deviation from the given point-samples X in a local neighbourhood of x. The resulting thinning algorithm is meshfree, i.e., its performance is solely based upon the geometry of the input 3D surface point-samples, and so it does not require any further topological information, such as point connectivities. Computational details of the thinning algorithm and the required data structures for efficient implementation are explained and its complexity is discussed. Two examples are presented for illustration.},
+issn={1615-3375},
+doi={10.1007/s10208-007-9008-7}
 }
 @article{Fattal:2002,
@@ -41,5 +55,5 @@
 isbn={1-58113-521-1},
 issn={0730-0301},
-doi={10.1145/566654.566573},
+doi={10.1145/566654.566573}
 }
 @inproceedings{Gressin:2012,
@@ -50,5 +64,5 @@
 pages={111--116},
 abstract={Automatic 3D point cloud registration is a main issue in computer vision and photogrammetry. The most commonly adopted solution is the well-known ICP (Iterative Closest Point) algorithm. This standard approach performs a fine registration of two overlapping point clouds by iteratively estimating the transformation parameters, and assuming that good a priori alignment is provided. A large body of literature has proposed many variations of this algorithm in order to improve each step of the process. The aim of this paper is to demonstrate how the knowledge of the optimal neighborhood of each 3D point can improve the speed and the accuracy of each of these steps. We will first present the geometrical features that are the basis of this work. These low-level attributes describe the shape of the neighborhood of each 3D point, computed by combining the eigenvalues of the local structure tensor. Furthermore, they allow to retrieve the optimal size for analyzing the neighborhood as well as the privileged local dimension (linear, planar, or volumetric). Besides, several variations of each step of the ICP process are proposed and analyzed by introducing these features. These variations are then compared on real datasets, as well with the original algorithm in order to retrieve the most efficient algorithm for the whole process. Finally, the method is successfully applied to various 3D lidar point clouds both from airborne, terrestrial and mobile mapping systems.},
-doi={10.5194/isprsannals-I-3-111-2012},
+doi={10.5194/isprsannals-I-3-111-2012}
 }
 @inproceedings{Gross:2006,
@@ -60,5 +74,5 @@
 mon=sep,
 pages={86--91},
-abstract={Three dimensional building models have become important during the past for various applications like urban planning, enhanced navigation or visualization of touristy or historic objects. 3D models can increase the understanding and explanation of complex urban scenes and support decision processes. A 3D model of the urban environment gives the possibility for simulation and rehearsal, to {\textquoteleft}{\textquoteleft}fly through{\textquoteright}{\textquoteright} the local urban terrain on different paths, and to visualize the scene from different viewpoints. The automatic generation of 3D models using Laser height data is one challenge for actual research.In many proposals for 3D model generation the process is starting by extraction of the border lines of man made objects. In our paper we are presenting an automatic generation method for lines based on the analysis of the 3D point clouds in the Laser height data. For each 3D point additional features considering the neighborhood are calculated. Invariance with respect to position, scale and rotation is achieved. Investigations concerning the required point density to get reliable results are accomplished. Comparing the new features with analytical results of typical point configurations provide discriminating features to select points which may belong to a line. Assembling these points to lines the borders of the objects were achieved. First results are presented.Possibilities for the enhancement of the calculation of the covariance matrix by including the intensity of the Laser signal and a refined consideration of the neighborhood are discussed.},
+abstract={Three dimensional building models have become important during the past for various applications like urban planning, enhanced navigation or visualization of touristy or historic objects. 3D models can increase the understanding and explanation of complex urban scenes and support decision processes. A 3D model of the urban environment gives the possibility for simulation and rehearsal, to {\textquoteleft}{\textquoteleft}fly through{\textquoteright}{\textquoteright} the local urban terrain on different paths, and to visualize the scene from different viewpoints. The automatic generation of 3D models using Laser height data is one challenge for actual research. In many proposals for 3D model generation the process is starting by extraction of the border lines of man made objects. In our paper we are presenting an automatic generation method for lines based on the analysis of the 3D point clouds in the Laser height data. For each 3D point additional features considering the neighborhood are calculated. Invariance with respect to position, scale and rotation is achieved. Investigations concerning the required point density to get reliable results are accomplished. Comparing the new features with analytical results of typical point configurations provide discriminating features to select points which may belong to a line. Assembling these points to lines the borders of the objects were achieved. First results are presented. Possibilities for the enhancement of the calculation of the covariance matrix by including the intensity of the Laser signal and a refined consideration of the neighborhood are discussed.}
 }
 @inproceedings{Gumhold:2001,
@@ -71,5 +85,5 @@
 pages={293--305},
 abstract={This paper describes a new method to extract feature lines directly from a surface point cloud. No surface reconstruction is needed in advance, only the inexpensive computation of a neighbor graph connecting nearby points. The feature extraction is performed in two stages. The 
-fist stage consists of assigning a penalty weight to each point  that indicates the unlikelihood that the point is part of a feature and assigning these penalty weights to the edges of a neighbor graph. Extracting a sub-graph of the neighbor graph that minimizes the edge penalty weights then produces a set of feature patterns. The second stage is especially useful for noisy data. It recovers feature lines and junctions by fitting wedges to the crease lines and corners to the junctions. As the method works on the local neighbor graph only, it is fast and automatically adapts to the sampling resolution. This makes the approach ideal as a pre-processing step in mesh generation.},
+first stage consists of assigning a penalty weight to each point that indicates the unlikelihood that the point is part of a feature and assigning these penalty weights to the edges of a neighbor graph. Extracting a sub-graph of the neighbor graph that minimizes the edge penalty weights then produces a set of feature patterns. The second stage is especially useful for noisy data. It recovers feature lines and junctions by fitting wedges to the crease lines and corners to the junctions. As the method works on the local neighbor graph only, it is fast and automatically adapts to the sampling resolution. This makes the approach ideal as a pre-processing step in mesh generation.}
 }
 @inproceedings{Isenburg:2011,
@@ -95,5 +109,5 @@
 abstract={We present a robust framework for extracting lines of curvature from point clouds. First, we show a novel approach to denoising the input point cloud using robust statistical estimates of surface normal and curvature which automatically rejects outliers and corrects points by energy minimization. Then the lines of curvature are constructed on the point cloud with controllable density. Our approach is applicable to surfaces of arbitrary genus, with or without boundaries, and is statistically robust to noise and outliers while preserving sharp surface features. We show our approach to be effective over a range of synthetic and real-world input datasets with varying amounts of noise and outliers. The extraction of curvature information can benefit many applications in CAD, computer vision and graphics for point cloud shape analysis, recognition and segmentation. Here, we show the possibility of using the lines of curvature for feature-preserving mesh construction directly from noisy point clouds.},
 issn={0010-4485},
-doi={10.1016/j.cad.2008.12.004},
+doi={10.1016/j.cad.2008.12.004}
 }
 @inproceedings{Konig:2009,
@@ -104,5 +118,5 @@
 year={2009},
 pages={83--92},
-abstract={Many algorithms for point cloud processing especially surface reconstruction rely on normal information available at each point. Normal directions are typically taken from a local tangent plane approximation which is obtained by fitting a surface model to the neighboring point samples. While the direction can be estimated locally, finding a consistent normal orientation over the whole surface is only possible in a global context. Existing methods for this problem can be classified into volumetric and propagation based approaches. Volumetric methods are trying to divide the space into inside and outside regions which is often complicated to implement and have problems with open surfaces and large holes. Propagation based methods can deal with open surfaces but often fail on sharp features. This paper analyses the behavior of surficial orientation methods, gives a better understanding of the underlying model assumptions of existing techniques and proposes a novel and improved propagation heuristic.},
+abstract={Many algorithms for point cloud processing especially surface reconstruction rely on normal information available at each point. Normal directions are typically taken from a local tangent plane approximation which is obtained by fitting a surface model to the neighboring point samples. While the direction can be estimated locally, finding a consistent normal orientation over the whole surface is only possible in a global context. Existing methods for this problem can be classified into volumetric and propagation based approaches. Volumetric methods are trying to divide the space into inside and outside regions which is often complicated to implement and have problems with open surfaces and large holes. Propagation based methods can deal with open surfaces but often fail on sharp features. This paper analyses the behavior of surficial orientation methods, gives a better understanding of the underlying model assumptions of existing techniques and proposes a novel and improved propagation heuristic.}
 }
 @article{Mitra:2004,
@@ -117,6 +131,15 @@
 publisher={World Scientific Publishing Company},
 issn={0218-1959},
-doi={10.1142/S0218195904001470},
-}
+doi={10.1142/S0218195904001470}
+}
+
+@inproceedings{Moenning:2003,
+crossref={VIIP:2003},
+author={Carsten Moenning and Neil A. Dodgson},
+title={A new point cloud simplification algorithm},
+booktitle={Proceedings of the 3rd IASTED International Conference on Visualization, Imaging, and Image Processing, September 8-10, 2003, Benalmadena, Spain},
+year={2003}
+}
+
 @incollection{Parrish:2012,
 crossref={Renslow:2012},
@@ -138,5 +161,5 @@
 abstract={Algorithms that use point-cloud models make heavy use of the neighborhoods of the points. These neighborhoods are used to compute the surface normals for each point, mollification, and noise removal. All of these primitive operations require the seemingly repetitive process of finding the \textit{k} nearest neighbors (\textit{kNN}s) of each point. These algorithms are primarily designed to run in main memory. However, rapid advances in scanning technologies have made available point-cloud models that are too large to fit in the main memory of a computer. This calls for more efficient methods of computing the \textit{kNN}s of a large collection of points many of which are already in close proximity. A fast \textit{kNN} algorithm is presented that makes use of the locality of successive points whose \textit{k} nearest neighbors are sought to reduce significantly the time needed to compute the neighborhood needed for the primitive operation as well as enable it to operate in an environment where the data is on disk. Results of experiments demonstrate an \textit{order} of magnitude improvement in the \textit{time} to perform the algorithm and \textit{several orders} of magnitude improvement in \textit{work efficiency} when compared with several prominent existing methods.},
 issn={0097-8493},
-doi={10.1016/j.cag.2006.11.011},
+doi={10.1016/j.cag.2006.11.011}
 }
 @incollection{Smith:2012,
@@ -157,5 +180,5 @@
 pages={133--143},
 abstract={This paper summarizes a system, and its component algorithms, for context-driven target vehicle detection in 3-D data that was developed under the Defense Advanced Research Projects Agency (DARPA) Exploitation of 3-D Data (E3D) Program. In order to determine the power of shape and geometry for the extraction of context objects and the detection of targets, our algorithm research and development concentrated on the geometric aspects of the problem and did not utilize intensity information. Processing begins with extraction of context information and initial target detection at reduced resolution, followed by a detailed, full-resolution analysis of candidate targets. Our reduced-resolution processing includes a probabilistic procedure for finding the ground that is effective even in rough terrain; a hierarchical, graph-based approach for the extraction of context objects and potential vehicle hide sites; and a target detection process that is driven by context-object and hide-site locations. Full-resolution processing includes statistical false alarm reduction and decoy mitigation. When results are available from previously collected data, we also perform object-level change detection, which affects the probabilities that objects are context objects or targets. Results are presented for both synthetic and collected LADAR data.},
-doi={10.1117/12.542536},
+doi={10.1117/12.542536}
 }
 @inproceedings{Yianilos:1993,
@@ -167,6 +190,7 @@
 year={1993},
 pages={311--321},
-abstract={We consider the computational problem of finding nearest neighbors in general metric spaces. Of particular interest are spaces that may not be conveniently embedded or approximated in Euclidian space, or where the dimensionality of a Euclidian representation is very high.Also relevant are high-dimensional Euclidian settings in which the distribution of data is in some sense of lower dimension and embedded in the space.The \textit{vp-tree} (vantage point tree) is introduced in several forms, together with associated algorithms, as an improved method for these difficult search problems. Tree construction executes in \textit{O}(\textit{n}log(\textit{n}) time, and search is under certain circumstances and in the limit, \textit{O}(log(\textit{n})) expected time. The theoretical basis for this approach is developed and the results of several experiments are reported. In Euclidian cases, kd-tree performance is compared.},
-}
+abstract={We consider the computational problem of finding nearest neighbors in general metric spaces. Of particular interest are spaces that may not be conveniently embedded or approximated in Euclidian space, or where the dimensionality of a Euclidian representation is very high. Also relevant are high-dimensional Euclidian settings in which the distribution of data is in some sense of lower dimension and embedded in the space. The \textit{vp-tree} (vantage point tree) is introduced in several forms, together with associated algorithms, as an improved method for these difficult search problems. Tree construction executes in \textit{O}(\textit{n}log(\textit{n})) time, and search is under certain circumstances and in the limit, \textit{O}(log(\textit{n})) expected time. The theoretical basis for this approach is developed and the results of several experiments are reported. In Euclidian cases, kd-tree performance is compared.}
+}
+
 @incollection{Young:2012,
 crossref={Renslow:2012},
@@ -177,4 +201,29 @@
 pages={17-37},
 year={2012}
+}
+
+@article{Yu:2010,
+author={Yu, Zhiwen and Wong, Hau-San and Peng, Hong and Ma, Qianli},
+editor={Horv{\'a}th, Imre and Lee, Kunwoo and Patrikalakis, Nicholas M.},
+title={{ASM}: An adaptive simplification method for 3D point-based models},
+journal={Computer-Aided Design},
+year={2010},
+publisher={Elsevier Science},
+address={New York, NY, USA},
+volume={42},
+number={7},
+pages={598--612},
+optkeywords={Clustering, Model simplification, Point clouds},
+abstract={Due to the popularity of computer games and computer-animated movies, 3{D} models are fast becoming an important element in multimedia applications. In addition to the conventional polygonal representation for these models, the direct adoption of the original scanned 3{D} point set for model representation is recently gaining more and more attention due to the possibility of bypassing the time consuming mesh construction stage, and various approaches have been proposed for directly processing point-based models. In particular, the design of a simplification approach which can be directly applied to 3{D} point-based models to reduce their size is important for applications such as 3{D} model transmission and archival. Given a point-based 3{D} model which is defined by a point set $P$ ($P = \left\{\bm{p}_{a} \in R^{3}\right\}$) and a desired reduced number of output samples $n_{s}$, the simplification approach finds a point set $P_{s}$ which \begin{inparaenum}[(i)]\item satisfies $\lvert P_{s} \rvert = n_{s}$ ($\lvert P_{s} \rvert$ being the cardinality of $P_{s}$ ) and \item minimizes the difference of the corresponding surface $S_{s}$ (defined by $P_{s}$ ) and the original surface $S$ (defined by $P$)\end{inparaenum}. Although a number of previous approaches has been proposed for simplification, most of them \begin{inparaenum}[(i)]\item do not focus on point-based 3{D} models, \item do not consider efficiency, quality and generality together and \item do not consider the distribution of the output samples\end{inparaenum}. In this paper, we propose an Adaptive Simplification Method ({ASM}) which is an efficient technique for simplifying point-based complex 3{D} models. Specifically, the {ASM} consists of three parts: a hierarchical cluster tree structure, the specification of simplification criteria and an optimization process. The {ASM} achieves a low computation time by clustering the points locally based on the preservation of geometric characteristics. We analyze the performance of the {ASM} and show that it outperforms most of the current state-of-the-art methods in terms of efficiency, quality and generality.},
+issn={0010-4485},
+doi={10.1016/j.cad.2010.03.003},
+opturl={http://linkinghub.elsevier.com/retrieve/pii/S0010448510000588}
 }
 
@@ -214,5 +263,5 @@
 address={Bellingham, WA},
 volume={5426},
-issn={0277-786X},
+issn={0277-786X}
 }
 @proceedings{Forstner:2006,
@@ -234,5 +283,5 @@
 organization={Sandia National Laboratories},
 publisher={Sandia National Laboratories},
-address={Albuquerque, NM, USA},
+address={Albuquerque, NM, USA}
 }
 @proceedings{Lichti:2011,
@@ -246,5 +295,5 @@
 address={G\"{o}ttingen, Germany},
 series={ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
-volume={XXXVIII-5/W12},
+volume={XXXVIII-5/W12}
 }
 @proceedings{Magnor:2009,
@@ -255,5 +304,5 @@
 publisher={Institut f\"{u}r Simulation und Graphik},
 address={Magdeburg, Germany},
-isbn={978-3-9804874-8-1},
+isbn={978-3-9804874-8-1}
 }
 @proceedings{Shortis:2012,
@@ -266,5 +315,5 @@
 publisher={Copernicus Publications},
 address={G\"{o}ttingen, Germany},
-issn={2194-9050},
+issn={2194-9050}
 }
 @proceedings{SODA:1993,
@@ -276,2 +325,14 @@
 isbn={9780898713138}
 }
+
+@proceedings{VIIP:2003,
+editor={M. H. Hamza},
+organization={International Association for Science and Technology for Development},
+title={Proceedings of the 3rd IASTED International Conference on Visualization, Imaging, and Image Processing, September 8-10, 2003, Benalmadena, Spain},
+volume={2},
+year={2003},
+publisher={ACTA Press},
+address={Anaheim; Calgary},
+issn={1482-7921},
+isbn={0-88986-382-2}
+}
Index: sparse_sample/damkjer_sparse_sample.tex
===================================================================
--- sparse_sample/damkjer_sparse_sample.tex	(revision 3)
+++ sparse_sample/damkjer_sparse_sample.tex	(revision 4)
@@ -16,4 +16,7 @@
 %%    2013-JUN-02  K. Damkjer
 %%       General edits. Expanded Structure Features section.
+%%    2013-NOV-01  K. Damkjer
+%%       Marked up sections in work. Added Prior work section for discussion on
+%%       similar approaches and foundational material. Cleaned up equations.
 %%=============================================================================
 
@@ -27,12 +30,15 @@
 %% Including AMS packages for equation formatting.
 %%***
+\usepackage{color}
 \usepackage{amsmath}
 \usepackage{amsfonts}
 \usepackage{amssymb}
 \usepackage{bm}
+\usepackage{pgfplots}
 \usepackage{graphicx}
 \usepackage{latexsym}
 \usepackage{xcolor}
 \usepackage[pagebackref,colorlinks]{hyperref}
+\usepackage{paralist}
 
 %%***
@@ -41,4 +47,5 @@
 \definecolor{darkgreen}{rgb}{0 0.5 0}
 \definecolor{darkblue}{rgb}{0 0 0.7}
+\definecolor{brickred}{RGB}{182 50 28}
 
 \hypersetup{%
@@ -61,4 +68,6 @@
 \fi}
 
+\pgfplotsset{compat=1.5}
+
 %%***
 %% Current working title
@@ -87,5 +96,7 @@
 \begin{abstract}
 %TODO Add Abstract.
+{\color{brickred}
 To do: Add abstract.
+}
 \end{abstract}
 
@@ -94,5 +105,5 @@
 Mapping and surveying Light Detection and Ranging (LiDAR) systems produce a staggering amount of information-rich true three-dimensional (3D) data. Modern systems sample several thousand to over a million points per second resulting in several million to billions of point samples per product to be stored, processed, analyzed and distributed.\cite{Parrish:2012,Smith:2012,Young:2012}
 
-Managing such large data sets through a production pipeline consisting of collection, processing, analysis, storage and dissemination presents a host of challenges to content providers. Limiting data sets to small areas of interest (AOIs) can mitigate data management issues inherent in processing and storing individual LiDAR point clouds. However, user demands for simultaneous wide area coverage and precise scene content keep data sizing considerations at the forefront of content provider concerns. Further, AOIs do not address the ultimate storage constraints imposed on processing and archival systems that must maintain large libraries of raw and processed data sets.
+Managing such large data sets through a production pipeline consisting of collection, processing, analysis, storage and dissemination presents a host of challenges to content providers. Production strategies, for example limiting data sets to small areas of interest (AOIs), can mitigate data management issues inherent in processing and storing individual LiDAR point clouds. However, user demands for simultaneous wide area coverage and high-fidelity scene content keep data sizing considerations at the forefront of content provider concerns. Further, production strategies do not address the ultimate storage constraints imposed on processing and archival systems that must maintain large libraries of raw and processed data sets.
 
 Raw LiDAR data taken directly from collection systems is usually in a compact, vendor-specific, proprietary binary format that must be converted into a common public format to facilitate analysis and information exchange. The conversion to an exchange format has traditionally inflated the size of the raw LiDAR data holdings. A significant cause of inflation is the conversion of 1D range data into 3D spatial data. Further inflation and loss of precision may result depending on the data types and structures used to represent the 3D point clouds.
@@ -108,4 +119,9 @@
 %TODO Provide overview of paper structure.
 
+\section{Prior Work}
+{\color{brickred}
+Cover point cloud simplification approaches by Dyn, Moenning, and Yu. Discuss commonalities like avoiding intermediate mesh representations and segmentation, identification of salient sections, spatial-only consideration, \textit{etc.} Discuss differences like adaptability in the case of Yu, progressive simplification in the case of Moenning, \textit{etc.}
+} \cite{Dyn:2008,Moenning:2003,Yu:2010}
+
 \section{Local Statistic Attribution}
 
@@ -122,8 +138,8 @@
 \begin{align}
 \bm{x} \in \mathbb{R}^{n} \Leftrightarrow
-\bm{x} = \left(\begin{array}{c}x_{1}\\
-             x_{2}\\
-             \vdots\\
-             x_{n}\end{array}\right), x_{1\ldots n} \in \mathbb{R}
+\bm{x} = \begin{pmatrix}x_{1}\\
+                        x_{2}\\
+                        \vdots\\
+                        x_{n}\end{pmatrix}, x_{1\ldots n} \in \mathbb{R}
 \end{align}
 
@@ -142,5 +158,5 @@
 Our analysis is performed on neighborhoods of points from the point cloud  about the query points, $\mathcal{V}_{\bm{q}}\subseteq\mathcal{D}$. The neighborhoods are defined by an $m$-dimensional distance metric $d$ between the query points, $\bm{q}\in\mathcal{Q}$, and the data points, $\bm{x}\in\mathcal{D}$. Note that there is no restriction that the query points, $\bm{q}$, must be a member of the point cloud itself. Though, in practice, we typically perform our analysis treating each  $\bm{x}\in\mathcal{D}$ as a query location (\textit{i.e.}, $\mathcal{Q}=\mathcal{D}$). This approach requires a reasonable all nearest-neighbor search.
 
-We investigated two neighborhood definitions that each present merits. The $k$-nearest neighborhood, $\mathcal{V}^{k}_{\bm{q}}$, consists of the $k$ closest points to $\bm{q}$ in $\mathcal{D}$ whereas the fixed-radius neighborhood, $\mathcal{V}^{r}_{\bm{q}}$, consists of all points within the ball of radius $r$ centered at $\bm{q}$ in $\mathcal{D}$.
+We investigated two neighborhood definitions that each present merits. The $k$-nearest neighborhood, $\mathcal{V}^{k}_{\bm{q}}$, consists of the $k$ closest points to $\bm{q}$ in $\mathcal{D}$ whereas the fixed-radius neighborhood, $\mathcal{V}^{r}_{\bm{q}}$, consists of all points in $\mathcal{D}$ within the ball of radius $r$ centered at $\bm{q}$.
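+
+%%***
+%% NOTE: Illustrative sketch only; not part of the paper and to be removed from
+%%       any final draft. A minimal Python/SciPy example of the two neighborhood
+%%       queries described above, assuming the point cloud D is an (N x n) array
+%%       and the query set Q is handled the same way. The names k, r, and
+%%       neighborhoods are placeholders, and SciPy's cKDTree is only one
+%%       reasonable way to realize the required all nearest-neighbor search.
+%%
+%%   from scipy.spatial import cKDTree
+%%
+%%   def neighborhoods(D, Q, k=16, r=1.0):
+%%       """Return k-nearest and fixed-radius neighborhoods for each query point."""
+%%       tree = cKDTree(D)                       # spatial index over the point cloud D
+%%       _, knn_idx = tree.query(Q, k=k)         # V^k_q: indices of the k closest points
+%%       ball_idx = tree.query_ball_point(Q, r)  # V^r_q: indices within the ball of radius r
+%%       return knn_idx, ball_idx
+%%***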
 
 \begin{equation}
@@ -165,7 +181,12 @@
 \subsection{Density}
 
-One of the simplest local attributes that can be defined for the neighborhood of points is an estimate of the concentration of observations in the neighborhood. This attribute is especially useful for visualizing Geiger-mode LiDAR data since the native point data contains no intensity information. The point density with respect to spatial dimensions is proportional to the reflectivity of the scene at the imaging wavelength and acts as a suitable estimate of "intensity" for many applications.
-
-The definition of density can be generalized to arbitrarily large dimensional spaces, but is most informative in low-dimensional spaces. This is due to the fact that "volume" tends to zero at high dimensions.
+One of the simplest local attributes that can be defined for the neighborhood of points is an estimate of the concentration of observations in the neighborhood, or \textit{density}. This attribute is especially useful for visualizing Geiger-mode LiDAR data since the native point data contains no intensity information. The point density with respect to spatial dimensions is proportional to the reflectivity of the scene at the imaging wavelength and acts as a suitable estimate of \textit{intensity} for many applications.
+
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong.png}
+\caption{Density} \label{fig:Density}
+\end{figure}
+
+The definition of \textit{density} can be generalized to spaces of arbitrarily high dimension, but it is most informative in low-dimensional spaces because the volume of a fixed-radius ball tends to zero as the dimension grows.
 
 The radius used to define the neighborhood volume is based on the maximal distance between the query point and the neighborhood points, $R_{\bm{q}}$. For a fixed-radius neighborhood, this value is simply the query radius, $r$.
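+
+%%***
+%% NOTE: Illustrative sketch only; not part of the paper and to be removed from
+%%       any final draft. One plausible form of the density estimate discussed
+%%       above: the neighborhood cardinality divided by the volume of the n-ball
+%%       of radius R_q. The exact normalization used in the paper may differ;
+%%       the function and argument names are placeholders.
+%%
+%%   import math
+%%
+%%   def local_density(num_neighbors, R_q, n=3):
+%%       """Points per unit n-dimensional volume of the ball of radius R_q."""
+%%       ball_volume = (math.pi ** (n / 2.0) / math.gamma(n / 2.0 + 1.0)) * R_q ** n
+%%       return num_neighbors / ball_volume
+%%***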
@@ -191,4 +212,7 @@
 \subsection{Principal Component Analysis}
 
+{\color{brickred}
+\textbf{This section documents mathematics and approaches that should be well understood by the community at large. I primarily captured this material for complete notes on my process. I fully expect to remove it from any final draft of the paper.}
+
 The remainder of the attributes explored in this paper are derived from eigenanalysis of the query neighborhoods. Eigenanalysis is performed via eigenvalue decomposition of the empirical covarance matrix for each query neighborhood. For simplicity, we will refer to the cardinality of the neighborhood as $k$, even when considering the fixed-radius neighborhood definition.
 
@@ -197,13 +221,11 @@
 \end{align}
 
-The process begins by organizing the point data for each query neighborhood into a data matrix.
-
-\begin{align}
-\bm{X}=\left[\begin{array}{ccc}\bm{x}_{1}&\cdots&\bm{x}_{k}\end{array}\right],\forall\bm{x}\in\mathcal{V}_{\bm{q}}
-\end{align}
-
-
-
-The empirical covariance matrix for the neighborhood is computed by re-centering the data matrix about its mean and computing the outer product of the resulting matrix with itself. This can be represented succinctly as follows:
+The process begins by organizing the point data for each query neighborhood into a data matrix, $\bm{X}$.
+
+\begin{align}
+\bm{X}=\begin{bmatrix}\bm{x}_{1}&\cdots&\bm{x}_{k}\end{bmatrix},\forall\bm{x}\in\mathcal{V}_{\bm{q}}
+\end{align}
+
+The empirical covariance matrix for the neighborhood, $\bm{C}$, is computed by re-centering the data matrix about its mean and computing the outer product of the resulting matrix with itself. This can be represented succinctly as follows:
 
 \begin{align}
@@ -211,5 +233,5 @@
 \end{align}
 
-However, data volume generally prohibits computing the empirical covariance matrix in this manner. Instead, it is more computationally efficient to compute the mean of the data matrix and the matrix of mean deviations directly.
+However, data volume generally prohibits computing the empirical covariance matrix in this manner. Instead, it is more computationally efficient to compute the mean of the data matrix and the matrix of mean deviations directly, resulting in the following equivalent formulation of the empirical covariance matrix.
 
 \begin{align}
@@ -218,10 +240,10 @@
 
 \begin{align}
-\bm{M}=\left[\begin{array}{cccc}
+\bm{M}=\begin{bmatrix}
 \left(\bm{x}_{1}-\bar{\bm{x}}\right)&
 \left(\bm{x}_{2}-\bar{\bm{x}}\right)&
 \cdots&
 \left(\bm{x}_{k}-\bar{\bm{x}}\right)
-\end{array}\right]
+\end{bmatrix}
 \end{align}
 
@@ -239,16 +261,16 @@
 
 \begin{align}
-\bm{U}=\left[\begin{array}{cccc}
+\bm{U}=\begin{bmatrix}
 \bm{e}_{1}&\bm{e}_{2}&\cdots&\bm{e}_{n}
-\end{array}\right]
-\end{align}
-
-\begin{align}
-{\bm{\Lambda}}=\left[\begin{array}{cccc}
-\lambda_{1}&0&\cdots&0\\
-0&\lambda_{2}&\cdots&0\\
-\vdots&\vdots&\ddots&\vdots\\
-0&0&\cdots&\lambda_{n}
-\end{array}\right]
+\end{bmatrix}
+\end{align}
+
+\begin{align}%
+{\bm{\Lambda}}=\begin{bmatrix}
+\lambda_{1} & 0          & \cdots & 0\\
+0           & \lambda_{2}& \cdots & 0\\
+\vdots      & \vdots     & \ddots & \vdots\\
+0           & 0          & \cdots & \lambda_{n}
+\end{bmatrix}
 \end{align}
 
@@ -264,11 +286,11 @@
 
 \begin{align}
-\bm{\lambda}=\left(\begin{array}{c}\bm{\Lambda}_{1,1}\\
+\bm{\lambda}=\begin{pmatrix}\bm{\Lambda}_{1,1}\\
              \bm{\Lambda}_{2,2}\\
              \vdots\\
-             \bm{\Lambda}_{n,n}\end{array}\right)=\left(\begin{array}{c}\lambda_{1}\\
+             \bm{\Lambda}_{n,n}\end{pmatrix}=\begin{pmatrix}\lambda_{1}\\
              \lambda_{2}\\
              \vdots\\
-             \lambda_{n}\end{array}\right)
+             \lambda_{n}\end{pmatrix}
 \end{align}
 
@@ -276,30 +298,34 @@
 
 \begin{align}
-\bm{\sigma}=\left(\begin{array}{c}\bm{\Sigma}_{1,1}\\
+\bm{\sigma}=\begin{pmatrix}\bm{\Sigma}_{1,1}\\
              \bm{\Sigma}_{2,2}\\
              \vdots\\
-             \bm{\Sigma}_{n,n}\end{array}\right)=\left(\begin{array}{c}\sigma_{1}\\
+             \bm{\Sigma}_{n,n}\end{pmatrix}=\begin{pmatrix}\sigma_{1}\\
              \sigma_{2}\\
              \vdots\\
-             \sigma_{n}\end{array}\right)=\left(\begin{array}{c}\sqrt{\lambda_{1}}\\
+             \sigma_{n}\end{pmatrix}=\begin{pmatrix}\sqrt{\lambda_{1}}\\
              \sqrt{\lambda_{2}}\\
              \vdots\\
-             \sqrt{\lambda_{n}}\end{array}\right)
+             \sqrt{\lambda_{n}}\end{pmatrix}
 \end{align}
 
 These two feature vectors provide the basis for all subsequent analysis in this paper.
 
+}
+
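+%%***
+%% NOTE: Illustrative sketch only; not part of the paper and to be removed from
+%%       any final draft. A minimal NumPy rendering of the eigenanalysis above:
+%%       form the matrix of mean deviations M, the empirical covariance C, and
+%%       the eigenvalue and singular-value feature vectors. The 1/(k-1)
+%%       normalization is an assumption and does not affect the ratio-based
+%%       features used later; the function name is a placeholder.
+%%
+%%   import numpy as np
+%%
+%%   def neighborhood_eigenanalysis(X):
+%%       """X is the n x k data matrix of one query neighborhood (columns are points)."""
+%%       k = X.shape[1]
+%%       M = X - X.mean(axis=1, keepdims=True)     # matrix of mean deviations
+%%       C = (M @ M.T) / (k - 1)                   # empirical covariance (normalization assumed)
+%%       lam, U = np.linalg.eigh(C)                # eigh returns eigenvalues in ascending order
+%%       lam, U = lam[::-1], U[:, ::-1]            # reorder so lambda_1 >= ... >= lambda_n
+%%       sigma = np.sqrt(np.clip(lam, 0.0, None))  # sigma_i = sqrt(lambda_i)
+%%       return lam, sigma, U
+%%***
+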
 \subsection{Structure Features}
 
 The Defense Advanced Research Projects Agency (DARPA) Exploitation of 3-D Data (E3D) Program  identified several structure tensor features to facilitate automated target recognition. West \textit{et al.} present six features that proved most applicable to their work in segmentation and object recognition: omnivariance, anisotropy, linearity, planarity, sphericity, and entropy.\cite{West:2004} They define each of the features with respect to the three spatial dimensions since their analysis was limited to structure tensors of points and normals. However, each of these features can be generalized to higher order dimensions.
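+
+%%***
+%% NOTE: Illustrative sketch only; not part of the paper and to be removed from
+%%       any final draft. Commonly cited three-dimensional forms of the
+%%       eigenvalue features named above, computed from
+%%       lambda_1 >= lambda_2 >= lambda_3 > 0. Normalizations vary between
+%%       authors, and several of these features are generalized below, so treat
+%%       this as a reference sketch rather than the paper's definitions.
+%%
+%%   import numpy as np
+%%
+%%   def eigen_features(lam):
+%%       """lam: the three covariance eigenvalues, sorted descending, assumed positive."""
+%%       l1, l2, l3 = lam
+%%       p = np.asarray(lam) / np.sum(lam)         # normalized eigenvalues for the entropy term
+%%       return {
+%%           "linearity":    (l1 - l2) / l1,
+%%           "planarity":    (l2 - l3) / l1,
+%%           "sphericity":   l3 / l1,
+%%           "anisotropy":   (l1 - l3) / l1,
+%%           "omnivariance": (l1 * l2 * l3) ** (1.0 / 3.0),
+%%           "entropy":      float(-np.sum(p * np.log(p))),
+%%       }
+%%***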
 
-Linearity, planarity, and sphericity are closely related features that each represent the concept of dimensional participation. That is, how much does the local neighborhood "spread" into the dimension under consideration. We generalize this concept as "dimensionality".
-
-The highest order dimensionality considered by West \textit{et al.} is 3-dimensionality, which they referred to as "sphericity". We feel that it is worth considering the highest order dimensionality for the data set as a unique feature as well, and generalize the concept to "isotropy". The complement of this value, "anisotropy", is thus easily understood and maintains a definition consistent with West's.
-
-In addition to the 
+Linearity, planarity, and sphericity are closely related features that each represent the concept of the neighborhood's participation in successively higher dimensions. That is, the values attempt to capture the degree to which the local neighborhood spreads into each additional dimension. We generalize this concept as \textit{dimensionality}.
+
+The highest order dimensionality considered by West \textit{et al.} is 3-dimensionality, which they referred to as ``sphericity''. We feel that it is worth considering the highest order dimensionality for the data set as a unique feature as well, and generalize the concept to ``isotropy''. The complement of this value, ``anisotropy'', is thus easily understood and maintains a definition consistent with West's.
+
+In addition to the features identified by the DARPA E3D Program, we also consider the additional attributes described in the following subsections.
 
 \subsubsection{Isotropy}
+{\color{brickred}
 Further discussion of information provided by this classifier.
+}
 
 \begin{align}
@@ -307,6 +333,13 @@
 \end{align}
 
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong_iso.png}
+\caption{Isotropy} \label{fig:Isotropy}
+\end{figure}
+
 \subsubsection{Anisotropy}
+{\color{brickred}
 Further discussion of information provided by this classifier. Estimate of degree of structure present in local area feature. Complement of isotropy.
+}
 
 \begin{align}
@@ -315,6 +348,18 @@
 \end{align}
 
+\begin{align}
+d\left(\bm{p},\bm{q}\right)=
+\sqrt{\sum_{i=1}^{n}{\left(p_{i}-q_{i}\right)^{2}}}
+\end{align}
+
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong_ani.png}
+\caption{Anisotropy} \label{fig:Anisotropy}
+\end{figure}
+
 \subsubsection{Dimensionality}
+{\color{brickred}
 Further discussion of information provided by this classifier. Estimates the embedded dimension of local area feature.
+}
 
 \begin{align}
@@ -325,4 +370,9 @@
 \end{align}
 
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong_dim.png}
+\caption{Dimensionality} \label{fig:Dimensionality}
+\end{figure}
+
 \subsubsection{Low-Dimensional Embedding}
 
@@ -331,6 +381,13 @@
 \end{align}
 
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong_emb.png}
+\caption{Low-Dimensional Embedding} \label{fig:LowDimensionalEmbedding}
+\end{figure}
+
 \subsubsection{Entropy}
+{\color{brickred}
 Further discussion of information provided by this classifier. Estimates the number of dimensions needed to encode information content of local area feature.
+}
 
 \begin{align}
@@ -338,6 +395,13 @@
 \end{align}
 
+\begin{figure}[h]
+\includegraphics[width=\linewidth]{images/Armstrong_ent.png}
+\caption{Entropy} \label{fig:Entropy}
+\end{figure}
+
 \subsubsection{Omnivariance}
+{\color{brickred}
 Further discussion of information provided by this classifier. Geometric mean of structure-tensor eigenvalues. Estimates data variance across dimensions of local area feature.
+}
 
 \begin{align}
@@ -346,7 +410,24 @@
 \end{align}
 
+%%***
+%% FIXME: LaTeX figures experiment... captured here for future reference.
+%%        Remove from any final draft of the paper.
+%%***
+%\begin{figure}[h]
+%\begin{tikzpicture}
+%\begin{axis}
+%\addplot3[scatter,only marks,mark size=0.01pt] table {
+%x y z
+%0 0 0
+%};
+%\end{axis}
+%\end{tikzpicture}
+%\caption{Omnivariance} \label{fig:Omnivariance}
+%\end{figure}
+
 \subsubsection{Fractional Anisotropy}
-
-Further discussion of information provided by this classifier. 
+{\color{brickred}
+Further discussion of information provided by this classifier.
+}
 
 \begin{align}
@@ -357,6 +438,24 @@
 \end{align}
 
+%%***
+%% FIXME: LaTeX figures experiment... captured here for future reference.
+%%        Remove from any final draft of the paper.
+%%***
+%\begin{figure}[h]
+%\begin{tikzpicture}
+%\begin{axis}
+%\addplot3[scatter,only marks,mark size=0.01pt] table {
+%x y z
+%0 0 0
+%};
+%\end{axis}
+%\end{tikzpicture}
+%\caption{Fractional Anisotropy} \label{fig:Fractional Anisotropy}
+%\end{figure}
+
 \subsection{Normal Estimation}
+{\color{brickred}
 Perform normal estimation by planar fit to point data. Resolve ambiguity using sensor location.
+}
 
 Normals\cite{Dey:2005}
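+
+%%***
+%% NOTE: Illustrative sketch only; not part of the paper and to be removed from
+%%       any final draft. A minimal sketch of the planar-fit normal described
+%%       above: the eigenvector of the smallest covariance eigenvalue, with the
+%%       sign ambiguity resolved toward an assumed per-point sensor position.
+%%       The function and argument names are placeholders.
+%%
+%%   import numpy as np
+%%
+%%   def estimate_normal(C, query_point, sensor_position):
+%%       """C: 3x3 neighborhood covariance; returns a unit normal oriented toward the sensor."""
+%%       lam, U = np.linalg.eigh(C)   # ascending eigenvalues
+%%       normal = U[:, 0]             # eigenvector of the smallest eigenvalue (planar-fit normal)
+%%       if np.dot(normal, np.asarray(sensor_position) - np.asarray(query_point)) < 0.0:
+%%           normal = -normal         # flip so the normal points toward the sensor
+%%       return normal
+%%***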
@@ -367,6 +466,7 @@
 
 \subsection{Curvature Estimation}
-
+{\color{brickred}
 Perform curvature estimation by planar fit to normal data.
+}
 
 Curvature\cite{Gumhold:2001}
@@ -375,28 +475,42 @@
 
 \section{Salience Weighting}
-
+{\color{brickred}
 Discuss combination of feature classification into salience weights.
+}
 
 \section{Tiered Filtering}
-
+{\color{brickred}
 Discuss tiered filtering approach.
+}
 
 \subsection{Noise Elimination}
+{\color{brickred}
 Discuss first pass noise filtering to improve salience estimates.
+}
 
 \subsection{Point Selection}
+{\color{brickred}
 Discuss point selection approach to target variable amounts of data reduction or desired file size.
+}
 
 \section{Results and Discussion}
-Discuss results against various data sets. Discuss quantitative and qualitative measures of information content preserved versus data reduction achieved.
+{\color{brickred}
+Discuss results against various data sets. Discuss quantitative and qualitative measures of information content preserved versus data reduction achieved. Present comparisons to the Dyn, Moenning, and Yu approaches.
+}
 
 \subsection{Noise Sensitivity}
+{\color{brickred}
 Discuss importance of noise filtering in salience processing phase.
+}
 
 \subsection{Alignment Sensitivity}
+{\color{brickred}
 Discuss importance of alignment when processing merged data sets.
+}
 
 \subsection{Neighborhood Selection}
+{\color{brickred}
+Discuss the trade-offs of fixed-radius versus $k$-nearest versus dynamic neighborhood sizing.
+}
 \cite{Gressin:2012}
 \cite{Sankaranarayanan:2007}
@@ -404,11 +518,13 @@
 
 \section{Conclusion}
-Conclusions section.
-
+{\color{brickred}Conclusions section.
 Discuss future work. Hilbert space filling curve ordering. Combination with LAZ.  Point connectivity attribution to facilitate meshing. Incorporation of dynamic neighborhood sizing. \textit{etc.}
+}
 \cite{Fattal:2002}
 
 \section*{Acknowledgement}
+{\color{brickred}
 Acknowledgements, if any, will appear here.
+}
 
 \bibliographystyle{acm}
