Index: sparse_sample_icip2014/IEEEbib.bst
===================================================================
--- sparse_sample_icip2014/IEEEbib.bst	(revision 10)
+++ sparse_sample_icip2014/IEEEbib.bst	(revision 10)
@@ -0,0 +1,1021 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%  IEEE.bst  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Bibliography Style file for articles according to IEEE instructions
+% balemi@aut.ee.ethz.ch     <22-JUN-93>
+% modified from unsrt.bst. Contributions by Richard H. Roy
+
+ENTRY
+  { address
+    author
+    booktitle
+    chapter
+    edition
+    editor
+    howpublished
+    institution
+    journal
+    key
+    month
+    note
+    number
+    organization
+    pages
+    publisher
+    school
+    series
+    title
+    type
+    volume
+    year
+  }
+  {}
+  { label }
+
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+  #1 'mid.sentence :=
+  #2 'after.sentence :=
+  #3 'after.block :=
+}
+
+STRINGS { s t }
+
+FUNCTION {output.nonnull}
+{ 's :=
+  output.state mid.sentence =
+    { ", " * write$ }
+    { output.state after.block =
+% next line commented out by rhr and changed to write comma
+%	{ add.period$ write$
+	{ ", " * write$ 
+	  newline$
+	  "\newblock " write$
+	}
+	{ output.state before.all =
+	    'write$
+	    { add.period$ " " * write$ }
+	  if$
+	}
+      if$
+      mid.sentence 'output.state :=
+    }
+  if$
+  s
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.check}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+  "\bibitem{" write$
+  cite$ write$
+  "}" write$
+  newline$
+  ""
+  before.all 'output.state :=
+}
+
+FUNCTION {fin.entry}
+{ add.period$
+  write$
+  newline$
+}
+
+% 5/24/89 rhr
+%  modified fin.entry function - prints note field after body of entry  
+%FUNCTION {fin.entry}
+%{ add.period$
+%  note empty$
+%    'write$
+%    { "\par\bgroup\parindent=0em  " * annote * "\par\egroup " * write$
+%    }
+%  if$
+%  newline$
+%}
+
+FUNCTION {new.block}
+{ output.state before.all =
+    'skip$
+    { after.block 'output.state := }
+  if$
+}
+
+% new block without terminating last block with a comma
+FUNCTION {new.ncblock}
+{
+  write$ 
+  newline$
+  "\newblock "
+  before.all 'output.state :=
+}
+
+FUNCTION {new.nccont}
+{
+  write$ 
+  " "
+  before.all 'output.state :=
+}
+
+FUNCTION {new.sentence}
+{ output.state after.block =
+    'skip$
+    { output.state before.all =
+	'skip$
+	{ after.sentence 'output.state := }
+      if$
+    }
+  if$
+}
+
+FUNCTION {not}
+{   { #0 }
+    { #1 }
+  if$
+}
+
+FUNCTION {and}
+{   'skip$
+    { pop$ #0 }
+  if$
+}
+
+FUNCTION {or}
+{   { pop$ #1 }
+    'skip$
+  if$
+}
+
+FUNCTION {new.block.checka}
+{ empty$
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.block.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.sentence.checka}
+{ empty$
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {new.sentence.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+    { pop$ "" }
+    'skip$
+  if$
+}
+
+FUNCTION {emphasize}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "{\em " swap$ * "}" * }
+  if$
+}
+
+FUNCTION {boldface}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "{\bf " swap$ * "}" * }
+  if$
+}
+
+%FUNCTION {boldface}
+%{ 's swap$ :=
+%  s "" =
+%    { "" }
+%    { "{\bf " s * "}" * }
+%  if$
+%}
+%
+INTEGERS { nameptr namesleft numnames }
+
+FUNCTION {format.names}
+{ 's :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't :=
+      nameptr #1 >
+	{ namesleft #1 >
+	    { ", " * t * }
+	    { numnames #2 >
+		{ "," * }
+		'skip$
+	      if$
+	      t "others" =
+		{ " et~al." * }
+		{ " and " * t * }
+	      if$
+	    }
+	  if$
+	}
+	't
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {format.authors}
+{ author empty$
+    { "" }
+    { author format.names }
+  if$
+}
+
+FUNCTION {format.editors}
+{ editor empty$
+    { "" }
+    { editor format.names
+      editor num.names$ #1 >
+	{ ", Eds." * }
+	{ ", Ed." * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.title}
+{ title empty$
+    { "" }
+    { "``" title "t" change.case$ * }
+  if$
+}
+
+FUNCTION {n.dashify}
+{ 't :=
+  ""
+    { t empty$ not }
+    { t #1 #1 substring$ "-" =
+	{ t #1 #2 substring$ "--" = not
+	    { "--" *
+	      t #2 global.max$ substring$ 't :=
+	    }
+	    {   { t #1 #1 substring$ "-" = }
+		{ "-" *
+		  t #2 global.max$ substring$ 't :=
+		}
+	      while$
+	    }
+	  if$
+	}
+	{ t #1 #1 substring$ *
+	  t #2 global.max$ substring$ 't :=
+	}
+      if$
+    }
+  while$
+}
+
+FUNCTION {format.date}
+{ year empty$
+    { month empty$
+	{ "" }
+	{ "there's a month but no year in " cite$ * warning$
+	  month
+	}
+      if$
+    }
+    { month empty$
+	'year
+	{ month " " * year * }
+      if$
+    }
+  if$
+}
+
+% FUNCTION {format.date}
+% { year empty$
+% 	'year 
+% 	{ " "  year * }
+%   if$
+% }
+
+FUNCTION {format.btitle}
+{ title emphasize
+}
+
+FUNCTION {tie.or.space.connect}
+{ duplicate$ text.length$ #3 <
+    { "~" }
+    { " " }
+  if$
+  swap$ * *
+}
+
+FUNCTION {either.or.check}
+{ empty$
+    'pop$
+    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+  if$
+}
+
+FUNCTION {format.bvolume}
+{ volume empty$
+    { "" }
+    { "vol." volume tie.or.space.connect
+      series empty$
+	'skip$
+	{ " of " * series emphasize * }
+      if$
+      "volume and number" number either.or.check
+    }
+  if$
+}
+
+FUNCTION {format.number.series}
+{ volume empty$
+    { number empty$
+	{ series field.or.null }
+	{ output.state mid.sentence =
+	    { "number" }
+	    { "Number" }
+	  if$
+	  number tie.or.space.connect
+	  series empty$
+	    { "there's a number but no series in " cite$ * warning$ }
+	    { " in " * series * }
+	  if$
+	}
+      if$
+    }
+    { "" }
+  if$
+}
+
+FUNCTION {format.edition}
+{ edition empty$
+    { "" }
+    { output.state mid.sentence =
+	{ edition "l" change.case$ " edition" * }
+	{ edition "t" change.case$ " edition" * }
+      if$
+    }
+  if$
+}
+
+INTEGERS { multiresult }
+
+FUNCTION {multi.page.check}
+{ 't :=
+  #0 'multiresult :=
+    { multiresult not
+      t empty$ not
+      and
+    }
+    { t #1 #1 substring$
+      duplicate$ "-" =
+      swap$ duplicate$ "," =
+      swap$ "+" =
+      or or
+	{ #1 'multiresult := }
+	{ t #2 global.max$ substring$ 't := }
+      if$
+    }
+  while$
+  multiresult
+}
+
+FUNCTION {format.pages}
+{ pages empty$
+    { "" }
+    { pages multi.page.check
+	{ "pp." pages n.dashify tie.or.space.connect }
+	{ "p." pages tie.or.space.connect }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.vol.num.pages}
+{ 
+volume empty$
+   {"" }
+   {"vol. " volume *}
+if$
+number empty$
+   'skip$
+   {", no. " number * *}
+if$
+pages empty$
+   'skip$
+    { duplicate$ empty$
+	{ pop$ format.pages }
+	{ ", pp. " * pages n.dashify * }
+      if$
+    }
+if$
+}
+
+%FUNCTION {format.vol.num.pages}
+%%boldface added 3/17/87 rhr
+%{ volume field.or.null boldface
+%  number empty$
+%    'skip$
+%    { "(" number * ")" * *
+%      volume empty$
+%	{ "there's a number but no volume in " cite$ * warning$ }
+%	'skip$
+%      if$
+%    }
+%  if$
+%  pages empty$
+%    'skip$
+%    { duplicate$ empty$
+%	{ pop$ format.pages }
+%	{ ":" * pages n.dashify * }
+%      if$
+%    }
+%  if$
+%}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+    'format.pages
+    { type empty$
+	{ "chapter" }
+	{ type "l" change.case$ }
+      if$
+      chapter tie.or.space.connect
+      pages empty$
+	'skip$
+	{ ", " * format.pages * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.in.ed.booktitle}
+{ booktitle empty$
+    { "" }
+    { editor empty$
+	{ "in " booktitle emphasize * }
+	{ "in "  booktitle emphasize *  ", " * format.editors * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {empty.misc.check}
+{ author empty$ title empty$ howpublished empty$
+  month empty$ year empty$ note empty$
+  and and and and and
+    { "all relevant fields are empty in " cite$ * warning$ }
+    'skip$
+  if$
+}
+
+FUNCTION {format.thesis.type}
+{ type empty$
+    'skip$
+    { pop$
+      type "t" change.case$
+    }
+  if$
+}
+
+FUNCTION {format.tr.number}
+{ type empty$
+    { "Tech. {R}ep." }
+    'type
+  if$
+  number empty$
+    { "t" change.case$ }
+    { number tie.or.space.connect }
+  if$
+}
+
+FUNCTION {format.article.crossref}
+{ key empty$
+    { journal empty$
+	{ "need key or journal for " cite$ * " to crossref " * crossref *
+	  warning$
+	  ""
+	}
+	{ "In {\em " journal * "\/}" * }
+      if$
+    }
+    { "In " key * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.crossref.editor}
+{ editor #1 "{vv~}{ll}" format.name$
+  editor num.names$ duplicate$
+  #2 >
+    { pop$ " et~al." * }
+    { #2 <
+	'skip$
+	{ editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+	    { " et~al." * }
+	    { " and " * editor #2 "{vv~}{ll}" format.name$ * }
+	  if$
+	}
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.book.crossref}
+{ volume empty$
+    { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+      "In "
+    }
+    { "vol." volume tie.or.space.connect
+      " of " *
+    }
+  if$
+  editor empty$
+  editor field.or.null author field.or.null =
+  or
+    { key empty$
+	{ series empty$
+	    { "need editor, key, or series for " cite$ * " to crossref " *
+	      crossref * warning$
+	      "" *
+	    }
+	    { "{\em " * series * "\/}" * }
+	  if$
+	}
+	{ key * }
+      if$
+    }
+    { format.crossref.editor * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.incoll.inproc.crossref}
+{ editor empty$
+  editor field.or.null author field.or.null =
+  or
+    { key empty$
+	{ booktitle empty$
+	    { "need editor, key, or booktitle for " cite$ * " to crossref " *
+	      crossref * warning$
+	      ""
+	    }
+	    { "In {\em " booktitle * "\/}" * }
+	  if$
+	}
+	{ "In " key * }
+      if$
+    }
+    { "In " format.crossref.editor * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {article}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  crossref missing$
+    { journal emphasize "journal" output.check
+      format.vol.num.pages output
+      format.date "year" output.check
+    }
+    { format.article.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {book}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check }
+    { format.authors output.nonnull
+      crossref missing$
+	{ "author and editor" editor either.or.check }
+	'skip$
+      if$
+    }
+  if$
+  new.block
+  format.btitle "title" output.check
+  crossref missing$
+    { format.bvolume output
+      new.block
+      format.number.series output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+    }
+    { new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {booklet}
+{ output.bibitem
+  format.authors output
+  new.block
+  format.title ",''" * "title" output.check
+  new.nccont
+  howpublished address new.block.checkb
+  howpublished output
+  address output
+  format.date output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check }
+    { format.authors output.nonnull
+      crossref missing$
+	{ "author and editor" editor either.or.check }
+	'skip$
+      if$
+    }
+  if$
+  new.block
+  format.btitle "title" output.check
+  crossref missing$
+    { format.bvolume output
+      format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.number.series output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+    }
+    { format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      format.bvolume output
+      format.number.series output
+      format.chapter.pages output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+      format.edition output
+      format.date "year" output.check
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.chapter.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inproceedings}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      address empty$
+	{ organization publisher new.sentence.checkb
+	  organization output
+	  format.date "year" output.check
+	}
+	{ address output.nonnull
+	  format.date "year" output.check
+	  organization output
+	}
+      if$
+      format.bvolume output
+      format.number.series output
+      format.pages output
+      publisher output
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {conference} { inproceedings }
+
+FUNCTION {manual}
+{ output.bibitem
+  author empty$
+    { organization empty$
+	'skip$
+	{ organization output.nonnull
+	  address output
+	}
+      if$
+    }
+    { format.authors output.nonnull }
+  if$
+  new.block
+  format.btitle "title" output.check
+  author empty$
+    { organization empty$
+	{ address new.block.checka
+	  address output
+	}
+	'skip$
+      if$
+    }
+    { organization address new.block.checkb
+      organization output
+      address output
+    }
+  if$
+  format.edition output
+  format.date output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  "M.S. thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+  format.authors output
+  title howpublished new.block.checkb
+  format.title ",''" * output
+  new.nccont
+  howpublished new.block.checka
+  howpublished output
+  format.date output
+  new.block
+  note output
+  fin.entry
+  empty.misc.check
+}
+
+FUNCTION {phdthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.btitle "title" output.check
+  new.block
+  "Ph.D. thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+  editor empty$
+    { organization output }
+    { format.editors output.nonnull }
+  if$
+  new.block
+  format.btitle "title" output.check
+  format.bvolume output
+  format.number.series output
+  address empty$
+    { editor empty$
+	{ publisher new.sentence.checka }
+	{ organization publisher new.sentence.checkb
+	  organization output
+	}
+      if$
+      publisher output
+      format.date "year" output.check
+    }
+    { address output.nonnull
+      format.date "year" output.check
+      new.sentence
+      editor empty$
+	'skip$
+	{ organization output }
+      if$
+      publisher output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  format.tr.number output.nonnull
+  institution "institution" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title ",''" * "title" output.check
+  new.ncblock
+  note "note" output.check
+  format.date output
+  fin.entry
+}
+
+FUNCTION {default.type} { misc }
+
+MACRO {jan} {"Jan."}
+
+MACRO {feb} {"Feb."}
+
+MACRO {mar} {"Mar."}
+
+MACRO {apr} {"Apr."}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"Aug."}
+
+MACRO {sep} {"Sept."}
+
+MACRO {oct} {"Oct."}
+
+MACRO {nov} {"Nov."}
+
+MACRO {dec} {"Dec."}
+
+MACRO {acmcs} {"ACM Computing Surveys"}
+
+MACRO {acta} {"Acta Informatica"}
+
+MACRO {cacm} {"Communications of the ACM"}
+
+MACRO {ibmjrd} {"IBM Journal of Research and Development"}
+
+MACRO {ibmsj} {"IBM Systems Journal"}
+
+MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
+
+MACRO {ieeetc} {"IEEE Transactions on Computers"}
+
+MACRO {ieeetcad}
+ {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
+
+MACRO {ipl} {"Information Processing Letters"}
+
+MACRO {jacm} {"Journal of the ACM"}
+
+MACRO {jcss} {"Journal of Computer and System Sciences"}
+
+MACRO {scp} {"Science of Computer Programming"}
+
+MACRO {sicomp} {"SIAM Journal on Computing"}
+
+MACRO {tocs} {"ACM Transactions on Computer Systems"}
+
+MACRO {tods} {"ACM Transactions on Database Systems"}
+
+MACRO {tog} {"ACM Transactions on Graphics"}
+
+MACRO {toms} {"ACM Transactions on Mathematical Software"}
+
+MACRO {toois} {"ACM Transactions on Office Information Systems"}
+
+MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
+
+MACRO {tcs} {"Theoretical Computer Science"}
+
+READ
+
+STRINGS { longest.label }
+
+INTEGERS { number.label longest.label.width }
+
+FUNCTION {initialize.longest.label}
+{ "" 'longest.label :=
+  #1 'number.label :=
+  #0 'longest.label.width :=
+}
+
+FUNCTION {longest.label.pass}
+{ number.label int.to.str$ 'label :=
+  number.label #1 + 'number.label :=
+  label width$ longest.label.width >
+    { label 'longest.label :=
+      label width$ 'longest.label.width :=
+    }
+    'skip$
+  if$
+}
+
+EXECUTE {initialize.longest.label}
+
+ITERATE {longest.label.pass}
+
+FUNCTION {begin.bib}
+{ preamble$ empty$
+    'skip$
+    { preamble$ write$ newline$ }
+  if$
+  "\begin{thebibliography}{"  longest.label  * "}" * write$ newline$
+}
+
+EXECUTE {begin.bib}
+
+EXECUTE {init.state.consts}
+
+ITERATE {call.type$}
+
+FUNCTION {end.bib}
+{ newline$
+  "\end{thebibliography}" write$ newline$
+}
+
+EXECUTE {end.bib}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of IEEE.bst %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Index: sparse_sample_icip2014/Makefile
===================================================================
--- sparse_sample_icip2014/Makefile	(revision 10)
+++ sparse_sample_icip2014/Makefile	(revision 10)
@@ -0,0 +1,45 @@
+FILENAME = icip_paper
+PS_FILE = $(FILENAME).ps
+PDF_FILE = $(FILENAME).pdf
+
+# PAPERSIZE = a4
+PAPERSIZE = letter
+
+LATEX_FILES = *.dvi *.log *.toc *.tof *.aux *.blg *.lof *.lot *.bbl
+CLEAN_FILES = $(LATEX_FILES) *.bak core $(PS_FILE) $(PDF_FILE)
+COMPRESS_FILES = *.tex *.bib *.sty *.eps *.ps *.fig *.m *.txt *.pgm *.bst *.cls
+UNCOMPRESS_FILES = *.Z *.gz
+
+COMPRESS_DIRS = . Tables Figures
+COMPRESS = gzip -q
+UNCOMPRESS = gunzip
+
+all: main
+
+
+main:
+	latex main
+	@if grep "Warning: Citation" main.log; then bibtex main; \
+	latex main; fi;
+	@while grep Rerun main.log; do latex main; done;
+
+ps: main
+	dvips -t $(PAPERSIZE) -o $(PS_FILE) main
+
+pdf: main
+	dvips -t $(PAPERSIZE) -Ppdf -j0 -G0 -o $(PS_FILE) main
+	ps2pdf -sPAPERSIZE=$(PAPERSIZE) \
+		-dPDFSETTINGS=/prepress \
+		-dCompatibilityLevel=1.7 \
+		$(PS_FILE) $(PDF_FILE)
+
+clean:
+	@for i in $(COMPRESS_DIRS) ; \
+	do \
+	(if (test -d $$i) ; \
+	then cd $$i ; \
+	echo "Cleaning $$i" ; \
+	rm -f *~ ; \
+	rm -f $(CLEAN_FILES) ; \
+	fi) \
+	done
Index: sparse_sample_icip2014/README.txt
===================================================================
--- sparse_sample_icip2014/README.txt	(revision 10)
+++ sparse_sample_icip2014/README.txt	(revision 10)
@@ -0,0 +1,23 @@
+LaTeX Templates for ICIP 2014
+
+Files:
+- spconf.sty: LaTeX style file with margin, page layout, font, etc. definitions
+- IEEEbib.bst: BibTeX style file with bibliography style definitions
+- main.tex: LaTeX template file
+- icip_paper.pdf: PDF generated from the template file
+- refs.bib: example file of bibliographic references
+- Figures/{image1.eps, image2.eps, image3.eps}: example image files
+- Makefile: provides for automatic LaTeX compilation and PDF generation
+
+It is recommended to use the included Makefile to produce the PDF document to
+submit to the conference:
+
+  - "make": runs LaTeX and BibTeX to produce the file main.dvi.
+            Multiple runs are conducted as needed to resolve cross references.
+  - "make pdf": produces a format-compliant PDF, icip_paper.pdf, for
+                submission. dvips and ps2pdf are used to completely embed and
+                subset all fonts.
+
+The Makefile should work on any modern Unix system with dvips and ps2pdf
+installed. Windows users will either need to compile by hand or install
+Cygwin (http://cygwin.com/).
Index: sparse_sample_icip2014/main.tex
===================================================================
--- sparse_sample_icip2014/main.tex	(revision 10)
+++ sparse_sample_icip2014/main.tex	(revision 10)
@@ -0,0 +1,488 @@
+% Template for ICIP-2014 paper; to be used with:
+%          spconf.sty  - ICASSP/ICIP LaTeX style file, and
+%          IEEEbib.bst - IEEE bibliography style file.
+% --------------------------------------------------------------------------
+\documentclass{article}%
+\usepackage{spconf,amsmath,graphicx}%
+%
+%%***
+%% Including AMS packages for equation formatting.
+%%***
+\usepackage{color}%
+\usepackage{amsfonts}%
+\usepackage{amssymb}%
+\usepackage{bm}%
+\usepackage{latexsym}%
+\usepackage{xcolor}%
+%\usepackage[pagebackref,colorlinks,bookmarks=false]{hyperref}%
+\usepackage{nohyperref}%
+\usepackage{subcaption}%
+\usepackage{mleftright}%
+\usepackage{algorithm}%
+\usepackage[noend]{algpseudocode}%
+\usepackage{booktabs,array}%
+\usepackage{tabularx}%
+\usepackage{siunitx}%
+\usepackage{paralist}
+%\usepackage{epstopdf}%
+%\usepackage{soul}
+%
+%%***
+%% Set up packages
+%%***
+\definecolor{darkgreen}{rgb}{0 0.5 0}%
+\definecolor{darkblue}{rgb}{0 0 0.7}%
+%
+\hypersetup{%
+   linkcolor=darkblue,%
+   citecolor=darkgreen,%
+   urlcolor=blue,%
+   pdfpagemode=UseNone,%
+   pdfstartview=Fit,%
+   pdfpagelayout=TwoPageLeft,%
+}
+%
+%\renewcommand*{\backref}[1]{}%
+%\renewcommand*{\backrefalt}[4]{%
+%\ifcase #1 %
+%   % case: not cited
+%   (not~cited).%
+%\or%
+%   % case: cited on exactly one page
+%   (see~p.~#2).%
+%\else%
+%   % case: cited on multiple pages
+%   (see~pp.~#2).%
+%\fi}%
+%
+\makeatletter
+\newcommand*\if@single[3]{%
+  \setbox0\hbox{${\mathaccent"0362{#1}}^H$}%
+  \setbox2\hbox{${\mathaccent"0362{\kern0pt#1}}^H$}%
+  \ifdim\ht0=\ht2 #3\else #2\fi
+  }
+%The bar will be moved to the right by a half of \macc@kerna, which is computed by amsmath:
+\newcommand*\rel@kern[1]{\kern#1\dimexpr\macc@kerna}
+%If there's a superscript following the bar, then no negative kern may follow the bar;
+%an additional {} makes sure that the superscript is high enough in this case:
+\newcommand*\widebar[1]{\@ifnextchar^{{\wide@bar{#1}{0}}}{\wide@bar{#1}{1}}}
+%Use a separate algorithm for single symbols:
+\newcommand*\wide@bar[2]{\if@single{#1}{\wide@bar@{#1}{#2}{1}}{\wide@bar@{#1}{#2}{2}}}
+\newcommand*\wide@bar@[3]{%
+  \begingroup
+  \def\mathaccent##1##2{%
+%If there's more than a single symbol, use the first character instead (see below):
+    \if#32 \let\macc@nucleus\first@char \fi
+%Determine the italic correction:
+    \setbox\z@\hbox{$\macc@style{\macc@nucleus}_{}$}%
+    \setbox\tw@\hbox{$\macc@style{\macc@nucleus}{}_{}$}%
+    \dimen@\wd\tw@
+    \advance\dimen@-\wd\z@
+%Now \dimen@ is the italic correction of the symbol.
+    \divide\dimen@ 3
+    \@tempdima\wd\tw@
+    \advance\@tempdima-\scriptspace
+%Now \@tempdima is the width of the symbol.
+    \divide\@tempdima 10
+    \advance\dimen@-\@tempdima
+%Now \dimen@ = (italic correction / 3) - (width / 10)
+    \ifdim\dimen@>\z@ \dimen@0pt\fi
+%The bar will be shortened in the case \dimen@<0 !
+    \rel@kern{0.6}\kern-\dimen@
+    \if#31
+      \overline{\rel@kern{-0.6}\kern\dimen@\macc@nucleus\rel@kern{0.4}\kern\dimen@}%
+      \advance\dimen@0.4\dimexpr\macc@kerna
+%Place the combined final kern (-\dimen@) if it is >0 or if a superscript follows:
+      \let\final@kern#2%
+      \ifdim\dimen@<\z@ \let\final@kern1\fi
+      \if\final@kern1 \kern-\dimen@\fi
+    \else
+      \overline{\rel@kern{-0.6}\kern\dimen@#1}%
+    \fi
+  }%
+  \macc@depth\@ne
+  \let\math@bgroup\@empty \let\math@egroup\macc@set@skewchar
+  \mathsurround\z@ \frozen@everymath{\mathgroup\macc@group\relax}%
+  \macc@set@skewchar\relax
+  \let\mathaccentV\macc@nested@a
+%The following initialises \macc@kerna and calls \mathaccent:
+  \if#31
+    \macc@nested@a\relax111{#1}%
+  \else
+%If the argument consists of more than one symbol, and if the first token is
+%a letter, use that letter for the computations:
+    \def\gobble@till@marker##1\endmarker{}%
+    \futurelet\first@char\gobble@till@marker#1\endmarker
+    \ifcat\noexpand\first@char A\else
+      \def\first@char{}%
+    \fi
+    \macc@nested@a\relax111{\first@char}%
+  \fi
+  \endgroup
+}
+\makeatother
+%
+% Title.
+% ------
+\title{Mesh-Free Sparse Representation of Multidimensional LiDAR Data}%
+%
+% Single address.
+% ---------------
+\name{Kristian L. Damkjer, Hassan Foroosh}%
+\address{University of Central Florida\\%
+         Department of Electrical Engineering and Computer Science\\%
+         Orlando, Florida}%
+%
+\begin{document}%
+\ninept%
+%
+\maketitle%
+%
+\begin{abstract}%
+Modern LiDAR collection systems generate very large data sets approaching several million to billions of point samples per product. Compression techniques have been developed to help manage the large data sets. However, sparsifying LiDAR survey data by means other than random decimation remains largely unexplored. In contrast, surface model simplification algorithms are well-established, especially with respect to the complementary problem of surface reconstruction. Unfortunately, surface model simplification algorithms are often not directly applicable to LiDAR survey data due to the true \num{3}D nature of the data sets. Further, LiDAR data is often attributed with additional user data that should be considered as potentially salient information. This paper makes the following main contributions in this area:
+\begin{inparaenum}[(i)]
+\item{We generalize some features defined on spatial coordinates to arbitrary dimensions and extend these features to provide local multidimensional statistics.}
+\item{We propose an approach for sparsifying point clouds similar to mesh-free surface simplification that preserves saliency with respect to the multi-dimensional information content.}
+\item{We show direct application to LiDAR data and evaluate the benefits in terms of level of sparsity versus entropy.}
+\end{inparaenum}
+\end{abstract}%
+%
+\begin{keywords}%
+LiDAR, multidimensional systems, point cloud, mesh-free simplification, principal component analysis
+\end{keywords}%
+%
+\section{Introduction}%
+\label{sec:introduction}%
+%
+Mapping and surveying Light Detection and Ranging (LiDAR) systems produce large amounts of true three-dimensional (\num{3}D) data. Modern systems sample several thousand to over a million points per second resulting in several million to billions of point samples per product to be stored, processed, analyzed and distributed \cite{Parrish:2012,Smith:2012,Young:2012}.
+
+Managing such large data sets presents a host of challenges to content providers. Production strategies have been developed to mitigate data management issues inherent in processing large-scale projects \cite{David:2008}. However, user demands for simultaneous wide-area coverage, high-fidelity scene content, and low-latency access keep data sizing considerations at the forefront of content provider concerns.
+
+The LAS file format was developed to facilitate the exchange of LiDAR data \cite{ASPRS:2012}. Extensions to the LAS format, \textit{e.g.} LASzip, and generic exchange formats, \textit{e.g.} HDF5, further address data sizing concerns by offering support for lossless compression with typical performance yielding files between 10 and 20 percent of the original file size \cite{Isenburg:2011, HDF:2011}. However, even with an effective compression strategy, explicit data reduction is often necessary to support users in bandwidth-limited and mobile device environments. It is therefore necessary to establish approaches to intelligently reduce point data in a manner that preserves information content. Current approaches focus primarily on preserving the surface structures represented by the spatial coordinates \cite{Pauly:2002}. We describe an approach that also allows for the preservation of non-surface structures and includes point attribution in the salience criterion.
+%
+\section{Novelty and Relationship to Prior Work}%
+\label{sec:prior}%
+%
+Simplification of LiDAR survey data remains largely unexplored; however, point-based surface model simplification algorithms are well-established, especially with respect to the complementary problem of surface reconstruction. We refer to the survey conducted by Pauly \textit{et al.} for an overview of point-based surface simplification \cite{Pauly:2002}. In this problem domain, there is an underlying assumption that points in the cloud all belong to surfaces embedded in the spatial dimensions. This assumption is frequently violated in LiDAR data where points often belong to non-surface features. Further, survey data is often attributed with additional information that should be considered in the simplification process lest salient information be lost \cite{David:2008}. Regardless of these limitations, we draw inspiration for our approach from mesh-free surface simplification approaches.
+
+Dyn \textit{et al.} \cite{Dyn:2008} present an iterative sub-sampling approach supported by local surface approximation. Their approach operates in a fine-to-coarse manner terminated by a desired point set size, $\tau$. Their point selection is solely based on the input point cloud geometry, $\mathcal{P}\subset\mathbb{R}^{3}$, and a salience criterion, $s\colon\mathcal{T}\subseteq\mathcal{P}\setminus\mleft\{{\varnothing}\mright\}\to\mathbb{R}$. An important aspect of $s$ is that it updates with respect to the current subset $\mathcal{T}\subseteq\mathcal{P}$ throughout the point removal process.
+
+Yu \textit{et al.} \cite{Yu:2010} present a similar approach that enforces a post-condition of a terminal point set size and operates in an adaptive manner driven by point clustering and a user-specified simplification criterion and optimization process.
+ 
+While these approaches operate without generating an explicit mesh surface, they carry forward the legacy of mesh-based approaches by limiting their analysis to spatial coordinates and operating under the assumption that points locally approximate a surface. In contrast, natural scenes are complex and contain significant points belonging to linear, planar, and isotropic structures. LiDAR survey data is also frequently attributed with intensity or color data, classification, or other user-defined features. These additional dimensions may contain content that is salient to end-user applications which suggests the need for a multidimensional approach to point removal.
+
+The primary goal of this paper, therefore, is to create a data sparsifying algorithm by developing a multidimensional salience measure, $s\colon\mathcal{P}\to\mathbb{R}$, and therefore demonstrate that such a multidimensional approach produces sparse point representations that preserve salience. Several approaches have been developed to identify salient points based solely on \num{3}D spatial coordinates. West \textit{et al.} introduce features based on structure-tensor--eigenvalue analysis of local point neighborhoods \cite{West:2004}. These feature descriptors have been enhanced to extract strong spatially linear features to support scene modeling applications \cite{Gross:2006}. Methods have also been developed to direct optimal neighborhood scale selection for feature attribution \cite{Demantke:2011}. Next, we generalize these attribute definitions to arbitrary dimensions to serve as the basis for measuring salience.
+%
+\section{Local Statistic Attribution}%
+\label{sec:attributes}%
+%
+Our salience measure is based on attributes defined by neighborhoods in arbitrary dimensions. In this section, we establish our definition for locality in arbitrary dimensions and generalize the definitions for previously-established features in the spatial domain to arbitrary dimensions. Figure~\ref{fig:attributes} illustrates the features we consider based on evaluation of \num{3}D spatial point data.
+
+\begin{figure}[t!]%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_dens}}%
+  \caption{}\label{fig:density}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_omni}}%
+  \caption{}\label{fig:omnivariance}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_iso}}%
+  \caption{}\label{fig:isotropy}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_ani}}%
+  \caption{}\label{fig:anisotropy}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_dim}}%
+  \caption{}\label{fig:dimensionality}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_emb}}%
+  \caption{}\label{fig:label}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_ent}}%
+  \caption{}\label{fig:entropy}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.24\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/Armstrong_de}}%
+  \caption{}\label{fig:de}\medskip%
+\end{subfigure}%
+\caption{Visualization of Neighborhood Features on section of \textit{Armstrong/Enderby} data set from Applied Imagery. (a) Density, (b) Omnivariance, (c) Isotropy, (d) Anisotropy, (e) Dimensionality, (f) Dimension Label, (g) Component Entropy, (h) Dimensional Entropy}\label{fig:attributes}%
+\end{figure}%
+%
+\subsection{Data Conditioning}%
+\label{sec:conditioning}%
+%
+Our attributes are based on principal components analysis which is sensitive to differences in scale within the feature space. The source data should therefore be conditioned prior to analysis so that different classes of attributes have approximately the same precision scale or measurement resolution. Without this adjustment, insignificant variations within one dimension can easily dominate significant variations in another. We perform this conditioning by first decentering the data then normalizing each class by an estimate of the measurement resolution for the class. We estimate the measurement resolution by computing the standard deviation within a flat response region for each attribute in the class. We then take the minimum class attribute standard deviation as the measurement resolution for the class.
+%
+\subsection{Locality}%
+\label{sec:locality}%
+%
+We consider the analysis of multidimensional points, $\bm{x}\in\mathbb{R}^{n}$, where $\mathcal{N}$ is the set of native attributes for the point and $\mleft\lvert{\mathcal{N}}\mright\rvert = n$ is the dimension of the native feature space. All attributes are assumed to be real-valued. While boolean and finite-class attributes may be simply represented by an appropriate integer enumeration, our approach is unlikely to yield meaningful results with such classes due to the conditioning issues mentioned previously. Our definition of a point cloud, $\mathcal{D}\subset\mathbb{R}^{n}$, then is simply a database of real-valued multidimensional points with consistent feature space definition.
+
+In most cases, it is desirable to restrict neighborhood definition to a subset of the available native feature space. To support this capability, we establish a database of query points, $\mathcal{Q}\subset\mathbb{R}^{m}$, where $\mathcal{M}\subseteq\mathcal{N}$ is the search space of attributes for the determination of locality and $\mleft\lvert{\mathcal{M}}\mright\rvert = m$ is the dimension of the search space.
+
+We proceed by analyzing the neighborhoods of points about the query points, $\mathcal{V}_{\bm{q}}\subseteq\mathcal{D}$. The neighborhoods are defined by an $m$-dimensional distance metric, $\delta$, between the query points, $\bm{q}\in\mathcal{Q}$, and the data points, $\bm{x}\in\mathcal{D}$. For point cloud simplification, we treat each  $\bm{x}\in\mathcal{D}$ as a query location (\textit{i.e.}, $\mathcal{Q}=\mathcal{D}$). This approach requires a reasonable all nearest-neighbor search algorithm to be practical, that is one with complexity no worse than $O\mleft(p \log p\mright)$ where $p=\lvert\mathcal{D}\rvert$. 
+
+We investigated two neighborhood definitions that each present merits. The $k$-nearest neighborhood, $\mathcal{V}^{k}_{\bm{q}}$, consists of the $k$ closest points to $\bm{q}$ in $\mathcal{D}$ whereas the fixed-radius neighborhood, $\mathcal{V}^{r}_{\bm{q}}$, consists of all points in $\mathcal{D}$ within the ball of radius $r$ centered at $\bm{q}$. Similar to Dyn \textit{et al.}, we enforce the condition that $\bm{q}\not\in\mathcal{V}_{\bm{q}}$ \cite{Dyn:2008}. This condition is imposed so that $\mathcal{V}_{\bm{q}}$ can be used to estimate the effects of eliminating $\bm{q}$ during the simplification process.
+%
+\subsection{Structure Features}%
+\label{sec:features}%
+%
+West \textit{et al.} and Demantk\'{e} \textit{et al.} define several features for describing \num{3}D point neighborhoods. In this section, we generalize, and in some cases modify, their proposed features to support multidimensional analysis and interpretability. The generalized features are summarized in table~\ref{tab:features}.
+
+\begin{table}[ht!]%
+\centering%
+\begin{tabularx}{\linewidth}{lX}%
+\toprule%
+Name & \multicolumn{1}{c}{Equation} \\%
+\midrule%
+Omnivariance &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:omnivariance}%
+      {Omni}\colon{\mleft({\prod_{d=1}^{n}{\lambda_{d}}}\mright)^{\frac{1}{n}}}
+   \end{equation}%
+\end{minipage} \\%
+Isotropy &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:isotropy}%
+      {Iso}\colon
+         {\frac{\sigma_{n}}{\sigma_{1}}}
+   \end{equation}%
+\end{minipage} \\%
+Anisotropy &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:anisotropy}%
+      {Ani}\colon
+         {\frac{{\sigma_{1}}-{\sigma_{n}}}{\sigma_{1}}}
+    \end{equation}%
+\end{minipage} \\%
+Dimensionality &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:dimensionality}%
+      {\alpha_{d}}\colon
+         {\begin{cases}
+            {\frac{{\sigma_{d}}-{\sigma_{d+1}}}{\sigma_{1}}}&,d<n\\
+            {Iso}&,d=n
+         \end{cases}}
+   \end{equation}%
+\end{minipage} \\%
+Dimension Label &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:label}%
+      {d^{*}}\colon{\operatorname*{\arg\!\max}_{d\in\mleft\{{1,\dotsc,n}\mright\}}
+          \alpha_{d}}
+   \end{equation}%
+\end{minipage} \\%
+Component Entropy &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:entropy}%
+      {H_{\sigma}}\colon{-{\sum_{d=1}^{n}{\hat{\sigma}_{d}\log_{n}\hat{\sigma}_{d}}}}
+   \end{equation}%
+\end{minipage} \\%
+Dimensional Entropy &%
+\begin{minipage}{\linewidth}%
+   \begin{equation}%
+      \label{eqn:de}%
+      {H_{\alpha}}\colon{-{\sum_{d=1}^{n}{\alpha_{d}\log_{n}\alpha_{d}}}}
+   \end{equation}%
+\end{minipage} \\%
+\bottomrule%
+\end{tabularx}%
+\caption{Features defined on $\mathcal{V}_{\bm{q}}$}%
+\label{tab:features}%
+\end{table}%
+
+West \textit{et al.} present six features that proved to be most applicable to their work in segmentation and object recognition: \textit{omnivariance}, \textit{anisotropy}, \textit{linearity}, \textit{planarity}, \textit{sphericity}, and \textit{eigenentropy} \cite{West:2004}. Each of the features they describe are derived from the eigenvalues resulting from the principal components analysis of the query neighborhoods, $\mathcal{V}_{\bm{q}}$. However, while West \textit{et al.} define the features with respect to the eigenvalues, $\lambda_{1}\geq\lambda_{2}\geq\dotsb\geq\lambda_{n}$, we generally prefer to use the singular values, $\sigma_{1}\geq\sigma_{2}\geq\dotsb\geq\sigma_{n}$, as demonstrated by Demantk\'{e} \textit{et al.} \cite{Demantke:2011}. The sole exception to this recommendation is the \textit{omnivariance} feature which is used to meaningfully compare the total variance of the neighborhoods to each other. Re-defining the feature with respect to the singular values, while still meaningful, would be more directly related to the standard deviation.
+
+\textit{Linearity}, \textit{planarity}, and \textit{sphericity} are closely related features that each represent the concept of the neighborhood's participation in subsequently higher dimensions. That is, the values attempt to capture the degree to which the local neighborhood spreads into each of the respective dimensions \cite{West:2004}. We generalize this concept as \textit{dimensionality} and define the family of features by equation~\eqref{eqn:dimensionality}. We feel that it is worth considering the highest order dimensionality of the data set as a unique feature as well and generalize the concept to \textit{isotropy} as defined by equation~\eqref{eqn:isotropy}. The complement of this value, \textit{anisotropy}, is thus easily understood and maintains a definition consistent with West \textit{et al.} as expressed by equation~\eqref{eqn:anisotropy}.
+
+\textit{Eigenentropy} is a feature based on the Shannon entropy of the principal component eigenvalues. It describes the dimensional participation of the neighborhood. That is, higher values imply greater participation across more of the available dimensions \cite{West:2004}. We generalize this feature by modifying the logarithmic base to the number of dimensions, $n$, and operating on normalized singular values, $\hat{\sigma}_{d}$, instead of raw eigenvalues. We normalize the singular values by the sum over all singular values for the neighborhood so that each value can be treated as a probability that a point in the neighborhood has the respective eigenvector as its dominant local coordinate axis. The resulting feature, which we call \textit{component entropy}, describes the unpredictability of the neighborhood in the $n$-dimensional space and is expressed by equation~\eqref{eqn:entropy}.
+
+Demantk\'{e} \textit{et al.} introduce two additional features to support automated neighborhood scale selection: \textit{dimensionality labeling} and \textit{dimensional entropy} \cite{Demantke:2011}. The \textit{dimension label} is simply the dimension that maximizes equation~\eqref{eqn:dimensionality}. We use this feature to establish an equivalence relation on $\mathcal{D}\times\mathcal{D}, \bm{x}\sim\bm{y}\iff {d^{*}}\mleft(\mathcal{V}_{\bm{x}}\mright)={d^{*}}\mleft(\mathcal{V}_{\bm{y}}\mright)$. This equivalence relation creates a partition on $\mathcal{D}$ that we leverage as part of our simplification algorithm as described in section~\ref{sec:approach}. \textit{Dimensional entropy} is very similar in concept to the \textit{component entropy}, with the exception that it describes the Shannon entropy of the \textit{dimensionality} feature. This feature describes the unpredictability of the \textit{dimension label} feature and acts as a figure of merit for the selected label.
+%
+\section{Approach}%
+\label{sec:approach}%
+%
+In this section, we describe a general point cloud sparsifying algorithm, derive the multidimensional salience measure, and describe the update operations that must take place per iteration to enforce the correct dynamic behavior of the salience measure. Algorithm~\ref{alg:ndthin} describes our solution that supports sparsifying points in arbitrary dimensions. Our objective is to remove least salient points, while preserving the proportional distribution of dimension labels in the final point set. We also wish to maintain the behavior that the algorithm computes a unique nested sequence of subsets that can be used to define a multiresolution model.
+
+\begin{algorithm}[ht!]%
+\caption{Multidimensional Point Cloud Simplification}%
+\label{alg:ndthin}%
+\begin{algorithmic}[1]%
+\Require $\mathcal{D}\subset\mathbb{R}^{n}\setminus\mleft\{{\varnothing}\mright\},\mleft\lvert{\mathcal{D}}\mright\rvert =N,\tau\in\mathbb{Z}_{N}$%
+\Ensure $\mathcal{T}\subset\mathcal{D}, \mleft\lvert{\mathcal{T}}\mright\rvert =\tau$%
+\Function{MultiDimRemovePoints}{$\mathcal{D}, \tau$}%
+\State $\mathcal{T}\gets\mathcal{D}$%
+\State $\mathcal{M}\gets\mathcal{D}/{\sim}$\label{alg:ndthinpart}%
+\While{$\mleft\lvert{\mathcal{T}}\mright\rvert>\tau$}%
+\State $d^{*}\gets\operatorname*{\arg\!\min}\limits_{d\in\mleft\{1,\dotsc,n\mright\}}\min\mathcal{P}_{d}$%
+\State $\mathcal{P}_{d^{*}}\gets\mathcal{P}_{d^{*}}\setminus\mleft\{\min\mathcal{P}_{d^{*}}\mright\}$%
+\State $\bm{x}^{*}\gets\operatorname*{\arg\!\min}\limits_{\bm{x}\in\mathcal{M}_{d^{*}}}s\mleft({\bm{x}}\mright)$%
+\State $\mathcal{M}_{d^{*}}\gets\mathcal{M}_{d^{*}}\setminus\mleft\{{\bm{x}^{*}}\mright\}$%
+\State $\mathcal{T}\gets\mathcal{T}\setminus\mleft\{{\bm{x}^{*}}\mright\}$%
+\EndWhile%
+\State \Return{$\mathcal{T}$}%
+\EndFunction%
+\end{algorithmic}%
+\end{algorithm}%
+
+The dimensional partitioning at line~\ref{alg:ndthinpart} of algorithm~\ref{alg:ndthin} is simply achieved by segregating points according to equivalence relation established by equation~\eqref{eqn:label}. This partitioning only happens once to establish the apparent local dimension of the point neighborhoods. Points are not moved out of their initial partition, regardless of how their descriptive features evolve through the sparsifying process.
+
+We simultaneously enforce the proportional sparsifying constraint and the nested subset constraint by removing points from the partitions in an interleaved manner. We order the partitions so that $\lvert\mathcal{M}_{1}\rvert\geq\dotsb\geq\lvert\mathcal{M}_{n}\rvert$.  The pre-computed priorities for each partition are given by equation~\eqref{eqn:priority} where $M=\max_{d\in\mleft\{1,\dotsc,n\mright\}}\lvert\mathcal{M}_{d}\rvert$.
+
+\begin{equation}\label{eqn:priority}%
+\begin{aligned}%
+\mathcal{P}_{d}=\mleft\{\dfrac{mM}{\lvert\mathcal{M}_{d}\rvert}+\dfrac{d-1}{n}:\forall m \in 1,\dotsc,\lvert\mathcal{M}_{d}\rvert\mright\}
+\end{aligned}%
+\end{equation}%
+
+In each iteration, we seek to select the point that minimizes the change of information content in the point cloud. Dyn \textit{et al.} use a salience measure that increases in value as points in the neighborhood diverge from the local fit of a smoothed surface \cite{Dyn:2008}. Obviously, we are unable to use a similar model for salience since our measure must be defined for arbitrary dimension. However, recall from section~\ref{sec:features} that equation~\eqref{eqn:entropy} describes the unpredictability of the neighborhood and acts as a measure of information content in the local neighborhood. We therefore select this feature, which is defined for arbitrary dimension, as the basis for our salience measure.
+
+To estimate the change of information content caused by the removal of a point, we first establish a baseline estimate. The baseline, $H_{\sigma,0}$, is based on the \textit{component entropy} of the initial point neighborhoods as described by equation~\eqref{eqn:baseline}.
+
+\begin{equation}\label{eqn:baseline}%
+\begin{aligned}%
+{H_{\sigma,0}}\mleft(\bm{x}\mright)={H}_{\sigma}\mleft(\mathcal{V}_{\bm{x}}\cup\left\{\bm{x}\right\}\mright)
+\end{aligned}%
+\end{equation}%
+
+We estimate the change of information content caused by the removal of a point as the maximum absolute deviation of the neighborhood component entropy from the component baselines as described by equation~\eqref{eqn:significance}. This measure acts as the salience function for our sparsifying process.
+
+\begin{equation}\label{eqn:significance}%
+\begin{aligned}%
+{s}\mleft(\bm{x}\mright)=\max_{\bm{y}\in\mathcal{C}_{\bm{x}}}\lvert{H_{\sigma,0}}\mleft(\bm{y}\mright)-{H}_{\sigma}\mleft(\mathcal{V}_{\bm{x}}\mright)\rvert
+\end{aligned}%
+\end{equation}%
+
+In each iteration, the point, $\bm{x}^{*}$, that minimizes equation~\eqref{eqn:significance} is selected for removal. To ensure that removed points continue to influence the sparsifying process, we maintain a constituency, $\mathcal{C}_{\bm{x}}$, for each $\bm{x}\in\mathcal{D}$.  The constituency sets serve an identical function to the test sets described by Dyn \textit{et al.} and are updated in a similar manner \cite{Dyn:2008}.
+
+The constituency contains the set of points represented by $\bm{x}$. Initially, each point represents only itself, \textit{i.e.} $\mathcal{C}_{\bm{x}}=\mleft\{\bm{x}\mright\}$. When a point, $\bm{x}^{*}$, is selected for removal, its constituency, $\mathcal{C}_{\bm{x}^{*}}$, is distributed among its neighbors' constituencies, $\mleft\{\mathcal{C}_{\bm{y}}:\bm{y}\in\mathcal{V}_{\bm{x}^{*}}\mright\}$, by selecting the closest $\bm{y}\in\mathcal{V}_{\bm{x}^{*}}$ as a representative for each $\bm{z}\in\mathcal{C}_{\bm{x}^{*}}$.
+
+In addition to updating the constituencies, we must also update the neighborhoods containing the removed point to make sure that it does not continue to influence estimates of the current point cloud state. The set of back-references to the neighborhoods containing each point, $\mathcal{B}_{\bm{x}}=\mleft\{\mathcal{V}_{\bm{y}}:\bm{x}\in\mathcal{V}_{\bm{y}}\mright\}$, are maintained to keep this update operation efficient. The neighborhoods containing the removed point replace it with a new closest point from their neighbor's neighborhoods. That is, from the set
+
+\begin{equation}\label{eqn:newPtSet}%
+\begin{aligned}%
+\bigcup_{\bm{z}\in\mathcal{V}_{\bm{y}}}
+\mathcal{V}_{\bm{z}}\setminus
+\mleft(
+   \mleft\{
+      \bm{x}^{*}
+   \mright\}
+   \cup
+   \mathcal{V}_{\bm{y}}
+\mright)
+\end{aligned}%
+\end{equation}%
+
+If the set described by equation~\eqref{eqn:newPtSet} is empty, a closest point from the current set of remaining points is selected instead.
+
+Finally, the salience measures for each $\bm{x}\in\mathcal{V}_{\bm{x}^{*}}\cup\mathcal{B}_{\bm{x}^{*}}$ are updated according to equation~\eqref{eqn:significance}.
+%
+\section{Results and Discussion}%
+\label{sec:results}%
+%
+We have implemented our approach using a \textit{vantage point} tree \cite{Yianilos:1993}
+ for the spatial indexing structure in support of all nearest neighbor searching and a \textit{splay} tree \cite{Sleator:1985} for managing the salience heap. The selection of these data structures maintains asymptotic complexity equivalent to the approach proposed by Dyn \textit{et al.} while compensating for higher dimensional data.
+
+\begin{figure}[t!]%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/dragon_75}}%
+  \caption{75\%, $\widebar{H}_{\sigma,0}\approx 0.7369$}\label{fig:dt75}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/dragon_50}}%
+  \caption{50\%, $\widebar{H}_{\sigma,0}\approx 0.7367$}\label{fig:dt50}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/dragon_25}}%
+  \caption{25\%, $\widebar{H}_{\sigma,0}\approx 0.7384$}\label{fig:dt25}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/dragon_10}}%
+  \caption{10\%, $\widebar{H}_{\sigma,0}\approx 0.7393$}\label{fig:dt10}\medskip%
+\end{subfigure}%
+\caption{Data output by our approach on \textit{Dragon} from the Stanford \num{3}D Scanning Repository}\label{fig:dragon}%
+\end{figure}%
+%
+\begin{figure}[t!]%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/max_sigs_notitle}}%
+  \caption{Least Salience}\label{fig:max_sigs}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.48\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/mean_entropy_notitle}}%
+  \caption{Model Entropy}\label{fig:mean_ents}\medskip%
+\end{subfigure}%
+\caption{Least salience and mean baseline entropy trends during simplification of \textit{Dragon} to 1\% of the original point cloud size}\label{fig:metrics}%
+\end{figure}%
+%
+To illustrate the effectiveness of our approach, we first applied our algorithm to the standard \textit{Dragon} data set from the Stanford \num{3}D scanning repository which contains only spatial coordinates with no additional attribution. Figure~\ref{fig:dragon} shows results for data sparsified to 75\%, 50\%, 25\% and 10\% of the original point cloud size, $\lvert\mathcal{D}\rvert = 435545$. This test case demonstrates that our approach produces a sparse representation of the original data that preserves features that are salient with respect to representing the original surface. Figure~\ref{fig:metrics} illustrates the behavior of the algorithm during the sparsifying process. The salience measure does not increase monotonically throughout the sparsifying process since the point removal and update process does not enforce any guarantees on the entropies of the affected neighborhoods. However, figure~\ref{fig:max_sigs} illustrates that the least salience trend increases monotonically throughout the sparsifying process. Figure~\ref{fig:mean_ents} illustrates the effect of our salience measure on the mean baseline entropy for the model. Since we define salience to minimize change in entropy, the mean entropy remains very flat through most of the sparsifying process and in fact increases slightly as redundant points are removed. However, there is a point beyond which significant points are removed and mean entropy drops sharply as a result. For the \textit{Dragon} test case, this occurs once approximately 90\% of the original points have been removed.
+
+\begin{figure}[t!]%
+\begin{subfigure}[b]{0.32\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/manuscript_100_interp}}%
+  \caption{100\%}\label{fig:mt100}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.32\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/manuscript_010_interp}}%
+  \caption{10\%}\label{fig:mt010}\medskip%
+\end{subfigure}%
+\hfill%
+\begin{subfigure}[b]{0.32\linewidth}%
+  \centering%
+  \centerline{\includegraphics[width=\linewidth]{Figures/manuscript_001_interp}}%
+  \caption{1\%}\label{fig:mt001}\medskip%
+\end{subfigure}%
+\caption{Mesh reconstruction from data output by our approach on \textit{Vellum Manuscript} from the Stanford \num{3}D Scanning Repository}\label{fig:manuscript}%
+\end{figure}%
+%
+Next, to illustrate the effectiveness of our approach on multidimensional data, we applied our algorithm to the \textit{Vellum Manuscript} data set from the Stanford \num{3}D scanning repository which contains spatial coordinates with color attribution per point. Figure~\ref{fig:manuscript} shows mesh reconstructions of data sparsified to 100\%, 10\%, and 1\% of the original point cloud size, $\lvert\mathcal{D}\rvert = 2155617$. This test case demonstrates that our salience measure generalizes to multidimensional data. The example illustrates preservation of fine features in the $n$D data set up to high levels of sparsity. The thin red margin lines are visible and paper edges are preserved even when data is sparsified to just 1\% of the original data size. Our approach is lossy, though, and significant degradation is noticeable at the 1\% level. However, we are able to create a very faithful reconstruction of the data set with just 10\% of the original data.
+%
+\section{Conclusion and Future Work}%
+\label{sec:conclusion}%
+%
+In this paper, we develop extensions of established \num{3}D features to arbitrary dimensions and present an application to sparse representation of point clouds. We believe that this approach may be further enhanced by better selecting the initial neighborhood sizes using an approach such as the one proposed by Demantk\'{e} \textit{et al.} \cite{Demantke:2011}. We also believe that there are other potentially interesting applications of these features that warrant investigation, for example as features that support correlation and registration algorithms.
+%
+\bibliographystyle{IEEEbib}
+\bibliography{refs}
+%
+\end{document}
Index: sparse_sample_icip2014/refs.bib
===================================================================
--- sparse_sample_icip2014/refs.bib	(revision 10)
+++ sparse_sample_icip2014/refs.bib	(revision 10)
@@ -0,0 +1,438 @@
+@manual{ASPRS:2012,
+organization={American Society for Photogrammetry and Remote Sensing},
+title={{LAS} Specification Version 1.4 R12},
+month=jun,
+year={2012},
+address={Bethesda, MD, USA},
+pages={1--27}
+}
+@misc{HDF:2011,
+author={The {HDF} Group},
+title={Hierarchical data format version 5},
+howpublished={http://www.hdfgroup.org/HDF5/},
+year={2000-2014},
+note={[February 12, 2014]}
+}
+@inproceedings{David:2008,
+crossref={Hay:2008},
+author={Nicolas David and Cl\'{e}ment Mallet and Fr\'{e}d\'{e}ric Bretar},
+title={Library Concept and Design for {LiDAR} Data Processing},
+booktitle={{GEOBIA} 2008 - Pixels, Objects, Intelligence.
+{GEO}graphic Object Based Image Analysis for the 21st Century},
+year={2008},
+month=aug,
+numpages = {6},
+abstract={Airborne Laser Scanning (ALS) is nowadays a very popular technology providing accurate altimetric data for remote sensing and mapping purposes. Therefore, many algorithms have been developed so far to process these data, depending on the application. Nevertheless, for researchers, it is still a challenging task to handle large amount of heterogeneous data and adapt them for their specific aim and processes. This paper reports the thoughts and the strategy developed by the MATIS laboratory of the Institut G\'{e}ographique National ({IGN}) about an efficient lidar library design in order to tackle these issues. The specification of an efficient and versatile lidar file format is first discussed. The standard and current lidar file formats are first reviewed and a new one, dedicated to raw data processing with high feature modularity is presented. Besides, existing code components and libraries are reviewed with regard to their compatibility for research development. Modularity, availability and license conditions are here the main selection criteria. Then, different strategies for large data set handling are summarized and extended by a new solution, both based on lidar strip and raw sensor topology. These workflows are illustrated through a UML activity diagram dedicated to 2D spatial query. Finally, the current status of the implementation of this federative software as well as the perspectives of development are sketched.}
+}
+@inproceedings{Demantke:2011,
+crossref={Lichti:2011},
+author={J\'{e}r\^{o}me Demantk\'{e} and Cl\'{e}ment Mallet and Nicolas David and Bruno Vallet},
+title={Dimensionality Based Scale Selection in 3{D} {LIDAR} Point Clouds},
+booktitle={WG V/3, I/3, I/2, III/2, III/4, VII/7, V/1 ISPRS Workshop Laser Scanning 2011},
+year={2011},
+month=aug,
+pages = {97--102},
+abstract={This papers presents a multi-scale method that computes robust geometric features on lidar point clouds in order to retrieve the optimal neighborhood size for each point. Three dimensionality features are calculated on spherical neighborhoods at various radius sizes. Based on combinations of the eigenvalues of the local structure tensor, they describe the shape of the neighborhood, indicating whether the local geometry is more linear (1D), planar (2D) or volumetric (3D). Two radius selection criteria have been tested and compared for finding automatically the optimal neighborhood radius for each point. Besides, such procedure allows a dimensionality labelling, giving significant hints for classification and segmentation purposes. The method is successfully applied to 3D point clouds from airborne, terrestrial, and mobile mapping systems since no a priori knowledge on the distribution of the 3D points is required. Extracted dimensionality features and labellings are then favorably compared to those computed from constant size neighborhoods.},
+DOI = {10.5194/isprsarchives-XXXVIII-5-W12-97-2011}
+}
+@inproceedings{Dey:2005,
+crossref  = {Alexa:2005},
+author    = {Tamal K. Dey and Gang Li and Jian Sun},
+title     = {Normal Estimation for Point Clouds: A Comparison Study for a {V}oronoi Based Method},
+booktitle = {{Symposium on Point-Based Graphics}},
+pages     = {39--46},
+abstract  = {Many applications that process a point cloud data benefit from a reliable normal estimation step. Given a point cloud presumably sampled from an unknown surface, the problem is to estimate the normals of the surface at the data points. Two approaches, one based on numerical optimizations and another based on Voronoi diagrams are known for the problem. Variations of numerical approaches work well even when point clouds are contaminated with noise. Recently a variation of the Voronoi based method is proposed for noisy point clouds. The centrality of the normal estimation step in point cloud processing begs a thorough study of the two approaches so that one knows which approach is appropriate for what circumstances. This paper presents such results.},
+URL       = {http://www.eg.org/EG/DL/WS/SPBG/SPBG05/039-046.pdf},
+DOI       = {10.2312/SPBG/SPBG05/039-046}
+}
+@article{Dyn:2008,
+author={Dyn, Nira and Iske, Armin and Wendland, Holger},
+title={Meshfree Thinning of 3{D}~Point Clouds},
+journal={Foundations of Computational Mathematics},
+year={2008},
+publisher={Springer-Verlag New York, Inc.},
+address={Secaucus, NJ, USA},
+volume={8},
+number={4},
+pages={409--425},
+abstract={An efficient data reduction scheme for the simplification of a surface given by a large set X of 3D point-samples is proposed. The data reduction relies on a recursive point removal algorithm, termed thinning, which outputs a data hierarchy of point-samples for multiresolution surface approximation. The thinning algorithm works with a point removal criterion, which measures the significances of the points in their local neighbourhoods, and which removes a least significant point at each step. For any point x in the current point set Y \subset X, its significance reflects the approximation quality of a local surface reconstructed from neighbouring points in Y. The local surface reconstruction is done over an estimated tangent plane at x by using radial basis functions. The approximation quality of the surface reconstruction around x is measured by using its maximal deviation from the given point-samples X in a local neighbourhood of x. The resulting thinning algorithm is meshfree, i.e., its performance is solely based upon the geometry of the input 3D surface point-samples, and so it does not require any further topological information, such as point connectivities. Computational details of the thinning algorithm and the required data structures for efficient implementation are explained and its complexity is discussed. Two examples are presented for illustration.},
+issn={1615-3375},
+doi={10.1007/s10208-007-9008-7}
+}
+@article{Fattal:2002,
+author={Raanan Fattal and Dani Lischinski and Michael Werman},
+title={Gradient Domain High Dynamic Range Compression},
+journal={ACM Transactions on Graphics},
+year={2002},
+publisher={ACM},
+address={New York, NY, USA},
+volume={21},
+number={3},
+pages={249--256},
+abstract={We present a new method for rendering high dynamic range images on conventional displays. Our method is conceptually simple, computationally efficient, robust, and easy to use. We manipulate the gradient field of the luminance image by attenuating the magnitudes of large gradients. A new, low dynamic range image is then obtained by solving a Poisson equation on the modified gradient field. Our results demonstrate that the method is capable of drastic dynamic range compression, while preserving fine details and avoiding common artifacts, such as halos, gradient reversals, or loss of local contrast. The method is also able to significantly enhance ordinary images by bringing out detail in dark regions.},
+isbn={1-58113-521-1},
+issn={0730-0301},
+doi={10.1145/566654.566573}
+}
+@inproceedings{Gressin:2012,
+crossref={Shortis:2012},
+author={Adrien Gressin and Cl\'{e}ment Mallet and Nicolas David},
+title={Improving 3{D} {LIDAR} Point Cloud Registration Using Optimal Neighborhood Knowledge},
+booktitle={XXII ISPRS Congress, Technical Commission III},
+pages={111--116},
+abstract={Automatic 3D point cloud registration is a main issue in computer vision and photogrammetry. The most commonly adopted solution is the well-known ICP (Iterative Closest Point) algorithm. This standard approach performs a fine registration of two overlapping point clouds by iteratively estimating the transformation parameters, and assuming that good a priori alignment is provided. A large body of literature has proposed many variations of this algorithm in order to improve each step of the process. The aim of this paper is to demonstrate how the knowledge of the optimal neighborhood of each 3D point can improve the speed and the accuracy of each of these steps. We will first present the geometrical features that are the basis of this work. These low-level attributes describe the shape of the neighborhood of each 3D point, computed by combining the eigenvalues of the local structure tensor. Furthermore, they allow to retrieve the optimal size for analyzing the neighborhood as well as the privileged local dimension (linear, planar, or volumetric). Besides, several variations of each step of the ICP process are proposed and analyzed by introducing these features. These variations are then compared on real datasets, as well with the original algorithm in order to retrieve the most efficient algorithm for the whole process. Finally, the method is successfully applied to various 3D lidar point clouds both from airborne, terrestrial and mobile mapping systems.},
+doi={10.5194/isprsannals-I-3-111-2012}
+}
+@inproceedings{Gross:2006,
+crossref={Forstner:2006},
+author={Hermann Gross and Ulrich Thoennessen},
+title={Extraction of Lines from Laser Point Clouds},
+booktitle={Symposium of ISPRS Commission III Photogrammetric Computer Vision PCV '06},
+year={2006},
+month=sep,
+pages={86--91},
+abstract={Three dimensional building models have become important during the past for various applications like urban planning, enhanced navigation or visualization of touristy or historic objects. 3D models can increase the understanding and explanation of complex urban scenes and support decision processes. A 3D model of the urban environment gives the possibility for simulation and rehearsal, to {\textquoteleft}{\textquoteleft}fly through{\textquoteright}{\textquoteright} the local urban terrain on different paths, and to visualize the scene from different viewpoints. The automatic generation of 3D models using Laser height data is one challenge for actual research.In many proposals for 3D model generation the process is starting by extraction of the border lines of man made objects. In our paper we are presenting an automatic generation method for lines based on the analysis of the 3D point clouds in the Laser height data. For each 3D point additional features considering the neighborhood are calculated. Invariance with respect to position, scale and rotation is achieved. Investigations concerning the required point density to get reliable results are accomplished. Comparing the new features with analytical results of typical point configurations provide discriminating features to select points which may belong to a line. Assembling these points to lines the borders of the objects were achieved. First results are presented.Possibilities for the enhancement of the calculation of the covariance matrix by including the intensity of the Laser signal and a refined consideration of the neighborhood are discussed.}
+}
+@inproceedings{Gumhold:2001,
+crossref={IMR:2010},
+author={Gumhold, S. and Wang, X. and Macleod, R.},
+title={Feature Extraction from Point Clouds},
+booktitle={Proceedings of the 10th International Meshing Roundtable},
+year={2001},
+month=oct,
+pages={293--305},
+abstract={This paper describes a new method to extract feature lines directly from a surface point cloud. No surface reconstruction is needed in advance, only the inexpensive computation of a neighbor graph connecting nearby points. The feature extraction is performed in two stages. The 
+fist stage consists of assigning a penalty weight to each point  that indicates the unlikelihood that the point is part of a feature and assigning these penalty weights to the edges of a neighbor graph. Extracting a sub-graph of the neighbor graph that minimizes the edge penalty weights then produces a set of feature patterns. The second stage is especially useful for noisy data. It recovers feature lines and junctions by fitting wedges to the crease lines and corners to the junctions. As the method works on the local neighbor graph only, it is fast and automatically adapts to the sampling resolution. This makes the approach ideal as a pre-processing step in mesh generation.}
+}
+@inproceedings{Isenburg:2011,
+crossref={ELMF:2011},
+author={Martin Isenburg},
+title={{LASzip}: lossless compression of {LiDAR} data},
+booktitle={Proceedings of the 2011 European LiDAR Mapping Forum},
+year={2011},
+month=nov,
+pages={1--9}
+}
+@article{Kalogerakis:2009,
+author={Evangelos Kalogerakis and Derek Nowrouzezahrai and Patricio Simari and Karan Singh},
+editor={Xiaoping Qian and Imre Horv{\'a}th},
+title={Extracting lines of curvature from noisy point clouds},
+journal={Computer-Aided Design},
+year={2009},
+publisher={Elsevier},
+address={New York, NY, USA},
+volume={41},
+number={4},
+pages={282--292},
+abstract={We present a robust framework for extracting lines of curvature from point clouds. First, we show a novel approach to denoising the input point cloud using robust statistical estimates of surface normal and curvature which automatically rejects outliers and corrects points by energy minimization. Then the lines of curvature are constructed on the point cloud with controllable density. Our approach is applicable to surfaces of arbitrary genus, with or without boundaries, and is statistically robust to noise and outliers while preserving sharp surface features. We show our approach to be effective over a range of synthetic and real-world input datasets with varying amounts of noise and outliers. The extraction of curvature information can benefit many applications in CAD, computer vision and graphics for point cloud shape analysis, recognition and segmentation. Here, we show the possibility of using the lines of curvature for feature-preserving mesh construction directly from noisy point clouds.},
+issn={0010-4485},
+doi={10.1016/j.cad.2008.12.004}
+}
+@inproceedings{Konig:2009,
+crossref={Magnor:2009},
+author={S\"{o}ren K\"{o}nig and Stefan Gumhold},
+title={Consistent Propagation of Normal Orientations in Point Clouds},
+booktitle={Proceedings of the Vision, Modeling, and Visualization Workshop                2009, November 16-18, 2009, Braunschweig, Germany},
+year={2009},
+pages={83--92},
+abstract={Many algorithms for point cloud processing especially surface reconstruction rely on normal information available at each point. Normal directions are typically taken from a local tangent plane approximation which is obtained by fitting a surface model to the neighboring point samples. While the direction can be estimated locally, finding a consistent normal orientation over the whole surface is only possible in a global context. Existing methods for this problem can be classified into volumetric and propagation based approaches. Volumetric methods are trying to divide the space into inside and outside regions which is often complicated to implement and have problems with open surfaces and large holes. Propagation based methods can deal with open surfaces but often fail on sharp features. This paper analyses the behavior of surficial orientation methods, gives a better understanding of the underlying model assumptions of existing techniques and proposes a novel and improved propagation heuristic.}
+}
+@article{Mitra:2004,
+author={Niloy J. Mitra and An Nguyen and Leonidas Guibas},
+title={Estimating Surface Normals in Noisy Point Cloud Data},
+journal={International Journal of Computational Geometry \& Applications},
+year={2004},
+volume={14},
+number={04n05},
+pages={261--276},
+abstract={In this paper we describe and analyze a method based on local least square fitting for estimating the normals at all sample points of a point cloud data (PCD) set, in the presence of noise. We study the effects of neighborhood size, curvature, sampling density, and noise on the normal estimation when the PCD is sampled from a smooth curve in ?{\texttwosuperior} or a smooth surface in ?{\textthreesuperior}, and noise is added. The analysis allows us to find the optimal neighborhood size using other local information from the PCD. Experimental results are also provided.},
+publisher={World Scientific Publishing Company},
+issn={0218-1959},
+doi={10.1142/S0218195904001470}
+}
+
+@inproceedings{Moenning:2003,
+crossref={VIIP:2003},
+author={Carsten Moenning and Neil A. Dodgson},
+title={A new point cloud simplification algorithm},
+booktitle={Proceedings of the 3rd IASTED International Conference on Visualization, Imaging, and Image Processing, September 8-10, 2003, Benalmadena, Spain},
+year={2003}
+}
+
+@incollection{Parrish:2012,
+crossref={Renslow:2012},
+author={Christopher E. Parrish},
+chapter={2.4},
+title={Full-Waveform Lidar},
+booktitle={Manual of Airborne Topographic Lidar},
+pages={54--61},
+year={2012}
+}
+
+@inproceedings{Pauly:2002,
+crossref={Pfister:2002},
+author = {Pauly, Mark and Gross, Markus and Kobbelt, Leif P.},
+title = {Efficient Simplification of Point-sampled Surfaces},
+booktitle = {Proceedings of the Conference on Visualization '02},
+series = {VIS '02},
+year = {2002},
+isbn = {0-7803-7498-3},
+location = {Boston, Massachusetts},
+pages = {163--170},
+numpages = {8},
+url = {http://dl.acm.org/citation.cfm?id=602099.602123},
+acmid = {602123},
+publisher = {IEEE Computer Society},
+address = {Washington, DC, USA},
+}
+
+@article{Sankaranarayanan:2007,
+author={Jagan Sankaranarayanan and Hanan Samet and Amitabh Varshney},
+title={A fast all nearest neighbor algorithm for applications involving large point-clouds},
+journal={Computers \& Graphics},
+year={2007},
+volume={31},
+number={2},
+pages={157--174},
+abstract={Algorithms that use point-cloud models make heavy use of the neighborhoods of the points. These neighborhoods are used to compute the surface normals for each point, mollification, and noise removal. All of these primitive operations require the seemingly repetitive process of finding the \textit{k} nearest neighbors (\textit{kNN}s) of each point. These algorithms are primarily designed to run in main memory. However, rapid advances in scanning technologies have made available point-cloud models that are too large to fit in the main memory of a computer. This calls for more efficient methods of computing the \textit{kNN}s of a large collection of points many of which are already in close proximity. A fast \textit{kNN} algorithm is presented that makes use of the locality of successive points whose \textit{k} nearest neighbors are sought to reduce significantly the time needed to compute the neighborhood needed for the primitive operation as well as enable it to operate in an environment where the data is on disk. Results of experiments demonstrate an \textit{order} of magnitude improvement in the \textit{time} to perform the algorithm and \textit{several orders} of magnitude improvement in \textit{work efficiency} when compared with several prominent existing methods.},
+issn={0097-8493},
+doi={10.1016/j.cag.2006.11.011}
+}
+
+@article{Schroeder:1992,
+author = {Schroeder, William J. and Zarge, Jonathan A. and Lorensen, William E.},
+title = {Decimation of Triangle Meshes},
+journal = {SIGGRAPH Comput. Graph.},
+issue_date = {July 1992},
+volume = {26},
+number = {2},
+month = jul,
+year = {1992},
+issn = {0097-8930},
+pages = {65--70},
+numpages = {6},
+url = {http://doi.acm.org/10.1145/142920.134010},
+doi = {10.1145/142920.134010},
+acmid = {134010},
+publisher = {ACM},
+address = {New York, NY, USA},
+keywords = {computer graphics, geometric modeling, medical imaging, terrain modeling, volume modeling}
+} 
+
+@article{Sleator:1985,
+ author = {Sleator, Daniel Dominic and Tarjan, Robert Endre},
+ title = {Self-adjusting Binary Search Trees},
+ journal = {J. ACM},
+ issue_date = {July 1985},
+ volume = {32},
+ number = {3},
+ month = jul,
+ year = {1985},
+ issn = {0004-5411},
+ pages = {652--686},
+ numpages = {35},
+ url = {http://doi.acm.org/10.1145/3828.3835},
+ doi = {10.1145/3828.3835},
+ acmid = {3835},
+ publisher = {ACM},
+ address = {New York, NY, USA}
+}
+
+@incollection{Smith:2012,
+crossref={Renslow:2012},
+author={Philip W. Smith},
+chapter={2.6},
+title={Geiger Mode Lidar},
+booktitle={Manual of Airborne Topographic Lidar},
+pages={91--97},
+year={2012}
+}
+@inproceedings{West:2004,
+crossref={Firooz:2004},
+author={Karen F. West and Brian N. Webb and James R. Lersch and Steven Pothier and Joseph M. Triscari and A. Evan Iverson},
+title={Context-driven automated target detection in 3{D} data},
+booktitle={Proceeding of SPIE},
+year={2004},
+pages={133--143},
+abstract={This paper summarizes a system, and its component algorithms, for context-driven target vehicle detection in 3-D data that was developed under the Defense Advanced Research Projects Agency (DARPA) Exploitation of 3-D Data (E3D) Program. In order to determine the power of shape and geometry for the extraction of context objects and the detection of targets, our algorithm research and development concentrated on the geometric aspects of the problem and did not utilize intensity information. Processing begins with extraction of context information and initial target detection at reduced resolution, followed by a detailed, full-resolution analysis of candidate targets. Our reduced-resolution processing includes a probabilistic procedure for finding the ground that is effective even in rough terrain; a hierarchical, graph-based approach for the extraction of context objects and potential vehicle hide sites; and a target detection process that is driven by context-object and hide-site locations. Full-resolution processing includes statistical false alarm reduction and decoy mitigation. When results are available from previously collected data, we also perform object-level change detection, which affects the probabilities that objects are context objects or targets. Results are presented for both synthetic and collected LADAR data.},
+doi={10.1117/12.542536}
+}
+@inproceedings{Yianilos:1993,
+crossref={SODA:1993},
+author={Peter N. Yianilos},
+title={Data Structures and Algorithms for Nearest Neighbor Search in General Metric Spaces},
+booktitle={Proceedings of the Fourth Annual ACM-SIAM Symposium on Discrete Algorithms},
+series={SODA '93},
+year={1993},
+pages={311--321},
+abstract={We consider the computational problem of finding nearest neighbors in general metric spaces. Of particular interest are spaces that may not be conveniently embedded or approximated in Euclidian space, or where the dimensionality of a Euclidian representation is very high.Also relevant are high-dimensional Euclidian settings in which the distribution of data is in some sense of lower dimension and embedded in the space.The \textit{vp-tree} (vantage point tree) is introduced in several forms, together with associated algorithms, as an improved method for these difficult search problems. Tree construction executes in \textit{O}(\textit{n}log(\textit{n}) time, and search is under certain circumstances and in the limit, \textit{O}(log(\textit{n})) expected time. The theoretical basis for this approach is developed and the results of several experiments are reported. In Euclidian cases, kd-tree performance is compared.}
+}
+
+@incollection{Young:2012,
+crossref={Renslow:2012},
+author={Jamie Young},
+chapter={2.2},
+title={Key Elements of {ALS} Technology},
+booktitle={Manual of Airborne Topographic Lidar},
+pages={17--37},
+year={2012}
+}
+
+@article{Yu:2010,
+author="Yu, Zhiwen
+and Wong, Hau-San
+and Peng, Hong
+and Ma, Qianli",
+editor="Horv{\'a}th, Imre
+and Lee, Kunwoo
+and Patrikalakis, Nicholas M.",
+title="{ASM}: An adaptive simplification method for 3D point-based models",
+journal="Computer-Aided Design",
+year="2010",
+publisher="Elsevier Science",
+address="New York, NY, USA",
+volume="42",
+number="7",
+pages="598--612",
+optkeywords="Clustering",
+optkeywords="Model simplification",
+optkeywords="Point clouds",
+abstract="Due to the popularity of computer games and computer-animated movies, 3{D} models are fast becoming an important element in multimedia applications. In addition to the conventional polygonal representation for these models, the direct adoption of the original scanned 3{D} point set for model representation is recently gaining more and more attention due to the possibility of bypassing the time consuming mesh construction stage, and various approaches have been proposed for directly processing point-based models. In particular, the design of a simplification approach which can be directly applied to 3{D} point-based models to reduce their size is important for applications such as 3{D} model transmission and archival. Given a point-based 3{D} model which is defined by a point set $P$ ($P = \left\{\bm{p}_{a} \in R^{3}\right\}$) and a desired reduced number of output samples $n_{s}$, the simplification approach finds a point set $P_{s}$ which \begin{inparaenum}[(i)]\item satisfies $\lvert P_{s} \rvert = n_{s}$ ($\lvert P_{s} \rvert$ being the cardinality of $P_{s}$ ) and \item minimizes the difference of the corresponding surface $S_{s}$ (defined by $P_{s}$ ) and the original surface $S$ (defined by $P$)\end{inparaenum}. Although a number of previous approaches has been proposed for simplification, most of them \begin{inparaenum}[(i)]\item do not focus on point-based 3{D} models, \item do not consider efficiency, quality and generality together and \item do not consider the distribution of the output samples\end{inparaenum}. In this paper, we propose an Adaptive Simplification Method ({ASM}) which is an efficient technique for simplifying point-based complex 3{D} models. Specifically, the {ASM} consists of three parts: a hierarchical cluster tree structure, the specification of simplification criteria and an optimization process. 
The {ASM} achieves a low computation time by clustering the points locally based on the preservation of geometric characteristics. We analyze the performance of the {ASM} and show that it outperforms most of the current state-of-the-art methods in terms of efficiency, quality and generality.",
+issn="0010-4485",
+doi="10.1016/j.cad.2010.03.003",
+opturl="http://linkinghub.elsevier.com/retrieve/pii/S0010448510000588"
+}
+
+@book{Renslow:2012,
+editor={Michael S. Renslow},
+title={Manual of Airborne Topographic Lidar},
+year={2012},
+publisher={American Society for Photogrammetry and Remote Sensing},
+address={Bethesda, MD, USA},
+abstract={Most geospatial practitioners agree that lidar (light detection and ranging) is the most significant mapping technology to emerge in the last several years. At first, processing lidar data to map the bare earth appeared to be the prime use of this technology, but having millions or billions of natively 3D, georeferenced points has fundamentally changed the perception of the service providers and the user community. Professionals in forestry, civil engineering, gology, geography, wetlands analysis, natural disaster response, flood plain mapping, and urban planning have embraced lidar as their data source of choice.The ASPRS \textit{Manual of Airborne Topographic Lidar} covers all the relevant topics relating to the science behind lidar systems, mission planning, data collection and management, quality control/quality assurance, and product development. Selected topics are discussed in-depth for the Global Navigation Satellite System, Full Waveform Lidar, Digital Terrain Modeling using GIS, Rotary-Wing and Fixed-Wing Installations, Calibration, Flood Prone Area Mapping, Hydro-enforcement, Building Feature Extraction, Transportation Engineering, Natural Hazards Mapping, and Airport Surveying.Lidar experts from industry and academia have contributed to ten chapters and four appendices that serve as a reference book to document how this technology developed, and introduce new sensors and applications. There is emphasis on {\textquoteleft}{\textquoteleft}best practices{\textquoteright}{\textquoteright} and real-world solutions for the lidar data and product users. Also, since quality control and quality assurance are so critical to a successful data collection, processing, and validation, the reader will find several subchapters to support QA/QC throughout the Manual.New lidar technologies are presented for 3D Flash Lidar and Geiger-mode lidar. 
The performance of these systems is explained in detail with examples of applications and processing lidar data in real-time for on-site decision making.The contributing authors are leading experts who are sharing the results of many years of work pioneering lidar technology and taking it to the forefront of geospatial data and analysis. Their experiences and solutions will be of great benefit to professionals, researchers, technologists, and students.},
+isbn={1-57083-097-5}
+}
+
+@proceedings{Alexa:2005,
+editor = {Marc Alexa and Szymon Rusinkiewicz and Mark Pauly and Matthias Zwicker},
+title = {{Symposium on Point-Based Graphics}},
+year = {2005},
+isbn = {3-905673-20-7},
+issn = {1811-7813},
+address = {Stony Brook, NY, USA},
+organization = {Eurographics Association},
+publisher = {Eurographics Association}
+}
+@proceedings{ELMF:2011,
+organization={Intelligent Exhibitions Limited},
+title={Proceedings of the 2011 European LiDAR Mapping Forum},
+year={2011},
+address={Salzburg, Austria},
+month=nov
+}
+@proceedings{Firooz:2004,
+editor={Firooz A. Sadjadi},
+title={Proceeding of SPIE},
+year={2004},
+publisher={SPIE},
+organization={SPIE},
+address={Bellingham, WA},
+volume={5426},
+issn={0277-786X}
+}
+@proceedings{Forstner:2006,
+editor={Wolfgang F{\"o}rstner and Richard Steffen},
+title={Symposium of ISPRS Commission III Photogrammetric Computer Vision PCV '06},
+organization={ISPRS},
+publisher={Copernicus Publications},
+address={G\"{o}ttingen, Germany},
+series={ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
+year={2006},
+month=sep,
+volume={XXXVI-3},
+issn={1682-1750}
+}
+@proceedings{Hay:2008,
+editor={Geoffrey J. Hay and Thomas Blaschke and Danielle Marceau},
+title={{GEOBIA} 2008 - Pixels, Objects, Intelligence
+{GEO}graphic Object Based Image Analysis for the 21st Century, August 5-8, 2008, Calgary, Alberta, Canada},
+series={ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
+year={2008},
+month=aug,
+organization={ISPRS},
+publisher={University of Calgary, Calgary, Alberta, Canada},
+address={Calgary, Alberta, Canada},
+volume={XXXVIII-4/C1},
+issn={1682-1777}
+}
+
+@proceedings{IMR:2010,
+title={Proceedings of the 10th International Meshing Roundtable},
+year={2001},
+month=oct,
+organization={Sandia National Laboratories},
+publisher={Sandia National Laboratories},
+address={Albuquerque, NM, USA}
+}
+@proceedings{Lichti:2011,
+editor={Derek D. Lichti and Ayman F. Habib},
+title={WG V/3, I/3, I/2, III/2, III/4, VII/7, V/1 ISPRS Workshop Laser Scanning 2011},
+year={2011},
+eissn={2194-9034},
+issn={1682-1777},
+organization={ISPRS},
+publisher={Copernicus Publications},
+address={G\"{o}ttingen, Germany},
+series={ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
+volume={XXXVIII-5/W12}
+}
+@proceedings{Magnor:2009,
+editor={Marcus A. Magnor and Bodo Rosenhahn and Holger Theisel},
+title={Proceedings of the Vision, Modeling, and Visualization Workshop                2009, November 16-18, 2009, Braunschweig, Germany},
+year={2009},
+organization={Eurographics Association},
+publisher={Institut f\"{u}r Simulation und Graphik},
+address={Magdeburg, Germany},
+isbn={978-3-9804874-8-1}
+}
+
+@proceedings{Pfister:2002,
+title = {VIS '02: Proceedings of the Conference on Visualization '02},
+year = {2002},
+isbn = {0-7803-7498-3},
+location = {Boston, Massachusetts},
+publisher = {IEEE Computer Society},
+address = {Washington, DC, USA}
+} 
+
+@proceedings{Shortis:2012,
+editor={Mark Shortis and Nicolas Paparoditis and Cl\'{e}ment Mallet},
+title={XXII ISPRS Congress, Technical Commission III},
+series={ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
+volume={I-3},
+year={2012},
+organization={ISPRS},
+publisher={Copernicus Publications},
+address={G\"{o}ttingen, Germany},
+issn={2194-9050}
+}
+@proceedings{SODA:1993,
+organization={Association for Computing Machinery and Society for Industrial and Applied Mathematics},
+title={Proceedings of the Fourth Annual ACM-SIAM Symposium on Discrete Algorithms},
+year={1993},
+publisher={Society for Industrial and Applied Mathematics},
+address={Philadelphia, PA, USA},
+isbn={9780898713138}
+}
+
+@proceedings{VIIP:2003,
+editor={M. H. Hamza},
+organization={International Association for Science and Technology for Development},
+title={Proceedings of the 3rd IASTED International Conference on Visualization, Imaging, and Image Processing, September 8-10, 2003, Benalmadena, Spain},
+volume={2},
+year={2003},
+publisher={ACTA Press},
+address={Anaheim; Calgary},
+issn={1482-7921},
+isbn={0-88986-382-2}
+}
Index: sparse_sample_icip2014/spconf.sty
===================================================================
--- sparse_sample_icip2014/spconf.sty	(revision 10)
+++ sparse_sample_icip2014/spconf.sty	(revision 10)
@@ -0,0 +1,252 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% File:     spconf.sty          (LaTeX Document style option "spconf")
+%
+% Usage:    \documentclass{article}
+%           \usepackage{spconf}
+%
+%           Or for LaTeX 2.09:
+% Usage:    \documentstyle[...,spconf,...]{article}
+%
+% Purpose:
+%
+% Style file for Signal Processing Society Conferences (ICASSP, ICIP).
+% Features:
+%    - correct page size (175mm x 226mm)
+%    - twocolumn format
+%    - boldfaced, numbered, and centered section headings
+%    - correct subsection and subsubsection headings
+%    - use \title{xx} for title, will be typeset all uppercase
+%    - use \name{xx} for author name(s) only, will be typeset in italics
+%    - use \address{xx} for one address of all authors
+%    - use \twoauthors{author1}{address1}{author2}{address2}
+%         for two (or more) authors with two separate addresses
+%    - note: no need for \author nor \date
+%    - optional: can use \thanks{xx} within \name or \twoauthors,
+%         asterisk is not printed after name nor in footnote
+%    - optional: can use \sthanks{xx} after each name within \name or
+%         \twoauthors if different thanks for each author,
+%         footnote symbol will appear for each name and footnote
+%    - optional: use \ninept to typeset text in 9 pt; default is 10pt.
+%
+% Example of use for one or more authors at a common address and
+%    common support. For distinct support acknowledgments,
+%    use \sthanks{xx} after each name.
+%
+%                 \documentclass{article}
+%                 \usepackage{spconf}
+%                 \title{Title of the paper}
+%                 \name{George P. Burdell and John Q. Professor
+%                       \thanks{This work was supported by...}}
+%                 \address{Common address, department \\
+%                          City, etc \\
+%                          optional e-mail address}
+%
+%                 \begin{document}
+%  OPTIONAL -->   \ninept            <-- OPTIONAL, for nine pt only
+%                 \maketitle
+%                 \begin{abstract}
+%                 This is the abstract for my paper.
+%                 \end{abstract}
+%                         .
+%                 Insert text of paper
+%                         .
+%                 \end{document}
+%
+% Example of use for two authors at two distinct addresses with only
+%    one support acknowledgment. For distinct support acknowledgments,
+%    use \sthanks{xx} after each name.
+%
+%                 \documentclass{article}
+%                 \usepackage{spconf}
+%                 \title{Title of the paper}
+%                 \twoauthors{John Doe
+%                       \thanks{This work was supported by...}}
+%                            {Doe's address, department \\
+%                             City, etc \\
+%                             optional e-mail address}
+%                            {Judy Smith}
+%                            {Smith's address, department \\
+%                             City, etc \\
+%                             optional e-mail address}
+%
+%                 \begin{document}
+%  OPTIONAL -->   \ninept            <-- OPTIONAL, for nine pt only
+%                 \maketitle
+%                 \begin{abstract}
+%                 This is the abstract for my paper.
+%                 \end{abstract}
+%                         .
+%                 Insert text of paper
+%                         .
+%                 \end{document}
+%
+% Preprint Option (Only for preprints, not for submissions!):
+%    - can create a preprint titlepage footer by using the
+%         "preprint" option with the \usepackage{spconf} command
+%    - use \copyrightnotice{\copyright xx} for copyright information
+%    - use \toappear{To appear in xx} for publication name
+% Example of preprint use:
+%
+%                 \documentclass{article}
+%                 \usepackage[preprint]{spconf}
+%                         .
+%                 \copyrightnotice{\copyright\ IEEE 2000}
+%                 \toappear{To appear in {\it Proc.\ ICASSP2000,
+%                    June 05-09, 2000, Istanbul, Turkey}}
+%
+%
+% PLEASE REPORT ANY BUGS
+%
+% Author:  Stephen Martucci  -- stephen.martucci@ieee.org
+%
+% Date:    3 May 2000
+%
+% Updated: Lance Cotton, Ulf-Dietrich Braumann, 11 May 2006
+% Change:  Added keywords/Index Terms section
+% Change:  Added \emergencystretch=11pt, Lance Cotton, 26-Sept-2007
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% These commands change default text fonts to the scalable PostScript
+% fonts Times, Helvetica, and Courier. However, they do not change
+% the default math fonts. After conversion to PDF, text will look good
+% at any scale but math symbols and equations may not.
+% If instead you use the PostScript Type 1 implementation of the
+% Computer Modern fonts from the American Mathematical Society, which
+% will make all fonts (text and math) scalable, comment out the
+% following three lines. Those fonts use the same metrics as the Knuth
+% Computer Modern fonts and therefore no font redefinition is needed.
+\renewcommand{\sfdefault}{phv}
+\renewcommand{\rmdefault}{ptm}
+\renewcommand{\ttdefault}{pcr}
+
+%\oddsidemargin  -0.31in
+%\evensidemargin -0.31in
+\oddsidemargin  -6.2truemm
+\evensidemargin -6.2truemm
+
+\topmargin 0truept
+\headheight 0truept
+\headsep 0truept
+%\footheight 0truept
+%\footskip 0truept
+\textheight 229truemm
+\textwidth 178truemm
+
+\twocolumn
+\columnsep 6truemm
+\pagestyle{empty}
+
+\emergencystretch=11pt
+
+\def\ninept{\def\baselinestretch{.95}\let\normalsize\small\normalsize}
+
+\def\maketitle{\par
+ \begingroup
+ \def\thefootnote{}
+ \def\@makefnmark{\hbox
+ {$^{\@thefnmark}$\hss}}
+ \if@twocolumn
+ \twocolumn[\@maketitle]
+ \else \newpage
+ \global\@topnum\z@ \@maketitle \fi\@thanks
+ \endgroup
+ \setcounter{footnote}{0}
+ \let\maketitle\relax
+ \let\@maketitle\relax
+ \gdef\thefootnote{\arabic{footnote}}\gdef\@@savethanks{}%
+ \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}
+
+\def\@maketitle{\newpage
+ \null
+ \vskip 2em \begin{center}
+ {\large \bf \@title \par} \vskip 1.5em {\large \lineskip .5em
+\begin{tabular}[t]{c}\@name \\ \@address
+ \end{tabular}\par} \end{center}
+ \par
+ \vskip 1.5em}
+
+\def\title#1{\gdef\@title{\uppercase{#1}}}
+\def\name#1{\gdef\@name{{\em #1}\\}}
+\def\address#1{\gdef\@address{#1}}
+\gdef\@title{\uppercase{title of paper}}
+\gdef\@name{{\em Name of author}\\}
+\gdef\@address{Address - Line 1 \\
+               Address - Line 2 \\
+               Address - Line 3}
+
+\let\@@savethanks\thanks
+\def\thanks#1{\gdef\thefootnote{}\@@savethanks{#1}}
+\def\sthanks#1{\gdef\thefootnote{\fnsymbol{footnote}}\@@savethanks{#1}}
+
+\def\twoauthors#1#2#3#4{\gdef\@address{}
+   \gdef\@name{\begin{tabular}{@{}c@{}}
+        {\em #1} \\ \\
+        #2\relax
+   \end{tabular}\hskip 1in\begin{tabular}{@{}c@{}}
+        {\em #3} \\ \\
+        #4\relax
+\end{tabular}}}
+
+\def\@sect#1#2#3#4#5#6[#7]#8{
+   \refstepcounter{#1}\edef\@svsec{\csname the#1\endcsname.\hskip 0.6em}
+       \begingroup \ifnum #2=1\bf\centering
+          {\interlinepenalty \@M
+             \@svsec\uppercase{#8}\par}\else\ifnum #2=2\bf
+          \noindent{\interlinepenalty \@M \@svsec #8\par}\else\it
+          \noindent{\interlinepenalty \@M
+             \@svsec #8\par}\fi\fi\endgroup
+       \csname #1mark\endcsname{#7}\addcontentsline
+         {toc}{#1}{\protect\numberline{\csname the#1\endcsname} #7}
+     \@tempskipa #5\relax
+     \@xsect{\@tempskipa}}
+
+\def\abstract{\begin{center}
+{\bf ABSTRACT\vspace{-.5em}\vspace{0pt}}
+\end{center}}
+\def\endabstract{\par}
+
+% Keyword section, added by Lance Cotton, adapted from IEEEtrans, corrected by Ulf-Dietrich Braumann
+\def\keywords{\vspace{.5em}
+{\bfseries\textit{Index Terms}---\,\relax%
+}}
+\def\endkeywords{\par} 
+
+\def\copyrightnotice#1{\gdef\@copyrightnotice{#1}}
+\let\@copyrightnotice\relax
+\def\toappear#1{\gdef\@toappear{#1}}\let\@toappear\relax
+
+\newif\if@preprint\@preprintfalse
+\@namedef{ds@preprint}{\global\@preprinttrue}
+\@options
+\def\ps@preprint{\def\mypage{}\let\@mkboth\@gobbletwo\def\@oddhead{}
+  \def\@oddfoot{\rlap{\@toappear}\hfil\mypage\hfil
+    \llap{\@copyrightnotice}
+    \gdef\mypage{\thepage}\gdef\@toappear{}\gdef\@copyrightnotice{}}}
+
+\if@preprint\ps@preprint
+\else\ps@empty\flushbottom\fi
+
+\def\thebibliography#1{\section{References}\list
+ {[\arabic{enumi}]}{\settowidth\labelwidth{[#1]}\leftmargin\labelwidth
+ \advance\leftmargin\labelsep
+ \usecounter{enumi}}
+ \def\newblock{\hskip .11em plus .33em minus .07em}
+ \sloppy\clubpenalty4000\widowpenalty4000
+ \sfcode`\.=1000\relax}
+\let\endthebibliography=\endlist
+
+\long\def\@makecaption#1#2{
+ \vskip 10pt
+ \setbox\@tempboxa\hbox{#1. #2}
+ \ifdim \wd\@tempboxa >\hsize #1. #2\par \else \hbox
+to\hsize{\hfil\box\@tempboxa\hfil}
+ \fi}
+
+\def\fnum@figure{{\bf Fig.\ \thefigure}}
+\def\fnum@table{{\bf Table \thetable}}
+
+\flushbottom
+
+%%%% EOF
