<?xml version="1.0" encoding="UTF-8"?>
<dublin_core schema="dc">
<dcvalue element="contributor" qualifier="author">Jang, Jae Won</dcvalue>
<dcvalue element="contributor" qualifier="author">Kwon, Young Chan</dcvalue>
<dcvalue element="contributor" qualifier="author">Lim, Hwasup</dcvalue>
<dcvalue element="contributor" qualifier="author">Choi, Ouk</dcvalue>
<dcvalue element="date" qualifier="accessioned">2024-01-19T18:33:29Z</dcvalue>
<dcvalue element="date" qualifier="available">2024-01-19T18:33:29Z</dcvalue>
<dcvalue element="date" qualifier="created">2021-09-05</dcvalue>
<dcvalue element="date" qualifier="issued">2019-12</dcvalue>
<dcvalue element="identifier" qualifier="issn">2169-3536</dcvalue>
<dcvalue element="identifier" qualifier="uri">https://pubs.kist.re.kr/handle/201004/119253</dcvalue>
<dcvalue element="description" qualifier="abstract">Three-dimensional human shape reconstruction is important in many applications, such as virtual or augmented reality (VR/AR), virtual clothing fitting, and healthcare. In this paper, we propose a learning-based method for reconstructing a whole-body point cloud from a single front-view human-depth image. Because actual depth images typically suffer from noise and missing data, an accurate point cloud cannot be reasonably obtained by simply predicting a back-view depth image. To solve this problem, we propose to use convolutional neural networks that not only predict a back-view depth image but also refine the input front-view depth image. To train the networks, we propose a carefully designed method for generating synthetic but realistic human-depth images with noise and missing data. Experiments show that the proposed method is effective for obtaining seamless whole-body point clouds. In addition, the experiments show that the networks trained on the synthetic depth images are ready for application to actual depth images.</dcvalue>
<dcvalue element="language" qualifier="none">English</dcvalue>
<dcvalue element="publisher" qualifier="none">IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC</dcvalue>
<dcvalue element="subject" qualifier="none">HUMAN BODIES</dcvalue>
<dcvalue element="subject" qualifier="none">RECONSTRUCTION</dcvalue>
<dcvalue element="subject" qualifier="none">SHAPE</dcvalue>
<dcvalue element="title" qualifier="none">CNN-Based Denoising, Completion, and Prediction of Whole-Body Human-Depth Images</dcvalue>
<dcvalue element="type" qualifier="none">Article</dcvalue>
<dcvalue element="identifier" qualifier="doi">10.1109/ACCESS.2019.2957862</dcvalue>
<dcvalue element="description" qualifier="journalClass">1</dcvalue>
<dcvalue element="identifier" qualifier="bibliographicCitation">IEEE ACCESS, v.7, pp.175842 - 175856</dcvalue>
<dcvalue element="citation" qualifier="title">IEEE&#x20;ACCESS</dcvalue>
<dcvalue element="citation" qualifier="volume">7</dcvalue>
<dcvalue element="citation" qualifier="startPage">175842</dcvalue>
<dcvalue element="citation" qualifier="endPage">175856</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scie</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scopus</dcvalue>
<dcvalue element="identifier" qualifier="wosid">000509399500055</dcvalue>
<dcvalue element="identifier" qualifier="scopusid">2-s2.0-85076969995</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Computer Science, Information Systems</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Engineering, Electrical &amp; Electronic</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Telecommunications</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Computer&#x20;Science</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Engineering</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Telecommunications</dcvalue>
<dcvalue element="type" qualifier="docType">Article</dcvalue>
<dcvalue element="subject" qualifier="keywordPlus">HUMAN BODIES</dcvalue>
<dcvalue element="subject" qualifier="keywordPlus">RECONSTRUCTION</dcvalue>
<dcvalue element="subject" qualifier="keywordPlus">SHAPE</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">3D human shape</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">convolutional neural networks</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">deep learning</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">single depth image</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">synthetic data generation</dcvalue>
</dublin_core>
