<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!-- Dublin Core metadata record (DSpace simple-archive style, schema "dc")
     describing the ACCV 2022 conference paper "3D-C2FT".
     NOTE(review): values below are byte-identical after parsing to the prior
     serialization; only numeric character references (&#x20;, &#x2F;) were
     replaced with literal characters and child elements were indented. -->
<dublin_core schema="dc">
  <dcvalue element="contributor" qualifier="author">Tiong, Leslie Ching Ow</dcvalue>
  <dcvalue element="contributor" qualifier="author">Sigmund, Dick</dcvalue>
  <dcvalue element="contributor" qualifier="author">Teoh, Andrew Beng Jin</dcvalue>
  <dcvalue element="date" qualifier="accessioned">2024-01-12T02:47:51Z</dcvalue>
  <dcvalue element="date" qualifier="available">2024-01-12T02:47:51Z</dcvalue>
  <dcvalue element="date" qualifier="created">2023-06-29</dcvalue>
  <dcvalue element="date" qualifier="issued">2022-12</dcvalue>
  <dcvalue element="identifier" qualifier="issn">0302-9743</dcvalue>
  <dcvalue element="identifier" qualifier="uri">https://pubs.kist.re.kr/handle/201004/76518</dcvalue>
  <dcvalue element="description" qualifier="abstract">Recently, the transformer model has been successfully employed for the multi-view 3D reconstruction problem. However, challenges remain in designing an attention mechanism to explore the multi-view features and exploit their relations for reinforcing the encoding-decoding modules. This paper proposes a new model, namely 3D coarse-to-fine transformer (3D-C2FT), by introducing a novel coarse-to-fine (C2F) attention mechanism for encoding multi-view features and rectifying defective voxel-based 3D objects. C2F attention mechanism enables the model to learn multi-view information flow and synthesize 3D surface correction in a coarse to fine-grained manner. The proposed model is evaluated by ShapeNet and Multi-view Real-life voxel-based datasets. Experimental results show that 3D-C2FT achieves notable results and outperforms several competing models on these datasets.</dcvalue>
  <dcvalue element="language" qualifier="none">English</dcvalue>
  <dcvalue element="publisher" qualifier="none">SPRINGER INTERNATIONAL PUBLISHING AG</dcvalue>
  <dcvalue element="title" qualifier="none">3D-C2FT: Coarse-to-Fine Transformer for Multi-view 3D Reconstruction</dcvalue>
  <dcvalue element="type" qualifier="none">Conference</dcvalue>
  <dcvalue element="identifier" qualifier="doi">10.1007/978-3-031-26319-4_13</dcvalue>
  <dcvalue element="description" qualifier="journalClass">1</dcvalue>
  <dcvalue element="identifier" qualifier="bibliographicCitation">16th Asian Conference on Computer Vision (ACCV), pp.211 - 227</dcvalue>
  <dcvalue element="citation" qualifier="title">16th Asian Conference on Computer Vision (ACCV)</dcvalue>
  <dcvalue element="citation" qualifier="startPage">211</dcvalue>
  <dcvalue element="citation" qualifier="endPage">227</dcvalue>
  <dcvalue element="citation" qualifier="conferencePlace">SZ</dcvalue>
  <dcvalue element="citation" qualifier="conferencePlace">Macao, PEOPLES R CHINA</dcvalue>
  <dcvalue element="citation" qualifier="conferenceDate">2022-12-04</dcvalue>
  <dcvalue element="relation" qualifier="isPartOf">COMPUTER VISION - ACCV 2022, PT I</dcvalue>
  <dcvalue element="identifier" qualifier="wosid">001000819500013</dcvalue>
  <dcvalue element="identifier" qualifier="scopusid">2-s2.0-85151055806</dcvalue>
</dublin_core>
