<?xml version="1.0" encoding="utf-8"?>
<dublin_core schema="dc">
<dcvalue element="contributor" qualifier="author">Kim, Ig-Jae</dcvalue>
<dcvalue element="contributor" qualifier="author">Ko, Hyeong-Seok</dcvalue>
<dcvalue element="date" qualifier="accessioned">2024-01-21T00:32:49Z</dcvalue>
<dcvalue element="date" qualifier="available">2024-01-21T00:32:49Z</dcvalue>
<dcvalue element="date" qualifier="created">2021-09-05</dcvalue>
<dcvalue element="date" qualifier="issued">2007-09</dcvalue>
<dcvalue element="identifier" qualifier="issn">0167-7055</dcvalue>
<dcvalue element="identifier" qualifier="uri">https://pubs.kist.re.kr/handle/201004/134141</dcvalue>
<dcvalue element="description" qualifier="abstract">This paper proposes a new technique for generating three-dimensional speech animation. The proposed technique takes advantage of both data-driven and machine learning approaches. It seeks to utilize the most relevant part of the captured utterances for the synthesis of input phoneme sequences. If highly relevant data are missing or lacking, then it utilizes less relevant (but more abundant) data and relies more heavily on machine learning for the lip-synch generation. This hybrid approach produces results that are more faithful to real data than conventional machine learning approaches, while being better able to handle incompleteness or redundancy in the database than conventional data-driven approaches. Experimental results, obtained by applying the proposed technique to the utterance of various words and phrases, show that (1) the proposed technique generates lip-synchs of different qualities depending on the availability of the data, and (2) the new technique produces more realistic results than conventional machine learning approaches.</dcvalue>
<dcvalue element="language" qualifier="none">English</dcvalue>
<dcvalue element="publisher" qualifier="none">WILEY</dcvalue>
<dcvalue element="title" qualifier="none">3D lip-synch generation with data-faithful machine learning</dcvalue>
<dcvalue element="type" qualifier="none">Article</dcvalue>
<dcvalue element="identifier" qualifier="doi">10.1111/j.1467-8659.2007.01051.x</dcvalue>
<dcvalue element="description" qualifier="journalClass">1</dcvalue>
<dcvalue element="identifier" qualifier="bibliographicCitation">COMPUTER GRAPHICS FORUM, v.26, no.3, pp.295 - 301</dcvalue>
<dcvalue element="citation" qualifier="title">COMPUTER GRAPHICS FORUM</dcvalue>
<dcvalue element="citation" qualifier="volume">26</dcvalue>
<dcvalue element="citation" qualifier="number">3</dcvalue>
<dcvalue element="citation" qualifier="startPage">295</dcvalue>
<dcvalue element="citation" qualifier="endPage">301</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scie</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scopus</dcvalue>
<dcvalue element="identifier" qualifier="wosid">000249660500010</dcvalue>
<dcvalue element="identifier" qualifier="scopusid">2-s2.0-35348984054</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Computer Science, Software Engineering</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Computer&#x20;Science</dcvalue>
<dcvalue element="type" qualifier="docType">Article; Proceedings Paper</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">speech animation</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">lip synch</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">facial animation</dcvalue>
</dublin_core>
