<?xml version="1.0" encoding="utf-8" standalone="no"?>
<dublin_core schema="dc">
<dcvalue element="contributor" qualifier="author">Son,&#x20;Geonhui</dcvalue>
<dcvalue element="contributor" qualifier="author">Lee,&#x20;Jeong&#x20;Ryong</dcvalue>
<dcvalue element="contributor" qualifier="author">Hwang,&#x20;Dosik</dcvalue>
<dcvalue element="date" qualifier="accessioned">2026-02-26T09:30:14Z</dcvalue>
<dcvalue element="date" qualifier="available">2026-02-26T09:30:14Z</dcvalue>
<dcvalue element="date" qualifier="created">2026-02-26</dcvalue>
<dcvalue element="date" qualifier="issued">2026-07</dcvalue>
<dcvalue element="identifier" qualifier="issn">0893-6080</dcvalue>
<dcvalue element="identifier" qualifier="uri">https:&#x2F;&#x2F;pubs.kist.re.kr&#x2F;handle&#x2F;201004&#x2F;154381</dcvalue>
<dcvalue element="description" qualifier="abstract">Generative&#x20;Adversarial&#x20;Networks&#x20;(GANs)&#x20;have&#x20;made&#x20;significant&#x20;progress&#x20;in&#x20;enhancing&#x20;the&#x20;quality&#x20;of&#x20;image&#x20;synthesis.&#x20;Recent&#x20;methods&#x20;frequently&#x20;leverage&#x20;pretrained&#x20;networks&#x20;to&#x20;calculate&#x20;perceptual&#x20;losses&#x20;or&#x20;utilize&#x20;pretrained&#x20;feature&#x20;spaces.&#x20;In&#x20;this&#x20;paper,&#x20;we&#x20;extend&#x20;the&#x20;capabilities&#x20;of&#x20;pretrained&#x20;networks&#x20;by&#x20;incorporating&#x20;innovative&#x20;self-supervised&#x20;learning&#x20;techniques&#x20;and&#x20;enforcing&#x20;consistency&#x20;between&#x20;discriminators&#x20;during&#x20;GAN&#x20;training.&#x20;Our&#x20;proposed&#x20;method,&#x20;named&#x20;HP-GAN,&#x20;effectively&#x20;exploits&#x20;neural&#x20;network&#x20;priors&#x20;through&#x20;two&#x20;primary&#x20;strategies:&#x20;FakeTwins&#x20;and&#x20;discriminator&#x20;consistency.&#x20;FakeTwins&#x20;leverages&#x20;pretrained&#x20;networks&#x20;as&#x20;encoders&#x20;to&#x20;compute&#x20;a&#x20;self-supervised&#x20;loss&#x20;and&#x20;applies&#x20;this&#x20;through&#x20;the&#x20;generated&#x20;images&#x20;to&#x20;train&#x20;the&#x20;generator,&#x20;thereby&#x20;enabling&#x20;the&#x20;generation&#x20;of&#x20;more&#x20;diverse&#x20;and&#x20;high&#x20;quality&#x20;images.&#x20;Additionally,&#x20;we&#x20;introduce&#x20;a&#x20;consistency&#x20;mechanism&#x20;between&#x20;discriminators&#x20;that&#x20;evaluate&#x20;feature&#x20;maps&#x20;extracted&#x20;from&#x20;Convolutional&#x20;Neural&#x20;Network&#x20;(CNN)&#x20;and&#x20;Vision&#x20;Transformer&#x20;(ViT)&#x20;feature&#x20;networks.&#x20;Discriminator&#x20;consistency&#x20;promotes&#x20;coherent&#x20;learning&#x20;among&#x20;discriminators&#x20;and&#x20;enhances&#x20;training&#x20;robustness&#x20;by&#x20;aligning&#x20;their&#x20;assessments&#x20;of&#x20;image&#x20;quality.&#x20;Our&#x20;extensive&#x20;evaluation&#x20;across&#x20;seventeen&#x20;datasets-including&#x20;scenarios&#x20;with&#x20;large,&#x20;small,&#x20;and&#x20;limited&#x20;data,&#x20;and&#x20;covering&#x20;a&#x20;variety&#x20;of&#x20;image&#x20;domains-demonstrates&#x20;that&#x20;HP-GAN&#x20;consistently&#x20;outperforms&#x20;current&#x20;state-of-the-art&#x20;methods&#x20;in&#x20;terms&#x20;of&#x20;Fr&#xE9;chet&#x20;Inception&#x20;Distance&#x20;(FID),&#x20;achieving&#x20;significant&#x20;improvements&#x20;in&#x20;image&#x20;diversity&#x20;and&#x20;quality.&#x20;Code&#x20;is&#x20;available&#x20;at:&#x20;https:&#x2F;&#x2F;github.com&#x2F;higun2&#x2F;HP-GAN.</dcvalue>
<dcvalue element="language" qualifier="none">English</dcvalue>
<dcvalue element="publisher" qualifier="none">Pergamon&#x20;Press&#x20;Ltd.</dcvalue>
<dcvalue element="title" qualifier="none">HP-GAN:&#x20;Harnessing&#x20;pretrained&#x20;networks&#x20;for&#x20;GAN&#x20;improvement&#x20;with&#x20;FakeTwins&#x20;and&#x20;discriminator&#x20;consistency</dcvalue>
<dcvalue element="type" qualifier="none">Article</dcvalue>
<dcvalue element="identifier" qualifier="doi">10.1016&#x2F;j.neunet.2026.108666</dcvalue>
<dcvalue element="description" qualifier="journalClass">1</dcvalue>
<dcvalue element="identifier" qualifier="bibliographicCitation">Neural&#x20;Networks,&#x20;v.199</dcvalue>
<dcvalue element="citation" qualifier="title">Neural&#x20;Networks</dcvalue>
<dcvalue element="citation" qualifier="volume">199</dcvalue>
<dcvalue element="description" qualifier="isOpenAccess">Y</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scie</dcvalue>
<dcvalue element="description" qualifier="journalRegisteredClass">scopus</dcvalue>
<dcvalue element="identifier" qualifier="wosid">001683690000001</dcvalue>
<dcvalue element="identifier" qualifier="scopusid">2-s2.0-105029054729</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Computer&#x20;Science,&#x20;Artificial&#x20;Intelligence</dcvalue>
<dcvalue element="relation" qualifier="journalWebOfScienceCategory">Neurosciences</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Computer&#x20;Science</dcvalue>
<dcvalue element="relation" qualifier="journalResearchArea">Neurosciences&#x20;&amp;&#x20;Neurology</dcvalue>
<dcvalue element="type" qualifier="docType">Article</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">Image&#x20;generation</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">Generative&#x20;adversarial&#x20;network</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">Pretrained&#x20;network</dcvalue>
<dcvalue element="subject" qualifier="keywordAuthor">Self-supervised&#x20;learning</dcvalue>
</dublin_core>
