{"@context":{"@vocab":"https://cir.nii.ac.jp/schema/1.0/","rdfs":"http://www.w3.org/2000/01/rdf-schema#","dc":"http://purl.org/dc/elements/1.1/","dcterms":"http://purl.org/dc/terms/","foaf":"http://xmlns.com/foaf/0.1/","prism":"http://prismstandard.org/namespaces/basic/2.0/","cinii":"http://ci.nii.ac.jp/ns/1.0/","datacite":"https://schema.datacite.org/meta/kernel-4/","ndl":"http://ndl.go.jp/dcndl/terms/","jpcoar":"https://github.com/JPCOAR/schema/blob/master/2.0/"},"@id":"https://cir.nii.ac.jp/crid/1360021392650180480.json","@type":"Article","productIdentifier":[{"identifier":{"@type":"DOI","@value":"10.1109/icassp49357.2023.10095298"}},{"identifier":{"@type":"URI","@value":"http://xplorestaging.ieee.org/ielx7/10094559/10094560/10095298.pdf?arnumber=10095298"}},{"identifier":{"@type":"DOI","@value":"10.48550/arxiv.2210.15533"}}],"dc:title":[{"@value":"Source-Filter HiFi-GAN: Fast and Pitch Controllable High-Fidelity Neural Vocoder"}],"description":[{"notation":[{"@value":"Our previous work, the unified source-filter GAN (uSFGAN) vocoder, introduced a novel architecture based on the source-filter theory into the parallel waveform generative adversarial network to achieve high voice quality and pitch controllability. However, the high temporal resolution inputs result in high computation costs. Although the HiFi-GAN vocoder achieves fast high-fidelity voice generation thanks to the efficient upsampling-based generator architecture, the pitch controllability is severely limited. To realize a fast and pitch-controllable high-fidelity neural vocoder, we introduce the source-filter theory into HiFi-GAN by hierarchically conditioning the resonance filtering network on a well-estimated source excitation information. According to the experimental results, our proposed method outperforms HiFi-GAN and uSFGAN on a singing voice generation in voice quality and synthesis speed on a single CPU. Furthermore, unlike the uSFGAN vocoder, the proposed method can be easily adopted/integrated in real-time applications and end-to-end systems."}]},{"notation":[{"@value":"Accepted to ICASSP 2023"}]}],"creator":[{"@id":"https://cir.nii.ac.jp/crid/1380021392650180480","@type":"Researcher","foaf:name":[{"@value":"Reo Yoneyama"}],"jpcoar:affiliationName":[{"@value":"Nagoya University,Japan"}]},{"@id":"https://cir.nii.ac.jp/crid/1380021392650180482","@type":"Researcher","foaf:name":[{"@value":"Yi-Chiao Wu"}],"jpcoar:affiliationName":[{"@value":"Meta Reality Labs Research,USA"}]},{"@id":"https://cir.nii.ac.jp/crid/1380021392650180481","@type":"Researcher","foaf:name":[{"@value":"Tomoki Toda"}],"jpcoar:affiliationName":[{"@value":"Nagoya University,Japan"}]}],"publication":{"prism:publicationName":[{"@value":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"}],"dc:publisher":[{"@value":"IEEE"}],"prism:publicationDate":"2023-06-04","prism:startingPage":"1","prism:endingPage":"5"},"reviewed":"false","dcterms:accessRights":"http://purl.org/coar/access_right/c_abf2","dc:rights":["https://doi.org/10.15223/policy-029","https://doi.org/10.15223/policy-037"],"url":[{"@id":"http://xplorestaging.ieee.org/ielx7/10094559/10094560/10095298.pdf?arnumber=10095298"}],"createdAt":"2023-05-05","modifiedAt":"2023-11-20","foaf:topic":[{"@id":"https://cir.nii.ac.jp/all?q=FOS:%20Computer%20and%20information%20sciences","dc:title":"FOS: Computer and information sciences"},{"@id":"https://cir.nii.ac.jp/all?q=Computer%20Science%20-%20Machine%20Learning","dc:title":"Computer Science - Machine Learning"},{"@id":"https://cir.nii.ac.jp/all?q=Sound%20(cs.SD)","dc:title":"Sound (cs.SD)"},{"@id":"https://cir.nii.ac.jp/all?q=Computer%20Science%20-%20Sound","dc:title":"Computer Science - Sound"},{"@id":"https://cir.nii.ac.jp/all?q=Machine%20Learning%20(cs.LG)","dc:title":"Machine Learning (cs.LG)"},{"@id":"https://cir.nii.ac.jp/all?q=Audio%20and%20Speech%20Processing%20(eess.AS)","dc:title":"Audio and Speech Processing (eess.AS)"},{"@id":"https://cir.nii.ac.jp/all?q=FOS:%20Electrical%20engineering,%20electronic%20engineering,%20information%20engineering","dc:title":"FOS: Electrical engineering, electronic engineering, information engineering"},{"@id":"https://cir.nii.ac.jp/all?q=Electrical%20Engineering%20and%20Systems%20Science%20-%20Audio%20and%20Speech%20Processing","dc:title":"Electrical Engineering and Systems Science - Audio and Speech Processing"}],"relatedProduct":[{"@id":"https://cir.nii.ac.jp/crid/1050581224891352704","@type":"Article","resourceType":"学術雑誌論文(journal article)","relationType":["isReferencedBy"],"jpcoar:relatedTitle":[{"@language":"en","@value":"Fast Neural Speech Waveform Generative Models With Fully-Connected Layer-Based Upsampling"}]}],"dataSourceIdentifier":[{"@type":"CROSSREF","@value":"10.1109/icassp49357.2023.10095298"},{"@type":"OPENAIRE","@value":"doi_dedup___::a72f5ac3a319b2f2da6863c7c41dc9aa"},{"@type":"CROSSREF","@value":"10.1109/access.2024.3366707_references_DOI_LoO9BFUGbNYaC4qRwxh365f940F"}]}