{"@context":{"@vocab":"https://cir.nii.ac.jp/schema/1.0/","rdfs":"http://www.w3.org/2000/01/rdf-schema#","dc":"http://purl.org/dc/elements/1.1/","dcterms":"http://purl.org/dc/terms/","foaf":"http://xmlns.com/foaf/0.1/","prism":"http://prismstandard.org/namespaces/basic/2.0/","cinii":"http://ci.nii.ac.jp/ns/1.0/","datacite":"https://schema.datacite.org/meta/kernel-4/","ndl":"http://ndl.go.jp/dcndl/terms/","jpcoar":"https://github.com/JPCOAR/schema/blob/master/2.0/"},"@id":"https://cir.nii.ac.jp/crid/1050021370926484864.json","@type":"Article","productIdentifier":[{"identifier":{"@type":"HDL","@value":"https://hdl.handle.net/2324/7232999"}}],"resourceType":"会議発表資料(conference paper)","dc:title":[{"@language":"en","@value":"Generative Range Imaging for Learning Scene Priors of 3D LiDAR Data"}],"dc:language":"en","description":[{"type":"Abstract","notation":[{"@language":"en","@value":"3D LiDAR sensors are indispensable for the robust vision of autonomous mobile robots. However, deploying LiDAR-based perception algorithms often fails due to a domain gap from the training environment, such as inconsistent angular resolution and missing properties. Existing studies have tackled the issue by learning inter-domain mapping, while the transferability is constrained by the training configuration and the training is susceptible to peculiar lossy noises called ray-drop. To address the issue, this paper proposes a generative model of LiDAR range images applicable to the data-level domain transfer. Motivated by the fact that LiDAR measurement is based on point-by-point range imaging, we train an implicit image representation-based generative adversarial networks along with a differentiable ray-drop effect. We demonstrate the fidelity and diversity of our model in comparison with the point-based and image-based state-of-the-art generative models. We also showcase upsampling and restoration applications. Furthermore, we introduce a Sim2Real application for LiDAR semantic segmentation. We demonstrate that our method is effective as a realistic ray-drop simulator and outperforms state-of-the-art methods."}]}],"creator":[{"@id":"https://cir.nii.ac.jp/crid/1070021370926484866","@type":"Researcher","personIdentifier":[{"@type":"ORCID","@value":"0000-0002-6773-7811"}],"foaf:name":[{"@language":"en","@value":"Nakashima, Kazuto"},{"@language":"ja","@value":"中嶋, 一斗"},{"@language":"ja-Kana","@value":"ナカシマ, カズト"}],"jpcoar:affiliationName":[{"@language":"en","@value":"Kyushu University"},{"@language":"ja","@value":"九州大学"}]},{"@id":"https://cir.nii.ac.jp/crid/1030003658708608002","@type":"Researcher","personIdentifier":[{"@type":"KAKEN_RESEARCHERS","@value":"70467877"},{"@type":"NRID","@value":"1000070467877"},{"@type":"ORCID","@value":"0000-0001-8931-0571"},{"@type":"NRID","@value":"9000242091317"},{"@type":"NRID","@value":"9000293528885"}],"foaf:name":[{"@language":"en","@value":"Iwashita, Yumi"},{"@language":"ja","@value":"岩下, 友美"},{"@language":"ja-Kana","@value":"イワシタ, ユミ"}],"jpcoar:affiliationName":[{"@language":"en","@value":"Jet Propulsion Laboratory, California Institute of Technology"},{"@language":"ja","@value":"カリフォルニア工科大学"}]},{"@id":"https://cir.nii.ac.jp/crid/1070021370926484865","@type":"Researcher","personIdentifier":[{"@type":"ORCID","@value":"0000-0002-4219-7644"}],"foaf:name":[{"@language":"en","@value":"Kurazume, Ryo"},{"@language":"ja","@value":"倉爪, 亮"},{"@language":"ja-Kana","@value":"クラヅメ, リョウ"}],"jpcoar:affiliationName":[{"@language":"en","@value":"Kyushu University"},{"@language":"ja","@value":"九州大学"}]}],"publication":{"prism:publicationName":[{"@language":"en","@value":"IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)"}],"dc:publisher":[{"@value":"Institute of Electrical and Electronics Engineers (IEEE)"}],"prism:publicationDate":"2023","prism:startingPage":"1256","prism:endingPage":"1266"},"jpcoar:conferenceName":"IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)","jpcoar:conferencePlace":"Waikoloa, HI","jpcoar:conferenceDate":{"jpcoar:startDay":"02","jpcoar:startMonth":"01","jpcoar:startYear":"2023","jpcoar:endDay":"07","jpcoar:endMonth":"01","jpcoar:endYear":"2023"},"dc:rights":["© 2024 IEEE.  Personal use of this material is permitted.  Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works."],"foaf:topic":[{"@id":"https://cir.nii.ac.jp/all?q=Training","dc:title":"Training"},{"@id":"https://cir.nii.ac.jp/all?q=Adaptation%20models","dc:title":"Adaptation models"},{"@id":"https://cir.nii.ac.jp/all?q=Laser%20radar","dc:title":"Laser radar"},{"@id":"https://cir.nii.ac.jp/all?q=Three-dimensional%20displays","dc:title":"Three-dimensional displays"},{"@id":"https://cir.nii.ac.jp/all?q=Semantic%20segmentation","dc:title":"Semantic segmentation"},{"@id":"https://cir.nii.ac.jp/all?q=Imaging","dc:title":"Imaging"},{"@id":"https://cir.nii.ac.jp/all?q=Rendering%20(computer%20graphics)","dc:title":"Rendering (computer graphics)"},{"@id":"https://cir.nii.ac.jp/all?q=Applications:%20Robotics","dc:title":"Applications: Robotics"},{"@id":"https://cir.nii.ac.jp/all?q=3D%20computer%20vision","dc:title":"3D computer vision"}],"dcterms:subject":[{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Training"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Adaptation models"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Laser radar"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Three-dimensional displays"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Semantic segmentation"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Imaging"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Rendering (computer graphics)"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"Applications: Robotics"}]},{"subjectScheme":"Other","notation":[{"@language":"en","@value":"3D computer vision"}]}],"project":[{"@id":"https://cir.nii.ac.jp/crid/1040848250603798272","@type":"Project","projectIdentifier":[{"@type":"KAKEN","@value":"19J12159"},{"@type":"JGN","@value":"JP19J12159"},{"@type":"URI","@value":"https://kaken.nii.ac.jp/grant/KAKENHI-PROJECT-19J12159/"}],"notation":[{"@language":"ja","@value":"複数人称視点に基づく知能化空間の時空間記述とシーン再構成"}]},{"@id":"https://cir.nii.ac.jp/crid/1040848250647710848","@type":"Project","projectIdentifier":[{"@type":"KAKEN","@value":"20H00230"},{"@type":"JGN","@value":"JP20H00230"},{"@type":"URI","@value":"https://kaken.nii.ac.jp/grant/KAKENHI-PROJECT-20H00230/"}],"notation":[{"@language":"ja","@value":"海洋破砕プラスチックごみ回収ロボットシステムに関する研究開発"},{"@language":"en","@value":"Development of garbage collecting robot for marine microplastics"}]},{"@id":"https://cir.nii.ac.jp/crid/1900022996656701696","@type":"Project","projectIdentifier":[{"@type":"JGN","@value":"JPMJMS2032"}],"notation":[{"@value":"多様な環境に適応しインフラ構築を革新する協働AIロボット"}]}],"dataSourceIdentifier":[{"@type":"IRDB","@value":"oai:irdb.nii.ac.jp:01211:0006436612"}]}