@article{oai:repo.qst.go.jp:00073841,
  author  = {Hagiwara, A. and Otsuka, Y. and Hori, M. and Tachibana, Yasuhiko and Yokoyama, K. and Fujita, S. and Andica, C. and Kamagata, K. and Irie, R. and Koshino, S. and Maekawa, T. and Chougar, L. and Wada, A. and Takemura, M. Y. and Hattori, N. and Aoki, S.},
  title   = {Improving the Quality of Synthetic FLAIR Images with Deep Learning Using a Conditional Generative Adversarial Network for Pixel-by-Pixel Image Translation},
  journal = {American Journal of Neuroradiology},
  year    = {2019},
  volume  = {40},
  number  = {2},
  pages   = {224--230},
  month   = {Feb},
  note    = {BACKGROUND AND PURPOSE: Synthetic FLAIR images are of lower quality than conventional FLAIR images. Here, we aimed to improve the synthetic FLAIR image quality using deep learning with pixel-by-pixel translation through conditional generative adversarial network training. MATERIALS AND METHODS: Forty patients with MS were prospectively included and scanned (3T) to acquire synthetic MR imaging and conventional FLAIR images. Synthetic FLAIR images were created with the SyMRI software. Acquired data were divided into 30 training and 10 test datasets. A conditional generative adversarial network was trained to generate improved FLAIR images from raw synthetic MR imaging data using conventional FLAIR images as targets. The peak signal-to-noise ratio, normalized root mean square error, and the Dice index of MS lesion maps were calculated for synthetic and deep learning FLAIR images against conventional FLAIR images. Lesion conspicuity and the existence of artifacts were visually assessed. RESULTS: The peak signal-to-noise ratio and normalized root mean square error were significantly higher and lower, respectively, in generated versus synthetic FLAIR images in aggregate intracranial tissues and in all tissue segments (all P < .001). The Dice index of lesion maps and visual lesion conspicuity were comparable between generated and synthetic FLAIR images (P = 1 and .59, respectively). Generated FLAIR images showed fewer granular artifacts (P = .003) and swelling artifacts (in all cases) than synthetic FLAIR images. CONCLUSIONS: Using deep learning, we improved the synthetic FLAIR image quality by generating FLAIR images that have contrast closer to that of conventional FLAIR images and fewer granular and swelling artifacts, while preserving the lesion contrast.}
}
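
For reference, the image-quality metrics named in the abstract (peak signal-to-noise ratio, normalized root mean square error, Dice index) have standard definitions; the sketch below uses assumed symbols (reference image $x$, generated or synthetic image $\hat{x}$ over $N$ voxels, binary lesion masks $A$ and $B$) and is not the paper's exact formulation. BibTeX ignores text outside entries, so this note does not affect the bibliography.

% Standard definitions (assumed, not quoted from the paper):
% x = conventional FLAIR (reference), \hat{x} = synthetic or generated FLAIR,
% N = number of voxels, MAX = maximum intensity of the reference image,
% A, B = binary MS lesion masks. Note that NRMSE normalization conventions
% vary (by maximum, range, or mean of the reference); MAX is used here.
\begin{align}
  \mathrm{MSE}        &= \frac{1}{N} \sum_{i=1}^{N} \left( x_i - \hat{x}_i \right)^2 \\
  \mathrm{PSNR}       &= 10 \log_{10} \frac{\mathrm{MAX}^2}{\mathrm{MSE}} \\
  \mathrm{NRMSE}      &= \frac{\sqrt{\mathrm{MSE}}}{\mathrm{MAX}} \\
  \mathrm{Dice}(A, B) &= \frac{2\,\lvert A \cap B \rvert}{\lvert A \rvert + \lvert B \rvert}
\end{align}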