How to cite this item

ADAPTIVE-NET: deep computed tomography reconstruction network with analytical domain transformation knowledge

@article{QIMS34393,
	author = {Yongshuai Ge and Ting Su and Jiongtao Zhu and Xiaolei Deng and Qiyang Zhang and Jianwei Chen and Zhanli Hu and Hairong Zheng and Dong Liang},
	title = {ADAPTIVE-NET: deep computed tomography reconstruction network with analytical domain transformation knowledge},
	journal = {Quantitative Imaging in Medicine and Surgery},
	volume = {10},
	number = {2},
	year = {2020},
	keywords = {},
	abstract = {Background: Recently, the paradigm of computed tomography (CT) reconstruction has shifted as the deep learning technique evolves. In this study, we proposed a new convolutional neural network (called ADAPTIVE-NET) to perform CT image reconstruction directly from a sinogram by integrating analytical domain-transformation knowledge.
Methods: In the proposed ADAPTIVE-NET, a specific network layer with constant weights was customized to transform the sinogram onto the CT image domain via analytical back-projection. With this new framework, feature extraction was performed simultaneously on both the sinogram domain and the CT image domain. The Mayo low-dose CT (LDCT) data were used to validate the new network. In particular, the new network was compared with the previously proposed residual encoder-decoder (RED)-CNN network. For each network, the mean square error (MSE) loss with and without VGG-based perceptual loss was compared. Furthermore, to evaluate the image quality with certain metrics, the noise correlation was quantified via the noise power spectrum (NPS) on the reconstructed LDCT images for each method.
Results: CT images with clinically relevant dimensions of 512×512 can be easily reconstructed from a sinogram by ADAPTIVE-NET on a single graphics processing unit (GPU) with moderate memory size (e.g., 11 GB). With the same MSE loss function, the new network is able to generate better results than the RED-CNN. Moreover, the new network is able to reconstruct natural-looking CT images with enhanced image quality when the VGG loss is used jointly.
Conclusions: The newly proposed end-to-end supervised ADAPTIVE-NET is able to reconstruct high-quality LDCT images directly from a sinogram.},
	issn = {2223-4306},
	url = {https://qims.amegroups.org/article/view/34393}
}
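
The abstract above describes a domain-transformation layer whose weights are held constant and implement analytical back-projection. The following is a minimal sketch of that idea only, not the authors' code: it assumes a PyTorch implementation, a simple parallel-beam geometry, a pixel-driven nearest-bin back-projection operator, and toy sizes rather than the paper's 512×512 setting.

# Minimal sketch (not the authors' implementation): a fixed-weight
# back-projection layer of the kind the abstract describes, assuming
# parallel-beam geometry and a precomputed pixel-driven system matrix.
import numpy as np
import torch
import torch.nn as nn


def backprojection_matrix(n_pix=64, n_angles=60, n_dets=95):
    """Build a dense back-projection operator (image pixels x sinogram bins).

    Each image pixel accumulates the detector bin it projects onto at every
    view angle (nearest-bin interpolation, unit weights)."""
    angles = np.linspace(0.0, np.pi, n_angles, endpoint=False)
    coords = np.arange(n_pix) - (n_pix - 1) / 2.0  # pixel centers, image centered at origin
    xx, yy = np.meshgrid(coords, coords)
    A_t = np.zeros((n_pix * n_pix, n_angles * n_dets), dtype=np.float32)
    det_center = (n_dets - 1) / 2.0
    rows = np.arange(n_pix * n_pix)
    for a, theta in enumerate(angles):
        # Signed distance of each pixel to the detector axis for this view.
        t = xx * np.cos(theta) + yy * np.sin(theta)
        bins = np.clip(np.round(t + det_center).astype(int), 0, n_dets - 1)
        A_t[rows, a * n_dets + bins.ravel()] = 1.0
    return A_t / n_angles


class FixedBackProjection(nn.Module):
    """Domain-transformation layer with constant (non-trainable) weights."""

    def __init__(self, n_pix=64, n_angles=60, n_dets=95):
        super().__init__()
        weight = torch.from_numpy(backprojection_matrix(n_pix, n_angles, n_dets))
        # register_buffer keeps the operator on the module but excludes it from
        # gradient updates, i.e. the weights stay analytical constants.
        self.register_buffer("weight", weight)
        self.n_pix = n_pix

    def forward(self, sinogram):
        # sinogram: (batch, n_angles * n_dets) flattened projections
        image = sinogram @ self.weight.t()  # (batch, n_pix * n_pix)
        return image.view(-1, 1, self.n_pix, self.n_pix)


if __name__ == "__main__":
    layer = FixedBackProjection()
    sino = torch.rand(2, 60 * 95)
    print(layer(sino).shape)  # torch.Size([2, 1, 64, 64])

A dense matrix like this is only practical at toy scale; for clinically sized 512×512 reconstructions, a constant-weight layer of this kind would presumably be realized with a sparse or on-the-fly back-projection operator to fit within the roughly 11 GB of GPU memory mentioned in the abstract.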