import os
import glob
import math

import numpy as np
import itk
import meshio
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw

# Geometry parameters (mirroring the ITK two-projection DRR generator):
# - m_ProjectionAngle: gantry angle between projection central axis and reference axis.
# - m_FocalPointToIsocenterDistance: source to isocenter (i.e., 3D image center) distance in mm.
# - im_sx, im_sy: DRR pixel spacing in the DETECTOR plane in mm. The value below was
#   calculated by assuming a pixel spacing of 1.0 mm in the detector plane; refer
#   https://github.com/InsightSoftwareConsortium/ITKTwoProjectionRegistration/blob/master/test/GetDRRSiddonJacobsRayTracing.cxx#L112-L113
# - dx, dy: size of the DRR image in number of pixels.


def getDRRProjectionCoords(ct_img_path=None, points=None, m_ProjectionAngle=0.,
                           m_FocalPointToIsocenterDistance=1000., m_Threshold=0.,
                           source_to_detector_distance=1536.,
                           im_sx=0.65104, im_sy=0.65104, dx=1024, dy=1024):
    """Project 3D vertex coordinates onto the DRR detector plane.

    Replays the transform chain of the Siddon-Jacobs DRR ray-tracing example so
    that mesh vertices given in CT physical space can be overlaid, in pixel
    coordinates, on a previously rendered DRR image.

    Parameters
    ----------
    ct_img_path : str
        Path to the CT volume the DRR was generated from. Only its spacing and
        size are used, to reproduce the isocenter the DRR generator assumed
        (the center of the CT volume).
    points : numpy.ndarray
        (N, 3) array of vertex coordinates in CT physical space.
    m_ProjectionAngle : float
        Gantry (projection) angle in DEGREES; converted to radians internally.
    m_Threshold : float
        Unused here; kept for signature compatibility with the DRR generator.
    source_to_detector_distance : float
        Unused here; kept for signature compatibility with the DRR generator.
    m_FocalPointToIsocenterDistance : float
        Source-to-isocenter distance in mm.
    im_sx, im_sy : float
        DRR pixel spacing in the detector plane in mm.
    dx, dy : int
        DRR image size in pixels.

    Returns
    -------
    (u, v) : tuple of numpy.ndarray
        Pixel coordinates on the DRR image, one entry per input vertex.
    """
    # Use the image center as the central axis position.
    # refer https://github.com/InsightSoftwareConsortium/ITKTwoProjectionRegistration/blob/master/test/GetDRRSiddonJacobsRayTracing.cxx#L506-L510
    o2Dx = (dx - 1.) / 2.
    o2Dy = (dy - 1.) / 2.

    # Constant for converting degrees into radians.
    dtr = (math.atan(1.0) * 4.0) / 180.0

    Dimension = 3
    PointType = itk.Point[itk.D, Dimension]
    VectorType = itk.Vector[itk.D, Dimension]
    MatrixType = itk.Matrix[itk.D, Dimension, Dimension]
    TransformType = itk.Euler3DTransform[itk.D]

    # Compute the origin (in mm) of the 2D DRR image.
    m_Origin = PointType()
    m_Origin[0] = -im_sx * o2Dx
    m_Origin[1] = -im_sy * o2Dy
    m_Origin[2] = -m_FocalPointToIsocenterDistance

    m_Spacing = VectorType()
    m_Spacing[0] = im_sx  # pixel spacing along X of the 2D DRR image [mm]
    m_Spacing[1] = im_sy  # pixel spacing along Y of the 2D DRR image [mm]
    m_Spacing[2] = 1.0    # slice thickness of the 2D DRR image [mm]

    # Inverse transform to rotate the vertex into the DRR detector coordinate
    # system. The user can set an arbitrary transform for the volume; if not
    # explicitly set it is the identity. The isocenter is NOT set to (0, 0, 0)
    # here: following the DRR generator, the center of the 3D image volume (CT)
    # is used as the isocenter, see
    # https://github.com/InsightSoftwareConsortium/ITKTwoProjectionRegistration/blob/master/test/GetDRRSiddonJacobsRayTracing.cxx#L452-L454
    m_Transform = TransformType.New()
    m_Transform.SetComputeZYX(True)
    m_Transform.SetIdentity()

    m_InverseTransform = TransformType.New()
    m_InverseTransform.SetComputeZYX(True)

    m_ComposedTransform = TransformType.New()
    m_ComposedTransform.SetComputeZYX(True)

    m_GantryRotTransform = TransformType.New()
    m_GantryRotTransform.SetComputeZYX(True)
    m_GantryRotTransform.SetIdentity()

    m_CamShiftTransform = TransformType.New()
    m_CamShiftTransform.SetComputeZYX(True)
    m_CamShiftTransform.SetIdentity()

    m_CamRotTransform = TransformType.New()
    m_CamRotTransform.SetComputeZYX(True)
    m_CamRotTransform.SetIdentity()
    m_CamRotTransform.SetRotation(dtr * (-90.0), 0.0, 0.0)

    # The isocenter is the center of the CT volume, matching the DRR generator.
    inputImage = itk.imread(ct_img_path)
    imOrigin = inputImage.GetOrigin()
    imRes = inputImage.GetSpacing()
    imRegion = inputImage.GetBufferedRegion()
    imSize = imRegion.GetSize()
    isocenter = [imRes[i] * imSize[i] / 2.0 for i in range(Dimension)]

    m_ComposedTransform.SetIdentity()
    m_Transform.SetCenter(isocenter)
    m_Transform.SetTranslation([-1. * imOrigin[i] for i in range(Dimension)])
    m_ComposedTransform.Compose(m_Transform, False)

    # An Euler 3D transform is used to rotate the volume to simulate the
    # rotation of the linac gantry. The rotation is about the z-axis. After the
    # transform, an AP projection geometry (projecting towards the positive y
    # direction) is established.
    m_GantryRotTransform.SetRotation(0.0, 0.0, -1. * dtr * m_ProjectionAngle)
    m_GantryRotTransform.SetCenter(isocenter)
    m_ComposedTransform.Compose(m_GantryRotTransform, False)

    # An Euler 3D transform is used to shift the camera to the detection plane.
    camtranslation = VectorType()
    camtranslation[0] = -isocenter[0]
    camtranslation[1] = m_FocalPointToIsocenterDistance - isocenter[1]
    camtranslation[2] = -isocenter[2]
    m_CamShiftTransform.SetTranslation(camtranslation)
    m_ComposedTransform.Compose(m_CamShiftTransform, False)

    # An Euler 3D transform is used to establish the standard negative z-axis
    # projection geometry. (By default the camera sits at the origin, points
    # down the negative z-axis, and has an up-vector of (0, 1, 0).)
    m_ComposedTransform.Compose(m_CamRotTransform, False)

    # The overall inverse transform is computed; it would be used by the
    # interpolation procedure in the original DRR code.
    m_ComposedTransform.GetInverse(m_InverseTransform)

    # Helper matrices to convert PhysicalPoint coordinates to Index
    # coordinates, mirroring itk::ImageBase bookkeeping.
    m_Direction = MatrixType()
    m_Direction.SetIdentity()
    scale = itk.GetMatrixFromArray(np.diag(m_Spacing))
    m_IndexToPhysicalPoint = m_Direction * scale
    m_PhysicalPointToIndex = m_IndexToPhysicalPoint.GetInverse()

    # Project every vertex; accumulate pixel coordinates in plain lists
    # (appending to a numpy array inside the loop is quadratic).
    u_coords = []
    v_coords = []
    for point in points.tolist():
        # Transform the vertex into the detector coordinate system.
        drrPixelWorld = m_ComposedTransform.TransformPoint(point)

        # Perspective projection onto the detector plane (physical units, not
        # pixels): scale x and y by source-to-isocenter distance over z.
        drrPixelWorld[0] = -drrPixelWorld[0] * m_FocalPointToIsocenterDistance / drrPixelWorld[2]
        drrPixelWorld[1] = -drrPixelWorld[1] * m_FocalPointToIsocenterDistance / drrPixelWorld[2]

        # Physical point -> continuous index (pixel) coordinates.
        index = VectorType()
        for i in range(Dimension):
            acc = itk.NumericTraits[itk.D].ZeroValue()
            for j in range(Dimension):
                acc += m_PhysicalPointToIndex(i, j) * (drrPixelWorld[j] - m_Origin[j])
            index[i] = acc

        u_coords.append(index[0])
        v_coords.append(index[1])

    return np.asarray(u_coords), np.asarray(v_coords)


def getImageDict(img_paths):
    """Group DRR image paths by mesh name.

    The key is the file name without extension and without the trailing gantry
    angle, e.g. 'ellipsoid_72.png' -> key 'ellipsoid'.
    """
    img_dict = {}
    for path in img_paths:
        key_with_angle = os.path.basename(path).rsplit('.', 1)[0]
        # Key holds the volumetric mesh name (i.e. ellipsoid) obtained by
        # stripping the gantry angle suffix (i.e. ellipsoid_72 -> ellipsoid).
        key = key_with_angle.rsplit('_', 1)[0]
        img_dict.setdefault(key, []).append(path)
    return img_dict


def alignedWithDRR(ct_img_path, meshPaths, img_dict, outputImgFolder):
    """Overlay projected mesh vertices as red points on each matching DRR.

    For every mesh, every DRR rendered from it (one per gantry angle) is
    annotated and saved as '<name>_aligned.png' in outputImgFolder.
    """
    for path in meshPaths:
        mesh = meshio.read(path)
        img_key = os.path.basename(path).rsplit('.', 1)[0]
        for drr_img_path in img_dict[img_key]:
            # DRR image name without extension (e.g. volume-10_0).
            img_name_str = os.path.basename(drr_img_path).rsplit('.', 1)[0]
            # Projection angle in degrees, encoded after the last underscore.
            gantry_angle = float(img_name_str.rsplit('_', 1)[1])
            print('gantry_angle:', str(gantry_angle))
            img_name = img_name_str + '_aligned'
            print('image_name:', img_name)

            # Pass the projection angle to get the 2D coordinates for this DRR.
            u, v = getDRRProjectionCoords(ct_img_path=ct_img_path,
                                          points=mesh.points,
                                          m_ProjectionAngle=gantry_angle)
            # Mimics the flip filter of the DRR generator (dy=1024 -> 1023).
            v = 1023 - v

            output_img_path = os.path.join(outputImgFolder, img_name + ".png")
            with Image.open(drr_img_path) as drr_file:
                canvas = drr_file.convert("RGBA")
            draw = ImageDraw.Draw(canvas)
            # Plot the (u, v) points.
            for x, y in zip(u.tolist(), v.tolist()):
                draw.point((x, y), fill="red")
            canvas.save(output_img_path)


if __name__ == "__main__":
    ct_img_path = 'F:/all_CTs_HU_corrected/liver_ct.nii.gz'
    mesh_paths = glob.glob('F:/meshes/*.vtk')
    drr_img_paths = glob.glob('F:/ITK_DRRs/all_drrs/*.png')
    output_img_folder = 'F:/results_proejctions'

    img_dict = getImageDict(drr_img_paths)
    alignedWithDRR(ct_img_path, mesh_paths, img_dict, output_img_folder)