From 77be681a1178084852a9670b288e191d22708238 Mon Sep 17 00:00:00 2001 From: effepivi Date: Tue, 28 May 2024 15:54:21 +0100 Subject: [PATCH 01/17] Add a new class Cone3D_SOUV that inherits of SystemConfiguration, amd a new AcquisitionGeometry called CONE_SOUV = 'cone_souv'. It is impossible to set_angles on this new geometry. --- .../cil/framework/acquisition_geometry.py | 336 ++++++++++++++++-- 1 file changed, 304 insertions(+), 32 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index 662e6bc0b9..237d4fe4fe 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1216,6 +1216,279 @@ def set_centre_of_rotation(self, offset, angle): self.rotation_axis.direction = p2_on_plane - p1_on_plane +class Cone3D_SOUV(SystemConfiguration): + r'''This class creates the SystemConfiguration of a cone beam 3D tomographic system + + :param source_position_set: N 3D vectors describing the position of the source (x,y,z) + :type source_position_set: list, tuple, ndarray + :param detector_position_set: N 3D vectors describing the position of the centre of the detector (x,y,z) + :type detector_position_set: list, tuple, ndarray + :param detector_direction_x_set: N 3D vectors describing the direction of the detector_x (x,y,z) + :type detector_direction_x_set: list, tuple, ndarray + :param detector_direction_y_set: N 3D vectors describing the direction of the detector_y (x,y,z) + :type detector_direction_y_set: list, tuple, ndarray + :param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z) + :type volume_centre_position: list, tuple, ndarray + :param units: Label the units of distance used for the configuration + :type units: string + ''' + + def __init__ (self, source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units='units'): + """Constructor method + """ + super(Cone3D_SOUV, self).__init__(dof=3, geometry = 'cone_souv', units=units); + + # All the sets must have the same number of parameters + if len(source_position_set) != len(detector_position_set) or \ + len(source_position_set) != len(detector_direction_x_set) or \ + len(source_position_set) != len(detector_direction_y_set): + + raise ValueError("Make sure all the sets have the same number of parameters"); + + #source + self.source.position_set = source_position_set; + + #detector + self.detector.position_set = detector_position_set; + self.detector.direction_x_set = detector_direction_x_set; + self.detector.direction_y_set = detector_direction_y_set; + + #reconstructed volume centre + self.volume_centre_position = numpy.array(volume_centre_position); + + # def align_z(self): + # r'''Transforms the system origin to the rotate axis with z direction aligned to the rotate axis direction + # ''' + # self.set_origin(self.rotation_axis.position) + # rotation_matrix = SystemConfiguration.rotation_vec_to_z(self.rotation_axis.direction) + + # #apply transform + # self.rotation_axis.direction = [0,0,1] + # self.source.position = rotation_matrix.dot(self.source.position.reshape(3,1)) + # self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1)) + # new_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1)) + # new_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1)) + # self.detector.set_direction(new_x, new_y) + + + # def 
align_reference_frame(self, definition='cil'): + # r'''Transforms and rotates the system to backend definitions + # ''' + + # self.align_z() + + # if definition=='cil': + # rotation_matrix = SystemConfiguration.rotation_vec_to_y(self.detector.position - self.source.position) + # elif definition=='tigre': + # rotation_matrix = SystemConfiguration.rotation_vec_to_y(self.rotation_axis.position - self.source.position) + # else: + # raise ValueError("Geometry can be configured for definition = 'cil' or 'tigre' only. Got {}".format(definition)) + + # self.source.position = rotation_matrix.dot(self.source.position.reshape(3,1)) + # self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1)) + # new_direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1)) + # new_direction_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1)) + # self.detector.set_direction(new_direction_x, new_direction_y) + + + # def system_description(self): + # r'''Returns `simple` if the the geometry matches the default definitions with no offsets or rotations, + # \nReturns `offset` if the the geometry matches the default definitions with centre-of-rotation or detector offsets + # \nReturns `advanced` if the the geometry has rotated or tilted rotation axis or detector, can also have offsets + # ''' + + # vec_src2det = ComponentDescription.create_unit_vector(self.detector.position - self.source.position) + + # principal_ray_centred = ComponentDescription.test_parallel(vec_src2det, self.detector.normal) + # centre_ray_perpendicular_rotation = ComponentDescription.test_perpendicular(vec_src2det, self.rotation_axis.direction) + # rotation_parallel_detector_y = ComponentDescription.test_parallel(self.rotation_axis.direction, self.detector.direction_y) + + # #rotation axis to detector is parallel with centre ray + # if numpy.allclose(self.rotation_axis.position, self.detector.position): #points are equal + # rotation_axis_centred = True + # else: + # vec_b = ComponentDescription.create_unit_vector(self.detector.position - self.rotation_axis.position ) + # rotation_axis_centred = ComponentDescription.test_parallel(vec_src2det, vec_b) + + # if not principal_ray_centred or\ + # not centre_ray_perpendicular_rotation or\ + # not rotation_parallel_detector_y: + # config = SystemConfiguration.SYSTEM_ADVANCED + # elif not rotation_axis_centred: + # config = SystemConfiguration.SYSTEM_OFFSET + # else: + # config = SystemConfiguration.SYSTEM_SIMPLE + + # return config + + # def get_centre_slice(self): + # """Returns the 2D system configuration corresponding to the centre slice + # """ + # #requires the rotate axis to be perpendicular to the normal of the detector, and perpendicular to detector_direction_x + # dp1 = self.rotation_axis.direction.dot(self.detector.normal) + # dp2 = self.rotation_axis.direction.dot(self.detector.direction_x) + + # if numpy.isclose(dp1, 0) and numpy.isclose(dp2, 0): + # temp = self.copy() + # temp.align_reference_frame() + # source_position = temp.source.position[0:2] + # detector_position = temp.detector.position[0:2] + # detector_direction_x = temp.detector.direction_x[0:2] + # rotation_axis_position = temp.rotation_axis.position[0:2] + + # return Cone2D(source_position, detector_position, detector_direction_x, rotation_axis_position) + # else: + # raise ValueError('Cannot convert geometry to 2D. 
Requires axis of rotation to be perpendicular to the detector.') + + def __str__(self): + def csv(val): + return numpy.array2string(val, separator=', ') + + print(self.volume_centre_position) + repres = "Per-projection 3D Cone-beam tomography\n" + repres += "System configuration:\n" + repres += "\tSource positions 0-9: {0}\n".format(numpy.array2string(self.source.position_set[0:10], separator=', ')) + repres += "\tDetector positions 0-9: {0}\n".format(numpy.array2string(self.detector.position_set[0:10], separator=', ')) + repres += "\tDetector directions x 0-9: {0}\n".format(numpy.array2string(self.detector.direction_x_set[0:10], separator=', ')) + repres += "\tDetector directions y 0-9: {0}\n".format(numpy.array2string(self.detector.direction_y_set[0:10], separator=', ')) + repres += "Reconstructed volume:\n" + repres += "\tCentre position: {0}\n".format(csv(self.volume_centre_position)) + return repres + + # def __eq__(self, other): + + # if not isinstance(other, self.__class__): + # return False + + # if numpy.allclose(self.source.position, other.source.position) \ + # and numpy.allclose(self.detector.position, other.detector.position)\ + # and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\ + # and numpy.allclose(self.detector.direction_y, other.detector.direction_y)\ + # and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position)\ + # and numpy.allclose(self.rotation_axis.direction, other.rotation_axis.direction): + + # return True + + # return False + + # def calculate_magnification(self): + + # ab = (self.rotation_axis.position - self.source.position) + # dist_source_center = float(numpy.sqrt(ab.dot(ab))) + + # ab_unit = ab / numpy.sqrt(ab.dot(ab)) + + # n = self.detector.normal + + # #perpendicular distance between source and detector centre + # sd = float((self.detector.position - self.source.position).dot(n)) + # ratio = float(ab_unit.dot(n)) + + # source_to_detector = sd / ratio + # dist_center_detector = source_to_detector - dist_source_center + # magnification = (dist_center_detector + dist_source_center) / dist_source_center + + # return [dist_source_center, dist_center_detector, magnification] + + # def rotation_axis_on_detector(self): + # """ + # Calculates the position, on the detector, of the projection of the rotation axis in the world coordinate system + + # Returns + # ------- + # PositionDirectionVector + # Position and direction in the 3D system + # """ + # #calculate the intersection with the detector, of source to pv + # Pv = self.rotation_axis.position + # vec_a = Pv - self.source.position + # ratio = (self.detector.position - Pv).dot(self.detector.normal) / vec_a.dot(self.detector.normal) + # point1 = Pv + vec_a * ratio + + # #calculate the intersection with the detector, of source to pv + # Pv = self.rotation_axis.position + self.rotation_axis.direction + # vec_a = Pv - self.source.position + # ratio = (self.detector.position - Pv).dot(self.detector.normal) / vec_a.dot(self.detector.normal) + # point2 = Pv + vec_a * ratio + + # out = PositionDirectionVector(3) + # out.position = point1 + # out.direction = point2 - point1 + # return out + + # def calculate_centre_of_rotation(self): + # """ + # Calculates the position, on the detector, of the projection of the rotation axis in the detector coordinate system + + # Note + # ---- + # - Origin is in the centre of the detector + # - Axes directions are specified by detector.direction_x, detector.direction_y + # - Units are the units of distance used to specify the component's 
positions + + # Returns + # ------- + # Float + # Offset position along the detector x_axis at y=0 + # Float + # Angle between the y_axis and the rotation axis projection, in radians + # """ + # rotate_axis_projection = self.rotation_axis_on_detector() + + # p1 = rotate_axis_projection.position + # p2 = p1 + rotate_axis_projection.direction + + # #point1 and point2 are on the detector plane. need to return them in the detector coordinate system + # dp1 = p1 - self.detector.position + # x1 = self.detector.direction_x.dot(dp1) + # y1 = self.detector.direction_y.dot(dp1) + # dp2 = p2 - self.detector.position + # x2 = self.detector.direction_x.dot(dp2) + # y2 = self.detector.direction_y.dot(dp2) + + # #y = m * x + c + # #c = y1 - m * x1 + # #when y is 0 + # #x=-c/m + # #x_y0 = -y1/m + x1 + # offset_x_y0 = x1 -y1 * (x2 - x1)/(y2-y1) + + # angle = math.atan2(x2 - x1, y2 - y1) + # offset = offset_x_y0 + + # return (offset, angle) + + + # def set_centre_of_rotation(self, offset, angle): + # """ Configures the geometry to have the requested centre of rotation offset at the detector + # """ + # #two points on the detector + # x1 = offset + # y1 = 0 + # x2 = offset + math.tan(angle) + # y2 = 1 + + # #convert to 3d coordinates in system frame + # p1 = self.detector.position + x1 * self.detector.direction_x + y1 * self.detector.direction_y + # p2 = self.detector.position + x2 * self.detector.direction_x + y2 * self.detector.direction_y + + # # vectors from source define plane + # sp1 = p1 - self.source.position + # sp2 = p2 - self.source.position + + # #find vector intersection with a plane defined by rotate axis (pos and dir) and det_x direction + # plane_normal = numpy.cross(self.rotation_axis.direction, self.detector.direction_x) + + # ratio = (self.rotation_axis.position - self.source.position).dot(plane_normal) / sp1.dot(plane_normal) + # p1_on_plane = self.source.position + sp1 * ratio + + # ratio = (self.rotation_axis.position - self.source.position).dot(plane_normal) / sp2.dot(plane_normal) + # p2_on_plane = self.source.position + sp2 * ratio + + # self.rotation_axis.position = p1_on_plane + # self.rotation_axis.direction = p2_on_plane - p1_on_plane + + class Panel(object): r'''This is a class describing the panel of the system. 
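
For reference, a minimal sketch of how the new per-projection geometry is intended to be built, assuming an ordinary circular cone-beam scan rewritten as one set of source/detector vectors per projection. The trajectory, distances and panel size below are made up for illustration; only the commented factory call at the end uses the API added by this patch series.

    import numpy as np

    # Illustrative only: a circular scan expressed as per-projection (SOUV) vectors.
    n_proj = 360
    sdd, sod = 1500.0, 1000.0        # source-detector / source-object distances (assumed values)
    angles = np.linspace(0, 2 * np.pi, n_proj, endpoint=False)

    source_position_set = np.stack(
        [-sod * np.sin(angles), -sod * np.cos(angles), np.zeros(n_proj)], axis=1)
    detector_position_set = np.stack(
        [(sdd - sod) * np.sin(angles), (sdd - sod) * np.cos(angles), np.zeros(n_proj)], axis=1)
    detector_direction_x_set = np.stack(
        [np.cos(angles), -np.sin(angles), np.zeros(n_proj)], axis=1)
    detector_direction_y_set = np.tile([0.0, 0.0, 1.0], (n_proj, 1))

    # Intended call pattern (factory added further down in this patch):
    # ag = AcquisitionGeometry.create_Cone3D_SOUV(
    #          source_position_set, detector_position_set,
    #          detector_direction_x_set, detector_direction_y_set,
    #          volume_centre_position=[0, 0, 0], units='mm')
    # ag.set_panel(num_pixels=[256, 256], pixel_size=[0.5, 0.5])

Because every projection carries its own vectors, the same pattern should cover helical or arbitrarily perturbed trajectories without calling set_angles().
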
@@ -1513,11 +1786,14 @@ def configured(self): \n\tAcquisitionGeometry.create_Parallel2D()\ \n\tAcquisitionGeometry.create_Cone3D()\ \n\tAcquisitionGeometry.create_Parallel2D()\ - \n\tAcquisitionGeometry.create_Cone3D()") + \n\tAcquisitionGeometry.create_Cone3D()\ + \n\tAcquisitionGeometry.create_Cone3D_SOUV()") return False configured = True - if self.angles is None: + + # cone_souv geometry does not use angles as it uses per-projection geometries + if self.angles is None and self.system.geometry != "cone_souv": print("Please configure angular data using the set_angles() method") configured = False if self.panel is None: @@ -1605,40 +1881,12 @@ class AcquisitionGeometry(object): `AcquisitionGeometry.create_Parallel3D()` `AcquisitionGeometry.create_Cone3D()` + + `AcquisitionGeometry.create_SOUV()` """ #for backwards compatibility - @property - def ANGLE(self): - warnings.warn("use AcquisitionDimension.Angle instead", DeprecationWarning, stacklevel=2) - return AcquisitionDimension.ANGLE - - @property - def CHANNEL(self): - warnings.warn("use AcquisitionDimension.Channel instead", DeprecationWarning, stacklevel=2) - return AcquisitionDimension.CHANNEL - - @property - def DEGREE(self): - warnings.warn("use AngleUnit.DEGREE", DeprecationWarning, stacklevel=2) - return AngleUnit.DEGREE - - @property - def HORIZONTAL(self): - warnings.warn("use AcquisitionDimension.HORIZONTAL instead", DeprecationWarning, stacklevel=2) - return AcquisitionDimension.HORIZONTAL - - @property - def RADIAN(self): - warnings.warn("use AngleUnit.RADIAN instead", DeprecationWarning, stacklevel=2) - return AngleUnit.RADIAN - - @property - def VERTICAL(self): - warnings.warn("use AcquisitionDimension.VERTICAL instead", DeprecationWarning, stacklevel=2) - return AcquisitionDimension.VERTICAL - @property def geom_type(self): return self.config.system.geometry @@ -2082,6 +2330,30 @@ def create_Cone3D(source_position, detector_position, detector_direction_x=[1,0, AG.config.system = Cone3D(source_position, detector_position, detector_direction_x, detector_direction_y, rotation_axis_position, rotation_axis_direction, units) return AG + @staticmethod + def create_Cone3D_SOUV(source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position=[0,0,0], units='units distance'): + r'''This creates the AcquisitionGeometry for a per-projection cone beam 3D tomographic system + + :param source_position_set: N 3D vectors describing the position of the source (x,y,z) + :type source_position_set: list, tuple, ndarray, optional + :param detector_position_set: N 3D vectors describing the position of the centre of the detector (x,y,z) + :type detector_position_set: list, tuple, ndarray, optional + :param detector_direction_x_set: N 3D vectors describing the direction of the detector_x (x,y,z) + :type detector_direction_x_set: list, tuple, ndarray + :param detector_direction_y_set: N 3D vectors describing the direction of the detector_y (x,y,z) + :type detector_direction_y_set: list, tuple, ndarray + :param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z) + :type volume_centre_position: list, tuple, ndarray, optional + :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel + :type units: string + :return: returns a configured AcquisitionGeometry object + :rtype: AcquisitionGeometry + ''' + AG = AcquisitionGeometry() + AG.config = Configuration(units) + 
AG.config.system = Cone3D_SOUV(source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units) + return AG + def get_order_by_label(self, dimension_labels, default_dimension_labels): order = [] for i, el in enumerate(default_dimension_labels): From 97735b8ebe7919a3a35659b88209914e400a9488 Mon Sep 17 00:00:00 2001 From: effepivi Date: Tue, 28 May 2024 16:07:36 +0100 Subject: [PATCH 02/17] Add __eq__ for SOUV geometry. --- .../cil/framework/acquisition_geometry.py | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index 237d4fe4fe..eea55a6c69 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1355,21 +1355,20 @@ def csv(val): repres += "\tCentre position: {0}\n".format(csv(self.volume_centre_position)) return repres - # def __eq__(self, other): + def __eq__(self, other): - # if not isinstance(other, self.__class__): - # return False + if not isinstance(other, self.__class__): + return False - # if numpy.allclose(self.source.position, other.source.position) \ - # and numpy.allclose(self.detector.position, other.detector.position)\ - # and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\ - # and numpy.allclose(self.detector.direction_y, other.detector.direction_y)\ - # and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position)\ - # and numpy.allclose(self.rotation_axis.direction, other.rotation_axis.direction): + if numpy.allclose(self.source.position_set, other.source.position_set) \ + and numpy.allclose(self.detector.position_set, other.detector.position_set)\ + and numpy.allclose(self.detector.direction_x_set, other.detector.direction_x_set)\ + and numpy.allclose(self.detector.direction_y_set, other.detector.direction_y_set)\ + and numpy.allclose(self.volume_centre_position, other.volume_centre_position): - # return True + return True - # return False + return False # def calculate_magnification(self): From ad7ae7c83c6e6261ac28657655907dfa629797ec Mon Sep 17 00:00:00 2001 From: effepivi Date: Tue, 28 May 2024 16:22:41 +0100 Subject: [PATCH 03/17] Compute the magnification for the SOUV geometry. Per default, the geometry of the first projection is used. 
--- .../cil/framework/acquisition_geometry.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index eea55a6c69..e869aa43a5 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1256,6 +1256,7 @@ def __init__ (self, source_position_set, detector_position_set, detector_directi #reconstructed volume centre self.volume_centre_position = numpy.array(volume_centre_position); + # def align_z(self): # r'''Transforms the system origin to the rotate axis with z direction aligned to the rotate axis direction # ''' @@ -1370,24 +1371,30 @@ def __eq__(self, other): return False - # def calculate_magnification(self): + def calculate_magnification(self, projection_ID:int=0): - # ab = (self.rotation_axis.position - self.source.position) - # dist_source_center = float(numpy.sqrt(ab.dot(ab))) + if projection_ID < self.num_positions: + idx = projection_ID; + else: + raise IndexError("The requested projection index is greater than or equal to the total number of projections in the SOUV geometry."); - # ab_unit = ab / numpy.sqrt(ab.dot(ab)) + ab = (self.volume_centre_position - self.source.position_set[idx]) + dist_source_center = float(numpy.sqrt(ab.dot(ab))) - # n = self.detector.normal + ab_unit = ab / numpy.sqrt(ab.dot(ab)) - # #perpendicular distance between source and detector centre - # sd = float((self.detector.position - self.source.position).dot(n)) - # ratio = float(ab_unit.dot(n)) + # Compute the normal vector of the detector + n = numpy.cross(self.detector.direction_x_set[idx], self.detector.direction_y_set[idx]) - # source_to_detector = sd / ratio - # dist_center_detector = source_to_detector - dist_source_center - # magnification = (dist_center_detector + dist_source_center) / dist_source_center + #perpendicular distance between source and detector centre + sd = float((self.detector.position_set[idx] - self.source.position_set[idx]).dot(n)) + ratio = float(ab_unit.dot(n)) - # return [dist_source_center, dist_center_detector, magnification] + source_to_detector = sd / ratio + dist_center_detector = source_to_detector - dist_source_center + magnification = (dist_center_detector + dist_source_center) / dist_source_center + + return [dist_source_center, dist_center_detector, magnification] # def rotation_axis_on_detector(self): # """ From bd8cd2775ad53e44c4fc54e232f9933a49a044b5 Mon Sep 17 00:00:00 2001 From: effepivi Date: Tue, 28 May 2024 16:52:52 +0100 Subject: [PATCH 04/17] Add SOUV vector. 
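
The ASTRA 'cone_vec' projector expects one row of 12 values per projection: source position, detector-centre position, the step between horizontally adjacent pixels (u) and the step between vertically adjacent pixels (v). A minimal sketch of filling that layout directly from per-projection vectors, using made-up data; u and v are scaled by the pixel size here, as the existing cone/parallel paths do, on the assumption that the stored direction sets are unit vectors.

    import numpy as np

    pixel_size = (0.2, 0.2)                                        # assumed pixel pitch
    src_set = np.array([[0., -100., 0.], [100., 0., 0.], [0., 100., 0.]])
    det_set = np.array([[0., 50., 0.], [-50., 0., 0.], [0., -50., 0.]])
    dir_x_set = np.array([[1., 0., 0.], [0., 1., 0.], [-1., 0., 0.]])
    dir_y_set = np.tile([0., 0., 1.], (3, 1))

    vectors = np.zeros((len(src_set), 12))
    for i, (src, det, u, v) in enumerate(zip(src_set, det_set, dir_x_set, dir_y_set)):
        vectors[i, 0:3] = src                       # ray source
        vectors[i, 3:6] = det                       # detector centre
        vectors[i, 6:9] = pixel_size[0] * u         # horizontal pixel step
        vectors[i, 9:12] = pixel_size[1] * v        # vertical pixel step

    # proj_geom = astra.create_proj_geom('cone_vec', num_pixels_v, num_pixels_h, vectors)
    # (num_pixels_v/num_pixels_h are placeholders for the panel dimensions)
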
--- .../convert_geometry_to_astra_vec_3D.py | 85 ++++++++++++------- 1 file changed, 54 insertions(+), 31 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index d03322df74..9246e9b717 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -43,21 +43,26 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): sinogram_geometry = sinogram_geometry_in.copy() - #this catches behaviour modified after CIL 21.3.1 - try: - sinogram_geometry.config.system.align_reference_frame('cil') - except: - sinogram_geometry.config.system.update_reference_frame() + # Only needed when using the traditional geometry set with rotation angles + if sinogram_geometry.geom_type != 'cone_souv': + + #this catches behaviour modified after CIL 21.3.1 + try: + sinogram_geometry.config.system.align_reference_frame('cil') + except: + sinogram_geometry.config.system.update_reference_frame() - angles = sinogram_geometry.config.angles + angles = sinogram_geometry.config.angles + + #get units + degrees = angles.angle_unit == sinogram_geometry.DEGREE + system = sinogram_geometry.config.system panel = sinogram_geometry.config.panel + - #get units - degrees = angles.angle_unit == AngleUnit.DEGREE - - if AcquisitionType.DIM2 & sinogram_geometry.dimension: + if sinogram_geometry.dimension == '2D': #create a 3D astra geom from 2D CIL geometry volume_geometry_temp = volume_geometry.copy() volume_geometry_temp.voxel_num_z = 1 @@ -92,34 +97,52 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): else: volume_geometry_temp = volume_geometry.copy() - row = panel.pixel_size[0] * system.detector.direction_x.reshape(3,1) - col = panel.pixel_size[1] * system.detector.direction_y.reshape(3,1) - det = system.detector.position.reshape(3, 1) - - if 'right' in panel.origin: - row *= -1 - if 'top' in panel.origin: - col *= -1 - - if sinogram_geometry.geom_type == 'parallel': - src = system.ray.direction.reshape(3,1) - projector = 'parallel3d_vec' + # Only needed when using the traditional geometry set with rotation angles + if sinogram_geometry.geom_type != 'cone_souv': + row = panel.pixel_size[0] * system.detector.direction_x.reshape(3,1) + col = panel.pixel_size[1] * system.detector.direction_y.reshape(3,1) + det = system.detector.position.reshape(3, 1) + + if 'right' in panel.origin: + row *= -1 + if 'top' in panel.origin: + col *= -1 + + if sinogram_geometry.geom_type == 'parallel': + src = system.ray.direction.reshape(3,1) + projector = 'parallel3d_vec' + elif sinogram_geometry.geom_type != 'cone_souv': + src = system.source.position.reshape(3,1) + projector = 'cone_vec' + # Use the per-projection geometry else: - src = system.source.position.reshape(3,1) projector = 'cone_vec' #Build for astra 3D only - vectors = np.zeros((angles.num_positions, 12)) + # Use the traditional geometry set with rotation angles + if sinogram_geometry.geom_type != 'cone_souv': + vectors = np.zeros((angles.num_positions, 12)) + + for i, theta in enumerate(angles.angle_data): + ang = - angles.initial_angle - theta + + rotation_matrix = rotation_matrix_z_from_euler(ang, degrees=degrees) + + vectors[i, :3] = rotation_matrix.dot(src).reshape(3) + vectors[i, 3:6] = rotation_matrix.dot(det).reshape(3) + vectors[i, 6:9] = rotation_matrix.dot(row).reshape(3) + 
vectors[i, 9:] = rotation_matrix.dot(col).reshape(3) + # Use the per-projection geometry + else: + vectors = np.zeros((system.num_positions, 12)) - for i, theta in enumerate(angles.angle_data): - ang = - angles.initial_angle - theta + for i, (src, det, row, col) in enumerate(zip(system.source.position_set, system.detector.position_set, system.detector.direction_x_set, system.detector.direction_y_set)): - rotation_matrix = rotation_matrix_z_from_euler(ang, degrees=degrees) + vectors[i, :3] = src.reshape(3) + vectors[i, 3:6] = det.reshape(3) + vectors[i, 6:9] = row.reshape(3) + vectors[i, 9:] = col.reshape(3) - vectors[i, :3] = rotation_matrix.dot(src).reshape(3) - vectors[i, 3:6] = rotation_matrix.dot(det).reshape(3) - vectors[i, 6:9] = rotation_matrix.dot(row).reshape(3) - vectors[i, 9:] = rotation_matrix.dot(col).reshape(3) proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors) vol_geom = astra.create_vol_geom(volume_geometry_temp.voxel_num_y, From 67c61b87f40f68d9592fe1866b999d5c92d387af Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 29 May 2024 08:35:18 +0100 Subject: [PATCH 05/17] Port to SOUV geometry. --- Wrappers/Python/cil/plugins/astra/processors/FDK_Flexible.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/plugins/astra/processors/FDK_Flexible.py b/Wrappers/Python/cil/plugins/astra/processors/FDK_Flexible.py index f0446b4df3..83178469d9 100644 --- a/Wrappers/Python/cil/plugins/astra/processors/FDK_Flexible.py +++ b/Wrappers/Python/cil/plugins/astra/processors/FDK_Flexible.py @@ -68,7 +68,7 @@ def check_input(self, dataset): raise ValueError("Expected input data to be single channel, got {0}"\ .format(self.sinogram_geometry.channels)) - if self.sinogram_geometry.geom_type != 'cone': + if self.sinogram_geometry.geom_type != 'cone' and self.sinogram_geometry.geom_type != 'cone_souv': raise ValueError("Expected input data to be cone beam geometry , got {0}"\ .format(self.sinogram_geometry.geom_type)) From 8a0cf16342a81617495c76aa9cf685da4fec4b6e Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 29 May 2024 09:36:04 +0100 Subject: [PATCH 06/17] Fix the property num_projections when SOUV is used. --- Wrappers/Python/cil/framework/acquisition_geometry.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index e869aa43a5..8c84dbca99 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1345,7 +1345,6 @@ def __str__(self): def csv(val): return numpy.array2string(val, separator=', ') - print(self.volume_centre_position) repres = "Per-projection 3D Cone-beam tomography\n" repres += "System configuration:\n" repres += "\tSource positions 0-9: {0}\n".format(numpy.array2string(self.source.position_set[0:10], separator=', ')) @@ -1899,7 +1898,12 @@ def geom_type(self): @property def num_projections(self): - return len(self.angles) + # Using the traditional geometry set with rotation angles + if self.geom_type != "cone_souv": + return len(self.angles); + # Using the per-projection SOUV geometry + else: + return self.config.system.num_positions; @property def pixel_num_h(self): From 055ba1d9f33c7c5d45035a487db4aee76f7539a9 Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 29 May 2024 15:42:07 +0100 Subject: [PATCH 07/17] Take into account the centre of the reconstructed volume. 
--- .../convert_geometry_to_astra_vec_3D.py | 73 +++++++++++-------- 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index 9246e9b717..3070a6b6d4 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -60,7 +60,10 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): system = sinogram_geometry.config.system panel = sinogram_geometry.config.panel - + + # Translation vector that will modify the centre of the reconstructed volume + # (by defalut, no translation) + translation = [0.0, 0.0, 0.0]; if sinogram_geometry.dimension == '2D': #create a 3D astra geom from 2D CIL geometry @@ -94,29 +97,41 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): src[1] = system.source.position[1] projector = 'cone_vec' - else: + # Only needed when using the traditional geometry set with rotation angles + elif sinogram_geometry.geom_type != 'cone_souv': + volume_geometry_temp = volume_geometry.copy() - # Only needed when using the traditional geometry set with rotation angles - if sinogram_geometry.geom_type != 'cone_souv': - row = panel.pixel_size[0] * system.detector.direction_x.reshape(3,1) - col = panel.pixel_size[1] * system.detector.direction_y.reshape(3,1) - det = system.detector.position.reshape(3, 1) - - if 'right' in panel.origin: - row *= -1 - if 'top' in panel.origin: - col *= -1 - - if sinogram_geometry.geom_type == 'parallel': - src = system.ray.direction.reshape(3,1) - projector = 'parallel3d_vec' - elif sinogram_geometry.geom_type != 'cone_souv': - src = system.source.position.reshape(3,1) - projector = 'cone_vec' - # Use the per-projection geometry - else: + row = panel.pixel_size[0] * system.detector.direction_x.reshape(3,1) + col = panel.pixel_size[1] * system.detector.direction_y.reshape(3,1) + det = system.detector.position.reshape(3, 1) + + if 'right' in panel.origin: + row *= -1 + if 'top' in panel.origin: + col *= -1 + + if sinogram_geometry.geom_type == 'parallel': + src = system.ray.direction.reshape(3,1) + projector = 'parallel3d_vec' + elif sinogram_geometry.geom_type != 'cone_souv': + src = system.source.position.reshape(3,1) projector = 'cone_vec' + # Use the per-projection geometry + else: + volume_geometry_temp = volume_geometry.copy() + + # Compute the current centre + current_centre = np.array([ + volume_geometry_temp.get_min_x() + (volume_geometry_temp.get_max_x() - volume_geometry_temp.get_min_x()) / 2.0, + volume_geometry_temp.get_min_y() + (volume_geometry_temp.get_max_y() - volume_geometry_temp.get_min_y()) / 2.0, + volume_geometry_temp.get_min_z() + (volume_geometry_temp.get_max_z() - volume_geometry_temp.get_min_z()) / 2.0 + ]); + + # Compute a translation vector that will modify the centre of the reconstructed volume + translation = np.array(system.volume_centre_position) - current_centre; + + projector = 'cone_vec' #Build for astra 3D only # Use the traditional geometry set with rotation angles @@ -145,15 +160,15 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors) - vol_geom = astra.create_vol_geom(volume_geometry_temp.voxel_num_y, - volume_geometry_temp.voxel_num_x, + vol_geom = 
astra.create_vol_geom(volume_geometry_temp.voxel_num_x, + volume_geometry_temp.voxel_num_y, volume_geometry_temp.voxel_num_z, - volume_geometry_temp.get_min_x(), - volume_geometry_temp.get_max_x(), - volume_geometry_temp.get_min_y(), - volume_geometry_temp.get_max_y(), - volume_geometry_temp.get_min_z(), - volume_geometry_temp.get_max_z() + volume_geometry_temp.get_min_x() + translation[0], + volume_geometry_temp.get_max_x() + translation[0], + volume_geometry_temp.get_min_y() + translation[1], + volume_geometry_temp.get_max_y() + translation[1], + volume_geometry_temp.get_min_z() + translation[2], + volume_geometry_temp.get_max_z() + translation[2] ) From 16f71db67be9b988a385e32d7f894f4252f1c343 Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 29 May 2024 16:17:39 +0100 Subject: [PATCH 08/17] Can estimate the centre of the reconstructed volume. --- .../cil/framework/acquisition_geometry.py | 62 ++++++++++++++++++- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index 8c84dbca99..3df283541b 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1254,7 +1254,65 @@ def __init__ (self, source_position_set, detector_position_set, detector_directi self.detector.direction_y_set = detector_direction_y_set; #reconstructed volume centre - self.volume_centre_position = numpy.array(volume_centre_position); + if volume_centre_position is not None: + self.volume_centre_position = numpy.array(volume_centre_position); + # Estimate the position + else: + def clamp(n, smallest, largest): + return max(smallest, min(n, largest)) + + # The function below has been adapted from: + # https://zalo.github.io/blog/closest-point-between-segments/ + def lineSegClosestPoints(a, u, b, v): + r = b - a + + ru = numpy.dot(r, u) + rv = numpy.dot(r, v) + uu = numpy.dot(u, u) + uv = numpy.dot(u, v) + vv = numpy.dot(v, v) + + det = uu*vv - uv*uv + + # if det is too close to 0, then they're parallel + # you can work out a way to handle this case + + # compute optimal values for s and t + s = (ru*vv - rv*uv)/det + t = (ru*uv - rv*uu)/det + + # constrain values s and t so that they describe points on the segments + s = clamp(s, 0, 1) + t = clamp(t, 0, 1) + + # convert value s for segA into the corresponding closest value t for segB + # and vice versa + S = (t*uv + ru)/uu + T = (s*uv - rv)/vv + + # constrain + S = clamp(S, 0, 1) + T = clamp(T, 0, 1) + + return a + S*u, b + T*v + + points = numpy.zeros(((len(source_position_set) - 1) * 2, 3)); + + for i in range(len(source_position_set) - 1): + a = source_position_set[i + 0] + b = source_position_set[i + 1] + + u = detector_position_set[i + 0] - source_position_set[i + 0] + v = detector_position_set[i + 1] - source_position_set[i + 1] + + A, B = lineSegClosestPoints(a, u, b, v) + + points[i * 2 + 0] = A + points[i * 2 + 1] = B + + self.volume_centre_position = numpy.array([points[:, 0].mean(), points[:, 1].mean(), points[:, 2].mean()]); + + # def align_z(self): @@ -2352,7 +2410,7 @@ def create_Cone3D_SOUV(source_position_set, detector_position_set, detector_dire :type detector_direction_x_set: list, tuple, ndarray :param detector_direction_y_set: N 3D vectors describing the direction of the detector_y (x,y,z) :type detector_direction_y_set: list, tuple, ndarray - :param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z) + 
:param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z). If no position givenm it will be computed automatically. :type volume_centre_position: list, tuple, ndarray, optional :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel :type units: string From f086ecd1727a8e041c03a0d6c7da39fcfb61fa9e Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 26 Jun 2024 12:20:15 +0100 Subject: [PATCH 09/17] Add a new functionality that displays four plots to show i) the source position, ii) the imager centre, iii) the imager x-direction, and iv) the imager y-direction for each projection. --- Wrappers/Python/cil/utilities/display.py | 94 ++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/Wrappers/Python/cil/utilities/display.py b/Wrappers/Python/cil/utilities/display.py index 5e200555aa..d56abca8bc 100644 --- a/Wrappers/Python/cil/utilities/display.py +++ b/Wrappers/Python/cil/utilities/display.py @@ -1075,3 +1075,97 @@ def __init__(self,acquisition_geometry, image_geometry=None, elevation=20, azimu self.display = _ShowGeometry(acquisition_geometry, image_geometry) self.figure = self.display.draw(elev=elevation, azim=azimuthal, view_distance=view_distance, grid=grid, figsize=figsize, fontsize=fontsize) + + + +class show_SOUV_geometry_vectors(cil.utilities.display.show_base): + ''' + Displays four plots to show i) the source position, + ii) the imager centre, iii) the imager x-direction, and + iv) the imager y-direction for each projection. + + + Parameters + ---------- + acquisition_geometry: AcquisitionGeometry + CIL acquisition geometry + figsize: tuple (x, y) + Set figure size (inches), default (10,10) + fontsize: int + Set fontsize, default 10 + + Returns + ------- + matplotlib.figure.Figure + returns a matplotlib.pyplot figure object + ''' + + + def __init__(self, acquisition_geometry:cil.framework.framework.AcquisitionGeometry, figsize=(10,10), fontsize=10): + + # Only applicable for AcquisitionGeometry + if not isinstance(acquisition_geometry, cil.framework.framework.AcquisitionGeometry): + raise ValueError("The data type of `acquisition_geometry` must be \"\". It is \"%s\", which is not currently supported by this function." % type(acquisition_geometry)) + + # Only applicable for cone_souv geometry type + if acquisition_geometry.geom_type != "cone_souv": + raise ValueError("The geometry type of `acquisition_geometry` must be \"cone_souv\". It is \"%s\", which is not currently supported by this function." 
% acquisition_geometry.geom_type) + + self.figure = self._draw(acquisition_geometry, figsize, fontsize); + + + def _draw(self, acquisition_geometry, figsize, fontsize): + + # Plot the data + self.fig, self.axs = plt.subplots(2, 2, figsize=figsize); + + x_axis_values = np.arange(acquisition_geometry.num_projections); + i = 0; j = 0; + x = 0; y = 1; z = 2; + self.axs[j,i].set_title("Source position"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,2], label="Z axis"); + self.axs[j,i].legend(fontsize=fontsize); + # self.axs[j,i].set_xlabel("Projection #"); + self.axs[j,i].set_ylabel("Position"); + + i = 1; j = 0; + x += 3; y += 3; z += 3; + self.axs[j,i].set_title("Imager Center"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,2], label="Z axis"); + self.axs[j,i].legend(fontsize=fontsize); + # axs[j,i].set_xlabel("Projection #"); + # axs[j,i].set_ylabel("Position in (cm)"); + + i = 0; j = 1; + x += 3; y += 3; z += 3; + self.axs[j,i].set_title("Imager X-direction"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,2], label="Z axis"); + self.axs[j,i].legend(fontsize=fontsize); + self.axs[j,i].set_xlabel("Projection #"); + self.axs[j,i].set_ylabel("Position"); + + i = 1; j = 1; + x += 3; y += 3; z += 3; + self.axs[j,i].set_title("Imager Y-direction"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,2], label="Z axis"); + self.axs[j,i].legend(fontsize=fontsize); + self.axs[j,i].set_xlabel("Projection #"); + # axs[j,i].set_ylabel("Position in (mm)"); + + # Resize the text + for ax in self.axs.flatten(): + for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()): + item.set_fontsize(fontsize) + + plt.tight_layout() + fig2 = plt.gcf() + return fig2 + \ No newline at end of file From 280158dac825cb3ba4b23554983222b231055444 Mon Sep 17 00:00:00 2001 From: effepivi Date: Wed, 26 Jun 2024 12:32:57 +0100 Subject: [PATCH 10/17] Fix errors due to porting the code from Notebook to display.py --- Wrappers/Python/cil/utilities/display.py | 30 ++++++++++++------------ 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Wrappers/Python/cil/utilities/display.py b/Wrappers/Python/cil/utilities/display.py index d56abca8bc..344db5685d 100644 --- a/Wrappers/Python/cil/utilities/display.py +++ b/Wrappers/Python/cil/utilities/display.py @@ -1078,7 +1078,7 @@ def __init__(self,acquisition_geometry, image_geometry=None, elevation=20, azimu -class show_SOUV_geometry_vectors(cil.utilities.display.show_base): +class show_SOUV_geometry_vectors(show_base): ''' Displays four plots to show i) the source 
position, ii) the imager centre, iii) the imager x-direction, and @@ -1101,10 +1101,10 @@ class show_SOUV_geometry_vectors(cil.utilities.display.show_base): ''' - def __init__(self, acquisition_geometry:cil.framework.framework.AcquisitionGeometry, figsize=(10,10), fontsize=10): + def __init__(self, acquisition_geometry:AcquisitionGeometry, figsize=(10,10), fontsize=10): # Only applicable for AcquisitionGeometry - if not isinstance(acquisition_geometry, cil.framework.framework.AcquisitionGeometry): + if not isinstance(acquisition_geometry, AcquisitionGeometry): raise ValueError("The data type of `acquisition_geometry` must be \"\". It is \"%s\", which is not currently supported by this function." % type(acquisition_geometry)) # Only applicable for cone_souv geometry type @@ -1123,9 +1123,9 @@ def _draw(self, acquisition_geometry, figsize, fontsize): i = 0; j = 0; x = 0; y = 1; z = 2; self.axs[j,i].set_title("Source position"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,0], label="X axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,1], label="Y axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.source.position_set[:,2], label="Z axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.source.position_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.source.position_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.source.position_set[:,2], label="Z axis"); self.axs[j,i].legend(fontsize=fontsize); # self.axs[j,i].set_xlabel("Projection #"); self.axs[j,i].set_ylabel("Position"); @@ -1133,9 +1133,9 @@ def _draw(self, acquisition_geometry, figsize, fontsize): i = 1; j = 0; x += 3; y += 3; z += 3; self.axs[j,i].set_title("Imager Center"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,0], label="X axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,1], label="Y axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.position_set[:,2], label="Z axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.position_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.position_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.position_set[:,2], label="Z axis"); self.axs[j,i].legend(fontsize=fontsize); # axs[j,i].set_xlabel("Projection #"); # axs[j,i].set_ylabel("Position in (cm)"); @@ -1143,9 +1143,9 @@ def _draw(self, acquisition_geometry, figsize, fontsize): i = 0; j = 1; x += 3; y += 3; z += 3; self.axs[j,i].set_title("Imager X-direction"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,0], label="X axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,1], label="Y axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_x_set[:,2], label="Z axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_x_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_x_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_x_set[:,2], label="Z axis"); self.axs[j,i].legend(fontsize=fontsize); 
self.axs[j,i].set_xlabel("Projection #"); self.axs[j,i].set_ylabel("Position"); @@ -1153,9 +1153,9 @@ def _draw(self, acquisition_geometry, figsize, fontsize): i = 1; j = 1; x += 3; y += 3; z += 3; self.axs[j,i].set_title("Imager Y-direction"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,0], label="X axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,1], label="Y axis"); - self.axs[j,i].plot(x_axis_values, geometry.config.system.detector.direction_y_set[:,2], label="Z axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_y_set[:,0], label="X axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_y_set[:,1], label="Y axis"); + self.axs[j,i].plot(x_axis_values, acquisition_geometry.config.system.detector.direction_y_set[:,2], label="Z axis"); self.axs[j,i].legend(fontsize=fontsize); self.axs[j,i].set_xlabel("Projection #"); # axs[j,i].set_ylabel("Position in (mm)"); From 993ab12604eb569b09093a23579cdf24b9a61b42 Mon Sep 17 00:00:00 2001 From: effepivi Date: Tue, 28 May 2024 16:05:57 +0100 Subject: [PATCH 11/17] Can create AcquisitionData for a SOUV geometry. --- .../cil/framework/acquisition_geometry.py | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index 3df283541b..9924c18994 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1245,6 +1245,9 @@ def __init__ (self, source_position_set, detector_position_set, detector_directi raise ValueError("Make sure all the sets have the same number of parameters"); + # Number of projections + self.num_positions = len(source_position_set); + #source self.source.position_set = source_position_set; @@ -2035,11 +2038,20 @@ def shape(self): def dimension_labels(self): labels_default = AcquisitionDimension.get_order_for_engine("cil") - shape_default = [self.config.channels.num_channels, - self.config.angles.num_positions, - self.config.panel.num_pixels[1], - self.config.panel.num_pixels[0] - ] + # We are using angles, not per-projection geometry + if self.config.system.geometry != "cone_souv": + shape_default = [self.config.channels.num_channels, + self.config.angles.num_positions, + self.config.panel.num_pixels[1], + self.config.panel.num_pixels[0] + ] + # We are using per-projection geometry, not angles + else: + shape_default = [self.config.channels.num_channels, + self.config.system.num_positions, + self.config.panel.num_pixels[1], + self.config.panel.num_pixels[0] + ] try: labels = self._dimension_labels From 0cdf4702b9bd5839a0c0f1b393f09a875fba3faf Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Mon, 13 Jan 2025 11:14:41 +0000 Subject: [PATCH 12/17] bugfix for standard astra geometry conversion --- .../astra/utilities/convert_geometry_to_astra_vec_3D.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index 3070a6b6d4..98f45b9ea5 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -160,8 +160,8 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, 
sinogram_geometry_in): proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors) - vol_geom = astra.create_vol_geom(volume_geometry_temp.voxel_num_x, - volume_geometry_temp.voxel_num_y, + vol_geom = astra.create_vol_geom(volume_geometry_temp.voxel_num_y, + volume_geometry_temp.voxel_num_x, volume_geometry_temp.voxel_num_z, volume_geometry_temp.get_min_x() + translation[0], volume_geometry_temp.get_max_x() + translation[0], From 99996d928f18afc81d89890fff27930ae5931a9f Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Tue, 14 Jan 2025 18:14:12 +0000 Subject: [PATCH 13/17] framework unittests. Update storage structure for souv parameters. Doc strings and ag souv aware default behaviour. --- .../cil/framework/acquisition_geometry.py | 534 ++++++++++++------ .../convert_geometry_to_astra_vec_3D.py | 16 +- .../Python/test/test_AcquisitionGeometry.py | 88 +++ Wrappers/Python/test/test_labels.py | 2 +- 4 files changed, 464 insertions(+), 176 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index 9924c18994..ba3e3e292d 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -198,21 +198,28 @@ def acquisition_type(self): def acquisition_type(self, val): self._acquisition_type = AcquisitionType(val).validate() - def __init__(self, dof: int, geometry, units='units'): + def __init__(self, dof: int, geometry, units='units', number_vectors=1): self.acquisition_type = AcquisitionType(f"{dof}D") | AcquisitionType(geometry) + self._num_vectors = number_vectors self.units = units - if AcquisitionType.PARALLEL & self.geometry: - self.ray = DirectionVector(dof) - else: - self.source = PositionVector(dof) + if AcquisitionType.CONE_SOUV & self.geometry: + self.source = [PositionVector(dof) for _ in range(number_vectors)] + self.detector = [Detector2D(dof) for _ in range(number_vectors)] + self.volume_centre = PositionVector(dof) - if AcquisitionType.DIM2 & self.dimension: - self.detector = Detector1D(dof) - self.rotation_axis = PositionVector(dof) else: - self.detector = Detector2D(dof) - self.rotation_axis = PositionDirectionVector(dof) + if AcquisitionType.PARALLEL & self.geometry: + self.ray = DirectionVector(dof) + else: + self.source = PositionVector(dof) + + if AcquisitionType.DIM2 & self.dimension: + self.detector = Detector1D(dof) + self.rotation_axis = PositionVector(dof) + else: + self.detector = Detector2D(dof) + self.rotation_axis = PositionDirectionVector(dof) def __str__(self): """Implements the string representation of the system configuration @@ -273,21 +280,30 @@ def rotation_vec_to_z(vec): return axis_rotation def update_reference_frame(self): - r'''Transforms the system origin to the rotation_axis position + r'''Transforms the system origin to the rotation_axis position, or the volume centre for per-projection geometry ''' - self.set_origin(self.rotation_axis.position) + if self.acquisition_type & AcquisitionType.CONE_SOUV: + self.set_origin(self.volume_centre.position) + else: + self.set_origin(self.rotation_axis.position) def set_origin(self, origin): r'''Transforms the system origin to the input origin ''' translation = origin.copy() - if hasattr(self,'source'): - self.source.position -= translation - self.detector.position -= translation - self.rotation_axis.position -= translation + if self.acquisition_type & AcquisitionType.CONE_SOUV: + for i in range(self._num_vectors): + 
self.source[i].position -= translation + self.detector[i].position -= translation + self.volume_centre.position -= translation + else: + if hasattr(self,'source'): + self.source.position -= translation + self.detector.position -= translation + self.rotation_axis.position -= translation def get_centre_slice(self): """Returns the 2D system configuration corresponding to the centre slice @@ -1219,46 +1235,62 @@ def set_centre_of_rotation(self, offset, angle): class Cone3D_SOUV(SystemConfiguration): r'''This class creates the SystemConfiguration of a cone beam 3D tomographic system - :param source_position_set: N 3D vectors describing the position of the source (x,y,z) - :type source_position_set: list, tuple, ndarray - :param detector_position_set: N 3D vectors describing the position of the centre of the detector (x,y,z) - :type detector_position_set: list, tuple, ndarray - :param detector_direction_x_set: N 3D vectors describing the direction of the detector_x (x,y,z) - :type detector_direction_x_set: list, tuple, ndarray - :param detector_direction_y_set: N 3D vectors describing the direction of the detector_y (x,y,z) - :type detector_direction_y_set: list, tuple, ndarray - :param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z) - :type volume_centre_position: list, tuple, ndarray - :param units: Label the units of distance used for the configuration - :type units: string + Parameters + ---------- + source_position_set : array_like + N 3D vectors describing the position of the source (x,y,z) + detector_position_set : array_like + N 3D vectors describing the position of the centre of the detector (x,y,z) + detector_direction_x_set : array_like + N 3D vectors describing the direction of the detector_x (x,y,z) + detector_direction_y_set : array_like + N 3D vectors describing the direction of the detector_y (x,y,z) + volume_centre_position : array_like, optional + A 3D vector describing the position of the centre of the reconstructed volume (x,y,z). If + not provided, the volume centre will be estimated as the mean of the midpoints of the + lines connecting the source and detector positions. 
+ units : str + Label the units of distance used for the configuration + ''' - def __init__ (self, source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units='units'): - """Constructor method - """ - super(Cone3D_SOUV, self).__init__(dof=3, geometry = 'cone_souv', units=units); + def _validate_and_convert(self, array, name, num_positions, normalise=False): + try: + array = numpy.array(array,dtype=numpy.float64).reshape(num_positions,3) + except: + raise ValueError(f"{name} expected a list of {num_positions} 3D vectors") - # All the sets must have the same number of parameters - if len(source_position_set) != len(detector_position_set) or \ - len(source_position_set) != len(detector_direction_x_set) or \ - len(source_position_set) != len(detector_direction_y_set): - - raise ValueError("Make sure all the sets have the same number of parameters"); + if normalise: + for i in range(num_positions): + array[i] = array[i] / numpy.linalg.norm(array[i]) - # Number of projections - self.num_positions = len(source_position_set); + return array - #source - self.source.position_set = source_position_set; - #detector - self.detector.position_set = detector_position_set; - self.detector.direction_x_set = detector_direction_x_set; - self.detector.direction_y_set = detector_direction_y_set; + + def __init__ (self, source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units='units'): + + self.num_positions = len(source_position_set) + super(Cone3D_SOUV, self).__init__(dof=3, geometry = 'cone_souv', units=units, number_vectors= self.num_positions) + + # Check that all input sets have the same length + if not (len(detector_position_set) == self.num_positions and + len(detector_direction_x_set) == self.num_positions and + len(detector_direction_y_set) == self.num_positions): + raise ValueError(f"All input sets must have the same length. 
Got {self.num_positions} source positions, \ + {len(detector_position_set)} detector positions, {len(detector_direction_x_set)} detector direction x vectors, \ + and {len(detector_direction_y_set)} detector direction y vectors.") + + # Validate and convert the input arrays + for i in range(self.num_positions): + self.source[i].position = source_position_set[i] + self.detector[i].position = detector_position_set[i] + self.detector[i].set_direction(detector_direction_x_set[i], detector_direction_y_set[i]) #reconstructed volume centre if volume_centre_position is not None: - self.volume_centre_position = numpy.array(volume_centre_position); + self.volume_centre.position = volume_centre_position + # Estimate the position else: def clamp(n, smallest, largest): @@ -1299,21 +1331,21 @@ def lineSegClosestPoints(a, u, b, v): return a + S*u, b + T*v - points = numpy.zeros(((len(source_position_set) - 1) * 2, 3)); + points = numpy.zeros(((len(source_position_set) - 1) * 2, 3)) for i in range(len(source_position_set) - 1): - a = source_position_set[i + 0] - b = source_position_set[i + 1] + a = self.source[i+0].position + b = self.source[i+1].position - u = detector_position_set[i + 0] - source_position_set[i + 0] - v = detector_position_set[i + 1] - source_position_set[i + 1] + u = self.detector[i + 0].position - self.source[i + 0].position + v = self.detector[i + 1].position - self.source[i + 1].position A, B = lineSegClosestPoints(a, u, b, v) points[i * 2 + 0] = A points[i * 2 + 1] = B - self.volume_centre_position = numpy.array([points[:, 0].mean(), points[:, 1].mean(), points[:, 2].mean()]); + self.volume_centre.position = [points[:, 0].mean(), points[:, 1].mean(), points[:, 2].mean()] @@ -1404,16 +1436,19 @@ def lineSegClosestPoints(a, u, b, v): def __str__(self): def csv(val): - return numpy.array2string(val, separator=', ') + return numpy.array2string(numpy.array(val), separator=',') repres = "Per-projection 3D Cone-beam tomography\n" repres += "System configuration:\n" - repres += "\tSource positions 0-9: {0}\n".format(numpy.array2string(self.source.position_set[0:10], separator=', ')) - repres += "\tDetector positions 0-9: {0}\n".format(numpy.array2string(self.detector.position_set[0:10], separator=', ')) - repres += "\tDetector directions x 0-9: {0}\n".format(numpy.array2string(self.detector.direction_x_set[0:10], separator=', ')) - repres += "\tDetector directions y 0-9: {0}\n".format(numpy.array2string(self.detector.direction_y_set[0:10], separator=', ')) - repres += "Reconstructed volume:\n" - repres += "\tCentre position: {0}\n".format(csv(self.volume_centre_position)) + repres += f"\tNumber of projections: {self.num_positions}\n" + + num_show = min(10, self.num_positions) + repres += f"\tSource positions 0-{num_show-1}: {csv([self.source[i].position for i in range(num_show)])}\n" + repres += f"\tDetector positions 0-{num_show-1}: {csv([self.detector[i].position for i in range(num_show)])}\n" + repres += f"\tDetector directions x 0-{num_show-1}: {csv([self.detector[i].direction_x for i in range(num_show)])}\n" + repres += f"\tDetector directions y 0-{num_show-1}: {csv([self.detector[i].direction_y for i in range(num_show)])}\n" + repres += f"\tVolume centre position: {csv(self.volume_centre.position)}\n" + repres += f"\tAverage system magnification: {numpy.mean(self.calculate_magnification()[2])}\n" return repres def __eq__(self, other): @@ -1425,34 +1460,51 @@ def __eq__(self, other): and numpy.allclose(self.detector.position_set, other.detector.position_set)\ and 
numpy.allclose(self.detector.direction_x_set, other.detector.direction_x_set)\ and numpy.allclose(self.detector.direction_y_set, other.detector.direction_y_set)\ - and numpy.allclose(self.volume_centre_position, other.volume_centre_position): + and numpy.allclose(self.volume_centre.position, other.volume_centre.position): return True return False - def calculate_magnification(self, projection_ID:int=0): + def calculate_magnification(self): + """ + Calculate the magnification for each position. - if projection_ID < self.num_positions: - idx = projection_ID; - else: - raise IndexError("The requested projection index is greater than or equal to the total number of projections in the SOUV geometry."); + This method calculates the magnification of the volume_centre point for each projection in + the acquisition geometry. - ab = (self.volume_centre_position - self.source.position_set[idx]) - dist_source_center = float(numpy.sqrt(ab.dot(ab))) + It computes the distances from the source to the volume_centre and from the volume_centre to the detector, + and then uses these distances to determine the magnification. - ab_unit = ab / numpy.sqrt(ab.dot(ab)) + Returns + ------- + list of numpy.ndarray + A list containing three numpy arrays: + - dist_source_center: The distances from the source to the center for each position. + - dist_center_detector: The distances from the center to the detector for each position. + - magnification: The magnification factors for each position. + """ - # Compute the normal vector of the detector - n = numpy.cross(self.detector.direction_x_set[idx], self.detector.direction_y_set[idx]) + dist_center_detector = numpy.empty(self.num_positions) + dist_source_center = numpy.empty(self.num_positions) + magnification = numpy.empty(self.num_positions) - #perpendicular distance between source and detector centre - sd = float((self.detector.position_set[idx] - self.source.position_set[idx]).dot(n)) - ratio = float(ab_unit.dot(n)) + for idx in range(self.num_positions): + ab = (self.volume_centre.position - self.source[idx].position) + dist_source_center[idx] = float(numpy.sqrt(ab.dot(ab))) + + ab_unit = ab / numpy.sqrt(ab.dot(ab)) + + n = self.detector[idx].normal + + #perpendicular distance between source and detector centre + sd = float((self.detector[idx].position - self.source[idx].position).dot(n)) + ratio = float(ab_unit.dot(n)) + + source_to_detector= sd / ratio + dist_center_detector[idx] = source_to_detector - dist_source_center[idx] + magnification[idx] = (dist_center_detector[idx] + dist_source_center[idx] ) / dist_source_center[idx] - source_to_detector = sd / ratio - dist_center_detector = source_to_detector - dist_source_center - magnification = (dist_center_detector + dist_source_center) / dist_source_center return [dist_source_center, dist_center_detector, magnification] @@ -2008,16 +2060,46 @@ def angles(self): @property def dist_source_center(self): + """ + Calculate and return the distance from the source to the center of the volume. + + For `cone3d_souv` geometry, this method returns a NumPy ndarray with the distances + for each position. For other geometries, it returns a single float value. + + Returns: + numpy.ndarray: The distance from the source to the center for `cone3d_souv` geometry. + float: The distance from the source to the center for other geometries. 
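+
+ For example, a geometry created with `create_Cone2D(source_position=[0, -2], detector_position=[0, 1])`
+ and the default rotation axis at the origin has a source-to-centre distance of 2.0 in the configured
+ distance units (illustrative values only).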
+ """ out = self.config.system.calculate_magnification() return out[0] - + @property def dist_center_detector(self): + """ + Calculate and return the distance from the center of the volume to the detector. + + For `cone3d_souv` geometry, this method returns a NumPy ndarray with the distances + for each position. For other geometries, it returns a single float value. + + Returns: + numpy.ndarray: The distance from the source to the center for `cone3d_souv` geometry. + float: The distance from the source to the center for other geometries. + """ out = self.config.system.calculate_magnification() return out[1] @property def magnification(self): + """ + Calculate and return the magnification of the system. + + For `cone3d_souv` geometry, this method returns a NumPy ndarray with the distances + for each position. For other geometries, it returns a single float value. + + Returns: + numpy.ndarray: The magnification factor for `CONE3D_SOUV` geometry. + float: The magnification factor for other geometries. + """ out = self.config.system.calculate_magnification() return out[2] @@ -2316,21 +2398,42 @@ def set_labels(self, labels=None): @staticmethod def create_Parallel2D(ray_direction=[0, 1], detector_position=[0, 0], detector_direction_x=[1, 0], rotation_axis_position=[0, 0], units='units distance'): - r'''This creates the AcquisitionGeometry for a parallel beam 2D tomographic system - - :param ray_direction: A 2D vector describing the x-ray direction (x,y) - :type ray_direction: list, tuple, ndarray, optional - :param detector_position: A 2D vector describing the position of the centre of the detector (x,y) - :type detector_position: list, tuple, ndarray, optional - :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y) - :type detector_direction_x: list, tuple, ndarray - :param rotation_axis_position: A 2D vector describing the position of the axis of rotation (x,y) - :type rotation_axis_position: list, tuple, ndarray, optional - :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel - :type units: string - :return: returns a configured AcquisitionGeometry object - :rtype: AcquisitionGeometry - ''' + """ + Creates the AcquisitionGeometry for a parallel beam 2D tomographic system. + + After creating the AcquisitionGeometry object, the panel must be set using the `set_panel()` method and the angles must be set using the `set_angles()` method. + In addition,`set_channel()` can be used to set the number of channels and their labels. `set_labels()` can be used to set the order of the dimensions describing the data. + + See notes for default directions. + + Parameters + ---------- + ray_direction : array_like, optional + A 2D vector describing the x-ray direction (x,y). Default is [0,1]. + detector_position : array_like, optional + A 2D vector describing the position of the centre of the detector (x,y). Default is [0,0]. + detector_direction_x : array_like, optional + A 2D vector describing the direction of the detector_x (x,y). Default is [1,0]. + rotation_axis_position : array_like, optional + A 2D vector describing the position of the axis of rotation (x,y). Default is [0,0]. + units : str + Label the units of distance used for the configuration. These should be consistent for the geometry and panel. + + Returns + ------- + AcquisitionGeometry + An AcquisitionGeometry object. + + Note + ---- + The geometry is configured with default directions. 
The geometry can be customised but ensure that the directions are consistent. + + The default direction of the detector_x is [1,0]. This means that the detector is assumed to be pointing in the x direction. + + The rotation axis position is [0,0] which means that the rotation axis is assumed to be at the origin. + + If these values are left as default then the source and detector should lie along the y-axis. + """ AG = AcquisitionGeometry() AG.config = Configuration(units) AG.config.system = Parallel2D(ray_direction, detector_position, detector_direction_x, rotation_axis_position, units) @@ -2338,21 +2441,42 @@ def create_Parallel2D(ray_direction=[0, 1], detector_position=[0, 0], detector_d @staticmethod def create_Cone2D(source_position, detector_position, detector_direction_x=[1,0], rotation_axis_position=[0,0], units='units distance'): - r'''This creates the AcquisitionGeometry for a cone beam 2D tomographic system - - :param source_position: A 2D vector describing the position of the source (x,y) - :type source_position: list, tuple, ndarray - :param detector_position: A 2D vector describing the position of the centre of the detector (x,y) - :type detector_position: list, tuple, ndarray - :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y) - :type detector_direction_x: list, tuple, ndarray - :param rotation_axis_position: A 2D vector describing the position of the axis of rotation (x,y) - :type rotation_axis_position: list, tuple, ndarray, optional - :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel - :type units: string - :return: returns a configured AcquisitionGeometry object - :rtype: AcquisitionGeometry - ''' + """ + Creates the AcquisitionGeometry for a cone beam 2D tomographic system. + + After creating the AcquisitionGeometry object, the panel must be set using the `set_panel()` method and the angles must be set using the `set_angles()` method. + In addition,`set_channel()` can be used to set the number of channels and their labels. `set_labels()` can be used to set the order of the dimensions describing the data. + + See notes for default directions. + + Parameters + ---------- + source_position : array_like + A 2D vector describing the position of the source (x,y). With the default directions, this should lie along the y-axis. + detector_position : array_like + A 2D vector describing the position of the centre of the detector (x,y). With the default directions, this should lie along the y-axis. + detector_direction_x : array_like, optional + A 2D vector describing the direction of the detector_x (x,y). Default is [1,0]. + rotation_axis_position : array_like, optional + A 2D vector describing the position of the axis of rotation (x,y). Default is [0,0]. + units : str + Label the units of distance used for the configuration. These should be consistent for the geometry and panel. + + Returns + ------- + AcquisitionGeometry + An AcquisitionGeometry object. + + Note + ---- + The geometry is configured with default directions. The geometry can be customised but ensure that the directions are consistent. + + The default direction of the detector_x is [1,0]. This means that the detector is assumed to be pointing in the x direction. + + The rotation axis position is [0,0] which means that the rotation axis is assumed to be at the origin. + + If these values are left as default then the source and detector should lie along the y-axis. 
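+
+ Examples
+ --------
+ A minimal usage sketch (the source/detector positions, panel size and angles below are illustrative only):
+
+ >>> ag = AcquisitionGeometry.create_Cone2D(source_position=[0, -500], detector_position=[0, 500])
+ >>> ag = ag.set_panel(num_pixels=128, pixel_size=0.1)
+ >>> ag = ag.set_angles([0, 90, 180])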
+ """ AG = AcquisitionGeometry() AG.config = Configuration(units) AG.config.system = Cone2D(source_position, detector_position, detector_direction_x, rotation_axis_position, units) @@ -2360,25 +2484,46 @@ def create_Cone2D(source_position, detector_position, detector_direction_x=[1,0] @staticmethod def create_Parallel3D(ray_direction=[0,1,0], detector_position=[0,0,0], detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_position=[0,0,0], rotation_axis_direction=[0,0,1], units='units distance'): - r'''This creates the AcquisitionGeometry for a parallel beam 3D tomographic system - - :param ray_direction: A 3D vector describing the x-ray direction (x,y,z) - :type ray_direction: list, tuple, ndarray, optional - :param detector_position: A 3D vector describing the position of the centre of the detector (x,y,z) - :type detector_position: list, tuple, ndarray, optional - :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z) - :type detector_direction_x: list, tuple, ndarray - :param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z) - :type detector_direction_y: list, tuple, ndarray - :param rotation_axis_position: A 3D vector describing the position of the axis of rotation (x,y,z) - :type rotation_axis_position: list, tuple, ndarray, optional - :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z) - :type rotation_axis_direction: list, tuple, ndarray, optional - :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel - :type units: string - :return: returns a configured AcquisitionGeometry object - :rtype: AcquisitionGeometry - ''' + """ + Creates the AcquisitionGeometry for a parallel beam 3D tomographic system. + + After creating the AcquisitionGeometry object, the panel must be set using the `set_panel()` method and the angles must be set using the `set_angles()` method. + In addition,`set_channel()` can be used to set the number of channels and their labels. `set_labels()` can be used to set the order of the dimensions describing the data. + + Parameters + ---------- + ray_direction : array_like, optional + A 3D vector describing the x-ray direction (x,y,z). Default is [0,1,0]. + detector_position : array_like, optional + A 3D vector describing the position of the centre of the detector (x,y,z). Default is [0,0,0]. + detector_direction_x : array_like, optional + A 3D vector describing the direction of the detector_x (x,y,z). Default is [1,0,0]. + detector_direction_y : array_like, optional + A 3D vector describing the direction of the detector_y (x,y,z). Default is [0,0,1]. + rotation_axis_position : array_like, optional + A 3D vector describing the position of the axis of rotation (x,y,z). Default is [0,0,0]. + rotation_axis_direction : array_like, optional + A 3D vector describing the direction of the axis of rotation (x,y,z). Default is [0,0,1]. + units : str + Label the units of distance used for the configuration. These should be consistent for the geometry and panel. + + Returns + ------- + AcquisitionGeometry + An AcquisitionGeometry object. + + Notes + ----- + The geometry is configured with default directions. The geometry can be customised but ensure that the directions are consistent. + + The default direction of the detector_x is [1,0,0] and the default direction of the detector_y [0,0,1]. 
This means that the detector + is assumed to be in the x-z plane with the detector_x direction pointing in the x direction and the detector_y direction pointing in the z direction. + + The rotation axis position is [0,0,0] which means that the rotation axis is assumed to be at the origin. + The rotation axis direction is [0,0,1] which means that the rotation axis is assumed to be pointing in the z direction. + + The default direction of the ray is [0,1,0] which means that the ray is assumed to be pointing in the y direction. + """ AG = AcquisitionGeometry() AG.config = Configuration(units) AG.config.system = Parallel3D(ray_direction, detector_position, detector_direction_x, detector_direction_y, rotation_axis_position, rotation_axis_direction, units) @@ -2386,49 +2531,81 @@ def create_Parallel3D(ray_direction=[0,1,0], detector_position=[0,0,0], detector @staticmethod def create_Cone3D(source_position, detector_position, detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_position=[0,0,0], rotation_axis_direction=[0,0,1], units='units distance'): - r'''This creates the AcquisitionGeometry for a cone beam 3D tomographic system - - :param source_position: A 3D vector describing the position of the source (x,y,z) - :type source_position: list, tuple, ndarray, optional - :param detector_position: A 3D vector describing the position of the centre of the detector (x,y,z) - :type detector_position: list, tuple, ndarray, optional - :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z) - :type detector_direction_x: list, tuple, ndarray - :param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z) - :type detector_direction_y: list, tuple, ndarray - :param rotation_axis_position: A 3D vector describing the position of the axis of rotation (x,y,z) - :type rotation_axis_position: list, tuple, ndarray, optional - :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z) - :type rotation_axis_direction: list, tuple, ndarray, optional - :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel - :type units: string - :return: returns a configured AcquisitionGeometry object - :rtype: AcquisitionGeometry - ''' + """ + Creates the AcquisitionGeometry for a cone beam 3D tomographic system. + + After creating the AcquisitionGeometry object, the panel must be set using the `set_panel()` method and the angles must be set using the `set_angles()` method. + In addition,`set_channel()` can be used to set the number of channels and their labels. `set_labels()` can be used to set the order of the dimensions describing the data. + + Parameters + ---------- + source_position : array_like + A 3D vector describing the position of the source (x,y,z). With the default directions, this should lie along the y-axis. + detector_position : array_like + A 3D vector describing the position of the centre of the detector (x,y,z). With the default directions, this should lie along the y-axis. + detector_direction_x : array_like, optional + A 3D vector describing the direction of the detector_x (x,y,z). Default is [1,0,0]. + detector_direction_y : array_like, optional + A 3D vector describing the direction of the detector_y (x,y,z). Default is [0,0,1]. + rotation_axis_position : array_like, optional + A 3D vector describing the position of the axis of rotation (x,y,z). Default is [0,0,0]. 
+ rotation_axis_direction : array_like, optional + A 3D vector describing the direction of the axis of rotation (x,y,z). Default is [0,0,1]. + units : str + Label the units of distance used for the configuration. These should be consistent for the geometry and panel. + + Returns + ------- + AcquisitionGeometry + An AcquisitionGeometry object. + + Note + ---- + The geometry is configured with default directions. The geometry can be customised but ensure that the directions are consistent. + + The default direction of the detector_x is [1,0,0] and the default direction of the detector_y [0,0,1]. This means that the detector + is assumed to be in the x-z plane with the detector_x direction pointing in the x direction and the detector_y direction pointing in the z direction. + + The rotation axis position is [0,0,0] which means that the rotation axis is assumed to be at the origin. + The rotation axis direction is [0,0,1] which means that the rotation axis is assumed to be pointing in the z direction. + + If these values are left as default then the source and detector should lie along the y-axis. + """ AG = AcquisitionGeometry() AG.config = Configuration(units) AG.config.system = Cone3D(source_position, detector_position, detector_direction_x, detector_direction_y, rotation_axis_position, rotation_axis_direction, units) return AG @staticmethod - def create_Cone3D_SOUV(source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position=[0,0,0], units='units distance'): - r'''This creates the AcquisitionGeometry for a per-projection cone beam 3D tomographic system - - :param source_position_set: N 3D vectors describing the position of the source (x,y,z) - :type source_position_set: list, tuple, ndarray, optional - :param detector_position_set: N 3D vectors describing the position of the centre of the detector (x,y,z) - :type detector_position_set: list, tuple, ndarray, optional - :param detector_direction_x_set: N 3D vectors describing the direction of the detector_x (x,y,z) - :type detector_direction_x_set: list, tuple, ndarray - :param detector_direction_y_set: N 3D vectors describing the direction of the detector_y (x,y,z) - :type detector_direction_y_set: list, tuple, ndarray - :param volume_centre_position: A 3D vector describing the position of the centre of the reconstructed volume (x,y,z). If no position givenm it will be computed automatically. - :type volume_centre_position: list, tuple, ndarray, optional - :param units: Label the units of distance used for the configuration, these should be consistent for the geometry and panel - :type units: string - :return: returns a configured AcquisitionGeometry object - :rtype: AcquisitionGeometry - ''' + def create_Cone3D_SOUV(source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position=None, units='units distance'): + """ + Creates the AcquisitionGeometry for a per-projection cone beam 3D tomographic system. + + After creating the AcquisitionGeometry object, the panel must be set using the `set_panel()` method. + In addition,`set_channel()` can be used to set the number of channels and their labels. `set_labels()` can be used to set the order of the dimensions describing the data. + + Parameters + ---------- + source_position_set : array_like + N 3D vectors describing the position of the source (x,y,z). + detector_position_set : array_like + N 3D vectors describing the position of the centre of the detector (x,y,z). 
+ detector_direction_x_set : array_like + N 3D vectors describing the direction of the detector_x (x,y,z). + detector_direction_y_set : array_like + N 3D vectors describing the direction of the detector_y (x,y,z). + volume_centre_position : array_like, optional + A 3D vector describing the position of the centre of the reconstructed volume (x,y,z). + If no position is given, it will be computed automatically. + units : str + Label the units of distance used for the configuration. These should be consistent for the geometry and panel. + + Returns + ------- + AcquisitionGeometry + An AcquisitionGeometry object. + + """ AG = AcquisitionGeometry() AG.config = Configuration(units) AG.config.system = Cone3D_SOUV(source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units) @@ -2473,14 +2650,36 @@ def get_centre_slice(self): return AG_2D def get_ImageGeometry(self, resolution=1.0): - '''returns a default configured ImageGeometry object based on the AcquisitionGeomerty''' + """ + Returns an ImageGeometry object that corresponds to the AcquisitionGeometry object. The voxel numbers are calculated from + the resolution and the pixel sizes of the panel. The voxel sizes are calculated from the resolution and the magnification of the system. + + Parameters + ---------- + resolution : float, optional + The resolution of the ImageGeometry object. Defaults to 1.0. + + Returns + ------- + ImageGeometry + The ImageGeometry object that corresponds to the AcquisitionGeometry object. + + Note + ---- + For CONE3d_SOUV geometries, the magnification of the volume centre is calculated for each projection and the mean is used. + """ + + if self.config.system.geometry == "cone_souv": + mag = self.magnification.mean() + else: + mag = self.magnification num_voxel_xy = int(numpy.ceil(self.config.panel.num_pixels[0] * resolution)) - voxel_size_xy = self.config.panel.pixel_size[0] / (resolution * self.magnification) + voxel_size_xy = self.config.panel.pixel_size[0] / (resolution * mag) if AcquisitionType.DIM3 & self.dimension: num_voxel_z = int(numpy.ceil(self.config.panel.num_pixels[1] * resolution)) - voxel_size_z = self.config.panel.pixel_size[1] / (resolution * self.magnification) + voxel_size_z = self.config.panel.pixel_size[1] / (resolution * mag) else: num_voxel_z = 0 voxel_size_z = 1 @@ -2561,3 +2760,4 @@ def allocate(self, value=0, **kwargs): else: raise ValueError(f'Value {value} unknown') return out + diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index 98f45b9ea5..9fc8f874ec 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -63,7 +63,7 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): # Translation vector that will modify the centre of the reconstructed volume # (by defalut, no translation) - translation = [0.0, 0.0, 0.0]; + translation = [0.0, 0.0, 0.0] if sinogram_geometry.dimension == '2D': #create a 3D astra geom from 2D CIL geometry @@ -129,7 +129,7 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): ]); # Compute a translation vector that will modify the centre of the reconstructed volume - translation = np.array(system.volume_centre_position) - current_centre; + translation = np.array(system.volume_centre.position) - 
current_centre projector = 'cone_vec' @@ -151,12 +151,12 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): else: vectors = np.zeros((system.num_positions, 12)) - for i, (src, det, row, col) in enumerate(zip(system.source.position_set, system.detector.position_set, system.detector.direction_x_set, system.detector.direction_y_set)): - - vectors[i, :3] = src.reshape(3) - vectors[i, 3:6] = det.reshape(3) - vectors[i, 6:9] = row.reshape(3) - vectors[i, 9:] = col.reshape(3) + #for i, (src, det, row, col) in enumerate(zip(system.source.position_set, system.detector.position_set, system.detector.direction_x_set, system.detector.direction_y_set)): + for i in range(system.num_positions): + vectors[i, :3] = system.source[i].position + vectors[i, 3:6] = system.detector[i].position + vectors[i, 6:9] = system.detector[i].direction_x * panel.pixel_size[0] + vectors[i, 9:] = system.detector[i].direction_y * panel.pixel_size[1] proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors) diff --git a/Wrappers/Python/test/test_AcquisitionGeometry.py b/Wrappers/Python/test/test_AcquisitionGeometry.py index f19ff86b63..c56b9a3d00 100644 --- a/Wrappers/Python/test/test_AcquisitionGeometry.py +++ b/Wrappers/Python/test/test_AcquisitionGeometry.py @@ -158,6 +158,94 @@ def test_create_Cone3D(self): np.testing.assert_allclose(AG.config.system.rotation_axis.direction, rotation_axis_direction, rtol=1E-6) + def test_create_CONE3D_SOUV(self): + source_position_set = [[0.1, -500.0,0.2], [0.3, -499.0,0.4]] + detector_position_set = [[-1.3,1000.0, -1.0], [-1.4,1004.0, -0.08]] + detector_direction_x_set = [[1,0.0, 0.0], [1,0.02, 0.0]] + detector_direction_y_set = [[0.,0.,1], [0,0.0,1]] + + ag = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=source_position_set, \ + detector_position_set=detector_position_set, \ + detector_direction_x_set=detector_direction_x_set, \ + detector_direction_y_set=detector_direction_y_set) + + def get_unit_vec(x): + x = np.array(x) + return x / np.linalg.norm(x) + + # check the values + for i in range(len(source_position_set)): + np.testing.assert_array_equal(ag.config.system.source[i].position, source_position_set[i]) + np.testing.assert_array_equal(ag.config.system.detector[i].position, detector_position_set[i]) + np.testing.assert_array_equal(ag.config.system.detector[i].direction_x, get_unit_vec(detector_direction_x_set[i])) + np.testing.assert_array_equal(ag.config.system.detector[i].direction_y, get_unit_vec(detector_direction_y_set[i])) + + + def test_create_CONE3D_SOUV_invalid_values(self): + source_position_set = [[1, 0.0, 0.0], [0.0, 0.0, 1]] + detector_position_set = [[0.0, 0.0, 1], [1, 0.0, 0.0]] + detector_direction_x_set = [[1, 0.0, 0.0], [0.0, 0.0, 1]] + detector_direction_y_set = [[0.0, 1, 0.0], [0.0, 1, 0.0]] + + invalid_values = [ + "not a list", # Not a list + [1, 2, 3], # List of integers, not lists/arrays + [[1,2],[3,4],[5,6]], # List of lists of incorrect length + [[1, 2, 3], [3, 4]], # Lists of incorrect length + [[1,2], "not a list"], # List with invalid value + ] + + for invalid_value in invalid_values: + try: + with self.assertRaises(ValueError) as cm: + ag = AcquisitionGeometry.create_Cone3D_SOUV( + source_position_set=invalid_value, + detector_position_set=detector_position_set, + detector_direction_x_set=detector_direction_x_set, + detector_direction_y_set=detector_direction_y_set + ) + except AssertionError as e: + print(f"Test failed for source_position_set={invalid_value}: 
{e}") + raise + + try: + with self.assertRaises(ValueError) as cm: + ag = AcquisitionGeometry.create_Cone3D_SOUV( + source_position_set=source_position_set, + detector_position_set=invalid_value, + detector_direction_x_set=detector_direction_x_set, + detector_direction_y_set=detector_direction_y_set + ) + except AssertionError as e: + print(f"Test failed for detector_position_set={invalid_value}: {e}") + raise + + try: + with self.assertRaises(ValueError) as cm: + ag = AcquisitionGeometry.create_Cone3D_SOUV( + source_position_set=source_position_set, + detector_position_set=detector_position_set, + detector_direction_x_set=invalid_value, + detector_direction_y_set=detector_direction_y_set + ) + except AssertionError as e: + print(f"Test failed for detector_direction_x_set={invalid_value}: {e}") + raise + + try: + with self.assertRaises(ValueError) as cm: + ag = AcquisitionGeometry.create_Cone3D_SOUV( + source_position_set=source_position_set, + detector_position_set=detector_position_set, + detector_direction_x_set=detector_direction_x_set, + detector_direction_y_set=invalid_value + ) + except AssertionError as e: + print(f"Test failed for detector_direction_y_set={invalid_value}: {e}") + raise + + + def test_shift_detector_origin_bottom_left(self): initial_position = np.array([2.5, -1.3, 10.2]) pixel_size = np.array([0.5, 0.7]) diff --git a/Wrappers/Python/test/test_labels.py b/Wrappers/Python/test/test_labels.py index e97b52d2bc..d09ae12c92 100644 --- a/Wrappers/Python/test/test_labels.py +++ b/Wrappers/Python/test/test_labels.py @@ -65,7 +65,7 @@ def test_units_angles(self): self.assertIn(getattr(AngleUnit, i), AngleUnit) def test_acquisition_type(self): - for i in ('PARALLEL', 'CONE', 'DIM2', 'DIM3'): + for i in ('PARALLEL', 'CONE', 'DIM2', 'DIM3', 'CONE_SOUV'): self.assertIn(i, AcquisitionType) self.assertIn(i.lower(), AcquisitionType) self.assertIn(getattr(AcquisitionType, i), AcquisitionType) From e5870ca6487f357653c3079fba55ce8e644bbf97 Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Wed, 15 Jan 2025 18:09:05 +0000 Subject: [PATCH 14/17] fix some manual rebase bugs --- .../cil/framework/acquisition_geometry.py | 62 +++++++++++++------ Wrappers/Python/cil/framework/labels.py | 3 +- .../convert_geometry_to_astra_vec_3D.py | 2 +- 3 files changed, 46 insertions(+), 21 deletions(-) diff --git a/Wrappers/Python/cil/framework/acquisition_geometry.py b/Wrappers/Python/cil/framework/acquisition_geometry.py index ba3e3e292d..15c71aa61e 100644 --- a/Wrappers/Python/cil/framework/acquisition_geometry.py +++ b/Wrappers/Python/cil/framework/acquisition_geometry.py @@ -1253,21 +1253,6 @@ class Cone3D_SOUV(SystemConfiguration): Label the units of distance used for the configuration ''' - - def _validate_and_convert(self, array, name, num_positions, normalise=False): - try: - array = numpy.array(array,dtype=numpy.float64).reshape(num_positions,3) - except: - raise ValueError(f"{name} expected a list of {num_positions} 3D vectors") - - if normalise: - for i in range(num_positions): - array[i] = array[i] / numpy.linalg.norm(array[i]) - - return array - - - def __init__ (self, source_position_set, detector_position_set, detector_direction_x_set, detector_direction_y_set, volume_centre_position, units='units'): self.num_positions = len(source_position_set) @@ -2005,6 +1990,37 @@ class AcquisitionGeometry(object): #for backwards compatibility + #for backwards compatibility + @property + def ANGLE(self): + warnings.warn("use AcquisitionDimension.Angle instead", DeprecationWarning, stacklevel=2) + 
return AcquisitionDimension.ANGLE + + @property + def CHANNEL(self): + warnings.warn("use AcquisitionDimension.Channel instead", DeprecationWarning, stacklevel=2) + return AcquisitionDimension.CHANNEL + + @property + def DEGREE(self): + warnings.warn("use AngleUnit.DEGREE", DeprecationWarning, stacklevel=2) + return AngleUnit.DEGREE + + @property + def HORIZONTAL(self): + warnings.warn("use AcquisitionDimension.HORIZONTAL instead", DeprecationWarning, stacklevel=2) + return AcquisitionDimension.HORIZONTAL + + @property + def RADIAN(self): + warnings.warn("use AngleUnit.RADIAN instead", DeprecationWarning, stacklevel=2) + return AngleUnit.RADIAN + + @property + def VERTICAL(self): + warnings.warn("use AcquisitionDimension.VERTICAL instead", DeprecationWarning, stacklevel=2) + return AcquisitionDimension.VERTICAL + @property def geom_type(self): return self.config.system.geometry @@ -2109,11 +2125,19 @@ def dimension(self): @property def shape(self): + if self.config.system.geometry != "cone_souv": + shape_dict = {AcquisitionDimension.CHANNEL: self.config.channels.num_channels, + AcquisitionDimension.ANGLE: self.config.angles.num_positions, + AcquisitionDimension.VERTICAL: self.config.panel.num_pixels[1], + AcquisitionDimension.HORIZONTAL: self.config.panel.num_pixels[0]} + # We are using per-projection geometry, not angles + else: + shape_dict = {AcquisitionDimension.CHANNEL: self.config.channels.num_channels, + AcquisitionDimension.ANGLE: self.config.system.num_positions, + AcquisitionDimension.VERTICAL: self.config.panel.num_pixels[1], + AcquisitionDimension.HORIZONTAL: self.config.panel.num_pixels[0]} - shape_dict = {AcquisitionDimension.CHANNEL: self.config.channels.num_channels, - AcquisitionDimension.ANGLE: self.config.angles.num_positions, - AcquisitionDimension.VERTICAL: self.config.panel.num_pixels[1], - AcquisitionDimension.HORIZONTAL: self.config.panel.num_pixels[0]} + return tuple(shape_dict[label] for label in self.dimension_labels) @property diff --git a/Wrappers/Python/cil/framework/labels.py b/Wrappers/Python/cil/framework/labels.py index c1bb23dfb1..9a2ac0ddd2 100644 --- a/Wrappers/Python/cil/framework/labels.py +++ b/Wrappers/Python/cil/framework/labels.py @@ -241,6 +241,7 @@ class AcquisitionType(Flag): CONE = auto() DIM2 = auto() DIM3 = auto() + CONE_SOUV = auto() def validate(self): """ @@ -262,7 +263,7 @@ def geometry(self): """ Returns the label for the geometry type """ - return self & (self.PARALLEL | self.CONE) + return self & (self.PARALLEL | self.CONE | self.CONE_SOUV) @classmethod def _missing_(cls, value): diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index 9fc8f874ec..b52bebf794 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -56,7 +56,7 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): angles = sinogram_geometry.config.angles #get units - degrees = angles.angle_unit == sinogram_geometry.DEGREE + degrees = angles.angle_unit == AngleUnit.DEGREE system = sinogram_geometry.config.system panel = sinogram_geometry.config.panel From c3dd87f2229a6b6a1f05de75d8df1fbe1b432077 Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Tue, 4 Feb 2025 18:37:48 +0000 Subject: [PATCH 15/17] astra geometry conversion unit tests --- .../utilities/convert_geometry_to_astra.py | 3 + 
.../Python/test/test_PluginsAstra_Geometry.py | 507 ++++++++++++++---- 2 files changed, 408 insertions(+), 102 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py index 15fbd49761..01326e6367 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py @@ -39,6 +39,9 @@ def convert_geometry_to_astra(volume_geometry, sinogram_geometry): The ASTRA vol_geom and proj_geom """ + if sinogram_geometry.geom_type == AcquisitionType.CONE_SOUV: + raise ValueError('Cone-SOUV geometry is not supported by this function, use convert_geometry_to_astra_vec_3D instead') + # determine if the geometry is 2D or 3D dimension = AcquisitionType.DIM3 if sinogram_geometry.pixel_num_v > 1 else AcquisitionType.DIM2 diff --git a/Wrappers/Python/test/test_PluginsAstra_Geometry.py b/Wrappers/Python/test/test_PluginsAstra_Geometry.py index c6b3d6f5f7..3c9a8f1b19 100644 --- a/Wrappers/Python/test/test_PluginsAstra_Geometry.py +++ b/Wrappers/Python/test/test_PluginsAstra_Geometry.py @@ -19,6 +19,7 @@ import unittest from cil.framework import AcquisitionGeometry import numpy as np +import math from utils import has_astra, initialise_tests initialise_tests() @@ -27,56 +28,29 @@ from cil.plugins.astra.utilities import convert_geometry_to_astra from cil.plugins.astra.utilities import convert_geometry_to_astra_vec_3D -class TestGeometry(unittest.TestCase): + +class TestGeometry_Parallel2D(unittest.TestCase): def setUp(self): - # Define image geometry. pixels_x = 128 - pixels_y = 3 angles_deg = np.asarray([0,90.0,180.0], dtype='float32') angles_rad = angles_deg * np.pi /180.0 - ag = AcquisitionGeometry.create_Parallel2D()\ + self.ag = AcquisitionGeometry.create_Parallel2D()\ .set_angles(angles_rad, angle_unit='radian')\ .set_labels(['angle','horizontal'])\ .set_panel(pixels_x, 0.1) - ig = ag.get_ImageGeometry() - + self.ig = self.ag.get_ImageGeometry() - ag_deg = AcquisitionGeometry.create_Parallel2D()\ + self.ag_deg = AcquisitionGeometry.create_Parallel2D()\ .set_angles(angles_deg, angle_unit='degree')\ .set_labels(['angle','horizontal'])\ .set_panel(pixels_x, 0.1) - ag_cone = AcquisitionGeometry.create_Cone2D([0,-2], [0,1])\ - .set_angles(angles_rad, angle_unit='radian')\ - .set_labels(['angle','horizontal'])\ - .set_panel(pixels_x, 0.1) - - - - ag3 = AcquisitionGeometry.create_Parallel3D()\ - .set_angles(angles_rad, angle_unit='radian')\ - .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) - - ig3 = ag3.get_ImageGeometry() - - ag3_cone = AcquisitionGeometry.create_Cone3D([0,-2,0], [0,1,0])\ - .set_angles(angles_rad, angle_unit='radian')\ - .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) - self.ig = ig - self.ig3 = ig3 - self.ag = ag - self.ag_deg = ag_deg - self.ag_cone = ag_cone - self.ag3 = ag3 - self.ag3_cone = ag3_cone @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_to_astra(self): + def test_convert_geometry_simple(self): #2D parallel radians astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) @@ -86,6 +60,11 @@ def test_convert_geometry_to_astra(self): self.assertEqual(astra_sino['DetectorWidth'], self.ag.pixel_size_h) np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + #2D parallel degrees + astra_vol, astra_sino = 
convert_geometry_to_astra(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + # check the image geometry self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) @@ -93,52 +72,11 @@ def test_convert_geometry_to_astra(self): self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) - #2D parallel degrees - astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag_deg) - np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) - - #2D cone - astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag_cone) - - self.assertEqual(astra_sino['type'], 'fanflat') - self.assertEqual(astra_sino['DistanceOriginSource'], self.ag_cone.dist_source_center) - self.assertTrue(astra_sino['DistanceOriginDetector'], self.ag_cone.dist_center_detector) - - - #3D parallel - astra_vol, astra_sino = convert_geometry_to_astra(self.ig3, self.ag3) - self.assertEqual(astra_sino['type'], 'parallel3d') - self.assertEqual(astra_sino['DetectorColCount'], self.ag3.pixel_num_h) - self.assertEqual(astra_sino['DetectorRowCount'], self.ag3.pixel_num_v) - self.assertEqual(astra_sino['DetectorSpacingX'], self.ag3.pixel_size_h) - self.assertEqual(astra_sino['DetectorSpacingY'], self.ag3.pixel_size_h) - np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag3.angles) - - self.assertEqual(astra_vol['GridColCount'], self.ig3.voxel_num_x) - self.assertEqual(astra_vol['GridRowCount'], self.ig3.voxel_num_y) - self.assertEqual(astra_vol['GridSliceCount'], self.ig3.voxel_num_z) - - self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig3.voxel_num_x * self.ig3.voxel_size_x * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig3.voxel_num_x * self.ig3.voxel_size_x * 0.5) - self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig3.voxel_num_y * self.ig3.voxel_size_y * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig3.voxel_num_y * self.ig3.voxel_size_y * 0.5) - self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig3.voxel_num_z * self.ig3.voxel_size_z * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig3.voxel_num_z * self.ig3.voxel_size_z * 0.5) - - #3D cone - astra_vol, astra_sino = convert_geometry_to_astra(self.ig3, self.ag3_cone) - self.assertEqual(astra_sino['type'], 'cone') - self.assertEqual(astra_sino['DistanceOriginSource'], self.ag_cone.dist_source_center) - self.assertEqual(astra_sino['DistanceOriginDetector'], self.ag_cone.dist_center_detector) - self.assertEqual(astra_sino['DetectorColCount'], self.ag3.pixel_num_h) - self.assertEqual(astra_sino['DetectorRowCount'], self.ag3.pixel_num_v) - self.assertEqual(astra_sino['DetectorSpacingX'], self.ag3.pixel_size_h) - self.assertEqual(astra_sino['DetectorSpacingY'], self.ag3.pixel_size_h) - np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag3.angles) @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_to_astra_vec_3D(self): - #2D parallel radians + def test_convert_geometry_vector(self): + + # 2D parallel radians astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) self.assertEqual(astra_sino['type'], 'parallel3d_vec') @@ -161,7 +99,11 @@ def 
test_convert_geometry_to_astra_vec_3D(self): np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + # 2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + # image geometry self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) self.assertEqual(astra_vol['GridSliceCount'], 1) @@ -173,55 +115,416 @@ def test_convert_geometry_to_astra_vec_3D(self): self.assertEqual(astra_vol['option']['WindowMinZ'], - self.ig.voxel_size_x * 0.5) self.assertEqual(astra_vol['option']['WindowMaxZ'], + self.ig.voxel_size_x * 0.5) +class TestGeometry_Parallel3D(unittest.TestCase): + def setUp(self): + # Define image geometry. + pixels_x = 128 + pixels_y = 3 + + angles_deg = np.asarray([0,90.0,180.0], dtype='float32') + angles_rad = angles_deg * np.pi /180.0 + + self.ag = AcquisitionGeometry.create_Parallel3D()\ + .set_angles(angles_rad, angle_unit='radian')\ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + self.ag_deg = AcquisitionGeometry.create_Parallel3D()\ + .set_angles(angles_deg, angle_unit='degree')\ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + self.ig = self.ag.get_ImageGeometry() + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_simple(self): + + #3D parallel + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'parallel3d') + self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) + self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) + self.assertEqual(astra_sino['DetectorSpacingX'], self.ag.pixel_size_h) + self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_h) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + #2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector(self): + + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'parallel3d_vec') + self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) + self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) + + vectors = np.zeros((3,12),dtype='float64') + + vectors[0][1] = 1.0 + vectors[0][6] = self.ag.pixel_size_h + vectors[0][11] = 
self.ag.pixel_size_h + + vectors[1][0] = 1.0 + vectors[1][7] = -self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_h + + vectors[2][1] = -1.0 + vectors[2][6] = -self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + #degrees astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_deg) np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + +class TestGeometry_Cone2D(unittest.TestCase): + def setUp(self): + pixels_x = 128 + + angles_deg = np.asarray([0,90.0,180.0], dtype='float32') + angles_rad = angles_deg * np.pi /180.0 + + + self.ag = AcquisitionGeometry.create_Cone2D([0,-2], [0,1])\ + .set_angles(angles_rad, angle_unit='radian')\ + .set_labels(['angle','horizontal'])\ + .set_panel(pixels_x, 0.1) + + self.ig = self.ag.get_ImageGeometry() + + self.ag_deg = AcquisitionGeometry.create_Cone2D([0,-2], [0,1])\ + .set_angles(angles_deg, angle_unit='degree')\ + .set_labels(['angle','horizontal'])\ + .set_panel(pixels_x, 0.1) + + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_simple(self): + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) + + self.assertEqual(astra_sino['type'], 'fanflat') + self.assertEqual(astra_sino['DistanceOriginSource'], self.ag.dist_source_center) + self.assertTrue(astra_sino['DistanceOriginDetector'], self.ag.dist_center_detector) + + self.assertEqual(astra_sino['DetectorCount'], self.ag.pixel_num_h) + self.assertEqual(astra_sino['DetectorWidth'], self.ag.pixel_size_h) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + #2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + # check the image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector(self): #2D cone - astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_cone) + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) 
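+ # ASTRA 'cone_vec' geometries store 12 values per projection: source position (0:3),
+ # detector centre (3:6), then the detector u and v axes (6:9 and 9:12) scaled by the pixel
+ # sizes, matching what convert_geometry_to_astra_vec_3D builds from the CIL geometry.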
self.assertEqual(astra_sino['type'], 'cone_vec') vectors = np.zeros((3,12),dtype='float64') - pixel_size_v = self.ig.voxel_size_x * self.ag_cone.magnification - vectors[0][1] = -1 * self.ag_cone.dist_source_center - vectors[0][4] = self.ag_cone.dist_center_detector - vectors[0][6] = self.ag_cone.pixel_size_h + pixel_size_v = self.ig.voxel_size_x * self.ag.magnification + vectors[0][1] = -1 * self.ag.dist_source_center + vectors[0][4] = self.ag.dist_center_detector + vectors[0][6] = self.ag.pixel_size_h vectors[0][11] = pixel_size_v - vectors[1][0] = -1 * self.ag_cone.dist_source_center - vectors[1][3] = self.ag_cone.dist_center_detector - vectors[1][7] = -self.ag_cone.pixel_size_h + vectors[1][0] = -1 * self.ag.dist_source_center + vectors[1][3] = self.ag.dist_center_detector + vectors[1][7] = -self.ag.pixel_size_h vectors[1][11] = pixel_size_v - vectors[2][1] = self.ag_cone.dist_source_center - vectors[2][4] = -1 * self.ag_cone.dist_center_detector - vectors[2][6] = -1 * self.ag_cone.pixel_size_h + vectors[2][1] = self.ag.dist_source_center + vectors[2][4] = -1 * self.ag.dist_center_detector + vectors[2][6] = -1 * self.ag.pixel_size_h vectors[2][11] = pixel_size_v np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) - #3D cone - astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig3, self.ag3_cone) + #2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # check image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + + +class TestGeometry_Cone3D(unittest.TestCase): + def setUp(self): + pixels_x = 128 + pixels_y = 3 + angles_deg = np.asarray([0,90.0,180.0], dtype='float32') + angles_rad = angles_deg * np.pi /180.0 + source_position = [0,-2,0] + detector_position = [0,1,0] + + self.ag = AcquisitionGeometry.create_Cone3D(source_position, detector_position)\ + .set_angles(angles_rad, angle_unit='radian')\ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + self.ag_deg = AcquisitionGeometry.create_Cone3D(source_position, detector_position)\ + .set_angles(angles_deg, angle_unit='degree')\ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + self.ig = self.ag.get_ImageGeometry() + + source_position_set = [] + detector_position_set = [] + detector_direction_x_set = [] + detector_direction_y_set = [] + + for angle in angles_rad: + rotation_matrix = np.eye(3) + rotation_matrix[0,0] = rotation_matrix[1,1] = math.cos(-angle) + rotation_matrix[0,1] = -math.sin(-angle) + rotation_matrix[1,0] = math.sin(-angle) + + source_position_set.append(rotation_matrix.dot(source_position)) + detector_position_set.append(rotation_matrix.dot(detector_position)) + detector_direction_x_set.append(rotation_matrix.dot([1,0,0])) + detector_direction_y_set.append(rotation_matrix.dot([0,0,1])) + + + self.ag_souv = 
AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=source_position_set,\ + detector_position_set=detector_position_set,\ + detector_direction_x_set=detector_direction_x_set,\ + detector_direction_y_set=detector_direction_y_set)\ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_simple(self): + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'cone') + self.assertEqual(astra_sino['DistanceOriginSource'], self.ag.dist_source_center) + self.assertEqual(astra_sino['DistanceOriginDetector'], self.ag.dist_center_detector) + self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) + self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) + self.assertEqual(astra_sino['DetectorSpacingX'], self.ag.pixel_size_h) + self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_h) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + # degrees + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) + + # check the image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector(self): + + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) self.assertEqual(astra_sino['type'], 'cone_vec') vectors = np.zeros((3,12),dtype='float64') + vectors[0][1] = -1 * self.ag.dist_source_center + vectors[0][4] = self.ag.dist_center_detector + vectors[0][6] = self.ag.pixel_size_h + vectors[0][11] = self.ag.pixel_size_h + + vectors[1][0] = -1 * self.ag.dist_source_center + vectors[1][3] = self.ag.dist_center_detector + vectors[1][7] = -1 * self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_h + + vectors[2][1] = self.ag.dist_source_center + vectors[2][4] = -1 * self.ag.dist_center_detector + vectors[2][6] = -1 * self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + #degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], 
self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_souv(self): + """ + Checks the SOUV convention agrees with the standard cone geometry 3D + """ + + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag_souv) + self.assertEqual(astra_sino['type'], 'cone_vec') - vectors[0][1] = -1 * self.ag_cone.dist_source_center - vectors[0][4] = self.ag_cone.dist_center_detector - vectors[0][6] = self.ag_cone.pixel_size_h - vectors[0][11] = self.ag_cone.pixel_size_h + vectors = np.zeros((3,12),dtype='float64') + vectors[0][1] = -1 * self.ag.dist_source_center + vectors[0][4] = self.ag.dist_center_detector + vectors[0][6] = self.ag.pixel_size_h + vectors[0][11] = self.ag.pixel_size_h - vectors[1][0] = -1 * self.ag_cone.dist_source_center - vectors[1][3] = self.ag_cone.dist_center_detector - vectors[1][7] = -1 * self.ag_cone.pixel_size_h - vectors[1][11] = self.ag_cone.pixel_size_h + vectors[1][0] = -1 * self.ag.dist_source_center + vectors[1][3] = self.ag.dist_center_detector + vectors[1][7] = -1 * self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_h - vectors[2][1] = self.ag_cone.dist_source_center - vectors[2][4] = -1 * self.ag_cone.dist_center_detector - vectors[2][6] = -1 * self.ag_cone.pixel_size_h - vectors[2][11] = self.ag_cone.pixel_size_h + vectors[2][1] = self.ag.dist_source_center + vectors[2][4] = -1 * self.ag.dist_center_detector + vectors[2][6] = -1 * self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_h np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + + + +class TestGeometry_Cone3D_SOUV(unittest.TestCase): + def setUp(self): + pixels_x = 128 + pixels_y = 3 + + # generate a set of source and detector positions + self.source_position_set = [ + [60.39, 90.29, 63.12], + [34.64, 24.1, -66.48], + [1.24, 66.74, -98.15], + ] + + self.detector_position_set = [ + [6579.59, 6069.16, -9907.19], + [4984.46, 5533.88, 1186.63], + [-7528.65, 2598.33, -7634.18], + ] + + # detector direction x and y must be orthogonal + self.detector_direction_x_set = [ + [1.2, 7.91, -4.95], + [4.66, 6.75, -0.77], + [-7.12, 4.69, 0.59] + ] + + vec = [ + [-4.97, 5.62, -8.26], + [-0.97, -6.93, 7.65], + [-5.5, 7.43, -5.14] + ] + + 
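+ # Build an orthonormal detector frame per projection: normalise direction_x, then cross it
+ # with an arbitrary (non-parallel) vector and normalise to get a direction_y orthogonal to it.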
self.detector_direction_y_set = [] + for i in range(3): + self.detector_direction_x_set[i] = self.detector_direction_x_set[i] / np.linalg.norm(self.detector_direction_x_set[i]) + detector_direction_y = np.cross(self.detector_direction_x_set[i], vec[i]) + self.detector_direction_y_set.append(detector_direction_y / np.linalg.norm(detector_direction_y)) + + self.ag = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=self.source_position_set,\ + detector_position_set=self.detector_position_set,\ + detector_direction_x_set=self.detector_direction_x_set,\ + detector_direction_y_set=self.detector_direction_y_set,\ + volume_centre_position=[0,0,0]) \ + .set_labels(['vertical', 'angle','horizontal'])\ + .set_panel((pixels_x,pixels_y), (0.1,0.1)) + + self.ig = self.ag.get_ImageGeometry() + + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_simple(self): + + with self.assertRaises(ValueError): + astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector(self): + + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'cone_vec') + + for i in range(3): + np.testing.assert_allclose(astra_sino['Vectors'][i][0:3], self.source_position_set[i], atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][3:6], self.detector_position_set[i], atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][6:9], self.detector_direction_x_set[i] * self.ag.pixel_size_h, atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][9:12], self.detector_direction_y_set[i] * self.ag.pixel_size_h, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) From 486dc43766076d4af43fc7750617fb87f9e99362 Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Fri, 7 Feb 2025 15:25:46 +0000 Subject: [PATCH 16/17] Projector test clean up. 
Updated all projector tests to check panel 'origin' --- .../utilities/convert_geometry_to_astra.py | 18 +- .../convert_geometry_to_astra_vec_2D.py | 4 + .../convert_geometry_to_astra_vec_3D.py | 14 +- .../Python/test/test_PluginsAstra_Geometry.py | 267 +++++++++++++++--- .../Python/test/test_PluginsTigre_Siddon.py | 4 +- Wrappers/Python/test/utils_projectors.py | 43 +++ 6 files changed, 303 insertions(+), 47 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py index 01326e6367..89a5594710 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra.py @@ -48,9 +48,15 @@ def convert_geometry_to_astra(volume_geometry, sinogram_geometry): #get units if sinogram_geometry.config.angles.angle_unit == AngleUnit.DEGREE: - angles_rad = sinogram_geometry.config.angles.angle_data * np.pi / 180.0 + angles_rad = -sinogram_geometry.config.angles.angle_data * np.pi / 180.0 else: - angles_rad = sinogram_geometry.config.angles.angle_data + angles_rad = -sinogram_geometry.config.angles.angle_data + + if 'right' in sinogram_geometry.config.panel.origin: + angles_rad += np.pi + + if AcquisitionType.DIM3 & dimension and 'top' in sinogram_geometry.config.panel.origin: + raise ValueError('Top origin is not supported for ASTRA 3D simple geometries, either flip the data or use convert_geometry_to_astra_vec_3D') if AcquisitionType.DIM2 & dimension: vol_geom = astra.create_vol_geom(volume_geometry.voxel_num_y, @@ -64,12 +70,12 @@ def convert_geometry_to_astra(volume_geometry, sinogram_geometry): proj_geom = astra.create_proj_geom('parallel', sinogram_geometry.pixel_size_h, sinogram_geometry.pixel_num_h, - -angles_rad) + angles_rad) elif sinogram_geometry.geom_type == 'cone': proj_geom = astra.create_proj_geom('fanflat', sinogram_geometry.pixel_size_h, sinogram_geometry.pixel_num_h, - -angles_rad, + angles_rad, np.abs(sinogram_geometry.dist_source_center), np.abs(sinogram_geometry.dist_center_detector)) else: @@ -92,14 +98,14 @@ def convert_geometry_to_astra(volume_geometry, sinogram_geometry): sinogram_geometry.pixel_size_v, sinogram_geometry.pixel_num_v, sinogram_geometry.pixel_num_h, - -angles_rad) + angles_rad) elif sinogram_geometry.geom_type == 'cone': proj_geom = astra.create_proj_geom('cone', sinogram_geometry.pixel_size_h, sinogram_geometry.pixel_size_v, sinogram_geometry.pixel_num_v, sinogram_geometry.pixel_num_h, - -angles_rad, + angles_rad, np.abs(sinogram_geometry.dist_source_center), np.abs(sinogram_geometry.dist_center_detector)) else: diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py index 75a9ef8fa1..6d8e029645 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py @@ -20,6 +20,7 @@ import astra import numpy as np from cil.framework.labels import AngleUnit +from cil.framework.labels import AcquisitionType, AcquisitionType def convert_geometry_to_astra_vec_2D(volume_geometry, sinogram_geometry_in): @@ -42,6 +43,9 @@ def convert_geometry_to_astra_vec_2D(volume_geometry, sinogram_geometry_in): """ sinogram_geometry = sinogram_geometry_in.copy() + if sinogram_geometry.geom_type == AcquisitionType.CONE_SOUV: + raise ValueError('Cone-SOUV 
geometry is not supported by this function, use convert_geometry_to_astra_vec_3D instead') + #this catches behaviour modified after CIL 21.3.1 try: sinogram_geometry.config.system.align_reference_frame('cil') diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index b52bebf794..dbb35ce7fb 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -126,7 +126,7 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): volume_geometry_temp.get_min_x() + (volume_geometry_temp.get_max_x() - volume_geometry_temp.get_min_x()) / 2.0, volume_geometry_temp.get_min_y() + (volume_geometry_temp.get_max_y() - volume_geometry_temp.get_min_y()) / 2.0, volume_geometry_temp.get_min_z() + (volume_geometry_temp.get_max_z() - volume_geometry_temp.get_min_z()) / 2.0 - ]); + ]) # Compute a translation vector that will modify the centre of the reconstructed volume translation = np.array(system.volume_centre.position) - current_centre @@ -151,12 +151,20 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): else: vectors = np.zeros((system.num_positions, 12)) + sign_h = 1 + sign_v = 1 + + if 'right' in panel.origin: + sign_h = -1 + if 'top' in panel.origin: + sign_v = -1 + #for i, (src, det, row, col) in enumerate(zip(system.source.position_set, system.detector.position_set, system.detector.direction_x_set, system.detector.direction_y_set)): for i in range(system.num_positions): vectors[i, :3] = system.source[i].position vectors[i, 3:6] = system.detector[i].position - vectors[i, 6:9] = system.detector[i].direction_x * panel.pixel_size[0] - vectors[i, 9:] = system.detector[i].direction_y * panel.pixel_size[1] + vectors[i, 6:9] = sign_h * system.detector[i].direction_x * panel.pixel_size[0] + vectors[i, 9:] = sign_v * system.detector[i].direction_y * panel.pixel_size[1] proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors) diff --git a/Wrappers/Python/test/test_PluginsAstra_Geometry.py b/Wrappers/Python/test/test_PluginsAstra_Geometry.py index 3c9a8f1b19..86f7582eaf 100644 --- a/Wrappers/Python/test/test_PluginsAstra_Geometry.py +++ b/Wrappers/Python/test/test_PluginsAstra_Geometry.py @@ -26,12 +26,13 @@ if has_astra: from cil.plugins.astra.utilities import convert_geometry_to_astra + from cil.plugins.astra.utilities import convert_geometry_to_astra_vec_2D from cil.plugins.astra.utilities import convert_geometry_to_astra_vec_3D class TestGeometry_Parallel2D(unittest.TestCase): def setUp(self): - pixels_x = 128 + self.pixels_x = 128 angles_deg = np.asarray([0,90.0,180.0], dtype='float32') angles_rad = angles_deg * np.pi /180.0 @@ -39,14 +40,14 @@ def setUp(self): self.ag = AcquisitionGeometry.create_Parallel2D()\ .set_angles(angles_rad, angle_unit='radian')\ .set_labels(['angle','horizontal'])\ - .set_panel(pixels_x, 0.1) + .set_panel(self.pixels_x, 0.1) self.ig = self.ag.get_ImageGeometry() self.ag_deg = AcquisitionGeometry.create_Parallel2D()\ .set_angles(angles_deg, angle_unit='degree')\ .set_labels(['angle','horizontal'])\ - .set_panel(pixels_x, 0.1) + .set_panel(self.pixels_x, 0.1) @unittest.skipUnless(has_astra, "Requires ASTRA") @@ -74,7 +75,44 @@ def test_convert_geometry_simple(self): @unittest.skipUnless(has_astra, "Requires ASTRA") - def 
test_convert_geometry_vector(self): + def test_convert_geometry_vector2D(self): + + # 2D parallel radians + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag) + + self.assertEqual(astra_sino['type'], 'parallel_vec') + self.assertEqual(astra_sino['DetectorCount'], self.ag.pixel_num_h) + + # note the sign of the y values differs from the astra 3D projector case + vectors = np.zeros((3,6),dtype='float64') + + vectors[0][1] = -1.0 + vectors[0][4] = self.ag.pixel_size_h + + vectors[1][0] = 1.0 + vectors[1][5] = self.ag.pixel_size_h + + vectors[2][1] = 1.0 + vectors[2][4] = -self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # 2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector3D(self): # 2D parallel radians astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) @@ -127,12 +165,12 @@ def setUp(self): self.ag = AcquisitionGeometry.create_Parallel3D()\ .set_angles(angles_rad, angle_unit='radian')\ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((pixels_x,pixels_y), (0.1,0.2)) self.ag_deg = AcquisitionGeometry.create_Parallel3D()\ .set_angles(angles_deg, angle_unit='degree')\ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((pixels_x,pixels_y), (0.1,0.2)) self.ig = self.ag.get_ImageGeometry() @@ -146,7 +184,7 @@ def test_convert_geometry_simple(self): self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) self.assertEqual(astra_sino['DetectorSpacingX'], self.ag.pixel_size_h) - self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_h) + self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_v) np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) #2D parallel degrees @@ -166,26 +204,66 @@ def test_convert_geometry_simple(self): self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_vector(self): + def test_convert_geometry_vector2D(self): + + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'parallel_vec') + self.assertEqual(astra_sino['DetectorCount'], self.ag.pixel_num_h) + + vectors = np.zeros((3,6),dtype='float64') + + vectors[0][1] = -1.0 + vectors[0][4] = self.ag.pixel_size_h + + vectors[1][0] = 1.0 + vectors[1][5] = self.ag.pixel_size_h + + vectors[2][1] = 1.0 + vectors[2][4] = -self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + #degrees + astra_vol, 
astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector3D(self): astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) self.assertEqual(astra_sino['type'], 'parallel3d_vec') self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) + + #ray : the ray direction + #d : the center of the detector + #u : the vector from detector pixel (0,0) to (0,1) + #v : the vector from detector pixel (0,0) to (1,0) + vectors = np.zeros((3,12),dtype='float64') vectors[0][1] = 1.0 vectors[0][6] = self.ag.pixel_size_h - vectors[0][11] = self.ag.pixel_size_h + vectors[0][11] = self.ag.pixel_size_v vectors[1][0] = 1.0 vectors[1][7] = -self.ag.pixel_size_h - vectors[1][11] = self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_v vectors[2][1] = -1.0 vectors[2][6] = -self.ag.pixel_size_h - vectors[2][11] = self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_v np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) @@ -251,9 +329,46 @@ def test_convert_geometry_simple(self): self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector2D(self): + #2D cone + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag) + + self.assertEqual(astra_sino['type'], 'fanflat_vec') + + vectors = np.zeros((3,6),dtype='float64') + + # note the sign of the y values differs from the astra 3D projector case + + pixel_size_v = self.ig.voxel_size_x * self.ag.magnification + vectors[0][1] = 1 * self.ag.dist_source_center + vectors[0][3] = -self.ag.dist_center_detector + vectors[0][4] = self.ag.pixel_size_h + + vectors[1][0] = -1 * self.ag.dist_source_center + vectors[1][2] = self.ag.dist_center_detector + vectors[1][5] = self.ag.pixel_size_h + + vectors[2][1] = -self.ag.dist_source_center + vectors[2][3] = 1 * self.ag.dist_center_detector + vectors[2][4] = -1 * self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + #2D parallel degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # check image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + 
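+        # the ASTRA 2D vol_geom window is centred on the origin, spanning
+        # +/- (voxel_num * voxel_size) / 2 along each axis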
self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_vector(self): + def test_convert_geometry_vector3D(self): #2D cone astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) @@ -305,12 +420,12 @@ def setUp(self): self.ag = AcquisitionGeometry.create_Cone3D(source_position, detector_position)\ .set_angles(angles_rad, angle_unit='radian')\ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((pixels_x,pixels_y), (0.1,0.2)) self.ag_deg = AcquisitionGeometry.create_Cone3D(source_position, detector_position)\ .set_angles(angles_deg, angle_unit='degree')\ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((pixels_x,pixels_y), (0.1,0.2)) self.ig = self.ag.get_ImageGeometry() @@ -336,7 +451,7 @@ def setUp(self): detector_direction_x_set=detector_direction_x_set,\ detector_direction_y_set=detector_direction_y_set)\ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((pixels_x,pixels_y), (0.1,0.2)) @unittest.skipUnless(has_astra, "Requires ASTRA") @@ -348,7 +463,7 @@ def test_convert_geometry_simple(self): self.assertEqual(astra_sino['DetectorColCount'], self.ag.pixel_num_h) self.assertEqual(astra_sino['DetectorRowCount'], self.ag.pixel_num_v) self.assertEqual(astra_sino['DetectorSpacingX'], self.ag.pixel_size_h) - self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_h) + self.assertEqual(astra_sino['DetectorSpacingY'], self.ag.pixel_size_v) np.testing.assert_allclose(astra_sino['ProjectionAngles'], -self.ag.angles) # degrees @@ -369,7 +484,42 @@ def test_convert_geometry_simple(self): @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_vector(self): + def test_convert_geometry_vector2D(self): + + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'fanflat_vec') + + # note the sign of the y values differs from the astra 3D projector case + vectors = np.zeros((3,6),dtype='float64') + vectors[0][1] = 1 * self.ag.dist_source_center + vectors[0][3] = -self.ag.dist_center_detector + vectors[0][4] = self.ag.pixel_size_h + + vectors[1][0] = -1 * self.ag.dist_source_center + vectors[1][2] = self.ag.dist_center_detector + vectors[1][5] = 1 * self.ag.pixel_size_h + + vectors[2][1] = -self.ag.dist_source_center + vectors[2][3] = 1 * self.ag.dist_center_detector + vectors[2][4] = -1 * self.ag.pixel_size_h + + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + #degrees + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag_deg) + np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 
0.5) + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector3D(self): astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) self.assertEqual(astra_sino['type'], 'cone_vec') @@ -378,17 +528,17 @@ def test_convert_geometry_vector(self): vectors[0][1] = -1 * self.ag.dist_source_center vectors[0][4] = self.ag.dist_center_detector vectors[0][6] = self.ag.pixel_size_h - vectors[0][11] = self.ag.pixel_size_h + vectors[0][11] = self.ag.pixel_size_v vectors[1][0] = -1 * self.ag.dist_source_center vectors[1][3] = self.ag.dist_center_detector vectors[1][7] = -1 * self.ag.pixel_size_h - vectors[1][11] = self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_v vectors[2][1] = self.ag.dist_source_center vectors[2][4] = -1 * self.ag.dist_center_detector vectors[2][6] = -1 * self.ag.pixel_size_h - vectors[2][11] = self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_v np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) @@ -422,17 +572,17 @@ def test_convert_geometry_souv(self): vectors[0][1] = -1 * self.ag.dist_source_center vectors[0][4] = self.ag.dist_center_detector vectors[0][6] = self.ag.pixel_size_h - vectors[0][11] = self.ag.pixel_size_h + vectors[0][11] = self.ag.pixel_size_v vectors[1][0] = -1 * self.ag.dist_source_center vectors[1][3] = self.ag.dist_center_detector vectors[1][7] = -1 * self.ag.pixel_size_h - vectors[1][11] = self.ag.pixel_size_h + vectors[1][11] = self.ag.pixel_size_v vectors[2][1] = self.ag.dist_source_center vectors[2][4] = -1 * self.ag.dist_center_detector vectors[2][6] = -1 * self.ag.pixel_size_h - vectors[2][11] = self.ag.pixel_size_h + vectors[2][11] = self.ag.pixel_size_v np.testing.assert_allclose(astra_sino['Vectors'], vectors, atol=1e-6) @@ -452,8 +602,8 @@ def test_convert_geometry_souv(self): class TestGeometry_Cone3D_SOUV(unittest.TestCase): def setUp(self): - pixels_x = 128 - pixels_y = 3 + self.pixels_x = 128 + self.pixels_y = 3 # generate a set of source and detector positions self.source_position_set = [ @@ -490,10 +640,9 @@ def setUp(self): self.ag = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=self.source_position_set,\ detector_position_set=self.detector_position_set,\ detector_direction_x_set=self.detector_direction_x_set,\ - detector_direction_y_set=self.detector_direction_y_set,\ - volume_centre_position=[0,0,0]) \ + detector_direction_y_set=self.detector_direction_y_set) \ .set_labels(['vertical', 'angle','horizontal'])\ - .set_panel((pixels_x,pixels_y), (0.1,0.1)) + .set_panel((self.pixels_x, self.pixels_y), (0.1,0.2)) self.ig = self.ag.get_ImageGeometry() @@ -506,7 +655,12 @@ def test_convert_geometry_simple(self): astra_vol, astra_sino = convert_geometry_to_astra(self.ig, self.ag) @unittest.skipUnless(has_astra, "Requires ASTRA") - def test_convert_geometry_vector(self): + def test_convert_geometry_vector2D(self): + with self.assertRaises(ValueError): + astra_vol, astra_sino = convert_geometry_to_astra_vec_2D(self.ig, self.ag) + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector3D(self): astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) self.assertEqual(astra_sino['type'], 'cone_vec') @@ -515,16 +669,57 @@ def test_convert_geometry_vector(self): np.testing.assert_allclose(astra_sino['Vectors'][i][0:3], self.source_position_set[i], atol=1e-6) np.testing.assert_allclose(astra_sino['Vectors'][i][3:6], self.detector_position_set[i], atol=1e-6) np.testing.assert_allclose(astra_sino['Vectors'][i][6:9], 
self.detector_direction_x_set[i] * self.ag.pixel_size_h, atol=1e-6) - np.testing.assert_allclose(astra_sino['Vectors'][i][9:12], self.detector_direction_y_set[i] * self.ag.pixel_size_h, atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][9:12], self.detector_direction_y_set[i] * self.ag.pixel_size_v, atol=1e-6) # image geometry self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) - self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5) - self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5) - self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) - self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5) + origin = self.ag.config.system.volume_centre.position + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + + + # with manual centre + origin = self.ag.config.system.volume_centre.position = [1,-2,3.5] + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) + + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + + + @unittest.skipUnless(has_astra, "Requires ASTRA") + def test_convert_geometry_vector_data_origin(self): + + self.ag.set_panel((self.pixels_x, self.pixels_y), (0.1,0.2),origin="top-right") + astra_vol, astra_sino = convert_geometry_to_astra_vec_3D(self.ig, self.ag) + self.assertEqual(astra_sino['type'], 'cone_vec') + + for i in range(3): + np.testing.assert_allclose(astra_sino['Vectors'][i][0:3], self.source_position_set[i], atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][3:6], self.detector_position_set[i], atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][6:9], -self.detector_direction_x_set[i] * self.ag.pixel_size_h, atol=1e-6) + np.testing.assert_allclose(astra_sino['Vectors'][i][9:12], 
-self.detector_direction_y_set[i] * self.ag.pixel_size_v, atol=1e-6) + + # image geometry + self.assertEqual(astra_vol['GridColCount'], self.ig.voxel_num_x) + self.assertEqual(astra_vol['GridRowCount'], self.ig.voxel_num_y) + self.assertEqual(astra_vol['GridSliceCount'], self.ig.voxel_num_z) + + origin = self.ag.config.system.volume_centre.position + self.assertEqual(astra_vol['option']['WindowMinX'], -self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMaxX'], self.ig.voxel_num_x * self.ig.voxel_size_x * 0.5 + origin[0]) + self.assertEqual(astra_vol['option']['WindowMinY'], -self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMaxY'], self.ig.voxel_num_y * self.ig.voxel_size_y * 0.5 + origin[1]) + self.assertEqual(astra_vol['option']['WindowMinZ'], -self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + self.assertEqual(astra_vol['option']['WindowMaxZ'], self.ig.voxel_num_z * self.ig.voxel_size_z * 0.5 + origin[2]) + diff --git a/Wrappers/Python/test/test_PluginsTigre_Siddon.py b/Wrappers/Python/test/test_PluginsTigre_Siddon.py index 39e8051186..8162980ea3 100644 --- a/Wrappers/Python/test/test_PluginsTigre_Siddon.py +++ b/Wrappers/Python/test/test_PluginsTigre_Siddon.py @@ -45,7 +45,7 @@ class Test_Cone3D_Projectors_sim_tigre(unittest.TestCase, TestCommon_ProjectionO def setUp(self): setup_parameters(self) self.Cone3D() - self.tolerance_fp = 0.35 + self.tolerance_fp = 0.5 @unittest.skipUnless(has_tigre and has_nvidia, "Requires TIGRE GPU") @@ -95,7 +95,7 @@ class Test_Parallel3D_Projectors_sim_tigre(unittest.TestCase, TestCommon_Project def setUp(self): setup_parameters(self) self.Parallel3D() - self.tolerance_fp = 0.25 + self.tolerance_fp = 0.35 @unittest.skipUnless(has_tigre and has_nvidia, "Requires TIGRE GPU") diff --git a/Wrappers/Python/test/utils_projectors.py b/Wrappers/Python/test/utils_projectors.py index 795951edb7..a19577c684 100644 --- a/Wrappers/Python/test/utils_projectors.py +++ b/Wrappers/Python/test/utils_projectors.py @@ -22,6 +22,8 @@ from cil.framework import AcquisitionGeometry from cil.framework.labels import AcquisitionDimension, AcquisitionType +from cil.utilities.display import show2D + class SimData(object): def _get_roi_3D(self): @@ -432,6 +434,7 @@ class TestCommon_ProjectionOperator_SIM(SimData): ''' Tests forward and backward operators function with and without 'out' ''' + def test_forward_projector(self): Op = self.ProjectionOperator(self.ig, self.ag, **self.PO_args) fp = Op.direct(self.img_data) @@ -443,6 +446,26 @@ def test_forward_projector(self): np.testing.assert_allclose(fp.as_array(), fp2.as_array(),1e-8) + def test_forward_projector_flipped(self): + ag = self.ag.copy() + ag.set_panel([self.ag.pixel_num_h,self.ag.pixel_num_v],[self.ag.pixel_size_h,self.ag.pixel_size_v],origin='top-right') + Op = self.ProjectionOperator(self.ig, ag, **self.PO_args) + fp = Op.direct(self.img_data) + + axes = [i for i, label in enumerate(self.acq_data.dimension_labels) if label != 'angle'] + gold = self.acq_data.copy() + gold.fill(np.flip(self.acq_data.as_array(), axis=axes)) + + # show2D([self.acq_data, fp, gold],title=['self.acq_data', 'fp', 'gold']).save('test0.png') + np.testing.assert_allclose(fp.as_array(), gold.as_array(),atol=self.tolerance_fp) + + fp2 = fp.copy() + fp2.fill(0) + Op.direct(self.img_data,out=fp2) + + np.testing.assert_allclose(fp.as_array(), fp2.as_array(),1e-8) + + def test_backward_projectors_functionality(self): #this checks 
mechanics but not value Op = self.ProjectionOperator(self.ig, self.ag, **self.PO_args) @@ -477,6 +500,26 @@ def test_FBP(self): FBP(self.acq_data,out=reco2) np.testing.assert_allclose(reco.as_array(), reco2.as_array(),atol=1e-8) + def test_FBP_flipped(self): + + data = self.acq_data.copy() + data.geometry.set_panel([self.ag.pixel_num_h,self.ag.pixel_num_v],[self.ag.pixel_size_h,self.ag.pixel_size_v],origin='top-right') + + axes = [i for i, label in enumerate(self.acq_data.dimension_labels) if label != 'angle'] + data.fill(np.flip(self.acq_data.as_array(), axis=axes)) + + FBP = self.FBP(self.ig, data.geometry, **self.FBP_args) + reco = FBP(data) + + #show2D([self.img_data, reco],title=['self.img_data', 'reco']).save('test4.png') + + np.testing.assert_allclose(reco.as_array(), self.img_data.as_array(), atol=self.tolerance_fbp) + + reco2 = reco.copy() + reco2.fill(0) + FBP(data,out=reco2) + np.testing.assert_allclose(reco.as_array(), reco2.as_array(),atol=1e-8) + def test_FBP_roi(self): FBP = self.FBP(self.ig_roi, self.ag, **self.FBP_args) From 5bb0d515a371ac41377b615682c5686bb9cc0b8a Mon Sep 17 00:00:00 2001 From: Gemma Fardell Date: Fri, 7 Feb 2025 16:50:13 +0000 Subject: [PATCH 17/17] add cone souv to current astra unit tests --- .../convert_geometry_to_astra_vec_3D.py | 10 +- Wrappers/Python/test/test_PluginsAstra_FBP.py | 10 ++ Wrappers/Python/test/test_PluginsAstra_GPU.py | 25 +++++ Wrappers/Python/test/utils_projectors.py | 97 ++++++++++++++++++- 4 files changed, 133 insertions(+), 9 deletions(-) diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py index dbb35ce7fb..a6976552aa 100644 --- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py +++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py @@ -121,15 +121,11 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in): else: volume_geometry_temp = volume_geometry.copy() - # Compute the current centre - current_centre = np.array([ - volume_geometry_temp.get_min_x() + (volume_geometry_temp.get_max_x() - volume_geometry_temp.get_min_x()) / 2.0, - volume_geometry_temp.get_min_y() + (volume_geometry_temp.get_max_y() - volume_geometry_temp.get_min_y()) / 2.0, - volume_geometry_temp.get_min_z() + (volume_geometry_temp.get_max_z() - volume_geometry_temp.get_min_z()) / 2.0 - ]) + # roi + current_centre = [volume_geometry_temp.center_x, volume_geometry_temp.center_y, volume_geometry_temp.center_z] # Compute a translation vector that will modify the centre of the reconstructed volume - translation = np.array(system.volume_centre.position) - current_centre + translation = np.array(system.volume_centre.position) projector = 'cone_vec' diff --git a/Wrappers/Python/test/test_PluginsAstra_FBP.py b/Wrappers/Python/test/test_PluginsAstra_FBP.py index a73cd063cc..45ae38aae8 100644 --- a/Wrappers/Python/test/test_PluginsAstra_FBP.py +++ b/Wrappers/Python/test/test_PluginsAstra_FBP.py @@ -81,3 +81,13 @@ def setUp(self): self.Parallel2D() self.tolerance_fbp = 1e-3 self.tolerance_fbp_roi = 1e-3 + + +class Test_Cone3DSOUV_FBP_GPU(unittest.TestCase, TestCommon_FBP_SIM): + + @unittest.skipUnless(has_astra and has_nvidia, "Requires ASTRA GPU") + def setUp(self): + setup_parameters(self) + self.Cone3DSOUV() + self.tolerance_fbp = 1e-3 + self.tolerance_fbp_roi = 1e-3 \ No newline at end of file diff --git a/Wrappers/Python/test/test_PluginsAstra_GPU.py 
b/Wrappers/Python/test/test_PluginsAstra_GPU.py index ae7c8febc2..81f65f3193 100644 --- a/Wrappers/Python/test/test_PluginsAstra_GPU.py +++ b/Wrappers/Python/test/test_PluginsAstra_GPU.py @@ -142,3 +142,28 @@ def setUp(self): self.Parallel2D() self.tolerance_linearity = 1e-10 self.tolerance_norm = 1e-6 + + +@unittest.skipUnless(has_astra and has_nvidia, "Requires ASTRA GPU") +class Test_Cone3DSOUV_Projectors_GPU_basic(unittest.TestCase, TestCommon_ProjectionOperator): + def setUp(self): + setup_parameters(self) + self.Cone3DSOUV() + self.tolerance_fp=0 + + +@unittest.skipUnless(has_astra and has_nvidia, "Requires ASTRA GPU") +class Test_Cone3DSOUV_Projectors_GPU_sim(unittest.TestCase, TestCommon_ProjectionOperator_SIM): + def setUp(self): + setup_parameters(self) + self.Cone3DSOUV() + self.tolerance_fp = 0.16 + + +@unittest.skipUnless(has_astra and has_nvidia, "Requires ASTRA GPU") +class Test_Cone3DSOUV_Projectors_GPU_toy(unittest.TestCase, TestCommon_ProjectionOperator_TOY): + def setUp(self): + setup_parameters(self) + self.Cone3DSOUV() + self.tolerance_linearity = 1e-3 + self.tolerance_norm = 0.1 \ No newline at end of file diff --git a/Wrappers/Python/test/utils_projectors.py b/Wrappers/Python/test/utils_projectors.py index a19577c684..3eb658d9ad 100644 --- a/Wrappers/Python/test/utils_projectors.py +++ b/Wrappers/Python/test/utils_projectors.py @@ -90,6 +90,54 @@ def Cone3D(self): self._get_roi_3D() + def Cone3DSOUV(self): + + self.acq_data = dataexample.SIMULATED_CONE_BEAM_DATA.get() + self.acq_data.reorder(self.backend) + + self.img_data = dataexample.SIMULATED_SPHERE_VOLUME.get() + + self.acq_data=np.log(self.acq_data) + self.acq_data*=-1.0 + + self.ig = self.img_data.geometry + + # convert geometry to SOUV + system = self.acq_data.geometry.config.system + src = system.source.position + det_pos = system.detector.position + det_dir_x = system.detector.direction_x + det_dir_y = system.detector.direction_y + obj_pos = system.rotation_axis.position + obj_dir = system.rotation_axis.direction + angles = self.acq_data.geometry.config.angles.angle_data + + src_pos_set = [None]*len(angles) + det_pos_set = [None]*len(angles) + det_dir_x_set = [None]*len(angles) + det_dir_y_set = [None]*len(angles) + + for i, ang in enumerate(angles): + ang_rad = -np.deg2rad(ang) + # rotation matrix + RotationMatrix = np.eye(3) + RotationMatrix[0,0] = RotationMatrix[1,1] = np.cos(ang_rad) + RotationMatrix[0,1] = -np.sin(ang_rad) + RotationMatrix[1,0] = np.sin(ang_rad) + + src_pos_set[i] = RotationMatrix.dot(src - obj_pos) + obj_pos + det_pos_set[i] = RotationMatrix.dot(det_pos - obj_pos) + obj_pos + det_dir_x_set[i] = RotationMatrix.dot(det_dir_x) + det_dir_y_set[i] = RotationMatrix.dot(det_dir_y) + + ag_new = AcquisitionGeometry.create_Cone3D_SOUV(src_pos_set, det_pos_set, det_dir_x_set, det_dir_y_set) + ag_new.set_panel([self.acq_data.geometry.pixel_num_h, self.acq_data.geometry.pixel_num_v], [self.acq_data.geometry.pixel_size_h, self.acq_data.geometry.pixel_size_v], origin='bottom-left') + ag_new.set_labels(self.acq_data.dimension_labels) + + self.ag = ag_new + self._get_roi_3D() + + def Parallel3D(self): self.acq_data = dataexample.SIMULATED_PARALLEL_BEAM_DATA.get() self.acq_data.reorder(self.backend) @@ -309,6 +357,44 @@ def Parallel2D(self): norm_3 = 2 self.test_geometries.append((ag_test_3, ig_test_3, norm_3)) + def Cone3DSOUV(self): + ''' + These are all single cone beam projection geometries. 
Pixels of 1, 2, 0.5, 0.5, Voxels of 1, 2, 0.5, 0.25 + ''' + + self.test_geometries=[] + ag_test_1 = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=[[0,-1000,0]],detector_position_set=[[0,0,0]], detector_direction_x_set=[[1, 0, 0]],detector_direction_y_set=[[0, 0, 1]], volume_centre_position=[0,0,0])\ + .set_panel([16,16],[1,1]) + ag_test_1.set_labels(AcquisitionDimension.get_order_for_engine(self.backend, ag_test_1)) + + ig_test_1 = ag_test_1.get_ImageGeometry() + norm_1 = 4 + self.test_geometries.append((ag_test_1, ig_test_1, 4)) + + + ag_test_2 = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=[[0,-1000,0]],detector_position_set=[[0,0,0]], detector_direction_x_set=[[1, 0, 0]],detector_direction_y_set=[[0, 0, 1]], volume_centre_position=[0,0,0])\ + .set_panel([16,16],[2,2]) + ag_test_2.set_labels(AcquisitionDimension.get_order_for_engine(self.backend, ag_test_2)) + + ig_test_2 = ag_test_2.get_ImageGeometry() + norm_2 = 8 + self.test_geometries.append((ag_test_2, ig_test_2, norm_2)) + + ag_test_3 = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=[[0,-1000,0]],detector_position_set=[[0,0,0]], detector_direction_x_set=[[1, 0, 0]],detector_direction_y_set=[[0, 0, 1]], volume_centre_position=[0,0,0])\ + .set_panel([16,16],[0.5,0.5]) + ag_test_3.set_labels(AcquisitionDimension.get_order_for_engine(self.backend, ag_test_3)) + ig_test_3 = ag_test_3.get_ImageGeometry() + + norm_3 = 2 + self.test_geometries.append((ag_test_3, ig_test_3, norm_3)) + + ag_test_4 = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=[[0,-1000,0]],detector_position_set=[[0,1000,0]], detector_direction_x_set=[[1, 0, 0]],detector_direction_y_set=[[0, 0, 1]], volume_centre_position=[0,0,0])\ + .set_panel([16,16],[0.5,0.5]) + ag_test_4.set_labels(AcquisitionDimension.get_order_for_engine(self.backend, ag_test_4)) + ig_test_4 = ag_test_4.get_ImageGeometry() + + norm_4 = 1 + self.test_geometries.append((ag_test_4, ig_test_4, norm_4)) def test_norm(self): count = 1 @@ -337,7 +423,7 @@ class TestCommon_ProjectionOperator(object): ''' def Cone3D(self): - self.ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-100000,0],detector_position=[0,0,0])\ + self.ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-100000,0],detector_position=[0,0,0],)\ .set_panel([16,16],[1,1])\ .set_angles([0])\ .set_labels(['vertical','horizontal']) @@ -360,6 +446,11 @@ def Parallel2D(self): .set_angles([0])\ .set_labels(['horizontal']) + def Cone3DSOUV(self): + self.ag = AcquisitionGeometry.create_Cone3D_SOUV(source_position_set=[[0,-100000,0]],detector_position_set=[[0,0,0]], detector_direction_x_set=[[1, 0, 0]],detector_direction_y_set=[[0, 0, 1]], volume_centre_position=[0,0,0])\ + .set_panel([16,16],[1,1])\ + .set_labels(['vertical','horizontal']) + def test_forward_projector(self): #create checker-board image @@ -423,7 +514,7 @@ def test_backward_projector(self): Op = self.ProjectionOperator(ig, self.ag, **self.PO_args) bp = Op.adjoint(data) - if self.ag.geom_type == 'cone': + if self.ag.geom_type == 'cone' or self.ag.geom_type == 'cone_souv': #as cone beam res is not perfect grid np.testing.assert_allclose(bp.array, res, atol=1e-3) else: @@ -524,6 +615,8 @@ def test_FBP_flipped(self): def test_FBP_roi(self): FBP = self.FBP(self.ig_roi, self.ag, **self.FBP_args) reco = FBP(self.acq_data) + + #show2D([reco, self.gold_roi]).save('test0_souv.png') np.testing.assert_allclose(reco.as_array(), self.gold_roi, atol=self.tolerance_fbp_roi) if AcquisitionType.DIM3 & self.ag.dimension: