|
| 1 | +from matplotlib.backend_bases import RendererBase |
| 2 | +from matplotlib.transforms import Bbox, IdentityTransform, Affine2D |
| 3 | +from matplotlib.path import Path |
| 4 | +import matplotlib._image as _image |
| 5 | +import numpy as np |
| 6 | +from matplotlib.image import _interpd_ |
| 7 | + |
| 8 | + |
class _TransformRenderer(RendererBase):
    """
    A matplotlib renderer which performs transforms to change the final
    location of plotted elements, and then defers drawing work to the
    original renderer.
    """

    def __init__(
        self,
        base_renderer,
        mock_transform,
        transform,
        bounding_axes,
        image_interpolation="nearest",
        scale_linewidths=True
    ):
        """
        Constructs a new _TransformRenderer.

        Parameters
        ----------
        base_renderer: `~matplotlib.backend_bases.RendererBase`
            The renderer to use for drawing objects after applying
            transforms.

        mock_transform: `~matplotlib.transforms.Transform`
            The transform or coordinate space which all passed
            paths/triangles/images will be converted to before being placed
            back into display coordinates by the main transform. For example
            if the parent axes transData is passed, all objects will be
            converted to the parent axes data coordinate space before being
            transformed via the main transform back into coordinate space.

        transform: `~matplotlib.transforms.Transform`
            The main transform to be used for plotting all objects once
            converted into the mock_transform coordinate space. Typically
            this is the child axes data coordinate space (transData).

        bounding_axes: `~matplotlib.axes.Axes`
            The axes to plot everything within. Everything outside of this
            axes will be clipped.

        image_interpolation: string
            Supported options are 'antialiased', 'nearest', 'bilinear',
            'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
            'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel',
            'mitchell', 'sinc', 'lanczos', or 'none'. The default value is
            'nearest'. This determines the interpolation used when
            attempting to render a zoomed version of an image.

        scale_linewidths: bool, default is True
            Specifies if line widths should be scaled, in addition to the
            paths themselves.

        Raises
        ------
        ValueError
            If `image_interpolation` is not one of the names present in
            matplotlib's interpolation lookup table.
        """
        super().__init__()
        self.__renderer = base_renderer
        self.__mock_trans = mock_transform
        self.__core_trans = transform
        self.__bounding_axes = bounding_axes
        self.__scale_widths = scale_linewidths

        # Map the user-supplied interpolation name to matplotlib's internal
        # integer code (used later by _image.resample); reject unknown
        # names up front rather than at draw time.
        try:
            self.__img_inter = _interpd_[image_interpolation.lower()]
        except KeyError:
            raise ValueError(
                f"Invalid Interpolation Mode: {image_interpolation}"
            )

    def _scale_gc(self, gc):
        """
        Private method, returns a copy of the given graphics context with
        its line width and hatch line width scaled by the area change of
        the transfer transform.

        Parameters
        ----------
        gc: `~matplotlib.backend_bases.GraphicsContextBase`
            The graphics context to copy and scale.

        Returns
        -------
        `~matplotlib.backend_bases.GraphicsContextBase`
            A new graphics context with scaled line widths.
        """
        transfer_transform = self._get_transfer_transform(IdentityTransform())
        new_gc = self.__renderer.new_gc()
        new_gc.copy_properties(gc)

        # Push a unit square through the transfer transform; the geometric
        # mean of the resulting width and height gives a single scalar
        # scale factor for line widths.
        unit_box = Bbox.from_bounds(0, 0, 1, 1)
        unit_box = transfer_transform.transform_bbox(unit_box)
        mult_factor = np.sqrt(unit_box.width * unit_box.height)

        new_gc.set_linewidth(gc.get_linewidth() * mult_factor)
        # NOTE(review): writes a private attribute of the graphics context;
        # no public setter is used here — confirm this stays valid for the
        # matplotlib version in use.
        new_gc._hatch_linewidth = gc.get_hatch_linewidth() * mult_factor

        return new_gc

    def _get_axes_display_box(self):
        """
        Private method, get the bounding box of the child axes in display
        coordinates.
        """
        return self.__bounding_axes.patch.get_bbox().transformed(
            self.__bounding_axes.transAxes
        )

    def _get_transfer_transform(self, orig_transform):
        """
        Private method, returns the transform which translates and scales
        coordinates as if they were originally plotted on the child axes
        instead of the parent axes.

        Parameters
        ----------
        orig_transform: `~matplotlib.transforms.Transform`
            The transform that was going to be originally used by the
            object/path/text/image.

        Returns
        -------
        `~matplotlib.transforms.Transform`
            A matplotlib transform which goes from original point data ->
            display coordinates if the data was originally plotted on the
            child axes instead of the parent axes.
        """
        # We apply the original transform to go to display coordinates, then
        # apply the parent data transform inverted to go to the parent axes
        # coordinate space (data space), then apply the child axes data
        # transform to go back into display space, but as if we originally
        # plotted the artist on the child axes....
        return (
            orig_transform + self.__mock_trans.inverted() + self.__core_trans
        )

    # We copy all of the properties of the renderer we are mocking, so that
    # artists plot themselves as if they were placed on the original renderer.
    @property
    def height(self):
        # Canvas height in pixels, taken from the wrapped renderer.
        return self.__renderer.get_canvas_width_height()[1]

    @property
    def width(self):
        # Canvas width in pixels, taken from the wrapped renderer.
        return self.__renderer.get_canvas_width_height()[0]

    def get_text_width_height_descent(self, s, prop, ismath):
        # Defer text measurement to the wrapped renderer.
        return self.__renderer.get_text_width_height_descent(s, prop, ismath)

    def get_canvas_width_height(self):
        return self.__renderer.get_canvas_width_height()

    def get_texmanager(self):
        return self.__renderer.get_texmanager()

    def get_image_magnification(self):
        return self.__renderer.get_image_magnification()

    def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
        # NOTE(review): delegates to a private method of the wrapped
        # renderer — confirm it exists on all supported backends.
        return self.__renderer._get_text_path_transform(x, y, s, prop, angle,
                                                        ismath)

    def option_scale_image(self):
        # Report that this renderer cannot scale images natively; see
        # draw_image below, which therefore rescales images itself.
        return False

    def points_to_pixels(self, points):
        return self.__renderer.points_to_pixels(points)

    def flipy(self):
        return self.__renderer.flipy()

    def new_gc(self):
        return self.__renderer.new_gc()

    # Actual drawing methods below:
    def draw_path(self, gc, path, transform, rgbFace=None):
        """
        Draw the given path, repositioned as if it had been plotted on the
        child axes, and clipped to the child axes bounding box.
        """
        # Convert the path to display coordinates, but if it was originally
        # drawn on the child axes. Deep-copy first so the caller's path is
        # not mutated.
        path = path.deepcopy()
        path.vertices = self._get_transfer_transform(transform).transform(
            path.vertices
        )
        bbox = self._get_axes_display_box()

        # We check if the path intersects the axes box at all, if not don't
        # waste time drawing it.
        if(not path.intersects_bbox(bbox, True)):
            return

        if(self.__scale_widths):
            gc = self._scale_gc(gc)

        # Change the clip to the sub-axes box
        gc.set_clip_rectangle(bbox)

        # Vertices are already in display coordinates, so pass an identity
        # transform to the wrapped renderer.
        self.__renderer.draw_path(gc, path, IdentityTransform(), rgbFace)

    def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
        # If the text field is empty, don't even try rendering it...
        if((s is None) or (s.strip() == "")):
            return

        # Call the super class instance, which works for all cases except one
        # checked above... (Above case causes error)
        super()._draw_text_as_path(gc, x, y, s, prop, angle, ismath)

    def draw_gouraud_triangle(self, gc, points, colors, transform):
        """
        Draw a Gouraud-shaded triangle, repositioned as if it had been
        plotted on the child axes, and clipped to the child axes box.
        """
        # Pretty much identical to draw_path, transform the points and adjust
        # clip to the child axes bounding box.
        points = self._get_transfer_transform(transform).transform(points)
        # Wrap the transformed points in a Path only so we can reuse its
        # bbox-intersection test below.
        path = Path(points, closed=True)
        bbox = self._get_axes_display_box()

        if(not path.intersects_bbox(bbox, True)):
            return

        if(self.__scale_widths):
            gc = self._scale_gc(gc)

        gc.set_clip_rectangle(bbox)

        self.__renderer.draw_gouraud_triangle(gc, path.vertices, colors,
                                              IdentityTransform())

    # Images prove to be especially messy to deal with...
    def draw_image(self, gc, x, y, im, transform=None):
        """
        Draw the given image, rescaled and repositioned as if it had been
        plotted on the child axes, and clipped to the child axes box.

        The image is expected to have 4 channels (RGBA) in its last axis,
        since channel index 3 is used as the alpha mask below.
        """
        mag = self.get_image_magnification()
        shift_data_transform = self._get_transfer_transform(
            IdentityTransform()
        )
        axes_bbox = self._get_axes_display_box()
        # Compute the image bounding box in display coordinates....
        # Image arrives pre-magnified.
        img_bbox_disp = Bbox.from_bounds(x, y, im.shape[1], im.shape[0])
        # Now compute the output location, clipping it with the final axes
        # patch.
        out_box = img_bbox_disp.transformed(shift_data_transform)
        clipped_out_box = Bbox.intersection(out_box, axes_bbox)

        # Image lands entirely outside the child axes: nothing to draw.
        if(clipped_out_box is None):
            return

        # We compute what the dimensions of the final output image within the
        # sub-axes are going to be.
        x, y, out_w, out_h = clipped_out_box.bounds
        out_w, out_h = int(np.ceil(out_w * mag)), int(np.ceil(out_h * mag))

        if((out_w <= 0) or (out_h <= 0)):
            return

        # We can now construct the transform which converts between the
        # original image (a 2D numpy array which starts at the origin) to the
        # final zoomed image: un-magnify, move to the image's display origin,
        # apply the child-axes repositioning, then shift/magnify into the
        # clipped output array's pixel space.
        img_trans = (
            Affine2D().scale(1/mag, 1/mag)
            .translate(img_bbox_disp.x0, img_bbox_disp.y0)
            + shift_data_transform
            + Affine2D().translate(-clipped_out_box.x0, -clipped_out_box.y0)
            .scale(mag, mag)
        )

        # We resize and zoom the original image onto the out_arr. The alpha
        # channel is resampled separately into trans_msk and copied back so
        # transparency survives the resample.
        out_arr = np.zeros((out_h, out_w, im.shape[2]), dtype=im.dtype)
        trans_msk = np.zeros((out_h, out_w), dtype=im.dtype)

        # NOTE(review): im[:, :, 3] is a non-contiguous view — confirm
        # _image.resample accepts it for the matplotlib version in use.
        _image.resample(im, out_arr, img_trans, self.__img_inter, alpha=1)
        _image.resample(im[:, :, 3], trans_msk, img_trans, self.__img_inter,
                        alpha=1)
        out_arr[:, :, 3] = trans_msk

        if(self.__scale_widths):
            gc = self._scale_gc(gc)

        gc.set_clip_rectangle(clipped_out_box)

        x, y = clipped_out_box.x0, clipped_out_box.y0

        # option_scale_image() returns False on this class, so the else
        # branch is the one taken here; the first branch exists in case a
        # subclass overrides option_scale_image().
        if(self.option_scale_image()):
            self.__renderer.draw_image(gc, x, y, out_arr, None)
        else:
            self.__renderer.draw_image(gc, x, y, out_arr)
0 commit comments