Skip to content

Commit 29a1c3d

Browse files
committed
improve transform-seg
Add simple crop methods; add minAreaRect to recognize mask.
1 parent e62dd65 commit 29a1c3d

File tree

3 files changed

+118
-42
lines changed

3 files changed

+118
-42
lines changed

pv_vision/transform_seg/cell_crop.py

+11
Original file line numberDiff line numberDiff line change
@@ -250,6 +250,17 @@ def detect_peaks(split, direction, cell_size, busbar, thre=0.9, interval=None, m
250250
return peaks
251251

252252

253+
def plot_peaks(n, image, cell_size, busbar, split=None, split_size=None, direction=0, thre=0.9, interval=None, margin=None):
254+
splits = split_img(image, split, split_size, direction)
255+
split = splits[n]
256+
sum_split = np.sum(split, axis=direction)
257+
sum_split = sum_split / np.max(sum_split)
258+
sum_split[sum_split > thre] = 1
259+
peaks = detect_peaks(split, direction, cell_size, busbar, thre, interval, margin)
260+
plt.plot(list(range(len(sum_split))), sum_split)
261+
plt.scatter(peaks, sum_split[peaks])
262+
263+
253264
def detect_vertical_lines(image_thre, column, cell_size, thre=0.8, split=100, peak_interval=None):
254265
""" Detect vertical edges by segmenting image into horizontal splits
255266

pv_vision/transform_seg/perspective_transform.py

+81-15
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,38 @@ def base64_2_mask(s):
7272
return mask
7373

7474

75-
def load_mask(path, image, mask_name='module_unet'):
75+
def has_mask(mask_name, path=None, data=None):
76+
"""Check if mask exists
77+
Parameters
78+
----------
79+
mask_name: str
80+
The annotation name of the mask.
81+
82+
path: str or pathlib.PosixPath
83+
The path of annotation json file
84+
85+
data: dict
86+
If provided, will not open path
87+
88+
Returns
89+
-------
90+
If exist, return the index in objects list
91+
If not, return False
92+
"""
93+
if path is None and data is None:
94+
raise ValueError("Mask file not provided.")
95+
if path:
96+
with open(path, 'r') as file:
97+
data = json.load(file)
98+
99+
for inx, obj in enumerate(data["objects"]):
100+
if obj['classTitle'] == mask_name:
101+
return inx
102+
103+
return False
104+
105+
106+
def load_mask(path, image, mask_name='module_unet', center=True):
76107
"""Load the image of mask
77108
78109
Parameters
@@ -86,6 +117,9 @@ def load_mask(path, image, mask_name='module_unet'):
86117
mask_name: str
87118
The annotation name of the mask. Default is 'module_unet'.
88119
120+
center: bool
121+
If True, return mask center.
122+
89123
Returns
90124
-------
91125
mask: array
@@ -96,14 +130,27 @@ def load_mask(path, image, mask_name='module_unet'):
96130
"""
97131
with open(path, 'r') as file:
98132
data = json.load(file)
99-
if len(data["objects"]) == 1:
100-
code = data["objects"][0]["bitmap"]["data"]
101-
origin = data["objects"][0]["bitmap"]["origin"]
133+
# if len(data["objects"]) == 0:
134+
# return None
135+
# code = data["objects"][0]["bitmap"]["data"]
136+
# origin = data["objects"][0]["bitmap"]["origin"]
137+
# else:
138+
# flag = True
139+
# for obj in data["objects"]:
140+
# if obj['classTitle'] == mask_name:
141+
inx = has_mask(mask_name, data=data)
142+
if inx is not False:
143+
obj = data["objects"][inx]
144+
code = obj["bitmap"]["data"]
145+
origin = obj["bitmap"]["origin"]
102146
else:
103-
for obj in data["objects"]:
104-
if obj['classTitle'] == mask_name:
105-
code = obj["bitmap"]["data"]
106-
origin = obj["bitmap"]["origin"]
147+
mask = np.zeros((image.shape[0], image.shape[1]))
148+
mask = mask.astype('uint8')
149+
mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])
150+
if center:
151+
return mask, mask_center
152+
else:
153+
return mask
107154
mask = base64_2_mask(code)
108155
mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])
109156
mask_center += origin
@@ -117,7 +164,10 @@ def load_mask(path, image, mask_name='module_unet'):
117164
right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))
118165
mask5 = np.hstack((mask4, right))
119166

120-
return mask5.astype('uint8'), mask_center.astype(int)
167+
if center:
168+
return mask5.astype('uint8'), mask_center.astype(int)
169+
else:
170+
return mask5.astype('uint8')
121171

122172

123173
def find_intersection(mask_part, houghlinePara=50):
@@ -281,7 +331,7 @@ def find_module_corner(mask, mask_center, dist=200, displace=0, method=0, corner
281331
return np.array(corners_order)
282332

283333

284-
def perspective_transform(image, src, sizex, sizey):
334+
def perspective_transform(image, src, sizex, sizey, rotate=True):
285335
"""Do perspective transform on the solar modules. Orientation of the input module is auto-detected. The output
286336
module has short side vertically arranged and long side horizontally arranged.
287337
@@ -296,16 +346,24 @@ def perspective_transform(image, src, sizex, sizey):
296346
sizex, sizey: int
297347
size of the output image. x is the long side and y is the short side.
298348
349+
rotate: bool
350+
If true, auto-detection of orientation is on.
351+
299352
Returns
300353
-------
301354
warped: array
302355
Transformed image of solar module
303356
"""
304357
src = np.float32(src)
305-
if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):
306-
dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
307-
else:
358+
359+
if rotate and np.sum((src[0] - src[2])**2) > np.sum((src[0] - src[1])**2):
308360
dst = np.float32([(0, sizey), (0, 0), (sizex, sizey), (sizex, 0)])
361+
else:
362+
dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
363+
#if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):
364+
# dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
365+
#else:
366+
309367
M = cv.getPerspectiveTransform(src, dst)
310368

311369
warped = cv.warpPerspective(image, M, (sizex, sizey))
@@ -441,6 +499,7 @@ def find_module_corner2(mask, mode=0):
441499
mode == 1: detect corners of the approximated convex of module
442500
mode == 2: detect corners of the approximated contour of the module
443501
mode == 3: detect corners of the blurred mask of the module
502+
mode == 4: detect corners using minAreaRect (rotated bounding rectangle)
444503
445504
Returns
446505
-------
@@ -457,6 +516,13 @@ def find_module_corner2(mask, mode=0):
457516
length = len(cnt)
458517
inx = i
459518

519+
if mode == 4:
520+
rect = cv.minAreaRect(contours[inx])
521+
corners = cv.boxPoints(rect)
522+
corners_sorted = sort_corners(np.array(corners))
523+
corners_displaced = np.array([[-1, -1], [1, -1], [-1, 1], [1, 1]]) * 3 + corners_sorted
524+
return corners_displaced
525+
460526
cnt_approx = cv.approxPolyDP(contours[inx], 8, True)
461527
convex = cv.convexHull(contours[inx])
462528
conv_approx = cv.approxPolyDP(convex, 8, True)
@@ -473,7 +539,7 @@ def find_module_corner2(mask, mode=0):
473539
elif mode == 3:
474540
corners = find_polygon_corners(blur)
475541
else:
476-
print("mode must be one of 0, 1, 2")
477-
return
542+
raise Exception("mode must be one of 0, 1, 2, 3, 4")
543+
478544
return corners
479545

pv_vision/transform_seg/solarmodule.py

+26-27
Original file line numberDiff line numberDiff line change
@@ -370,7 +370,7 @@ def corner_detection_cont(self, mode=0, output=False):
370370
if output:
371371
return self._corners
372372

373-
def transform(self, width=None, height=None, cellsize=None, img_only=True):
373+
def transform(self, width=None, height=None, cellsize=None, auto_rotate=True, img_only=True):
374374
"""Do perspective transform on the solar module
375375
376376
Parameters
@@ -381,6 +381,9 @@ def transform(self, width=None, height=None, cellsize=None, img_only=True):
381381
cellsize: int
382382
Edge length of a cell
383383
384+
auto_rotate: bool
385+
If true, automatically adjust module orientation such that shorter side is vertically aligned.
386+
384387
img_only: bool
385388
If true, only return the image of transformed module.
386389
Otherwise return a transformed module instance
@@ -398,8 +401,8 @@ def transform(self, width=None, height=None, cellsize=None, img_only=True):
398401
if cellsize:
399402
width = self.col * cellsize
400403
height = self.row * cellsize
401-
wrap = transform.perspective_transform(self._image, self._corners, width, height)
402-
self._transformed = wrap
404+
wrap = transform.perspective_transform(self._image, self._corners, width, height, rotate=auto_rotate)
405+
self._transformed = TransformedModule(wrap, self._row, self._col, self._busbar)
403406
if img_only:
404407
return wrap
405408
else:
@@ -420,25 +423,15 @@ def is_transformed(self, x_min, y_min):
420423
-------
421424
bool
422425
"""
423-
res = TransformedModule(self._transformed, self._row,
424-
self._col, self.busbar).is_transformed(x_min, y_min)
426+
res = self._transformed.is_transformed(x_min, y_min)
425427
return res
426428

427-
def crop_cell(self, cellsize, vl_interval=None, vl_split_size=None,
429+
def crop_cell(self, cellsize, simple=False, vl_interval=None, vl_split_size=None,
428430
hl_interval=None, hl_split_size=None, margin=None):
429-
vinx_split, vline_split = seg.detect_edge(self._transformed, row_col=[self.row, self.col], cell_size=cellsize,
430-
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
431-
peak_interval=vl_interval, margin=margin)
432-
vline_abs = seg.linear_regression(vinx_split, vline_split)
433-
hinx_split, hline_split = seg.detect_edge(self._transformed, row_col=[self.row, self.col], cell_size=cellsize,
434-
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
435-
peak_interval=hl_interval, margin=margin)
436-
hline_abs = seg.linear_regression(hinx_split, hline_split)
431+
cells = self._transformed.crop_cell(cellsize, simple, vl_interval, vl_split_size,
432+
hl_interval, hl_split_size, margin)
437433

438-
hline_abs_couple = seg.couple_edges(hline_abs, length=self.size[1])
439-
vline_abs_couple = seg.couple_edges(vline_abs, length=self.size[0])
440-
441-
return np.array(seg.segment_cell(self._transformed, hline_abs_couple, vline_abs_couple, cellsize=cellsize))
434+
return cells
442435

443436

444437
class TransformedModule(AbstractModule):
@@ -468,16 +461,22 @@ def is_transformed(self, x_min, y_min):
468461
peak_x, peak_y = transform.find_inner_edge(self._image)
469462
return (len(peak_x) >= x_min) and (len(peak_y) >= y_min)
470463

471-
def crop_cell(self, cellsize, vl_interval=None, vl_split_size=None,
464+
def crop_cell(self, cellsize, simple=False, vl_interval=None, vl_split_size=None,
472465
hl_interval=None, hl_split_size=None, margin=None):
473-
vinx_split, vline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
474-
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
475-
peak_interval=vl_interval, margin=margin)
476-
vline_abs = seg.linear_regression(vinx_split, vline_split)
477-
hinx_split, hline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
478-
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
479-
peak_interval=hl_interval, margin=margin)
480-
hline_abs = seg.linear_regression(hinx_split, hline_split)
466+
if simple:
467+
vline_abs = list(zip(np.zeros(self.col - 1),
468+
np.linspace(0, self.size[0], self.col + 1)[1: -1].astype(int)))
469+
hline_abs = list(zip(np.zeros(self.row - 1),
470+
np.linspace(0, self.size[1], self.row + 1)[1: -1].astype(int)))
471+
else:
472+
vinx_split, vline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
473+
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
474+
peak_interval=vl_interval, margin=margin)
475+
vline_abs = seg.linear_regression(vinx_split, vline_split)
476+
hinx_split, hline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
477+
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
478+
peak_interval=hl_interval, margin=margin)
479+
hline_abs = seg.linear_regression(hinx_split, hline_split)
481480

482481
hline_abs_couple = seg.couple_edges(hline_abs, length=self.size[1])
483482
vline_abs_couple = seg.couple_edges(vline_abs, length=self.size[0])

0 commit comments

Comments
 (0)