forked from jahnavi2k/cb
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfinal_car_plate_number_recognition.py
163 lines (130 loc) · 4.96 KB
/
final_car_plate_number_recognition.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
# -*- coding: utf-8 -*-
"""FINAL CAR PLATE NUMBER RECOGNITION

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1dWyreN1962LUNGVNbHY4Be1LbbtKwVa_

# Car plate number Recognition Model
"""
# Colab environment setup (run these in a notebook cell, not here):
# !nvcc --version
# !pip install easyocr
# !pip install imutils
# !pip install opencv-python-headless==4.1.2.30
# !pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
import cv2
import easyocr


def _is_plate_candidate(candidate, length=10):
    """Return True if *candidate* looks like a plate number.

    A candidate must be exactly *length* characters long and be
    alphanumeric with a mix of letters and digits (purely numeric or
    purely alphabetic strings are rejected).
    """
    return (
        len(candidate) == length
        and candidate.isalnum()
        and not candidate.isnumeric()
        and not candidate.isalpha()
    )


def recognize_plate(image_path='car.jpeg', plate_length=10):
    """OCR *image_path* and return the detected plate text.

    Parameters
    ----------
    image_path : str
        Path to the input image containing a car plate.
    plate_length : int
        Expected number of characters on the plate. Default is 10 —
        presumably the target region's plate format; confirm for other
        regions.

    Returns
    -------
    str
        The last OCR result that matched the plate heuristic, or ""
        if no candidate was found.

    Raises
    ------
    FileNotFoundError
        If the image cannot be read.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None on failure instead of raising; fail
        # loudly here rather than letting cvtColor crash cryptically.
        raise FileNotFoundError(f"Could not read image: {image_path}")

    # Grayscale shrinks the data (one channel instead of three) and is
    # sufficient for OCR.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    reader = easyocr.Reader(['en'])
    # detail=0 makes readtext return plain strings instead of
    # (bbox, text, confidence) tuples.
    results = reader.readtext(gray, detail=0)

    text = ""
    for result in results:
        if _is_plate_candidate(result, plate_length):
            text = result
            print(result)
    return text


if __name__ == '__main__':
    text = recognize_plate()
    # Display result (needs a GUI environment, not headless Colab):
    # cv2.imshow('Result', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
# import cv2
# import easyocr
# # Load image
# image = cv2.imread(
# 'C:/Users/jahna/Dropbox/My PC (DESKTOP-UKVOUSD)/Desktop/cb/carplatenumber3.jpg')
# # Convert to grayscale
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# # Initialize EasyOCR reader
# reader = easyocr.Reader(['en'])
# # Detect license plate using EasyOCR
# results = reader.readtext(gray, detail=0)
# for result in results:
# if len(result) == 7 and result.isalnum():
# print(f"License Plate Detected: {result}")
# # Draw bounding box around license plate
# coords = reader.recognize(gray, detection_type='textline')
# x_min, y_min = coords[0][0][0], coords[0][0][1]
# x_max, y_max = coords[0][2][0], coords[0][2][1]
# cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
# break
# # Display result
# cv2.imshow('Result', image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# ------------------------
# import cv2
# from matplotlib import pyplot as plt
# import numpy as np
# import imutils
# import easyocr
# # import main
# # from main import *
# import os
# import streamlit as st
# os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# # preprocessing
# img = cv2.imread(
# r'C:/Users/jahna/Dropbox/My PC (DESKTOP-UKVOUSD)/Desktop/cb/carplatenumber3.jpg')
# # the red/green/blue channels triple the pixel data, which is why we convert to gray
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# # plt.imshow(cv2.cvtColor(gray, cv2.COLOR_BGR2RGB)) # savetime
# """*Apply Filters and edge detection*"""
# bfilter = cv2.bilateralFilter(gray, 11, 17, 17) # noise reduced
# edged = cv2.Canny(bfilter, 30, 200) # DETECT EDGE
# # plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))
# # when we converted color->gray a lot of data/pixels got reduced due to which the analysis took less time
# # now that we have used bfliter so that more time is reduced
# """*Find contours and apply mask*"""
# # contours stores the polygons in a picture separately, so we can easily detect the number plate
# keypoints = cv2.findContours(
# edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# contours = imutils.grab_contours(keypoints)
# contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
# # the sorting in descending order so that better shapes come first
# # CHAIN_APPROX_SIMPLE is used to simplify the data, i.e. without it even the pixels along a line would get stored, but we only need proper polygons
# location = None
# for contour in contours:
# # how many approx values if we increase value the result would be more rough accordingly
# approx = cv2.approxPolyDP(contour, 10, True)
# if len(approx) == 4:
# location = approx
# break
# location
# # will finally mask
# mask = np.zeros(gray.shape, np.uint8)
# new_image = cv2.drawContours(mask, [location], 0, 255, -1)
# new_image = cv2.bitwise_and(img, img, mask=mask)
# # plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))
# (x, y) = np.where(mask == 255)
# (x1, y1) = (np.min(x), np.min(y))
# (x2, y2) = (np.max(x), np.max(y))
# cropped_image = gray[x1:x2+1, y1:y2+1]
# # plt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))
# """*Easy OCR use*"""
# reader = easyocr.Reader(['en']) # english
# result = reader.readtext(cropped_image)
# result
# """*Plot the Model*"""
# # if (len(result) == 1):
# # text = result[0][-2]
# # else:
# # text = result[1][-2]
# # print(text)
# # print(result, "juuuuu")
# if (len(result) == 1):
# text = result[0][-2]
# else:
# text = result[1][-2]
# # text = result[0][-2]
# st.write(text)
# font = cv2.FONT_HERSHEY_SIMPLEX
# # res=cv2.putText(img,text=text, org=(approx[0][0][0], approx[1][0][1]+60), fontFace=font, fontScale=1, color=(0,255,0), thickness=2, lineType=1)
# res = cv2.rectangle(img, tuple(approx[0][0]), tuple(
# approx[2][0]), (0, 255, 0), 3)
# img = cv2.putText(img, text, tuple(
# approx[0][0]), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
# # plt.imshow(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))