Skip to content

Commit c3002f3

Browse files
committed
Great!
0 parents  commit c3002f3

30 files changed

+1496
-0
lines changed

__pycache__/server.cpython-310.pyc

2.69 KB
Binary file not shown.

__pycache__/server.cpython-39.pyc

7.25 KB
Binary file not shown.

app/TFmane.py

+235
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,235 @@
1+
# Import packages
2+
import os
3+
import json
4+
import platform
5+
import cv2
6+
import numpy as np
7+
import sys
8+
import time
9+
from threading import Thread
10+
import importlib.util
11+
from loguru import logger
12+
13+
# Select a TFLite interpreter implementation based on the host OS:
# on Linux (e.g. a Raspberry Pi) the lightweight tflite_runtime package
# is used; on other systems the full TensorFlow distribution provides
# an Interpreter class with the same API.
system_info = platform.system()
if system_info == 'Linux':
    logger.info("Detected Linux system | Now using tflite_runtime")
    from tflite_runtime.interpreter import Interpreter
else:
    logger.info("Detected non-Linux system | Now using tensorflow.lite")
    from tensorflow.lite.python.interpreter import Interpreter
20+
21+
22+
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
23+
class VideoStream:
    """Camera object that controls video streaming from a webcam/Picamera.

    Frames are grabbed continuously on a background thread so that read()
    always returns the most recent frame without blocking on the camera.
    """

    def __init__(self, resolution=(640, 480), framerate=30):
        # Initialize the camera image stream (device index 0).
        self.stream = cv2.VideoCapture(0)
        if self.stream.isOpened():
            logger.info("Camera stream opened")
        else:
            logger.error("No camera found")

        # Configure the capture: MJPG pixel format, frame size and rate.
        # set() returns False when a property is unsupported; the camera then
        # silently keeps its default, so these values are best-effort.
        self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
        # FIX: the framerate argument was previously accepted but ignored.
        self.stream.set(cv2.CAP_PROP_FPS, framerate)

        # Read the first frame so read() has data before the thread starts.
        (self.grabbed, self.frame) = self.stream.read()

        # Flag used to ask the background thread to stop.
        self.stopped = False

    def start(self):
        """Start the background thread that keeps self.frame up to date."""
        # daemon=True so the capture loop cannot keep a dead main thread's
        # process alive.
        Thread(target=self.update, args=(), daemon=True).start()
        return self

    def update(self):
        """Thread body: keep grabbing frames until stop() is called."""
        while True:
            if self.stopped:
                # Close camera resources before the thread exits.
                self.stream.release()
                return
            # Otherwise, grab the next frame from the stream.
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recent frame grabbed from the camera."""
        return self.frame

    def stop(self):
        """Signal the capture thread to stop and release the camera."""
        self.stopped = True
67+
68+
class TFMane:
    """TensorFlow Lite object-detection runner.

    Loads the model described by the SysMane-provided configuration, opens a
    camera stream and runs a continuous detection loop, publishing results
    through self.current_status.
    """

    def __init__(self, sysmame):
        # System manager supplies model paths and configuration values.
        self.sysmane = sysmame
        self.model_config = self.sysmane.getCurrentModelConfig()
        self.model = self.model_config.get("model_file")
        self.labels = self.model_config.get("model_classes")

        self.system_info = None
        self.interpreter = None

        self.imageWidth = self.model_config.get("config", "image_width")
        self.imageHeight = self.model_config.get("config", "image_height")
        self.framerate = self.model_config.get("config", "framerate")

        self.video = None
        self.camera = []
        # FIX: initialised here so closeDetect() is safe to call even before
        # detect() has started (previously it was first set inside detect()).
        self.close = False

        # Latest results, shared with other components; mutated in place by
        # detect() and exposed via getcurrentStatus().
        self.current_status = {
            "frame": None,            # raw camera frame
            "confident_score": 0,     # confidence of last detection, percent
            "classes": "",            # label of last detected object
            "result_frame": None,     # frame annotated with boxes/labels/FPS
            "detect_flag": False,     # True while something is being detected
            "fps": 0                  # measured processing frame rate
        }

        self.run()

    def run(self):
        """Probe for cameras and, if one is found, load the model and detect."""
        logger.info("TFMane is running")
        self.camera = self.checkAvaiableCamera()
        logger.info("Camara index: {}".format(self.camera))

        if not self.camera:
            logger.info("No camera avaliable right now, Please plug in the usb camera or picam ")
        else:
            self.setup()
            self.detect()

    def setup(self):
        """Load the TFLite model and cache its input/output tensor details."""
        logger.info("Loading model: {}".format(self.sysmane.getModelPath(self.sysmane.getCurrentModel())))
        self.interpreter = Interpreter(model_path=self.sysmane.getFullModelPath(self.sysmane.getCurrentModel()))
        self.interpreter.allocate_tensors()

        # Get model details.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
        # Float models need input normalisation; quantised ones take raw pixels.
        self.floating_model = (self.input_details[0]['dtype'] == np.float32)
        logger.info("Model loaded: {}".format(self.sysmane.getModelPath(self.sysmane.getCurrentModel())))
        logger.info("Height: {} | Width: {}".format(self.height, self.width))
        # Check output layer name to determine if this model was created with
        # TF2 or TF1, because outputs are ordered differently between them.
        self.outname = self.output_details[0]['name']
        if 'StatefulPartitionedCall' in self.outname:  # This is a TF2 model
            self.boxes_idx, self.classes_idx, self.scores_idx = 1, 3, 0
        else:  # This is a TF1 model
            self.boxes_idx, self.classes_idx, self.scores_idx = 0, 1, 2

    def checkAvaiableCamera(self):
        """Return the device indexes (0-9) that can actually deliver a frame."""
        index = 0
        arr = []
        i = 10
        while i > 0:
            logger.info("Trying to open camera index: {}".format(index))
            cap = cv2.VideoCapture(index)
            if cap.read()[0]:
                arr.append(index)
            cap.release()
            index += 1
            i -= 1
        return arr

    def getcurrentStatus(self):
        """Return the shared status dict (updated in place by detect())."""
        return self.current_status

    def closeDetect(self):
        """Ask the detection loop in detect() to terminate."""
        self.close = True

    def detect(self):
        """Run the capture/inference/draw loop until 'q' or closeDetect()."""
        self.close = False
        self.video = VideoStream(resolution=(self.imageWidth, self.imageHeight), framerate=self.framerate).start()
        # Initialize frame rate calculation.
        frame_rate_calc = 1
        freq = cv2.getTickFrequency()

        while True:
            # Start timer (for calculating frame rate).
            t1 = cv2.getTickCount()

            # Grab frame from video stream.
            frame1 = self.video.read()

            # Acquire frame and resize to expected shape [1xHxWx3].
            frame = frame1.copy()
            self.current_status['frame'] = frame
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb, (self.width, self.height))
            input_data = np.expand_dims(frame_resized, axis=0)

            # Normalize pixel values if using a floating model
            # (i.e. if the model is non-quantized).
            if self.floating_model:
                input_data = (np.float32(input_data) - self.model_config.get("config", "input_mean")) / self.model_config.get("config", "input_std")

            # Perform the actual detection by running the model on the image.
            self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
            self.interpreter.invoke()

            # Retrieve detection results.
            boxes = self.interpreter.get_tensor(self.output_details[self.boxes_idx]['index'])[0]    # bounding boxes
            classes = self.interpreter.get_tensor(self.output_details[self.classes_idx]['index'])[0]  # class indexes
            scores = self.interpreter.get_tensor(self.output_details[self.scores_idx]['index'])[0]    # confidences

            # Draw a box for every detection above the confidence threshold.
            for i in range(len(scores)):
                if (scores[i] > self.model_config.get("config", "min_conf_threshold")) and (scores[i] <= 1.0):
                    self.current_status['detect_flag'] = True
                    # The interpreter can return coordinates outside the image;
                    # clamp them with max()/min().
                    ymin = int(max(1, (boxes[i][0] * self.imageHeight)))
                    xmin = int(max(1, (boxes[i][1] * self.imageWidth)))
                    ymax = int(min(self.imageHeight, (boxes[i][2] * self.imageHeight)))
                    xmax = int(min(self.imageWidth, (boxes[i][3] * self.imageWidth)))

                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)

                    # Draw label.
                    object_name = self.labels[int(classes[i])]  # class index -> label text
                    self.current_status['classes'] = object_name
                    persent_scores = int(scores[i] * 100)  # 0.72 -> 72
                    self.current_status['confident_score'] = persent_scores
                    label = '%s: %d%%' % (object_name, persent_scores)  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
                    label_ymin = max(ymin, labelSize[1] + 10)  # keep label below the top edge
                    cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10), (xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)
                    cv2.putText(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                else:
                    self.current_status['detect_flag'] = False

            # Calculate framerate.
            t2 = cv2.getTickCount()
            time1 = (t2 - t1) / freq
            frame_rate_calc = 1 / time1
            self.current_status['fps'] = frame_rate_calc

            # Draw framerate in corner of frame.
            cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
            self.current_status['result_frame'] = frame

            # All the results have been drawn on the frame; display it.
            cv2.imshow('Object detector', frame)

            # BUG FIX: self.close was previously ignored (the check was left
            # commented out), so closeDetect() could never stop the loop and
            # only the 'q' key worked.
            if cv2.waitKey(1) == ord('q') or self.close:
                break

        # Clean up.
        cv2.destroyAllWindows()
        self.video.stop()

app/__init__.py

Whitespace-only changes.
6.12 KB
Binary file not shown.
6.24 KB
Binary file not shown.

app/__pycache__/TFmane.cpython-39.pyc

6.28 KB
Binary file not shown.
121 Bytes
Binary file not shown.
138 Bytes
Binary file not shown.
666 Bytes
Binary file not shown.
675 Bytes
Binary file not shown.
2.48 KB
Binary file not shown.
2.47 KB
Binary file not shown.
8.42 KB
Binary file not shown.
2.63 KB
Binary file not shown.
3.64 KB
Binary file not shown.

app/armmane.py

+24
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import os
2+
from loguru import logger
3+
# try:
4+
# # import TFmane as tfm
5+
# except:
6+
from app import sysmane as smn
7+
8+
from app import TFmane as tfm
9+
10+
11+
12+
class ArmMane:
    """High-level arm controller; keeps a handle to the shared system manager."""

    def __init__(self, sysmane):
        # Store the SysMane instance for later use by arm operations.
        self.sysmane = sysmane
        # TF detection is currently disabled here:
        # self.tfmane = tfm.TFMane(self.sysmane)
18+
19+
20+
21+
# This module is a library: refuse to run as a standalone script.
if __name__ == "__main__":
    print("[ERR] Armmane is a library, please run the program from server.py file.")
    exit(0)

app/conmane.py

+94
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
import os
2+
import json
3+
from loguru import logger
4+
5+
class ConfigMane:
    """Small JSON-backed configuration store.

    The configuration is a dict mirrored to a JSON file at
    ``<config_path>/<config_file_name>`` (config_path defaults to
    ``./store``). Mutating methods only touch the in-memory dict;
    call saveConfig() to persist changes.
    """

    def __init__(self, config_file_name, config_path=None):
        self.config_path = config_path or os.path.join(os.getcwd(), 'store')
        self.config_file = os.path.join(self.config_path, config_file_name)
        self.config = {}
        self.loadConfig()

    def loadConfig(self):
        """(Re)read the JSON file into self.config; fall back to {} on error."""
        try:
            with open(self.config_file, 'r') as f:
                self.config = json.load(f)
        except FileNotFoundError:
            print(f"Config file '{self.config_file}' not found.")
            self.config = {}
        except json.JSONDecodeError as e:
            # ROBUSTNESS FIX: a corrupt config file previously crashed here.
            print(f"Config file '{self.config_file}' is not valid JSON: {e}")
            self.config = {}

    def saveConfig(self):
        """Write the current configuration to disk as pretty-printed JSON."""
        # ROBUSTNESS FIX: create the store directory if it does not exist yet
        # (first run, or the directory was removed).
        os.makedirs(self.config_path, exist_ok=True)
        with open(self.config_file, 'w') as f:
            json.dump(self.config, f, indent=4)

    def get(self, *args):
        """Walk nested keys, e.g. get("config", "image_width").

        Returns the whole config when called with no arguments, and None
        (after logging an error) when any key in the path is missing.
        """
        if not args:
            return self.config

        current_level = self.config
        for key in args:
            if key in current_level:
                current_level = current_level[key]
            else:
                logger.error(f"Key '{key}' not found in config.")
                return None
        return current_level

    def getAll(self):
        """Return the whole in-memory configuration dict."""
        return self.config

    def change(self, key, value):
        """Set a top-level key (in memory only; not persisted)."""
        self.config[key] = value

    def add(self, key, value):
        """Add a top-level key, refusing to overwrite an existing one."""
        if key not in self.config:
            self.config[key] = value
        else:
            print(f"Key '{key}' already exists in the config.")

    def remove(self, key):
        """Delete a top-level key if present; warn otherwise."""
        if key in self.config:
            del self.config[key]
        else:
            print(f"Key '{key}' not found in the config.")

    def delete(self):
        """Remove the config file from disk and clear the in-memory dict."""
        if os.path.exists(self.config_file):
            os.remove(self.config_file)
            self.config = {}
            print("Config file deleted.")
        else:
            print("Config file does not exist.")

    def reload(self):
        """Discard in-memory changes and reread the file from disk."""
        self.loadConfig()
        print("Config reloaded.")
71+
72+
73+
74+
75+
# def loadDataSet():
76+
# try:
77+
# with open(self.dataSet, 'r') as file:
78+
# # Read the lines of the file, remove newline characters, and store them in an array
79+
# file_contents = [line.strip() for line in file]
80+
# return file_contents
81+
# except FileNotFoundError:
82+
# print(f"File '{self.filename}' not found.")
83+
# return []
84+
85+
# def get_value(self, index=None):
86+
# if index is None:
87+
# return self.file_contents
88+
# elif isinstance(index, int) and 0 <= index < len(self.file_contents):
89+
# return self.file_contents[index]
90+
# else:
91+
# print("Invalid index.")
92+
# return None
93+
94+

0 commit comments

Comments
 (0)