video_no_qt.py
import os
import cv2
import sys
import time
import numpy as np
from tensorflow import keras, argmax
from utils.argmaxMeanIOU import ArgmaxMeanIOU
from utils.dataset import CATEGORIES_COLORS
import matplotlib.pyplot as plt
IMG_SIZE = (720, 480)
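# NOTE: IMG_SIZE is defined here but not used below; frames are resized to the
# model's own input size (segmentation_model_size) and then to (640, 480) for display.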
VIDEO_PATH = r"F:\ROAD_VIDEO\Clip"
MODEL_PATH = r"J:\PROJET\ROAD_SEGMENTATION\trained_models\AttentionResUNet-WITH-SOFTMAX_MultiDataset_384-384_epoch-35_loss-0.21_miou_0.52.h5"
OPTIONS = {
    "showRoad": True,
    "showObjects": True,
    "showBackground": True,
}
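# The OPTIONS flags choose which class groups keep their colour in the rendered mask;
# classes belonging to a disabled group are left black. The grouping assumed by the loop
# below is: indices 1-5 = road classes, 6-13 = object classes, 14 and above = background.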
if __name__ == "__main__":
    values = CATEGORIES_COLORS.values()
    categories_color = np.zeros((len(values) + 1, 3), dtype=np.uint8)
    for o, data in enumerate(values):
        i = o + 1
        if (1 <= i <= 5 and OPTIONS["showRoad"]) or (6 <= i <= 13 and OPTIONS["showObjects"]) or (i >= 14 and OPTIONS["showBackground"]):
            categories_color[i] = data["color"]
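    # The model was saved with the custom ArgmaxMeanIOU metric, so the class must be passed
    # through custom_objects for Keras to deserialise the checkpoint.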
    segmentation_model = keras.models.load_model(MODEL_PATH, custom_objects={'ArgmaxMeanIOU': ArgmaxMeanIOU})
    segmentation_model_size = segmentation_model.get_layer(index=0).input_shape[0][1:-1][::-1]
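    # input_shape[0] is assumed to be (None, H, W, C); [1:-1] keeps (H, W) and [::-1]
    # flips it to (W, H), the (width, height) order expected by cv2.resize.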
    for video_filename in os.listdir(VIDEO_PATH):
        filename = os.path.join(VIDEO_PATH, video_filename)
        cap = cv2.VideoCapture(filename)
        new_frame_time = 0
        prev_frame_time = 0
        while cap.isOpened():
            ret, frame = cap.read()
            new_frame_time = time.time()
            if not ret:
                break
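            # Per-frame pipeline: resize to the model input, predict class probabilities,
            # argmax to class indices, map indices to colours, then display the result.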
            img_resized = cv2.resize(frame, segmentation_model_size, interpolation=cv2.INTER_AREA)
            # OpenCV frames are BGR; swap to RGB and scale to [0, 1] before prediction
            result_segmentation = segmentation_model.predict(np.expand_dims(cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB) / 255., axis=0))[0]
            # result_segmentation[result_segmentation < 0.8] = 0
            result_segmentation_with_temp = result_segmentation
            # Argmax
            argmax_result_segmentation = argmax(result_segmentation_with_temp, axis=-1)
            # Map class indices to colours
            argmax_result_segmentation = np.expand_dims(argmax_result_segmentation, axis=-1)
            segmentation = np.squeeze(np.take(categories_color, argmax_result_segmentation, axis=0))
            segmentation = cv2.bilateralFilter(segmentation, 10, 75, 75)
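            # The bilateral filter smooths the colour mask while keeping class boundaries sharp.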
            # Resize the results so they display at a consistent size
            if segmentation_model_size != (640, 480):
                img_resized = cv2.resize(img_resized, (640, 480), interpolation=cv2.INTER_AREA)
                segmentation = cv2.resize(segmentation, (640, 480), interpolation=cv2.INTER_AREA)
            # Compute the frame rate from the time elapsed since the previous frame
            fps = 1 // (new_frame_time - prev_frame_time)
            prev_frame_time = new_frame_time
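            # NOTE: on the first frame prev_frame_time is still 0, so fps evaluates to 0 until
            # a real frame-to-frame interval is available.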
            # Display the input frame and the colour-coded segmentation
            cv2.imshow("ROAD_IMAGE", img_resized)
            cv2.imshow("SEGMENTATION_IMAGE", cv2.cvtColor(segmentation, cv2.COLOR_RGB2BGR))
            print(fps)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
    cv2.destroyAllWindows()