
Commit 4ad31d3

Author: salim laimeche
Commit message: add test mod
1 parent 0552ed1

File tree

7 files changed, +102 -88 lines changed


Makefile

Whitespace-only changes.

app/video-inference/page.tsx

Lines changed: 9 additions & 10 deletions
@@ -1,17 +1,16 @@
 import { UserView } from "../../lib/identity/definition"
-import { verifySession } from "../../lib/identity/session-local"
 import VideoInference from "@/components/VideoInference"

 export default async function VideoInferencePage() {
-  const session = await verifySession()
+  //const session = await verifySession()

-  const user: UserView = {
-    id: session?.userId as string,
-    name: session?.name as string,
-    surname: session?.surname as string,
-    chatid: session?.chatid as string,
-    container: session?.container as string,
-  }
+  // const user: UserView = {
+  //   id: session?.userId as string,
+  //   name: session?.name as string,
+  //   surname: session?.surname as string,
+  //   chatid: session?.chatid as string,
+  //   container: session?.container as string,
+  // }

-  return <VideoInference user={user} />
+  return <VideoInference />
 }
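
Net effect of this hunk: the call to verifySession and the UserView built from it are commented out, so the page renders the inference view without requiring a login. For reference, the active code that remains is roughly the following (reconstructed from the hunk above; the now-unused UserView import and the commented-out block are omitted):

import VideoInference from "@/components/VideoInference"

export default async function VideoInferencePage() {
  return <VideoInference />
}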

components/Login.tsx

Lines changed: 12 additions & 0 deletions
@@ -75,6 +75,18 @@ export default function Login() {
             balise="h3"
             className={`text-sm font-bold bg-gradient-to-r from-gray-500 via-gray-200 to-gray-500 inline-block text-transparent bg-clip-text`}
           />
+          <button
+            onClick={() => router.push("/video-inference")}
+            className="relative inline-flex h-8 overflow-hidden rounded-full p-[1px] focus:outline-none focus:ring-2 focus:ring-slate-400 focus:ring-offset-2 focus:ring-offset-slate-50 z-10">
+            <span className="absolute inset-[-1000%] animate-[spin_2s_linear_infinite] bg-[conic-gradient(from_90deg_at_50%_50%,#E2CBFF_0%,#393BB2_50%,#E2CBFF_100%)]" />
+            <span className="inline-flex h-full w-full cursor-pointer items-center justify-center rounded-full px-2 py-1 text-sm font-medium text-white backdrop-blur-3xl">
+              <BlurIn
+                text="Tester sans connexion"
+                balise="h1"
+                className={` font-bold bg-gradient-to-r from-gray-500 via-gray-200 to-gray-500 inline-block text-transparent bg-clip-text z-10`}
+              />{" "}
+            </span>
+          </button>
         </div>
         <Form {...form}>
           <form
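
The added button uses the App Router's client-side navigation to reach the new test page. A minimal sketch of the same pattern in isolation, assuming Login.tsx is a client component and that router comes from useRouter() in next/navigation (the hook call itself sits outside this hunk); the component name is illustrative and the styling is trimmed:

"use client"

import { useRouter } from "next/navigation"

// Illustrative standalone version of the "Tester sans connexion" button.
export function TestWithoutLoginButton() {
  const router = useRouter()

  return (
    <button
      type="button"
      // Navigate to the anonymous video-inference page added by this commit.
      onClick={() => router.push("/video-inference")}
      className="rounded-full px-2 py-1 text-sm font-medium">
      Tester sans connexion
    </button>
  )
}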

components/VideoInference.tsx

Lines changed: 1 addition & 5 deletions
@@ -14,11 +14,7 @@ import drawBoundingBoxes from "@/lib/model-detection/coco-ssd/utils"
 import useYolodisTfjs from "@/hooks/use-yolo-tfjs"
 import { detectVideo } from "@/lib/yolov8n/detect"

-interface IProps {
-  user: UserView
-}
-
-export default function VideoInference({ user }: IProps) {
+export default function VideoInference() {
   const { modelName } = useModelStore()
   const ready = useTfjsBackendWeb({ backend: "webgl" })
   const { cocoSsd, loadCoco } = useCocoSsd({ ready })

components/model-selection.tsx

Lines changed: 30 additions & 26 deletions
@@ -14,13 +14,15 @@ import MultipleSelector, { Option } from "./ui/multiple-select"
 import useModelDetectionStorage from "@/hooks/use-model-detection-storage"
 import { useEffect, useState } from "react"
 import { Label } from "./ui/label"
+import { usePathname } from "next/navigation"

 export default function ModelSelection() {
   const { modelName, setModel, disposeModel } = useModelStore()
   const [options, setOptions] = useState<Option[]>([])
   const { labelsToDetect, setLabelsToDetect } = useModelDetectionStorage({
     modelName,
   })
+  const pathname = usePathname()

   useEffect(() => {
     if (modelName === ModelComputerVision.EMPTY) {
@@ -83,32 +85,34 @@
           Réinitialiser
         </Button>
       </CardContent>
-      <CardContent className="p-6">
-        <Label>Labels à détecter</Label>
-        <MultipleSelector
-          disabled={!modelName}
-          options={options && options}
-          value={getDefaultValue()}
-          className=" bg-white dark:bg-gray-800 border border-gray-300 dark:border-gray-700 rounded-md shadow-sm "
-          emptyIndicator={
-            <p className="text-center text-lg leading-10 text-gray-600 dark:text-gray-400">
-              Aucun label à détecter
-            </p>
-          }
-          onChange={(selectedOptions: Option[]) => {
-            const labels = labelsToDetect.map(label => {
-              return {
-                label: label.label,
-                toDetect: selectedOptions.some(
-                  selectedOption => selectedOption.value === label.label
-                ),
-              }
-            })
-            console.log("labels", labels)
-            setLabelsToDetect(labels)
-          }}
-        />
-      </CardContent>
+      {!pathname.includes("video-inference") && (
+        <CardContent className="p-6">
+          <Label>Labels à détecter</Label>
+          <MultipleSelector
+            disabled={!modelName}
+            options={options && options}
+            value={getDefaultValue()}
+            className=" bg-white dark:bg-gray-800 border border-gray-300 dark:border-gray-700 rounded-md shadow-sm "
+            emptyIndicator={
+              <p className="text-center text-lg leading-10 text-gray-600 dark:text-gray-400">
+                Aucun label à détecter
+              </p>
+            }
+            onChange={(selectedOptions: Option[]) => {
+              const labels = labelsToDetect.map(label => {
+                return {
+                  label: label.label,
+                  toDetect: selectedOptions.some(
+                    selectedOption => selectedOption.value === label.label
+                  ),
+                }
+              })
+              console.log("labels", labels)
+              setLabelsToDetect(labels)
+            }}
+          />
+        </CardContent>
+      )}
     </Card>
   )
 }
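
The second hunk hides the label selector whenever the current route is the video-inference page. The same usePathname gating pattern, reduced to a minimal sketch (the wrapper component name is illustrative, not from the repository):

"use client"

import type { ReactNode } from "react"
import { usePathname } from "next/navigation"

// Render children only when the current route is NOT the video-inference page.
export function HideOnVideoInference({ children }: { children: ReactNode }) {
  const pathname = usePathname() // e.g. "/video-inference"

  if (pathname.includes("video-inference")) return null

  return <>{children}</>
}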

components/video-reader.tsx

Lines changed: 19 additions & 16 deletions
@@ -8,21 +8,24 @@ interface IProps {

 export default function VideoReader({ videoRef, canvasRef, videoSrc }: IProps) {
   return (
-    <div className="relative w-full overflow-hidden rounded-lg aspect-video">
-      <video
-        ref={videoRef}
-        className="w-full object-cover"
-        width={640}
-        height={640}
-        controls>
-        {videoSrc && <source src={videoSrc} type="video/mp4" />}
-      </video>
-      <canvas
-        ref={canvasRef}
-        className="absolute top-0 left-0 w-full h-full pointer-events-none"
-        width={640}
-        height={640}
-      />
-    </div>
+    <>
+      <div className="relative w-full overflow-hidden rounded-lg aspect-video">
+        <video
+          ref={videoRef}
+          className="w-full object-cover"
+          width={640}
+          height={640}
+          controls>
+          {videoSrc && <source src={videoSrc} type="video/mp4" />}
+        </video>
+        <canvas
+          ref={canvasRef}
+          className="absolute top-0 left-0 w-full h-full pointer-events-none"
+          width={640}
+          height={640}
+        />
+      </div>
+      <div></div>
+    </>
   )
 }

hooks/use-yolo-tfjs.tsx

Lines changed: 31 additions & 31 deletions
@@ -1,71 +1,71 @@
-import { useModelStore } from "@/lib/store/model-store"
-import { ModelComputerVision, modelList } from "@/models/model-list"
-import { useEffect, useState } from "react"
-import * as tf from "@tensorflow/tfjs"
+import { useModelStore } from "@/lib/store/model-store" // Importation du store de modèle
+import { ModelComputerVision, modelList } from "@/models/model-list" // Importation des modèles de vision par ordinateur
+import { useEffect, useState } from "react" // Importation des hooks React
+import * as tf from "@tensorflow/tfjs" // Importation de TensorFlow.js

 interface IProps {
-  ready: boolean
+  ready: boolean // Propriété indiquant si le modèle est prêt
 }

 export type YoloModel = {
-  net: tf.GraphModel<string | tf.io.IOHandler | null>
-  inputShape: number[]
+  net: tf.GraphModel<string | tf.io.IOHandler | null> // Le modèle de réseau de neurones
+  inputShape: number[] // La forme de l'entrée du modèle
 }

 export default function useYolodisTfjs({ ready }: IProps) {
-  const { modelName } = useModelStore()
+  const { modelName } = useModelStore() // Récupération du nom du modèle depuis le store
   const [model, setModel] = useState<YoloModel>({
     net: null,
     inputShape: [1, 0, 0, 3],
-  }) // init model & input shape
-  const [loadModel, setLoadModel] = useState<boolean>(false)
-  const [disposeDetect, setDisposeDetect] = useState<boolean>(false)
-  const [percentLoaded, setPercentLoaded] = useState<number>(0)
+  }) // Initialisation du modèle et de la forme de l'entrée
+  const [loadModel, setLoadModel] = useState<boolean>(false) // État pour indiquer si le modèle est en cours de chargement
+  const [disposeDetect, setDisposeDetect] = useState<boolean>(false) // État pour indiquer si le modèle doit être supprimé
+  const [percentLoaded, setPercentLoaded] = useState<number>(0) // État pour indiquer le pourcentage de chargement du modèle

-  const modelDef = modelList.find(model => model.title === modelName)
+  const modelDef = modelList.find(model => model.title === modelName) // Recherche de la définition du modèle dans la liste des modèles

   async function fetchModel() {
     if (modelName === ModelComputerVision.DETECTION) {
-      setLoadModel(true)
+      setLoadModel(true) // Début du chargement du modèle
       const yolov8 = await tf.loadGraphModel(modelDef.url, {
         onProgress: fractions => {
-          setPercentLoaded(fractions * 100)
+          setPercentLoaded(fractions * 100) // Mise à jour du pourcentage de chargement
         },
-      }) // load model
-      // warming up model
-      const dummyInput = tf.ones(yolov8.inputs[0].shape)
-      const warmupResults = yolov8.execute(dummyInput)
+      }) // Chargement du modèle
+      // Préparation du modèle
+      const dummyInput = tf.ones(yolov8.inputs[0].shape) // Création d'une entrée factice
+      const warmupResults = yolov8.execute(dummyInput) // Exécution du modèle avec l'entrée factice
       setModel({
         net: yolov8,
         inputShape: yolov8.inputs[0].shape,
-      }) // set model & input shape
-      tf.dispose([warmupResults, dummyInput]) // cleanup memory
-      setLoadModel(false)
+      }) // Mise à jour du modèle et de la forme de l'entrée
+      tf.dispose([warmupResults, dummyInput]) // Nettoyage de la mémoire
+      setLoadModel(false) // Fin du chargement du modèle
     }
   }

   useEffect(() => {
     if (ready) {
-      fetchModel()
+      fetchModel() // Chargement du modèle si prêt
     }

     if (modelName === ModelComputerVision.EMPTY) {
       setModel({
         net: null,
         inputShape: [1, 0, 0, 3],
-      })
-      model.net?.dispose()
+      }) // Réinitialisation du modèle
+      model.net?.dispose() // Suppression du modèle existant
     }
-  }, [ready, modelName])
+  }, [ready, modelName]) // Dépendances du hook useEffect

   function disposeModel() {
-    disposeDetect && tf.disposeVariables()
-    tf.dispose()
+    disposeDetect && tf.disposeVariables() // Suppression des variables TensorFlow si nécessaire
+    tf.dispose() // Suppression de TensorFlow
   }

   useEffect(() => {
-    disposeModel()
-  }, [disposeDetect])
+    disposeModel() // Suppression du modèle si disposeDetect change
+  }, [disposeDetect]) // Dépendance du hook useEffect

-  return { model, loadModel, percentLoaded, setDisposeDetect }
+  return { model, loadModel, percentLoaded, setDisposeDetect } // Retourne les états et fonctions nécessaires
 }
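
For context, this hook is consumed in VideoInference.tsx (see above) together with a TensorFlow.js backend hook that provides the ready flag. A minimal, illustrative usage sketch based only on the return values shown in this diff; the component name and the source of ready are assumptions, not repository code:

"use client"

import useYolodisTfjs from "@/hooks/use-yolo-tfjs"

// `ready` would normally come from a backend hook such as
// useTfjsBackendWeb({ backend: "webgl" }), as seen in VideoInference.tsx.
export function ModelLoaderStatus({ ready }: { ready: boolean }) {
  const { model, loadModel, percentLoaded } = useYolodisTfjs({ ready })

  if (loadModel) return <p>Loading model… {Math.round(percentLoaded)} %</p>
  if (!model.net) return <p>No detection model selected</p>

  return <p>Model ready, input shape: {model.inputShape.join(" x ")}</p>
}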

0 commit comments
