diff --git "a/S\303\251ries temporelles/Reseau_RNN.ipynb" "b/S\303\251ries temporelles/Reseau_RNN.ipynb"
index 384cc98..d3438ff 100644
--- "a/S\303\251ries temporelles/Reseau_RNN.ipynb"
+++ "b/S\303\251ries temporelles/Reseau_RNN.ipynb"
@@ -1,23 +1,9 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "Reseau_RNN.ipynb",
- "provenance": [],
- "include_colab_link": true
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- }
- },
"cells": [
{
"cell_type": "markdown",
"metadata": {
- "id": "view-in-github",
- "colab_type": "text"
+ "id": "view-in-github"
},
"source": [
""
@@ -35,18 +21,18 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "RRhtHsNn5fc3"
},
+ "outputs": [],
"source": [
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
- ],
- "execution_count": 1,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -59,9 +45,11 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "vJfSLtub6sdc"
},
+ "outputs": [],
"source": [
"# Fonction permettant d'afficher une série temporelle\n",
"def affiche_serie(temps, serie, format=\"-\", debut=0, fin=None, label=None):\n",
@@ -105,9 +93,7 @@
"affiche_serie(temps,serie)\n",
"plt.title('Série temporelle expérimentale')\n",
"plt.show()"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -120,9 +106,11 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "78vEUlpIFMp6"
},
+ "outputs": [],
"source": [
"# Fonction permettant de créer un dataset à partir des données de la série temporelle\n",
"# au format X(X1,X2,...Xn) / Y(Y1,Y2,...,Yn)\n",
@@ -136,9 +124,7 @@
" dataset = dataset.shuffle(buffer_melange).map(lambda x: (x[:-1], x[-1:]))\n",
" dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)\n",
" return dataset"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -160,9 +146,11 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "_14USRg5JvSu"
},
+ "outputs": [],
"source": [
"temps_separation = 1000\n",
"\n",
@@ -173,9 +161,7 @@
"# Exctraction des temps et des données de valiadation\n",
"temps_validation = temps[temps_separation:]\n",
"x_validation = serie[temps_separation:]"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -201,14 +187,16 @@
"id": "C2CDLaYDoDms"
},
"source": [
- " "
+ ""
]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "ziajfOefKvsu"
},
+ "outputs": [],
"source": [
"# Définition des caractéristiques du dataset que l'on souhaite créer\n",
"taille_fenetre = 20\n",
@@ -220,9 +208,7 @@
"\n",
"# Création du dataset X,Y de validation\n",
"dataset_Val = prepare_dataset_XY(x_validation,taille_fenetre,batch_size,buffer_melange)"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -302,20 +288,20 @@
"id": "1OZkfsmnBNHY"
},
"source": [
- " "
+ ""
]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "5zOZ3yVyZmkl"
},
+ "outputs": [],
"source": [
"# Remise à zéro de tous les états générés par Keras\n",
"tf.keras.backend.clear_session()"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -323,14 +309,16 @@
"id": "86TUv-QxEFy9"
},
"source": [
- "Pour insérer une dimension de type `None` au format de l'entrée, on utilise la méthode [expand_dims](https://www.tensorflow.org/api_docs/python/tf/expand_dims) de tensorflow. "
+ "Pour insérer une dimension de type `None` au format de l'entrée, on utilise la méthode [expand_dims](https://www.tensorflow.org/api_docs/python/tf/expand_dims) de tensorflow."
]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "LORqSH0DQKoG"
},
+ "outputs": [],
"source": [
"# Fonction de la couche lambda d'entrée\n",
"def Traitement_Entrees(x):\n",
@@ -357,7 +345,48 @@
"model.save_weights(\"model_initial.hdf5\")\n",
"\n",
"model.summary()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Normalisation des données"
],
+ "metadata": {
+ "id": "zJ0oY-QBxfqL"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Calcul de la moyenne et de l'écart type de la série\n",
+ "mean = tf.math.reduce_mean(np.asarray(serie))\n",
+ "std = tf.math.reduce_std(np.asarray((serie)))\n",
+ "\n",
+ "# Normalisation des données\n",
+ "Serie_Normalisee = (serie-mean)/std\n",
+ "serie_test = (serie-mean)/std"
+ ],
+ "metadata": {
+ "id": "pa6ULOIAyNJ_"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+        "# Extraction des données normalisées d'entrainement et de validation\n",
+ "x_entrainement_norm = Serie_Normalisee[:temps_separation]\n",
+ "x_validation_norm = Serie_Normalisee[temps_separation:]\n",
+ "\n",
+ "dataset_norm = prepare_dataset_XY(x_entrainement_norm, taille_fenetre, batch_size, buffer_melange)\n",
+ "\n",
+ "dataset_Val_norm = prepare_dataset_XY(x_validation_norm, taille_fenetre, batch_size, buffer_melange)"
+ ],
+ "metadata": {
+ "id": "_XCRrtuZyjqs"
+ },
"execution_count": null,
"outputs": []
},
@@ -385,7 +414,7 @@
"id": "J-cR2CjAVg6j"
},
"source": [
- " "
+ ""
]
},
{
@@ -412,27 +441,29 @@
"id": "XYL9uZn4Vtyb"
},
"source": [
- " "
+ ""
]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "dqGYeKqJOlua"
},
+ "outputs": [],
"source": [
"# Remise à zéro des états du modèle\n",
"tf.keras.backend.clear_session()\n",
"model.load_weights(\"model_initial.hdf5\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "9sLSLTvjOzCM"
},
+ "outputs": [],
"source": [
"# Fonction de la couche lambda d'entrée\n",
"def Traitement_Entrees(x):\n",
@@ -455,9 +486,7 @@
"# Construction du modèle\n",
"model = tf.keras.Model(entrees,sortie)\n",
"model.summary()"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -470,16 +499,18 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "2uU0jcwFOlub"
},
+ "outputs": [],
"source": [
"# Définition de la fonction de régulation du taux d'apprentissage\n",
"def RegulationTauxApprentissage(periode, taux):\n",
" return 1e-8*20**(periode/10)\n",
"\n",
"# Définition de l'optimiseur à utiliser\n",
- "optimiseur=tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)\n",
+ "optimiseur=tf.keras.optimizers.SGD(lr=6e-4, momentum=0.9)\n",
"\n",
"# Utilisation de la méthode ModelCheckPoint\n",
"CheckPoint = tf.keras.callbacks.ModelCheckpoint(\"poids.hdf5\", monitor='loss', verbose=1, save_best_only=True, save_weights_only = True, mode='auto', save_freq='epoch')\n",
@@ -489,17 +520,17 @@
"\n",
"# Entraine le modèle en utilisant la méthode ModelCheckpoint pour sauvegarder les meilleurs poids du modèle\n",
"historique = model.fit(dataset_norm, epochs=100,verbose=1, callbacks=[tf.keras.callbacks.LearningRateScheduler(RegulationTauxApprentissage), CheckPoint])"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "9iMyNGyXOlub"
},
+ "outputs": [],
"source": [
- "# Construit un vecteur avec les valeurs du taux d'apprentissage à chaque période \n",
+ "# Construit un vecteur avec les valeurs du taux d'apprentissage à chaque période\n",
"taux = 1e-8*(10**(np.arange(100)/10))\n",
"\n",
"# Affiche l'erreur en fonction du taux d'apprentissage\n",
@@ -507,27 +538,27 @@
"plt.semilogx(taux,historique.history[\"loss\"])\n",
"plt.axis([ 1e-8, 1e-1, 0, 0.2])\n",
"plt.title(\"Evolution de l'erreur en fonction du taux d'apprentissage\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "QAIF0Gy2Oluc"
},
+ "outputs": [],
"source": [
"# Chargement des poids sauvegardés\n",
"model.load_weights(\"poids.hdf5\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "crC29nqwOluc"
},
+ "outputs": [],
"source": [
"from timeit import default_timer as timer\n",
"\n",
@@ -567,15 +598,15 @@
"infos = cb.GetInfos()\n",
"print(\"Step time : %.3f\" %infos[0])\n",
"print(\"Total time : %.3f\" %infos[1])"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "a2g48gUlOlud"
},
+ "outputs": [],
"source": [
"erreur_entrainement = historique.history[\"loss\"]\n",
"erreur_validation = historique.history[\"val_loss\"]\n",
@@ -587,15 +618,15 @@
"plt.legend()\n",
"\n",
"plt.title(\"Evolution de l'erreur en fonction de la période\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "ZZaLzXnpOlud"
},
+ "outputs": [],
"source": [
"erreur_entrainement = historique.history[\"loss\"]\n",
"erreur_validation = historique.history[\"val_loss\"]\n",
@@ -607,9 +638,7 @@
"plt.legend()\n",
"\n",
"plt.title(\"Evolution de l'erreur en fonction de la période\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -622,9 +651,11 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "_h5wZ20yOlud"
},
+ "outputs": [],
"source": [
"taille_fenetre = 20\n",
"\n",
@@ -636,15 +667,15 @@
"for t in temps[temps_separation:-taille_fenetre]:\n",
" X = np.reshape(Serie_Normalisee[t:t+taille_fenetre],(1,taille_fenetre))\n",
" predictions.append(model.predict(X))"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "hLP355JJOlue"
},
+ "outputs": [],
"source": [
"# Affiche la série et les prédictions\n",
"plt.figure(figsize=(10, 6))\n",
@@ -659,26 +690,35 @@
"affiche_serie(temps[temps_separation+taille_fenetre:],np.asarray((predictions*std+mean))[:,0,0],label=\"Prédictions\")\n",
"plt.title(\"Prédictions avec le réseau récurrent (zoom sur l'intervalle de validation)\")\n",
"plt.show()"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "4WkrGXlcOlue"
},
+ "outputs": [],
"source": [
- "# Calcule de l'erreur quadratique moyenne et de l'erreur absolue moyenne \n",
+        "# Calcul de l'erreur quadratique moyenne et de l'erreur absolue moyenne\n",
"\n",
"mae = tf.keras.metrics.mean_absolute_error(serie[temps_separation+taille_fenetre:],np.asarray((predictions*std+mean))[:,0,0]).numpy()\n",
"mse = tf.keras.metrics.mean_squared_error(serie[temps_separation+taille_fenetre:],np.asarray((predictions*std+mean))[:,0,0]).numpy()\n",
"\n",
"print(mae)\n",
"print(mse)"
- ],
- "execution_count": null,
- "outputs": []
+ ]
}
- ]
-}
\ No newline at end of file
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}