diff --git a/AutresModeles/Reseau_GRU_End_To_End_MN_Layers.ipynb b/AutresModeles/Reseau_GRU_End_To_End_MN_Layers.ipynb
new file mode 100644
index 0000000..3bcf7d6
--- /dev/null
+++ b/AutresModeles/Reseau_GRU_End_To_End_MN_Layers.ipynb
@@ -0,0 +1,608 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "Reseau_GRU_End_to_End_MN_Layers.ipynb",
+ "provenance": [],
+ "authorship_tag": "ABX9TyMJS/GlMKMK/Nzpl7rLSMOd",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ubCeIvtF6R4W"
+ },
+ "source": [
+        "In this notebook we build a memory-augmented neural network model of the **End-To-End Memory Network** type.\n",
+        "The associated research paper is available [here](https://arxiv.org/pdf/1503.08895)."
+ ]
+ },
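+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "As a reminder of the paper's memory read step: with memory vectors $m_i$, output vectors $c_i$ and a query state $u$, one hop computes\n",
+        "\n",
+        "$$p_i = \\mathrm{softmax}(u^\\top m_i), \\qquad o = \\sum_i p_i \\, c_i,$$\n",
+        "\n",
+        "and the answer is built from $o + u$. In the paper the encoders are embedding matrices; in this notebook GRU layers play that role for time-series windows."
+      ]
+    },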
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "RRhtHsNn5fc3"
+ },
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow import keras\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XFeah3y_6kif"
+ },
+ "source": [
+        "# Creating the time series and the training dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "vJfSLtub6sdc"
+ },
+ "source": [
+        "# Function to plot a time series\n",
+        "def affiche_serie(temps, serie, format=\"-\", debut=0, fin=None, label=None):\n",
+        "    plt.plot(temps[debut:fin], serie[debut:fin], format, label=label)\n",
+        "    plt.xlabel(\"Time\")\n",
+        "    plt.ylabel(\"Value\")\n",
+        "    if label:\n",
+        "        plt.legend(fontsize=14)\n",
+        "    plt.grid(True)\n",
+        "\n",
+        "# Function to create a trend\n",
+        "def tendance(temps, pente=0):\n",
+        "    return pente * temps\n",
+        "\n",
+        "# Function to create a pattern\n",
+        "def motif_periodique(instants):\n",
+        "    return (np.where(instants < 0.4,               # If the instants are < 0.4,\n",
+        "                     np.cos(instants * 2 * np.pi), # return cos(2*pi*t),\n",
+        "                     1 / np.exp(3 * instants)))    # otherwise return exp(-3t)\n",
+        "\n",
+        "# Function to create a seasonality with a pattern\n",
+        "def saisonnalite(temps, periode, amplitude=1, phase=0):\n",
+        "    \"\"\"Repeats the pattern over each period\"\"\"\n",
+        "    instants = ((temps + phase) % periode) / periode  # Maps temps = [0 1 2 ... 1460] => instants = [0.0 ... 1.0]\n",
+        "    return amplitude * motif_periodique(instants)\n",
+        "\n",
+        "# Function generating Gaussian noise N(0,1)\n",
+        "def bruit_blanc(temps, niveau_bruit=1, graine=None):\n",
+        "    rnd = np.random.RandomState(graine)\n",
+        "    return rnd.randn(len(temps)) * niveau_bruit\n",
+        "\n",
+        "# Builds ((X_memory, X_query), Y) examples: nbr_sequences windows to memorize,\n",
+        "# one query window, and the value following the query window as the target.\n",
+        "# Note: buffer_melange is kept in the signature but no shuffling is applied,\n",
+        "# so the chronological order of the examples is preserved.\n",
+        "def prepare_dataset_XY(serie, taille_fenetre, batch_size, buffer_melange, nbr_sequences):\n",
+        "    dataset = tf.data.Dataset.from_tensor_slices(serie)\n",
+        "    dataset = dataset.window(taille_fenetre+1, shift=1, drop_remainder=True)\n",
+        "    dataset = dataset.flat_map(lambda x: x.batch(taille_fenetre + 1))\n",
+        "    dataset = dataset.window(nbr_sequences+1, shift=1, drop_remainder=True)\n",
+        "    dataset = dataset.flat_map(lambda x: x.batch(nbr_sequences+1, drop_remainder=True))\n",
+        "    dataset = dataset.map(lambda x: [(tf.slice(x, [0,0], [nbr_sequences,taille_fenetre]),                      # (nbr_sequences, taille_fenetre)\n",
+        "                                      tf.squeeze(tf.slice(x, [nbr_sequences,0], [1,taille_fenetre]), axis=0)), # (taille_fenetre,)\n",
+        "                                     tf.squeeze(tf.slice(x, [nbr_sequences,taille_fenetre], [1,1]), axis=0)])  # (1,)\n",
+        "    dataset = dataset.batch(batch_size, drop_remainder=True)\n",
+        "    return dataset\n",
+        "\n",
+        "# Creation of the time series\n",
+        "temps = np.arange(4 * 365)  # temps = [0 1 2 .... 4*365] = [0 1 2 .... 1460]\n",
+        "amplitude = 40              # Amplitude of the seasonality\n",
+        "niveau_bruit = 5            # Noise level\n",
+        "offset = 10                 # Offset of the series\n",
+        "\n",
+        "serie = offset + tendance(temps, 0.1) + saisonnalite(temps, periode=365, amplitude=amplitude) + bruit_blanc(temps, niveau_bruit, graine=40)\n",
+        "\n",
+        "temps_separation = 1000\n",
+        "\n",
+        "# Extraction of the training times and data\n",
+        "temps_entrainement = temps[:temps_separation]\n",
+        "x_entrainement = serie[:temps_separation]\n",
+        "\n",
+        "# Extraction of the validation times and data\n",
+        "temps_validation = temps[temps_separation:]\n",
+        "x_validation = serie[temps_separation:]\n",
+        "\n",
+        "# Characteristics of the dataset we want to create\n",
+        "taille_fenetre = 20\n",
+        "batch_size = 32\n",
+        "buffer_melange = 1000\n",
+        "Nbr_Sequences = 30\n",
+        "\n",
+        "# Creation of the X,Y training dataset\n",
+        "dataset = prepare_dataset_XY(x_entrainement, taille_fenetre, batch_size, buffer_melange, Nbr_Sequences)\n",
+        "\n",
+        "# Creation of the X,Y validation dataset\n",
+        "dataset_Val = prepare_dataset_XY(x_validation, taille_fenetre, batch_size, buffer_melange, Nbr_Sequences)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
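+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A quick sanity check (an added sketch, not part of the original pipeline): each batch should pair a `(batch_size, Nbr_Sequences, taille_fenetre)` memory tensor and a `(batch_size, taille_fenetre)` query window with a `(batch_size, 1)` target."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {},
+      "source": [
+        "# Inspect the shapes of one batch produced by prepare_dataset_XY\n",
+        "for (x_memoire, x_requete), y_cible in dataset.take(1):\n",
+        "    print(\"memory :\", x_memoire.shape)  # (32, 30, 20)\n",
+        "    print(\"query  :\", x_requete.shape)  # (32, 20)\n",
+        "    print(\"target :\", y_cible.shape)    # (32, 1)"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },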
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "OyndQsxw0wue"
+ },
+ "source": [
+        "# Compute the mean and standard deviation of the series\n",
+        "mean = tf.math.reduce_mean(serie)\n",
+        "std = tf.math.reduce_std(serie)\n",
+        "\n",
+        "# Normalize the data\n",
+        "Serie_Normalisee = (serie-mean)/std\n",
+        "min = tf.math.reduce_min(serie)  # (unused below; note this shadows the built-in min)\n",
+        "max = tf.math.reduce_max(serie)  # (unused below; note this shadows the built-in max)\n",
+        "\n",
+        "# Creation of the training and test data\n",
+        "x_entrainement_norm = Serie_Normalisee[:temps_separation]\n",
+        "x_validation_norm = Serie_Normalisee[temps_separation:]\n",
+        "\n",
+        "# Creation of the X,Y training dataset\n",
+        "dataset_norm = prepare_dataset_XY(x_entrainement_norm, taille_fenetre, batch_size, buffer_melange, Nbr_Sequences)\n",
+        "\n",
+        "# Creation of the X,Y validation dataset\n",
+        "dataset_Val_norm = prepare_dataset_XY(x_validation_norm, taille_fenetre, batch_size, buffer_melange, Nbr_Sequences)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
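+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A quick check (added for illustration): after standardization the series should have a mean close to 0 and a standard deviation close to 1."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {},
+      "source": [
+        "# Verify the standardization\n",
+        "print(\"mean :\", tf.math.reduce_mean(Serie_Normalisee).numpy())\n",
+        "print(\"std  :\", tf.math.reduce_std(Serie_Normalisee).numpy())"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },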
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2Yt-EgZ3sgPY"
+ },
+ "source": [
+        "# Building the multi-layer End-To-End Memory Network model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "dyrcfKcgCsZ7"
+ },
+ "source": [
+        "**1. Building the network and adapting the input and output formats**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "zeJyix8HK7Kt"
+ },
+ "source": [
+        "The diagram of a basic layer stays the same:\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "1OZkfsmnBNHY"
+ },
+ "source": [
+        "*(diagram: a single End-to-End Memory Network layer)*"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "7RfslQ39wBCs"
+ },
+ "source": [
+        "But we add the ability to stack several layers, following the principle below (the corresponding update equations are sketched after the diagram):"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QwedvNDlyH1U"
+ },
+ "source": [
+        "*(diagram: stacked memory layers, one per hop)*"
+ ]
+ },
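+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Each additional hop $k$ re-reads the memory with the updated state, following the layer-wise weight tying of the paper, where $H$ is a learned linear map (implemented below by `dense_H`):\n",
+        "\n",
+        "$$p^{k} = \\mathrm{softmax}(M u^{k}), \\qquad o^{k} = \\sum_i p^{k}_i \\, c_i, \\qquad u^{k+1} = H u^{k} + o^{k}$$"
+      ]
+    },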
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "Hhk0kmPSgqva"
+ },
+ "source": [
+        "# Definition of the model's layer:\n",
+        "# End-to-End Memory Network, where each input window is\n",
+        "# encoded by the last hidden state of a GRU layer\n",
+        "\n",
+        "from tensorflow.keras import backend as K\n",
+        "\n",
+        "class Couche_End_to_End_MN_Layers(tf.keras.layers.Layer):\n",
+        "    # Initialization of the layer\n",
+        "    # dim_GRU : dimension of the GRU vectors\n",
+        "    # nbr_hop : number of memory hops (stacked layers)\n",
+        "    # regul   : L2 regularization factor for the GRU kernels\n",
+        "    def __init__(self, dim_GRU, nbr_hop, regul=0.0):\n",
+        "        self.dim_GRU = dim_GRU\n",
+        "        self.nbr_hop = nbr_hop\n",
+        "        self.regul = regul\n",
+        "        super().__init__()  # Call the __init__() of the Layer class\n",
+        "\n",
+        "    def build(self, input_shape):\n",
+        "        # GRU layers used to encode the input sequences\n",
+        "        self.couche_GRU_A = tf.keras.layers.GRU(self.dim_GRU, kernel_regularizer=tf.keras.regularizers.l2(self.regul))\n",
+        "        self.couche_GRU_B = tf.keras.layers.GRU(self.dim_GRU, kernel_regularizer=tf.keras.regularizers.l2(self.regul))\n",
+        "        self.couche_GRU_C = tf.keras.layers.GRU(self.dim_GRU, kernel_regularizer=tf.keras.regularizers.l2(self.regul))\n",
+        "        self.dense_H = tf.keras.layers.Dense(self.dim_GRU)\n",
+        "\n",
+        "        # Attention weights (note: not actually read in call(), which recomputes p at each hop)\n",
+        "        self.p = self.add_weight(shape=(input_shape[1],1), initializer=\"zeros\", name=\"p\")\n",
+        "        super().build(input_shape)  # Call the build() method\n",
+        "\n",
+        "    # Logic of the memory layer\n",
+        "    # Arguments : x : (batch_size, Nbr_Sequences, taille_fenetre) sequences to memorize\n",
+        "    #             y : (batch_size, taille_fenetre) query window\n",
+        "    # Example   : batch_size = 32\n",
+        "    #             Nbr_Sequences = 30\n",
+        "    #             taille_fenetre = 20\n",
+        "    #             dim_GRU = 40\n",
+        "    def call(self, x, y):\n",
+        "        # Creation of the memory vectors mi in the tensor M\n",
+        "        M = tf.expand_dims(x, axis=-1)                             # (32,30,20) => (32,30,20,1)\n",
+        "        M = tf.keras.layers.TimeDistributed(self.couche_GRU_A)(M) # (32,30,20,1) => (32,30,40) : 30 time steps, each of shape (32,20,1)\n",
+        "        M = K.tanh(M)\n",
+        "\n",
+        "        # Creation of the output vectors ci in the tensor C\n",
+        "        C = tf.expand_dims(x, axis=-1)                             # (32,30,20) => (32,30,20,1)\n",
+        "        C = tf.keras.layers.TimeDistributed(self.couche_GRU_C)(C) # (32,30,20,1) => (32,30,40) : 30 time steps, each of shape (32,20,1)\n",
+        "        C = K.tanh(C)\n",
+        "\n",
+        "        # Creation of the state vector u\n",
+        "        u = tf.expand_dims(y, axis=-1)  # (32,20) => (32,20,1)\n",
+        "        u = self.couche_GRU_B(u)        # (32,20,1) => (32,40)\n",
+        "        u = tf.expand_dims(u, axis=-1)  # (32,40) => (32,40,1)\n",
+        "        u = K.tanh(u)                   # (32,40,1)\n",
+        "\n",
+        "        for i in range(self.nbr_hop):\n",
+        "            # Computation of the attention weights\n",
+        "            p = tf.matmul(M, u)                          # (32,30,40)x(32,40,1)=(32,30,1)\n",
+        "            p = tf.keras.activations.softmax(p, axis=1)  # (32,30,1)\n",
+        "\n",
+        "            # Computation of the response vector read from memory\n",
+        "            o = tf.multiply(C, p)  # (32,30,40)*(32,30,1) = (32,30,40)\n",
+        "            o = K.sum(o, axis=1)   # (32,40)\n",
+        "            o = K.tanh(o)          # (32,40)\n",
+        "\n",
+        "            # Update of the state vector for the next hop\n",
+        "            u = self.dense_H(tf.squeeze(u, axis=2))  # (32,40)\n",
+        "            u = o + u                                 # (32,40)\n",
+        "            u = tf.expand_dims(u, axis=2)             # (32,40,1)\n",
+        "        return tf.squeeze(u, axis=2)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
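+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A small smoke test of the layer (an added sketch, not part of the original flow): calling it on random tensors with the expected shapes should return a `(batch_size, dim_GRU)` state."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {},
+      "source": [
+        "# Smoke test: the layer should map (32,30,20) and (32,20) inputs to a (32,40) state\n",
+        "couche_test = Couche_End_to_End_MN_Layers(dim_GRU=40, nbr_hop=3)\n",
+        "x_test = tf.random.normal((32, 30, 20))   # (batch_size, Nbr_Sequences, taille_fenetre)\n",
+        "y_test = tf.random.normal((32, 20))       # (batch_size, taille_fenetre)\n",
+        "print(couche_test(x_test, y_test).shape)  # expected: (32, 40)"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },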
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "RTqdYAsF_ici"
+ },
+ "source": [
+        "dim_GRU = 40\n",
+        "nbr_hop = 3\n",
+        "\n",
+        "# Function for the input lambda layer (kept from an earlier version; not used by this model)\n",
+        "def Traitement_Entrees(x):\n",
+        "    return tf.expand_dims(x, axis=-1)\n",
+        "\n",
+        "# Definition of the model inputs\n",
+        "entrees_sequences = tf.keras.layers.Input(shape=(Nbr_Sequences,taille_fenetre), batch_size=batch_size)\n",
+        "entrees_entrainement = tf.keras.layers.Input(shape=(taille_fenetre,), batch_size=batch_size)\n",
+        "\n",
+        "# Encoder\n",
+        "s_encodeur = Couche_End_to_End_MN_Layers(dim_GRU=dim_GRU, nbr_hop=nbr_hop, regul=0.0)(entrees_sequences, entrees_entrainement)\n",
+        "\n",
+        "# Decoder\n",
+        "s_decodeur = tf.keras.layers.Dense(dim_GRU, activation=\"tanh\")(s_encodeur)\n",
+        "s_decodeur = tf.keras.layers.Concatenate()([s_decodeur, s_encodeur])\n",
+        "\n",
+        "# Generator\n",
+        "sortie = tf.keras.layers.Dense(1)(s_decodeur)\n",
+        "\n",
+        "# Construction of the model\n",
+        "model = tf.keras.Model([entrees_sequences,entrees_entrainement], sortie)\n",
+        "\n",
+        "# Save the initial weights so training can be restarted from scratch\n",
+        "model.save_weights(\"model_initial.hdf5\")\n",
+        "model.summary()"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "MUM0-SSXGLIQ"
+ },
+ "source": [
+        "**2. Optimizing the learning rate**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "jejCBhXVuNQ4"
+ },
+ "source": [
+        "# Learning-rate schedule used for the search: grows from 1e-8 by a factor of 10 every 10 epochs\n",
+        "def RegulationTauxApprentissage(periode, taux):\n",
+        "    return 1e-8*10**(periode/10)\n",
+        "\n",
+        "# Optimizer to use\n",
+        "#optimiseur=tf.keras.optimizers.SGD(learning_rate=1e-8, momentum=0.9)\n",
+        "optimiseur=tf.keras.optimizers.Adam()\n",
+        "\n",
+        "# Use the ModelCheckpoint callback to keep the best weights\n",
+        "CheckPoint = tf.keras.callbacks.ModelCheckpoint(\"poids_opti.hdf5\", monitor='loss', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', save_freq='epoch')\n",
+        "\n",
+        "# Compile the model\n",
+        "model.compile(loss=tf.keras.losses.Huber(), optimizer=optimiseur, metrics=\"mae\")\n",
+        "\n",
+        "# Train the model with our custom learning-rate schedule\n",
+        "historique = model.fit(dataset_norm, epochs=100, verbose=1, callbacks=[tf.keras.callbacks.LearningRateScheduler(RegulationTauxApprentissage), CheckPoint])"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "q1_WMNlzu2B4"
+ },
+ "source": [
+        "# Build a vector with the learning-rate value at each epoch\n",
+        "taux = 1e-8*(10**(np.arange(100)/10))\n",
+        "\n",
+        "# Plot the loss as a function of the learning rate\n",
+        "plt.figure(figsize=(10, 6))\n",
+        "plt.semilogx(taux, historique.history[\"loss\"])\n",
+        "plt.axis([taux[0], taux[99], 0, 1])\n",
+        "plt.title(\"Loss as a function of the learning rate\")"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
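+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "One way to read a learning rate off this curve (an illustrative sketch; it simply locates the minimum of the recorded loss) is shown below; a slightly smaller value than the one found is usually a safe choice for training."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {},
+      "source": [
+        "# Learning rate at which the recorded loss is smallest\n",
+        "meilleur_idx = np.argmin(historique.history[\"loss\"])\n",
+        "print(\"Lowest loss at lr =\", taux[meilleur_idx])"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },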
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6XdXh_b0GP_F"
+ },
+ "source": [
+        "**3. Training the model**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "80pEtJ10wIfY"
+ },
+ "source": [
+        "# Load the best weights found during the learning-rate search\n",
+        "model.load_weights(\"poids_opti.hdf5\")"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "16ujUiELwR33"
+ },
+ "source": [
+        "from timeit import default_timer as timer\n",
+        "\n",
+        "# Callback measuring the average step time and the total training time\n",
+        "class TimingCallback(keras.callbacks.Callback):\n",
+        "    def __init__(self, logs={}):\n",
+        "        self.n_steps = 0\n",
+        "        self.t_step = 0\n",
+        "        self.n_batch = 0\n",
+        "        self.total_batch = 0\n",
+        "    def on_epoch_begin(self, epoch, logs={}):\n",
+        "        self.starttime = timer()\n",
+        "    def on_epoch_end(self, epoch, logs={}):\n",
+        "        self.t_step = self.t_step + timer()-self.starttime\n",
+        "        self.n_steps = self.n_steps + 1\n",
+        "        if (self.total_batch == 0):\n",
+        "            self.total_batch = self.n_batch - 1\n",
+        "    def on_train_batch_begin(self, batch, logs=None):\n",
+        "        self.n_batch = self.n_batch + 1\n",
+        "    def GetInfos(self):\n",
+        "        print(self.n_steps)\n",
+        "        print(self.total_batch)\n",
+        "        return [self.t_step/(self.n_steps*self.total_batch), self.t_step, self.total_batch]\n",
+        "\n",
+        "# Callback that stops training when the monitored quantity varies\n",
+        "# by less than a chosen delta over a chosen number of epochs\n",
+        "class StopTrain(keras.callbacks.Callback):\n",
+        "    def __init__(self, delta=0.01, periodes=100, term=\"loss\", logs={}):\n",
+        "        self.n_periodes = 0\n",
+        "        self.periodes = periodes\n",
+        "        self.loss_1 = 100\n",
+        "        self.delta = delta\n",
+        "        self.term = term\n",
+        "    def on_epoch_end(self, epoch, logs={}):\n",
+        "        diff_loss = abs(self.loss_1 - logs[self.term])\n",
+        "        self.loss_1 = logs[self.term]\n",
+        "        if (diff_loss < self.delta):\n",
+        "            self.n_periodes = self.n_periodes + 1\n",
+        "        else:\n",
+        "            self.n_periodes = 0\n",
+        "        if (self.n_periodes == self.periodes):\n",
+        "            print(\"Stopping training...\")\n",
+        "            self.model.stop_training = True\n",
+        "\n",
+        "cb = TimingCallback()\n",
+        "\n",
+        "# Parameters of the learning-rate decay schedule\n",
+        "lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(\n",
+        "    initial_learning_rate=0.001,\n",
+        "    decay_steps=10,\n",
+        "    decay_rate=0.01)\n",
+        "\n",
+        "# Optimizer to use\n",
+        "#optimiseur=tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)\n",
+        "optimiseur=tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n",
+        "\n",
+        "# Use the ModelCheckpoint callback to keep the best weights\n",
+        "CheckPoint = tf.keras.callbacks.ModelCheckpoint(\"poids_train.hdf5\", monitor='loss', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', save_freq='epoch')\n",
+        "\n",
+        "# Compile the model\n",
+        "model.compile(loss=tf.keras.losses.Huber(), optimizer=optimiseur, metrics=\"mae\")\n",
+        "\n",
+        "# Train the model\n",
+        "historique = model.fit(dataset_norm, validation_data=dataset_Val_norm, epochs=500, verbose=1, callbacks=[CheckPoint, cb, StopTrain(delta=1e-6, periodes=20, term=\"val_loss\")])\n",
+        "\n",
+        "# Print some timing information\n",
+        "infos = cb.GetInfos()\n",
+        "print(\"Step time : %.3f\" %infos[0])\n",
+        "print(\"Total time : %.3f\" %infos[1])"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "COP9u4yitvmw"
+ },
+ "source": [
+        "erreur_entrainement = historique.history[\"loss\"]\n",
+        "erreur_validation = historique.history[\"val_loss\"]\n",
+        "\n",
+        "# Plot the loss as a function of the epoch\n",
+        "plt.figure(figsize=(10, 6))\n",
+        "plt.plot(np.arange(0,len(erreur_entrainement)), erreur_entrainement, label=\"Training loss\")\n",
+        "plt.plot(np.arange(0,len(erreur_entrainement)), erreur_validation, label=\"Validation loss\")\n",
+        "plt.legend()\n",
+        "\n",
+        "plt.title(\"Loss as a function of the epoch\")"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "T6Gq2CkeGR_1"
+ },
+ "source": [
+        "**4. Predictions**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "CEm-s_2lhET4"
+ },
+ "source": [
+        "# Prepend the last windows of the training set so the first validation point can be predicted\n",
+        "x_predictions = np.concatenate((x_entrainement_norm[-taille_fenetre-Nbr_Sequences:], x_validation_norm))\n",
+        "dataPredict = prepare_dataset_XY(x_predictions, taille_fenetre, batch_size, buffer_melange, Nbr_Sequences)\n",
+        "predictions = model.predict(dataPredict)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "Wd2nfDcgG8EO"
+ },
+ "source": [
+        "lag = Nbr_Sequences\n",
+        "\n",
+        "# Gap between the number of predictions and the size of the validation zone\n",
+        "ecart = len(x_validation)+taille_fenetre+Nbr_Sequences - len(predictions)\n",
+        "\n",
+        "# What the prediction did not cover\n",
+        "reste = ecart - taille_fenetre\n",
+        "\n",
+        "# Plot the series and the predictions\n",
+        "plt.figure(figsize=(10, 6))\n",
+        "affiche_serie(temps, serie, label=\"Time series\")\n",
+        "\n",
+        "if reste != 0:\n",
+        "    affiche_serie(temps[temps_separation:-reste+Nbr_Sequences], np.asarray(predictions*std.numpy()+mean.numpy()), label=\"Predictions\")\n",
+        "else:\n",
+        "    # Fixed: the original slice temps[temps_separation+taille_fenetre:taille_fenetre] was empty;\n",
+        "    # align the x-axis with the number of predictions instead\n",
+        "    affiche_serie(temps[temps_separation:temps_separation+len(predictions)], np.asarray(predictions*std.numpy()+mean.numpy()), label=\"Predictions\")\n",
+        "\n",
+        "plt.title('Predictions with the End-to-End Memory Network GRU model')\n",
+        "plt.show()\n",
+        "\n",
+        "# Zoom on the validation interval\n",
+        "plt.figure(figsize=(10, 6))\n",
+        "affiche_serie(temps[temps_separation:], serie[temps_separation:], label=\"Time series\")\n",
+        "if reste != 0:\n",
+        "    affiche_serie(temps[temps_separation:-reste+Nbr_Sequences], np.asarray(predictions*std.numpy()+mean.numpy()), label=\"Predictions\")\n",
+        "else:\n",
+        "    affiche_serie(temps[temps_separation:temps_separation+len(predictions)], np.asarray(predictions*std.numpy()+mean.numpy()), label=\"Predictions\")\n",
+        "plt.title(\"Predictions with the End-to-End Memory Network GRU model (zoom on the validation interval)\")\n",
+        "plt.show()"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "PDS7BJvZG_e1"
+ },
+ "source": [
+        "# Computation of the mean squared error and the mean absolute error over the validation zone\n",
+        "\n",
+        "mae = tf.keras.metrics.mean_absolute_error(serie[temps_separation+taille_fenetre:-reste+Nbr_Sequences], np.asarray(predictions[taille_fenetre:,0]*std.numpy()+mean.numpy())).numpy()\n",
+        "mse = tf.keras.metrics.mean_squared_error(serie[temps_separation+taille_fenetre:-reste+Nbr_Sequences], np.asarray(predictions[taille_fenetre:,0]*std.numpy()+mean.numpy())).numpy()\n",
+        "\n",
+        "print(\"MAE :\", mae)\n",
+        "print(\"MSE :\", mse)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file