Skip to content

Commit

Permalink
Beginning support for multiple implementations
Browse files Browse the repository at this point in the history
  • Loading branch information
aunetx committed Feb 10, 2019
1 parent 2bc1f60 commit 5c43d65
Show file tree
Hide file tree
Showing 7 changed files with 45 additions and 22 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1 +1,6 @@
# loulou

Dependencies : numpy
Optional dependencies : matplotlib - to show predicted image

* Version 1.0.0 : Initial version; works with another implementation — needs some changes
4 changes: 4 additions & 0 deletions data/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore
8 changes: 3 additions & 5 deletions scripts/rnn.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,15 @@
import numpy as np
import mnist as mnist

# Forward propagation
def feed_forward(X, weights):
    """Propagate input X through the network and return every layer's activations.

    X       : input vector (1-D numpy array).
    weights : iterable of weight matrices, one per layer.
    Returns a list `a` where a[0] is X and a[i] is the ReLU-activated output
    of layer i, so a[-1] is the network's output.
    """
    a = [X]
    for w in weights:
        # a[-1].dot(w)      -> weighted sum of the previous layer
        # np.maximum(., 0)  -> ReLU activation
        # NOTE(review): the diff showed this append duplicated (old + new diff
        # line); exactly one append per layer is the intended behavior.
        a.append(np.maximum(a[-1].dot(w), 0))
    return a

def grads(X, Y, weights, square):
grads = np.empty_like(weights) # grads représente la matrice de correction des poids
grads = np.empty_like(weights) # |grads| : weights corrections matrix
a = feed_forward(X, weights) # on nourrit le réseau et on stocke les valeurs des neurones dans 'a'
if square:
delta = a[-1]*a[-1] - Y*Y # on met l'erreur au carré
Expand Down
41 changes: 24 additions & 17 deletions scripts/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,41 +3,48 @@
import matplotlib.image as image
import json

# Erreur si le nombre d'arguments n'est pas bon.
# Prediction function (forward propagation)
def predire(X, weights):
    """Return all layer activations for input X; the prediction is the last one."""
    activations = [X]
    for layer in weights:
        # Weighted sum of the previous activations, passed through ReLU.
        activations.append(np.maximum(activations[-1].dot(layer), 0))
    return activations

# Json conversion function
def saveJson(pred):
    """Serialize a prediction vector to a JSON string.

    pred : 1-D array-like of per-class scores.
    Returns a JSON object with:
      - 'hot_prediction': the full score vector as a list of floats,
      - 'prediction'    : the index of the highest score (argmax).
    """
    out = {}
    # tolist() converts to plain Python floats; list(pred) would keep NumPy
    # scalars, and np.float32 items are not JSON-serializable.
    out['hot_prediction'] = np.asarray(pred).tolist()
    out['prediction'] = int(np.argmax(pred))
    return json.dumps(out)

# Handling errors for arguments
# The script expects exactly two CLI arguments (sys.argv has 3 entries:
# program name, weights-file path, image path).
# NOTE(review): `assert` is stripped under `python -O`; a plain length check
# would be safer — left as-is here.
try:
    assert len(sys.argv) == 3
except AssertionError:
    # NOTE(review): both a French and an English message appear below — this
    # looks like diff residue (removed line shown next to its replacement).
    print("Erreur ! Veuillez donnez deux arguments, le nom du fichier de poids et de l'image à prédire.")
    print("Error ! Please give two arguments : path to weights file and to image to predict.")
    exit()

# On charge les poids
# Loading weights matrix
filename = sys.argv[1]
try:
weights = np.load(filename)
except FileNotFoundError:
print("Erreur ! Le fichier de poids n'a pas pu être ouvert, vérifiez qu'il existe bien.")
print("Error ! Weights matrix file could not be opened, please check that it exists.")
print("Fichier : ",filename)
exit()

# On charge l'image
# Loading image data
img = sys.argv[2]
try:
img = image.imread(img)
except FileNotFoundError:
print("Erreur ! L'image n'a pas pu être ouverte, vérifiez qu'elle existe bien.")
print("Error ! Image could not be opened, please check that it exists.")
print("Image : ",img)
exit()

# NOTE(review): duplicate of the `predire` defined earlier in this file — in
# the original commit diff this is the removed (pre-move) copy.  If both
# definitions really coexist, the later one silently wins at import time.
def predire(X, weights):
    # Forward propagation: collect every layer's ReLU-activated output.
    a = [X]
    for w in weights:
        a.append(np.maximum(a[-1].dot(w),0))
    return a

# NOTE(review): `out = {}` and the `out[...]`/`out_json` lines below are the
# removed pre-refactor output path shown alongside the new `saveJson` call —
# diff residue; code kept byte-identical here.
out = {}
# Shaping image onto matrix
# reshape(784,4).mean(axis=1) averages each pixel's 4 channels and `1 -`
# inverts intensity — presumably a 28x28 RGBA image flattened so that white
# background maps to 0; TODO confirm expected input format.
topred = 1 - img.reshape(784,4).mean(axis=1)
# Making prediction
# Last element of the activations list is the network's output layer.
prediction = predire(topred, weights)[-1]
out['accuracy'] = list(prediction)
out['prediction'] = int(np.argmax(prediction))
out_json = json.dumps(out)
print(out_json)
# Printing json output
print(saveJson(prediction))
3 changes: 3 additions & 0 deletions todo.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# To-do - loulou

- [ ] Update the code to work with this implementation
6 changes: 6 additions & 0 deletions trains/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Ignore everything in this directory
*.*
# Except those files
!.gitignore
!pre-trained/
!pre-trained/*
Binary file added trains/pre-trained/400-200-100_ln0.03.npy
Binary file not shown.

0 comments on commit 5c43d65

Please sign in to comment.