run_isi_lolcat.py
# Train and evaluate a LOLCAT classifier on per-channel ISI (inter-spike-interval)
# features, decoding hippocampal region labels (CA1/CA2/CA3/DG/cortex) for a single
# recording session, or sweeping the number of attention heads across sessions.
import json          # used to load data_path.json below
import numpy as np   # used for result aggregation and .npy saving below
import pandas as pd
import argparse
# Project modules; read_map, read_data, normalize, label_data, isi_analysis,
# heatmap, annotate_heatmap and LOLCARTrainer are expected to be provided by
# these wildcard imports.
from data_preprocess import *
from mlp import *
from trans import *
from dataset import *
import torch
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tqdm import tqdm
from torch_geometric.data import DataLoader
from lolcat import *
def run(model, dimension, session_name):
    # Load the path configuration and the requested session's recording and labels.
    data_path_json = '/scratch/cl7201/hippo_decoding/data_path.json'
    with open(data_path_json, 'r') as file:
        data_path = json.load(file)

    channel_map = read_map(data_path.get('public')['mapping_path'])
    data, label = read_data(data_path, session_name)
    normalized_data = normalize(data)
    print(normalized_data.shape)

    channel_index_label, unique_label = label_data(label, channel_map)
    print(len(channel_index_label))
    print(unique_label)

    # Extract spike features via ISI analysis, channel by channel.
    # TODO: if "processed_data.npy" exists, load it instead of recomputing.
    processed_data = []
    print("Processing data isi...")
    for i in tqdm(range(normalized_data.shape[0])):
        processed_data.append(isi_analysis(normalized_data[i]))

    label_index = {'CA1': 0,
                   'CA2': 1,
                   'CA3': 2,
                   'DG': 3,
                   'cortex': 4}
    labels = ['CA1', 'CA2', 'CA3', 'DG', 'cortex']

    if model == "lolcat":
        # print(np.unique(y, return_counts=True))
        model_save_path = f"lolcat_head{dimension}_{session_name}.pt"
        lolcat_trainer = LOLCARTrainer(processed_data, channel_index_label, label_index,
                                       heads=dimension, model_save_path=model_save_path)
        loss_values = lolcat_trainer.train()
        accuracy, cm = lolcat_trainer.evaluate(best_model=True)

        # Plot the confusion matrix as an annotated heatmap and save it to PDF.
        fig, ax = plt.subplots()
        im, cbar = heatmap(cm, labels, labels, ax=ax,
                           cmap="YlGn", cbarlabel="Accuracy")
        texts = annotate_heatmap(im, valfmt="{x:.1f}")
        fig.tight_layout()
        title_name = f"cm_head{dimension}_{session_name}.pdf"
        plt.savefig(title_name)

        return accuracy, loss_values
def run_heads(model):
    dimensions = range(1, 5)
    session_names = ['AD_HF01_1', 'AD_HF02_2', 'AD_HF02_4', 'NN_syn_01', 'NN_syn_02']

    # Collect accuracy and the per-epoch loss curve for every (heads, session) pair.
    all_accuracies = np.zeros((len(dimensions), len(session_names)))
    all_loss_values = np.zeros((len(dimensions), len(session_names), 100))
    for i, dimension in enumerate(dimensions):
        for j, session_name in enumerate(session_names):
            print("Running heads", dimension, "for session", session_name)
            accuracy, loss_values = run(model, dimension, session_name)
            all_accuracies[i, j] = accuracy
            all_loss_values[i, j, :] = loss_values

    # Mean and standard deviation across sessions.
    mean_accuracies = np.mean(all_accuracies, axis=1)
    std_accuracies = np.std(all_accuracies, axis=1)
    mean_loss_values = np.mean(all_loss_values, axis=1)
    std_loss_values = np.std(all_loss_values, axis=1)

    # Save the aggregated results as .npy files.
    np.save('mean_accuracies.npy', mean_accuracies)
    np.save('std_accuracies.npy', std_accuracies)
    np.save('mean_loss_values.npy', mean_loss_values)
    np.save('std_loss_values.npy', std_loss_values)

    # To re-plot from previously saved results, load them instead:
    # mean_accuracies = np.load('mean_accuracies.npy')
    # std_accuracies = np.load('std_accuracies.npy')
    # mean_loss_values = np.load('mean_loss_values.npy')
    # std_loss_values = np.load('std_loss_values.npy')

    # Accuracy vs. number of attention heads.
    plt.figure(figsize=(10, 5))
    plt.errorbar(dimensions, mean_accuracies, yerr=std_accuracies, marker='o',
                 linestyle='-', color='b', label='Mean Accuracy ± Std')
    plt.title('Accuracy vs Heads')
    plt.xlabel('# of Heads')
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.legend()
    plt.savefig('accuracy_vs_heads.pdf')

    # Mean loss curves (± std across sessions) for each head count.
    plt.figure(figsize=(10, 6))
    for i in range(len(dimensions)):
        plt.plot(range(1, 101), mean_loss_values[i], marker='.', linestyle='-',
                 label=f'Heads {dimensions[i]}')
        plt.fill_between(range(1, 101),
                         mean_loss_values[i] - std_loss_values[i],
                         mean_loss_values[i] + std_loss_values[i],
                         alpha=0.2)
    plt.title('Mean Loss Curves Across Different Heads')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig('loss_curves.pdf')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Select model and session name')
    parser.add_argument('--model', type=str, default='lolcat', help='Select model')
    parser.add_argument('--dimension', type=int, default=1,
                        help='Dimensionality of the encoded representation (number of attention heads)')
    parser.add_argument('--session', type=str, default='AD_HF01_1', help='Select session name')
    parser.add_argument('--plot', action='store_true',
                        help='Sweep heads 1-4 across all sessions and plot aggregated accuracy and loss')
    args = parser.parse_args()

    model, dimension, session_name, plot_acc = args.model, args.dimension, args.session, args.plot
    if not plot_acc:
        run(model, dimension, session_name)
    else:
        print("Running heads from 1 to 4")
        run_heads(model)
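Example usage, a minimal sketch assuming the script is run from the repository root and that data_path.json and the listed sessions exist at the paths hard-coded above; the file names below match the np.save calls in run_heads:

# Train and evaluate LOLCAT with 2 attention heads on a single session:
#   python run_isi_lolcat.py --model lolcat --dimension 2 --session AD_HF01_1
#
# Sweep heads 1-4 across all five sessions and write the summary plots:
#   python run_isi_lolcat.py --plot

# Inspect the aggregated results written by run_heads(), e.g. for custom plotting:
import numpy as np
mean_accuracies = np.load('mean_accuracies.npy')    # shape (4,): one value per head setting
mean_loss_values = np.load('mean_loss_values.npy')  # shape (4, 100): per-epoch mean loss per head setting
print(mean_accuracies)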