@@ -2,19 +2,15 @@
 
 import numpy as np
 from scipy.sparse.linalg import eigsh
-from sklearn.cluster import KMeans
 
 from kmeans import k_means
-from laplacian import unnormalized_laplacian, normalized_laplacian
+from laplacian import unnormalized_laplacian
 from read_graph import read_graph
 
 ALGORITHM_1 = 1
 ALGORITHM_2 = 2
 ALGORITHM_3 = 3
 ALGORITHM_4 = 4
-ALGORITHM_5 = 5
-ALGORITHM_6 = 6
-ALGORITHM_7 = 7
 
 
 def spectral_clustering1(graph_src, k_user=None):
@@ -101,40 +97,3 @@ def spectral_clustering2(graph_src, k_user=None):
     print("Cluster sizes: %s" % cluster_sizes)
     return clusters, seed, header
 
-
-def spectral_clustering3(graph_src, k_user=None):
-    print("Reading graph: " + graph_src)
-    start = time()
-    A, D, k, header = read_graph(graph_src)
-    if k_user or k is None:
-        k = k_user
-        header[4] = str(k_user)
-    print("Finished after %.2f seconds" % (time() - start))
-
-    # Calculate laplacian matrix
-    print("Calculating laplacian")
-    start = time()
-    laplacian_matrix = normalized_laplacian(D, A)
-    print("Finished after %.2f seconds" % (time() - start))
-
-    D = None  # Free memory
-    A = None  # Free memory
-
-    # Eigen-decomposition of Laplacian matrix
-    print("Calculating Eigen-decomposition")
-    start = time()
-    e_values, e_vectors = eigsh(laplacian_matrix, k=k, which='SA')
-    print("Finished after %.2f seconds" % (time() - start))
-    laplacian_matrix = None  # Free memory
-    U = np.real(e_vectors)
-    print("Normalizing U")
-    start = time()
-    T = U[:, :k] / np.sum(U[:, :k], axis=0)  # Normalize
-    print("Finished after %.2f seconds" % (time() - start))
-    kmeans = KMeans(n_clusters=k, random_state=0).fit(T[:, :k])
-    cluster_sizes = [0] * k
-    for i in kmeans.labels_:
-        cluster_sizes[i] += 1
-    print("Cluster sizes: %s" % cluster_sizes)
-    print(kmeans.cluster_centers_)
-    return kmeans.labels_
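
Note: the deleted spectral_clustering3 was the normalized-Laplacian variant (eigsh on the normalized Laplacian followed by sklearn KMeans). For reference only, a minimal standalone sketch of that recipe is shown below; it is not part of this commit, the function name is a hypothetical stand-in, it assumes a sparse symmetric adjacency matrix A, and it uses row-wise L2 normalization of the eigenvector embedding (the usual Ng-Jordan-Weiss step) where the deleted code divided by column sums.

import numpy as np
from scipy.sparse import diags, identity
from scipy.sparse.linalg import eigsh
from sklearn.cluster import KMeans


def normalized_spectral_clustering(A, k, seed=0):
    """Sketch only (not the repo's code): cluster nodes of a sparse symmetric adjacency matrix A."""
    degrees = np.asarray(A.sum(axis=1)).ravel()
    # Symmetric normalized Laplacian: L_sym = I - D^{-1/2} A D^{-1/2}
    d_inv_sqrt = diags(1.0 / np.sqrt(np.maximum(degrees, 1e-12)))
    l_sym = identity(A.shape[0], format="csr") - d_inv_sqrt @ A @ d_inv_sqrt
    # k smallest eigenpairs ('SA' = smallest algebraic), as in the deleted code
    _, U = eigsh(l_sym, k=k, which="SA")
    # Row-normalize the spectral embedding, then run k-means on the rows
    T = U / np.maximum(np.linalg.norm(U, axis=1, keepdims=True), 1e-12)
    return KMeans(n_clusters=k, random_state=seed, n_init=10).fit_predict(T)

Usage would be along the lines of labels = normalized_spectral_clustering(A, k=10), where A comes from the project's own graph loader.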