
Commit ef7ad80

Merge pull request #179 from ljchang/decomposition

Add decompose method to brain_data

Former-commit-id: b9b97b1
2 parents: a26bf76 + 21c6e17

6 files changed (+145, -22 lines)

MANIFEST.in

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+include requirements.txt

nltools/data/brain_data.py

Lines changed: 26 additions & 1 deletion
@@ -46,7 +46,8 @@
                            glover_hrf,
                            attempt_to_import,
                            concatenate,
-                           _bootstrap_apply_func)
+                           _bootstrap_apply_func,
+                           set_decomposition_algorithm)
 from nltools.cross_validation import set_cv
 from nltools.plotting import (dist_from_hyperplane_plot,
                               scatterplot,
@@ -1426,6 +1427,30 @@ def bootstrap(self, function, n_samples=5000, save_weights=False,
             bootstrapped = Brain_Data(bootstrapped)
         return summarize_bootstrap(bootstrapped, save_weights=save_weights)
 
+    def decompose(self, algorithm='pca', n_components=None, *args, **kwargs):
+        ''' Decompose Brain_Data object
+
+        Args:
+            algorithm: (str) Algorithm to perform decomposition
+                       types=['pca','ica','nnmf','fa']
+            n_components: (int) number of components. If None then retain
+                          as many as possible.
+        Returns:
+            output: a dictionary of decomposition parameters
+        '''
+
+        out = {}
+        out['decomposition_object'] = set_decomposition_algorithm(
+                                            algorithm=algorithm,
+                                            n_components=n_components,
+                                            *args, **kwargs)
+        out['decomposition_object'].fit(self.data.T)
+        out['components'] = self.empty()
+        out['components'].data = out['decomposition_object'].transform(
+                                            self.data.T).T
+        out['weights'] = out['decomposition_object'].components_
+        return out
+
 class Groupby(object):
     def __init__(self, data, mask):
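
For context, a minimal usage sketch of the new method, assuming this commit is installed (the input file name is hypothetical). Because the estimator is fit on self.data.T, voxels act as samples and images as features, so the transformed output comes back as one brain map per component:

    from nltools.data import Brain_Data

    dat = Brain_Data('sub01_betas.nii.gz')  # hypothetical 4D NIfTI input

    # Reduce the images to 5 spatial components with PCA.
    stats = dat.decompose(algorithm='pca', n_components=5)

    stats['components']            # Brain_Data with one image per component
    stats['weights']               # array of shape (n_components, len(dat))
    stats['decomposition_object']  # the fitted sklearn.decomposition.PCA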

nltools/tests/test_data.py

Lines changed: 56 additions & 0 deletions
@@ -239,6 +239,42 @@ def test_brain_data_3mm(tmpdir):
     diff = m2-m1
     assert np.sum(diff.data) == 0
 
+    # Test Bootstrap
+    masked = dat.apply_mask(create_sphere(radius=10, coordinates=[0, 0, 0]))
+    n_samples = 3
+    b = masked.bootstrap('mean', n_samples=n_samples)
+    assert isinstance(b['Z'], Brain_Data)
+    b = masked.bootstrap('std', n_samples=n_samples)
+    assert isinstance(b['Z'], Brain_Data)
+    b = masked.bootstrap('predict', n_samples=n_samples, plot=False)
+    assert isinstance(b['Z'], Brain_Data)
+    b = masked.bootstrap('predict', n_samples=n_samples,
+                         plot=False, cv_dict={'type':'kfolds','n_folds':3})
+    assert isinstance(b['Z'], Brain_Data)
+    b = masked.bootstrap('predict', n_samples=n_samples,
+                         save_weights=True, plot=False)
+    assert len(b['samples'])==n_samples
+
+    # Test decompose
+    n_components = 3
+    stats = dat.decompose(algorithm='pca', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    stats = dat.decompose(algorithm='ica', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    dat.data = dat.data + 2
+    dat.data[dat.data<0] = 0
+    stats = dat.decompose(algorithm='nnmf', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    stats = dat.decompose(algorithm='fa', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
 def test_brain_data_2mm(tmpdir):
     MNI_Template["resolution"] = '2mm'
     sim = Simulator()
@@ -468,6 +504,26 @@ def test_brain_data_2mm(tmpdir):
                          save_weights=True, plot=False)
     assert len(b['samples'])==n_samples
 
+    # Test decompose
+    n_components = 3
+    stats = dat.decompose(algorithm='pca', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    stats = dat.decompose(algorithm='ica', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    dat.data = dat.data + 2
+    dat.data[dat.data<0] = 0
+    stats = dat.decompose(algorithm='nnmf', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
+    stats = dat.decompose(algorithm='fa', n_components=n_components)
+    assert n_components == len(stats['components'])
+    assert stats['weights'].shape == (n_components,len(dat))
+
 def test_adjacency(tmpdir):
     n = 10
     sim = np.random.multivariate_normal([0,0,0,0],[[1, 0.8, 0.1, 0.4],
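
A detail worth flagging in these tests: the data are shifted and clamped before the 'nnmf' case because scikit-learn's NMF requires a nonnegative input matrix. The same precondition on a plain array, as a sketch:

    import numpy as np

    X = np.random.randn(20, 100)  # simulated data containing negative values
    X = X + 2                     # shift the bulk of the distribution above zero
    X[X < 0] = 0                  # clamp the rest; NMF requires X >= 0 everywhere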

nltools/utils.py

Lines changed: 42 additions & 3 deletions
@@ -17,6 +17,7 @@
     'all_same',
     'concatenate',
     '_bootstrap_apply_func',
+    'set_decomposition_algorithm'
 ]
 __author__ = ["Luke Chang"]
 __license__ = "MIT"
@@ -41,7 +42,7 @@ def get_anatomical():
     """
     return nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm.nii.gz'))
 
-def set_algorithm(algorithm, **kwargs):
+def set_algorithm(algorithm, *args, **kwargs):
     """ Setup the algorithm to use in subsequent prediction analyses.
 
     Args:
@@ -90,11 +91,11 @@ def load_class(import_string):
     if algorithm in algs_classify.keys():
         predictor_settings['prediction_type'] = 'classification'
         alg = load_class(algs_classify[algorithm])
-        predictor_settings['predictor'] = alg(**kwargs)
+        predictor_settings['predictor'] = alg(*args, **kwargs)
     elif algorithm in algs_predict:
         predictor_settings['prediction_type'] = 'prediction'
         alg = load_class(algs_predict[algorithm])
-        predictor_settings['predictor'] = alg(**kwargs)
+        predictor_settings['predictor'] = alg(*args, **kwargs)
     elif algorithm == 'lassopcr':
         predictor_settings['prediction_type'] = 'prediction'
         from sklearn.linear_model import Lasso
@@ -121,6 +122,44 @@ def load_class(import_string):
 
     return predictor_settings
 
+def set_decomposition_algorithm(algorithm, n_components=None, *args, **kwargs):
+    """ Setup the algorithm to use in subsequent decomposition analyses.
+
+    Args:
+        algorithm: The decomposition algorithm to use. Either a string or an
+                    (uninitialized) scikit-learn decomposition object.
+                    If a string, must be one of 'pca','ica','nnmf','fa'
+        kwargs: Additional keyword arguments to pass onto the scikit-learn
+                decomposition object.
+
+    Returns:
+        alg: an initialized scikit-learn decomposition object
+
+    """
+
+    # NOTE: function currently located here instead of analysis.py to avoid circular imports
+
+    def load_class(import_string):
+        class_data = import_string.split(".")
+        module_path = '.'.join(class_data[:-1])
+        class_str = class_data[-1]
+        module = importlib.import_module(module_path)
+        return getattr(module, class_str)
+
+    algs = {
+        'pca': 'sklearn.decomposition.PCA',
+        'ica': 'sklearn.decomposition.FastICA',
+        'nnmf': 'sklearn.decomposition.NMF',
+        'fa': 'sklearn.decomposition.FactorAnalysis'
+    }
+
+    if algorithm in algs.keys():
+        alg = load_class(algs[algorithm])
+        alg = alg(n_components, *args, **kwargs)
+    else:
+        raise ValueError("""Invalid decomposition algorithm name.
+            Valid options are 'pca','ica','nnmf','fa'""")
+    return alg
 
 # The following are nipy source code implementations of the hemodynamic response function HRF
 # See the included nipy license file for use permission.
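
A minimal sketch of what the helper does in practice, assuming this commit is installed: the string key is resolved to a scikit-learn class, which is then initialized with n_components plus any forwarded arguments. Passing n_components positionally works because all four mapped classes (PCA, FastICA, NMF, FactorAnalysis) take n_components as their first constructor argument:

    from nltools.utils import set_decomposition_algorithm

    # 'pca' resolves to sklearn.decomposition.PCA; extra kwargs are forwarded.
    pca = set_decomposition_algorithm('pca', n_components=5, whiten=True)
    print(type(pca))  # a sklearn.decomposition PCA instance, not yet fit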

nltools/version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 """Specifies current version of nltools to be used by setup.py and __init__.py
 """
 
-__version__ = '0.3.4'
+__version__ = '0.3.5'

setup.py

Lines changed: 19 additions & 17 deletions
@@ -1,30 +1,32 @@
-# from nltools.version import __version__
 from setuptools import setup, find_packages
 
-__version__ = '0.3.4'
+version = {}
+with open("nltools/version.py") as f:
+    exec(f.read(), version)
+
+with open('requirements.txt') as f:
+    requirements = f.read().splitlines()
 
 extra_setuptools_args = dict(
     tests_require=['pytest']
 )
 
-with open('requirements.txt') as f:
-    requirements = f.read().splitlines()
-
 setup(
-    name='nltools',
-    version=__version__,
-    author='Luke Chang',
-    author_email='[email protected]',
-    url='http://neurolearn.readthedocs.org/en/latest/',
-    install_requires=requirements,
-    extras_require={
+    name = 'nltools',
+    version = version['__version__'],
+    author = 'Luke Chang',
+    author_email = '[email protected]',
+    url = 'http://neurolearn.readthedocs.org/en/latest/',
+    install_requires = requirements,
+    extras_require = {
         'ibrainViewer':['ipywidgets>=5.2.2']
     },
-    packages=find_packages(exclude=['nltools/tests']),
-    package_data={'nltools': ['resources/*']},
-    license='LICENSE.txt',
-    description='A Python package to analyze neuroimaging data',
-    long_description='nltools is a collection of python tools to perform '
+    packages = find_packages(exclude=['nltools/tests']),
+    package_data = {'nltools': ['resources/*']},
+    include_package_data = True,
+    license = 'LICENSE.txt',
+    description = 'A Python package to analyze neuroimaging data',
+    long_description = 'nltools is a collection of python tools to perform '
         'preprocessing, univariate GLMs, and predictive '
         'multivariate modeling of neuroimaging data. It is the '
         'analysis engine powering www.neuro-learn.org.',
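
Two design notes on this change: reading nltools/version.py with exec() lets setup.py pick up the version number without importing nltools itself, which would fail in a fresh environment where the package's dependencies are not yet installed. And the new MANIFEST.in entry ensures requirements.txt ships inside the source distribution, so the open('requirements.txt') call above still works when installing from an sdist; include_package_data = True likewise tells setuptools to bundle the files MANIFEST.in declares.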
