
Commit dfe9064

Merge pull request #340 from cosanlab/clean_code
Clean code
2 parents c5b3168 + 188f636 commit dfe9064

2 files changed: +21 -143 lines changed

nltools/data/brain_data.py

Lines changed: 19 additions & 7 deletions
@@ -399,16 +399,28 @@ def median(self, axis=0):
             raise ValueError('axis must be 0 or 1')
         return out
 
-    def std(self):
-        """ Get standard deviation of each voxel across images. """
+    def std(self, axis=0):
+        ''' Get standard deviation of each voxel or image.
+
+        Args:
+            axis: (int) across images=0 (default), within images=1
+
+        Returns:
+            out: (float/np.array/Brain_Data)
+        '''
 
         out = deepcopy(self)
-        if len(self.shape()) > 1:
-            out.data = np.std(self.data, axis=0)
-            out.X = pd.DataFrame()
-            out.Y = pd.DataFrame()
-        else:
+        if check_brain_data_is_single(self):
             out = np.std(self.data)
+        else:
+            if axis == 0:
+                out.data = np.std(self.data, axis=0)
+                out.X = pd.DataFrame()
+                out.Y = pd.DataFrame()
+            elif axis == 1:
+                out = np.std(self.data, axis=1)
+            else:
+                raise ValueError('axis must be 0 or 1')
         return out
 
     def sum(self):
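
For reference, a minimal usage sketch of the new axis argument, mirroring the median(axis=0) signature visible in the hunk context. The file paths and variable names are hypothetical, not part of this commit:

    from nltools.data import Brain_Data

    # hypothetical multi-image object built from nifti files
    dat = Brain_Data(['sub-01_beta.nii.gz', 'sub-02_beta.nii.gz'])

    voxelwise = dat.std()        # axis=0 (default): Brain_Data of per-voxel SD across images
    imagewise = dat.std(axis=1)  # np.array of per-image SD across voxels
    dat.std(axis=2)              # raises ValueError('axis must be 0 or 1')

A single-image Brain_Data instead returns a float (np.std of all voxels), per the check_brain_data_is_single branch above.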

nltools/datasets.py

Lines changed: 2 additions & 136 deletions
@@ -13,21 +13,16 @@
            'get_collection_image_metadata',
            'download_collection',
            'fetch_emotion_ratings',
-           'fetch_pain',
-           'fetch_localizer']
+           'fetch_pain']
 __author__ = ["Luke Chang"]
 __license__ = "MIT"
 
 import os
 import pandas as pd
-import numpy as np
 from nltools.data import Brain_Data
 from nilearn.datasets.utils import (_get_dataset_dir,
                                     _fetch_file,
-                                    _fetch_files,
-                                    _get_dataset_descr)
-from nilearn._utils.compat import _urllib
-from sklearn.datasets.base import Bunch
+                                    _fetch_files)
 from pynv import Client
 
 # Optional dependencies
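
The net effect of this hunk on the public namespace, as a quick hypothetical check:

    from nltools import datasets

    assert 'fetch_pain' in datasets.__all__
    assert 'fetch_emotion_ratings' in datasets.__all__
    assert 'fetch_localizer' not in datasets.__all__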
@@ -160,132 +155,3 @@ def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
                          verbose=verbose)
     return Brain_Data(data=files, X=metadata)
 
-def fetch_localizer(subject_ids=None, get_anats=False, data_type='raw',
-                    data_dir=None, url=None, resume=True, verbose=1):
-    """ Download and load Brainomics Localizer dataset (94 subjects).
-
-    "The Functional Localizer is a simple and fast acquisition
-    procedure based on a 5-minute functional magnetic resonance
-    imaging (fMRI) sequence that can be run as easily and as
-    systematically as an anatomical scan. This protocol captures the
-    cerebral bases of auditory and visual perception, motor actions,
-    reading, language comprehension and mental calculation at an
-    individual level. Individual functional maps are reliable and
-    quite precise. The procedure is described in more detail on the
-    Functional Localizer page." This code is modified from
-    `fetch_localizer_contrasts` from nilearn.datasets.funcs.py.
-    (see http://brainomics.cea.fr/localizer/)
-    "Scientific results obtained using this dataset are described in
-    Pinel et al., 2007" [1]
-
-    Notes:
-        It is better to perform several small requests than a big one because the
-        Brainomics server has no cache (can lead to timeout while the archive
-        is generated on the remote server). For example, download
-        n_subjects=np.array(1,10), then n_subjects=np.array(10,20), etc.
-
-    Args:
-        subject_ids: (list) List of Subject IDs (e.g., ['S01','S02']).
-            If None is given, all 94 subjects are used.
-        get_anats: (boolean) Whether individual structural images should be
-            fetched or not.
-        data_type: (string) type of data to download.
-            Valid values are ['raw','preprocessed']
-        data_dir: (string, optional) Path of the data directory.
-            Used to force data storage in a specified location.
-        url: (string, optional) Override download URL.
-            Used for test only (or if you setup a mirror of the data).
-        resume: (bool) Whether to resume download of a partly-downloaded file.
-        verbose: (int) Verbosity level (0 means no message).
-
-    Returns:
-        data: (Bunch)
-        Dictionary-like object, the interest attributes are:
-        - 'functional': string list
-            Paths to nifti contrast maps
-        - 'structural': string
-            Path to nifti files corresponding to the subjects structural images
-
-    References
-    ----------
-    Pinel, Philippe, et al.
-    "Fast reproducible identification and large-scale databasing of
-    individual functional cognitive networks."
-    BMC neuroscience 8.1 (2007): 91.
-
-    """
-
-    if subject_ids is None:
-        subject_ids = ['S%02d' % x for x in np.arange(1,95)]
-    elif not isinstance(subject_ids, (list)):
-        raise ValueError("subject_ids must be a list of subject ids (e.g., ['S01','S02'])")
-
-    if data_type == 'raw':
-        dat_type = "raw fMRI"
-        dat_label = "raw bold"
-        anat_type = "raw T1"
-        anat_label = "raw anatomy"
-    elif data_type == 'preprocessed':
-        dat_type = "preprocessed fMRI"
-        dat_label = "bold"
-        anat_type = "normalized T1"
-        anat_label = "anatomy"
-    else:
-        raise ValueError("Only ['raw','preprocessed'] data_types are currently supported.")
-
-    root_url = "http://brainomics.cea.fr/localizer/"
-    dataset_name = 'brainomics_localizer'
-    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
-                                verbose=verbose)
-    fdescr = _get_dataset_descr(dataset_name)
-    opts = {'uncompress': True}
-
-    bold_files = []; anat_files = [];
-    for subject_id in subject_ids:
-        base_query = ("Any X,XT,XL,XI,XF,XD WHERE X is Scan, X type XT, "
-                      "X concerns S, "
-                      "X label XL, X identifier XI, "
-                      "X format XF, X description XD, "
-                      'S identifier = "%s", ' % (subject_id, ) +
-                      'X type IN(%(types)s), X label "%(label)s"')
-
-        file_tarball_url = "%sbrainomics_data.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": "\"%s\"" % dat_type, "label": dat_label}, safe=',()'))
-        name_aux = str.replace(str.join('_', [dat_type, dat_label]), ' ', '_')
-        file_path = os.path.join("brainomics_data", subject_id, "%s.nii.gz" % name_aux)
-        bold_files.append(_fetch_files(data_dir, [(file_path, file_tarball_url, opts)], verbose=verbose))
-
-        if get_anats:
-            file_tarball_url = "%sbrainomics_data_anats.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": "\"%s\"" % anat_type, "label": anat_label}, safe=',()'))
-            if data_type == 'raw':
-                anat_name_aux = "raw_T1_raw_anat_defaced.nii.gz"
-            elif data_type == 'preprocessed':
-                anat_name_aux = "normalized_T1_anat_defaced.nii.gz"
-            file_path = os.path.join("brainomics_data", subject_id, anat_name_aux)
-            anat_files.append(_fetch_files(data_dir, [(file_path, file_tarball_url, opts)], verbose=verbose))
-
-    # Fetch subject characteristics (separated in two files)
-    if url is None:
-        url_csv = ("%sdataset/cubicwebexport.csv?rql=%s&vid=csvexport"
-                   % (root_url, _urllib.parse.quote("Any X WHERE X is Subject")))
-        url_csv2 = ("%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport"
-                    % (root_url,
-                       _urllib.parse.quote("Any X,XI,XD WHERE X is QuestionnaireRun, "
-                                           "X identifier XI, X datetime "
-                                           "XD", safe=',')))
-    else:
-        url_csv = "%s/cubicwebexport.csv" % url
-        url_csv2 = "%s/cubicwebexport2.csv" % url
-
-    filenames = [("cubicwebexport.csv", url_csv, {}), ("cubicwebexport2.csv", url_csv2, {})]
-    csv_files = _fetch_files(data_dir, filenames, verbose=verbose)
-    metadata = pd.merge(pd.read_csv(csv_files[0], sep=';'), pd.read_csv(csv_files[1], sep=';'), on='"subject_id"')
-    metadata.to_csv(os.path.join(data_dir, 'metadata.csv'))
-    for x in ['cubicwebexport.csv', 'cubicwebexport2.csv']:
-        os.remove(os.path.join(data_dir, x))
-
-    if not get_anats:
-        anat_files = None
-
-    return Bunch(functional=bold_files,
-                 structural=anat_files,
-                 ext_vars=metadata,
-                 description=fdescr)
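
With fetch_localizer gone, the remaining public fetchers are fetch_pain and fetch_emotion_ratings. A short sketch; fetch_emotion_ratings' signature appears in the hunk header above, and fetch_pain is assumed to take the same arguments:

    from nltools.datasets import fetch_pain, fetch_emotion_ratings

    pain = fetch_pain(data_dir=None, resume=True, verbose=1)  # assumed signature
    emo = fetch_emotion_ratings(data_dir=None, resume=True, verbose=1)
    # fetch_emotion_ratings returns Brain_Data(data=files, X=metadata)
    # per the context lines above; fetch_pain presumably does the same

Users who still need the Brainomics Localizer data can fall back on nilearn's fetch_localizer_contrasts, from which the removed function was adapted (the contrast name and subject count below are illustrative):

    from nilearn.datasets import fetch_localizer_contrasts

    loc = fetch_localizer_contrasts(['left vs right button press'], n_subjects=2)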
