In [1]:
import sys
sys.version_info
del sys # remove the name sys from the namespace (the module itself stays loaded)

Displaying spontaneous activity (raw FIFF data)

Preparation

In [2]:
import os.path as op
import numpy as np
import mne
mne.set_log_level('INFO')
data_path=op.join(mne.datasets.sample.data_path(),'MEG','sample') # this line ...
print(data_path)
data_path=mne.datasets.sample.data_path()+'\\MEG\\sample' # ... is equivalent to this one
print(data_path)
C:\Users\akira\mne_data\MNE-sample-data\MEG\sample
C:\Users\akira\mne_data\MNE-sample-data\MEG\sample

You do not have to use op.join; plain string concatenation with + works as well.
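A minimal sketch (not part of the original notebook) of why op.join is still usually preferred: it inserts the path separator for the current OS, whereas + requires you to hard-code it.

import os.path as op
base='C:\\Users\\akira\\mne_data\\MNE-sample-data'
p1=op.join(base,'MEG','sample')   # portable: correct separator on any OS
p2=base+'\\MEG\\sample'           # Windows only: backslashes are hard-coded
print(p1==p2)                     # True on Windows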

In [3]:
raw=mne.io.read_raw_fif(op.join(data_path,'sample_audvis_raw.fif'))
print(raw)
raw.set_eeg_reference() # use the EEG average as the reference potential
events=mne.read_events(op.join(data_path,'sample_audvis_raw-eve.fif'))
events.shape
Opening raw data file C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis_raw.fif...
    Read a total of 3 projection items:
        PCA-v1 (1 x 102)  idle
        PCA-v2 (1 x 102)  idle
        PCA-v3 (1 x 102)  idle
    Range : 25800 ... 192599 =     42.956 ...   320.670 secs
Ready.
Current compensation grade : 0
<Raw  |  sample_audvis_raw.fif, n_channels x n_times : 376 x 166800 (277.7 sec), ~3.7 MB, data not loaded>
Adding average EEG reference projection.
1 projection items deactivated
Out[3]:
(320L, 3L)
In [4]:
raw.plot(block=True); # in Jupyter, omitting the trailing ; makes the figure appear twice
In [5]:
raw.plot(order='selection');
In [6]:
raw.plot_sensors(kind='3d',ch_type='mag',ch_groups='position');

Adding SSP projectors

In [7]:
projs=mne.read_proj(op.join(data_path,'sample_audvis_eog-proj.fif'));
print(projs);
raw.add_proj(projs)
raw.plot_projs_topomap();
    Read a total of 6 projection items:
        EOG-planar-998--0.200-0.200-PCA-01 (1 x 203)  idle
        EOG-planar-998--0.200-0.200-PCA-02 (1 x 203)  idle
        EOG-axial-998--0.200-0.200-PCA-01 (1 x 102)  idle
        EOG-axial-998--0.200-0.200-PCA-02 (1 x 102)  idle
        EOG-eeg-998--0.200-0.200-PCA-01 (1 x 59)  idle
        EOG-eeg-998--0.200-0.200-PCA-02 (1 x 59)  idle
[<Projection  |  EOG-planar-998--0.200-0.200-PCA-01, active : False, n_channels : 203>, <Projection  |  EOG-planar-998--0.200-0.200-PCA-02, active : False, n_channels : 203>, <Projection  |  EOG-axial-998--0.200-0.200-PCA-01, active : False, n_channels : 102>, <Projection  |  EOG-axial-998--0.200-0.200-PCA-02, active : False, n_channels : 102>, <Projection  |  EOG-eeg-998--0.200-0.200-PCA-01, active : False, n_channels : 59>, <Projection  |  EOG-eeg-998--0.200-0.200-PCA-02, active : False, n_channels : 59>]
6 projection items deactivated
In [8]:
raw.plot();

Pressing the Proj button lets you toggle each SSP projector on and off, but this does not work in Jupyter...
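As a workaround, the projectors can be toggled programmatically. A minimal sketch, assuming the raw object loaded above and a recent MNE-Python where del_proj() defaults to removing all projectors:

raw_no_ssp=raw.copy()
raw_no_ssp.del_proj()             # drop every projector from this copy
raw_no_ssp.plot();                # view the data without SSP
raw.copy().apply_proj().plot();   # view it again with the active projectors applied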

In [9]:
raw.plot_psd(tmax=np.inf,average=False);
Effective window size : 3.410 (s)
Effective window size : 3.410 (s)
Effective window size : 3.410 (s)
C:\Users\akira\Anaconda2\lib\site-packages\matplotlib\figure.py:1744: UserWarning: This figure includes Axes that are not compatible with tight_layout, so its results might be incorrect.
  warnings.warn("This figure includes Axes that are not "
In [10]:
layout=mne.channels.read_layout('Vectorview-mag') # other options: 'Vectorview-grad', 'Vectorview-all'
layout.plot();
raw.plot_psd_topo(tmax=30.,fmin=5.,fmax=60.,n_fft=1024,layout=layout);
Effective window size : 1.705 (s)
In [11]:
print(layout.ids)
print(layout.kind)
print(layout.names);
print(layout.pos.shape)
[111, 121, 131, 141, 211, 221, 231, 241, 311, 321, 331, 341, 411, 421, 431, 441, 511, 521, 531, 541, 611, 621, 631, 641, 711, 721, 731, 741, 811, 821, 911, 921, 931, 941, 1011, 1021, 1031, 1041, 1111, 1121, 1131, 1141, 1211, 1221, 1231, 1241, 1311, 1321, 1331, 1341, 1411, 1421, 1431, 1441, 1511, 1521, 1531, 1541, 1611, 1621, 1631, 1641, 1711, 1721, 1731, 1741, 1811, 1821, 1831, 1841, 1911, 1921, 1931, 1941, 2011, 2021, 2031, 2041, 2111, 2121, 2131, 2141, 2211, 2221, 2231, 2241, 2311, 2321, 2331, 2341, 2411, 2421, 2431, 2441, 2511, 2521, 2531, 2541, 2611, 2621, 2631, 2641]
Vectorview-mag
['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0141', 'MEG 0211', 'MEG 0221', 'MEG 0231', 'MEG 0241', 'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 0341', 'MEG 0411', 'MEG 0421', 'MEG 0431', 'MEG 0441', 'MEG 0511', 'MEG 0521', 'MEG 0531', 'MEG 0541', 'MEG 0611', 'MEG 0621', 'MEG 0631', 'MEG 0641', 'MEG 0711', 'MEG 0721', 'MEG 0731', 'MEG 0741', 'MEG 0811', 'MEG 0821', 'MEG 0911', 'MEG 0921', 'MEG 0931', 'MEG 0941', 'MEG 1011', 'MEG 1021', 'MEG 1031', 'MEG 1041', 'MEG 1111', 'MEG 1121', 'MEG 1131', 'MEG 1141', 'MEG 1211', 'MEG 1221', 'MEG 1231', 'MEG 1241', 'MEG 1311', 'MEG 1321', 'MEG 1331', 'MEG 1341', 'MEG 1411', 'MEG 1421', 'MEG 1431', 'MEG 1441', 'MEG 1511', 'MEG 1521', 'MEG 1531', 'MEG 1541', 'MEG 1611', 'MEG 1621', 'MEG 1631', 'MEG 1641', 'MEG 1711', 'MEG 1721', 'MEG 1731', 'MEG 1741', 'MEG 1811', 'MEG 1821', 'MEG 1831', 'MEG 1841', 'MEG 1911', 'MEG 1921', 'MEG 1931', 'MEG 1941', 'MEG 2011', 'MEG 2021', 'MEG 2031', 'MEG 2041', 'MEG 2111', 'MEG 2121', 'MEG 2131', 'MEG 2141', 'MEG 2211', 'MEG 2221', 'MEG 2231', 'MEG 2241', 'MEG 2311', 'MEG 2321', 'MEG 2331', 'MEG 2341', 'MEG 2411', 'MEG 2421', 'MEG 2431', 'MEG 2441', 'MEG 2511', 'MEG 2521', 'MEG 2531', 'MEG 2541', 'MEG 2611', 'MEG 2621', 'MEG 2631', 'MEG 2641']
(102L, 4L)

Displaying evoked responses (before averaging)

In [12]:
%reset
Once deleted, variables cannot be recovered. Proceed (y/[n])? y
In [13]:
import mne
mne.set_log_level('INFO')
data_path=mne.datasets.sample.data_path()+'\\MEG\\sample\\'
In [14]:
raw=mne.io.read_raw_fif(data_path+'sample_audvis_raw.fif')
raw.set_eeg_reference() # use the average as the EEG reference
event_id={'auditory/left':1,'auditory/right':2,'visual/left':3,'visual/right':4,'smiley':5,'button':32}
events=mne.read_events(data_path+'sample_audvis_raw-eve.fif')
epochs=mne.Epochs(raw,events,event_id=event_id,tmin=-0.2,tmax=1.)
Opening raw data file C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis_raw.fif...
    Read a total of 3 projection items:
        PCA-v1 (1 x 102)  idle
        PCA-v2 (1 x 102)  idle
        PCA-v3 (1 x 102)  idle
    Range : 25800 ... 192599 =     42.956 ...   320.670 secs
Ready.
Current compensation grade : 0
Adding average EEG reference projection.
1 projection items deactivated
320 matching events found
Created an SSP operator (subspace dimension = 4)
4 projection items activated
In [15]:
print(epochs.get_data().shape)
Loading data for 320 events and 722 original time points ...
0 bad epochs dropped
(320L, 376L, 722L)
In [16]:
epochs.plot(block=True);
Loading data for 20 events and 722 original time points ...
In [17]:
events=mne.pick_events(events,include=[5,32]); # keep only triggers 5 and 32 (smiley and button)
print(events.T)
mne.viz.plot_events(events);
epochs['smiley'].plot(events=events);
[[ 34089  34649  42598  42938  51437  51803  59320  59677  67401  67782
   75305  75624  84898  85111  92801  93018 101091 101540 108975 109324
  118101 118369 126123 126470 133927 134287 142446 142815 152030 152336
  168672]
 [     0      0      0      0      0      0      0      0      0      0
       0      0      0      0      0      0      0      0      0      0
       0      0      0      0      0      0      0      0      0      0
       0]
 [     5     32      5     32      5     32      5     32      5     32
       5     32      5     32      5     32      5     32      5     32
       5     32      5     32      5     32      5     32      5     32
      32]]
Loading data for 15 events and 722 original time points ...
In [18]:
print(raw.ch_names[278]);
epochs.plot_image(278,cmap='interactive');
MEG 2431
Loading data for 320 events and 722 original time points ...
In [19]:
epochs.plot_topo_image(vmin=-200,vmax=200,title='ERF images');
Loading data for 320 events and 722 original time points ...

Displaying evoked responses (after averaging)

In [20]:
%reset
Once deleted, variables cannot be recovered. Proceed (y/[n])? y
In [21]:
import numpy as np
import matplotlib.pyplot as plt
import mne
mne.set_log_level('INFO')
data_path=mne.datasets.sample.data_path()
In [22]:
fname=data_path+'\\MEG\\sample\\sample_audvis-ave.fif'
evoked=mne.read_evokeds(fname,baseline=(None,0),proj=True);
print(evoked);
Reading C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis-ave.fif ...
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Left Auditory)
        0 CTF compensation matrices available
        nave = 55 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Right Auditory)
        0 CTF compensation matrices available
        nave = 61 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Left visual)
        0 CTF compensation matrices available
        nave = 67 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Right visual)
        0 CTF compensation matrices available
        nave = 58 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
[<Evoked  |  comment : 'Left Auditory', kind : average, time : [-0.199795, 0.499488], n_epochs : 55, n_channels x n_times : 376 x 421, ~4.9 MB>, <Evoked  |  comment : 'Right Auditory', kind : average, time : [-0.199795, 0.499488], n_epochs : 61, n_channels x n_times : 376 x 421, ~4.9 MB>, <Evoked  |  comment : 'Left visual', kind : average, time : [-0.199795, 0.499488], n_epochs : 67, n_channels x n_times : 376 x 421, ~4.9 MB>, <Evoked  |  comment : 'Right visual', kind : average, time : [-0.199795, 0.499488], n_epochs : 58, n_channels x n_times : 376 x 421, ~4.9 MB>]
In [23]:
evoked_l_aud=evoked[0]
evoked_r_aud=evoked[1]
evoked_l_vis=evoked[2]
evoked_r_vis=evoked[3]
fig=evoked_l_aud.plot(exclude=[]) # exclude=() also works
In [24]:
fig.tight_layout() # adjusts subplot spacing so labels do not overlap (not the same as MATLAB's axis tight)
In [25]:
picks=mne.pick_types(evoked_l_aud.info,meg=True,eeg=False,eog=False)
evoked_l_aud.plot(spatial_colors=True,gfp=True,picks=picks); # gfp = global field power
In [26]:
evoked_l_aud.plot_topomap();
In [27]:
times=np.arange(0.05,0.151,0.05)
evoked_r_aud.plot_topomap(times=times,ch_type='mag');
evoked_r_aud.plot_topomap(times=times,ch_type='grad');
In [28]:
evoked_r_aud.plot_topomap(times='peaks',ch_type='mag'); # automatically picks peak latencies for the topomaps
evoked_r_aud.plot_topomap(times='peaks',ch_type='grad');
In [29]:
fig,ax=plt.subplots(1,5); # returns the figure first and the axes second; in MATLAB these are obtained separately
evoked_l_aud.plot_topomap(times=0.1,axes=ax[0],show=False);
evoked_r_aud.plot_topomap(times=0.1,axes=ax[1],show=False);
evoked_l_vis.plot_topomap(times=0.1,axes=ax[2],show=False);
evoked_r_vis.plot_topomap(times=0.1,axes=ax[3],show=True); # the colorbar also consumes an axes (the 5th), so the call with show=True must come last
Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
<ipython-input-29-c5b294febbaa>:2: RuntimeWarning: Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
  evoked_l_aud.plot_topomap(times=0.1,axes=ax[0],show=False);
Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
<ipython-input-29-c5b294febbaa>:3: RuntimeWarning: Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
  evoked_r_aud.plot_topomap(times=0.1,axes=ax[1],show=False);
Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
<ipython-input-29-c5b294febbaa>:4: RuntimeWarning: Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
  evoked_l_vis.plot_topomap(times=0.1,axes=ax[2],show=False);
Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
<ipython-input-29-c5b294febbaa>:5: RuntimeWarning: Colorbar is drawn to the rightmost column of the figure. Be sure to provide enough space for it or turn it off with colorbar=False.
  evoked_r_vis.plot_topomap(times=0.1,axes=ax[3],show=True); # colorbar もaxesを消費 colorbarは最後でないとダメ
In [30]:
ts_args=dict(gfp=True)
topomap_args=dict(sensors=False)
evoked_r_aud.plot_joint(title='right auditory',times=[0.07,0.105],ts_args=ts_args,topomap_args=topomap_args);
In [31]:
conditions=["Left Auditory","Right Auditory","Left visual","Right visual"] # note 'visual', not 'Visual' - the strings must match the comments stored in the file; spaces are deliberately replaced with '/' below
evoked_dict=dict()
for condition in conditions:
    evoked_dict[condition.replace(" ","/")]=mne.read_evokeds(fname,baseline=(None,0),proj=True,condition=condition) 
print(evoked_dict);
Reading C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis-ave.fif ...
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Left Auditory)
        0 CTF compensation matrices available
        nave = 55 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
Reading C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis-ave.fif ...
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Right Auditory)
        0 CTF compensation matrices available
        nave = 61 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
Reading C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis-ave.fif ...
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Left visual)
        0 CTF compensation matrices available
        nave = 67 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
Reading C:\Users\akira\mne_data\MNE-sample-data\MEG\sample\sample_audvis-ave.fif ...
    Read a total of 4 projection items:
        PCA-v1 (1 x 102) active
        PCA-v2 (1 x 102) active
        PCA-v3 (1 x 102) active
        Average EEG reference (1 x 60) active
    Found the data of interest:
        t =    -199.80 ...     499.49 ms (Right visual)
        0 CTF compensation matrices available
        nave = 58 - aspect type = 100
Projections have already been applied. Setting proj attribute to True.
Applying baseline correction (mode: mean)
{'Left/Auditory': <Evoked  |  comment : 'Left Auditory', kind : average, time : [-0.199795, 0.499488], n_epochs : 55, n_channels x n_times : 376 x 421, ~4.9 MB>, 'Left/visual': <Evoked  |  comment : 'Left visual', kind : average, time : [-0.199795, 0.499488], n_epochs : 67, n_channels x n_times : 376 x 421, ~4.9 MB>, 'Right/Auditory': <Evoked  |  comment : 'Right Auditory', kind : average, time : [-0.199795, 0.499488], n_epochs : 61, n_channels x n_times : 376 x 421, ~4.9 MB>, 'Right/visual': <Evoked  |  comment : 'Right visual', kind : average, time : [-0.199795, 0.499488], n_epochs : 58, n_channels x n_times : 376 x 421, ~4.9 MB>}
In [32]:
colors=dict(Left='Crimson',Right='CornFlowerBlue');
linestyles=dict(Auditory='-',visual='--');
pick=evoked_dict['Left/Auditory'].ch_names.index('MEG 1811')
mne.viz.plot_compare_evokeds(evoked_dict,picks=pick,colors=colors,linestyles=linestyles);
In [33]:
evoked_r_aud.plot_image(picks=picks);
In [34]:
title='MNE sample data (condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment);
colors='yellow','green','red','blue'
mne.viz.plot_evoked_topo(evoked,color=colors,title=title % 'Left/Right Auditory/Visual');
In [35]:
%matplotlib inline
subjects_dir=data_path+'\\subjects'
trans_fname=data_path+'\\MEG\\sample\\sample_audvis_raw-trans.fif'
maps=mne.make_field_map(evoked_l_aud,trans=trans_fname,subject='sample',subjects_dir=subjects_dir,n_jobs=1) # n_jobs=2 raises an error here
field_map=evoked_l_aud.plot_field(maps,time=0.1)
Using surface from C:\Users\akira\mne_data\MNE-sample-data\subjects\sample\bem\sample-5120-5120-5120-bem.fif.
Getting helmet for system 306m
Prepare EEG mapping...
Computing dot products for 59 electrodes...
Computing dot products for 2562 surface locations...
Field mapping data ready
    Preparing the mapping matrix...
    [Truncate at 20 missing 0.001]
    The map will have average electrode reference
Prepare MEG mapping...
Computing dot products for 305 coils...
Computing dot products for 304 surface locations...
Field mapping data ready
    Preparing the mapping matrix...
    [Truncate at 209 missing 0.0001]

A 3-D field map should appear here, but nothing was displayed.
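One possible cause (an assumption, not verified here): plot_field draws its figure with Mayavi, which does not render under %matplotlib inline. A sketch of a workaround, forcing the Mayavi window to open explicitly (or run the same lines in a plain Python session outside Jupyter):

from mayavi import mlab               # Mayavi must be installed for 3-D plots
maps=mne.make_field_map(evoked_l_aud,trans=trans_fname,subject='sample',subjects_dir=subjects_dir,n_jobs=1)
field_map=evoked_l_aud.plot_field(maps,time=0.1)
mlab.show()                           # blocks until the 3-D window is closed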