Tool for pulse-transit
8
dist/client/mne/_fiff/__init__.py
vendored
Normal file
@@ -0,0 +1,8 @@
"""Private module for FIF basic I/O routines."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

# All imports should be done directly to submodules, so we don't import
# anything here or use lazy_loader.
BIN
dist/client/mne/_fiff/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/_digitization.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/compensator.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/constants.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/ctf_comp.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/matrix.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/meas_info.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/open.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/pick.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/proc_history.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/proj.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/reference.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/tag.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/tree.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/utils.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/what.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
dist/client/mne/_fiff/__pycache__/write.cpython-310.pyc
vendored
Normal file
Binary file not shown.
586
dist/client/mne/_fiff/_digitization.py
vendored
Normal file
@@ -0,0 +1,586 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import heapq
from collections import Counter

import numpy as np

from ..utils import Bunch, _check_fname, _validate_type, logger, verbose, warn
from .constants import FIFF, _coord_frame_named
from .tag import read_tag
from .tree import dir_tree_find
from .write import start_and_end_file, write_dig_points

_dig_kind_dict = {
    "cardinal": FIFF.FIFFV_POINT_CARDINAL,
    "hpi": FIFF.FIFFV_POINT_HPI,
    "eeg": FIFF.FIFFV_POINT_EEG,
    "extra": FIFF.FIFFV_POINT_EXTRA,
}
_dig_kind_ints = tuple(sorted(_dig_kind_dict.values()))
_dig_kind_proper = {
    "cardinal": "Cardinal",
    "hpi": "HPI",
    "eeg": "EEG",
    "extra": "Extra",
    "unknown": "Unknown",
}
_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()}
_cardinal_kind_rev = {1: "LPA", 2: "Nasion", 3: "RPA", 4: "Inion"}


def _format_dig_points(dig, enforce_order=False):
    """Format the dig points nicely."""
    if enforce_order and dig is not None:
        # reorder points based on type:
        # Fiducials/HPI, EEG, extra (headshape)
        fids_digpoints = []
        hpi_digpoints = []
        eeg_digpoints = []
        extra_digpoints = []
        head_digpoints = []

        # use a heap to enforce order on FIDS, EEG, Extra
        for idx, digpoint in enumerate(dig):
            ident = digpoint["ident"]
            kind = digpoint["kind"]

            # push onto heap based on 'ident' (for the order) for
            # each of the possible DigPoint 'kind's
            # keep track of 'idx' in case of any clashes in
            # the 'ident' variable, which can occur when
            # user passes in DigMontage + DigMontage
            if kind == FIFF.FIFFV_POINT_CARDINAL:
                heapq.heappush(fids_digpoints, (ident, idx, digpoint))
            elif kind == FIFF.FIFFV_POINT_HPI:
                heapq.heappush(hpi_digpoints, (ident, idx, digpoint))
            elif kind == FIFF.FIFFV_POINT_EEG:
                heapq.heappush(eeg_digpoints, (ident, idx, digpoint))
            elif kind == FIFF.FIFFV_POINT_EXTRA:
                heapq.heappush(extra_digpoints, (ident, idx, digpoint))
            elif kind == FIFF.FIFFV_POINT_HEAD:
                heapq.heappush(head_digpoints, (ident, idx, digpoint))

        # now recreate dig based on sorted order
        fids_digpoints.sort(), hpi_digpoints.sort()
        eeg_digpoints.sort()
        extra_digpoints.sort(), head_digpoints.sort()
        new_dig = []
        for idx, d in enumerate(
            fids_digpoints
            + hpi_digpoints
            + extra_digpoints
            + eeg_digpoints
            + head_digpoints
        ):
            new_dig.append(d[-1])
        dig = new_dig

    return [DigPoint(d) for d in dig] if dig is not None else dig


def _get_dig_eeg(dig):
    return [d for d in dig if d["kind"] == FIFF.FIFFV_POINT_EEG]


def _count_points_by_type(dig):
    """Get the number of points of each type."""
    occurrences = Counter([d["kind"] for d in dig])
    return dict(
        fid=occurrences[FIFF.FIFFV_POINT_CARDINAL],
        hpi=occurrences[FIFF.FIFFV_POINT_HPI],
        eeg=occurrences[FIFF.FIFFV_POINT_EEG],
        extra=occurrences[FIFF.FIFFV_POINT_EXTRA],
    )


_dig_keys = {"kind", "ident", "r", "coord_frame"}


class DigPoint(dict):
    """Container for a digitization point.

    This is a simple subclass of the standard dict type designed to provide
    a readable string representation.

    Parameters
    ----------
    kind : int
        The kind of channel,
        e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.
    r : array, shape (3,)
        3D position in m. and coord_frame.
    ident : int
        Number specifying the identity of the point.
        e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``,
        or 42 if kind is ``FIFFV_POINT_EEG``.
    coord_frame : int
        The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.
    """

    def __repr__(self):  # noqa: D105
        from ..transforms import _coord_frame_name

        if self["kind"] == FIFF.FIFFV_POINT_CARDINAL:
            id_ = _cardinal_kind_rev.get(self["ident"], "Unknown cardinal")
        else:
            id_ = _dig_kind_proper[_dig_kind_rev.get(self["kind"], "unknown")]
            id_ = f"{id_} #{self['ident']}"
        id_ = id_.rjust(10)
        cf = _coord_frame_name(self["coord_frame"])
        x, y, z = self["r"]
        if "voxel" in cf:
            pos = (f"({x:0.1f}, {y:0.1f}, {z:0.1f})").ljust(25)
        else:
            pos = (f"({x * 1e3:0.1f}, {y * 1e3:0.1f}, {z * 1e3:0.1f}) mm").ljust(25)
        return f"<DigPoint | {id_} : {pos} : {cf} frame>"

    # speed up info copy by only deep copying the mutable item
    def __deepcopy__(self, memodict):
        """Make a deepcopy."""
        return DigPoint(
            kind=self["kind"],
            r=self["r"].copy(),
            ident=self["ident"],
            coord_frame=self["coord_frame"],
        )

    def __eq__(self, other):  # noqa: D105
        """Compare two DigPoints.

        Two digpoints are equal if they are the same kind, share the same
        coordinate frame and position.
        """
        my_keys = ["kind", "ident", "coord_frame"]
        if set(self.keys()) != set(other.keys()):
            return False
        elif any(self[_] != other[_] for _ in my_keys):
            return False
        else:
            return np.allclose(self["r"], other["r"])


def _read_dig_fif(fid, meas_info):
    """Read digitizer data from a FIFF file."""
    isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
    dig = None
    if len(isotrak) == 0:
        logger.info("Isotrak not found")
    elif len(isotrak) > 1:
        warn("Multiple Isotrak found")
    else:
        isotrak = isotrak[0]
        coord_frame = FIFF.FIFFV_COORD_HEAD
        dig = []
        for k in range(isotrak["nent"]):
            kind = isotrak["directory"][k].kind
            pos = isotrak["directory"][k].pos
            if kind == FIFF.FIFF_DIG_POINT:
                tag = read_tag(fid, pos)
                dig.append(tag.data)
            elif kind == FIFF.FIFF_MNE_COORD_FRAME:
                tag = read_tag(fid, pos)
                coord_frame = _coord_frame_named.get(int(tag.data.item()))
        for d in dig:
            d["coord_frame"] = coord_frame
    return _format_dig_points(dig)


@verbose
def write_dig(fname, pts, coord_frame=None, *, overwrite=False, verbose=None):
    """Write digitization data to a FIF file.

    Parameters
    ----------
    fname : path-like
        Destination file name.
    pts : iterator of dict
        Iterator through digitizer points. Each point is a dictionary with
        the keys 'kind', 'ident' and 'r'.
    coord_frame : int | str | None
        If all the points have the same coordinate frame, specify the type
        here. Can be None (default) if the points could have varying
        coordinate frames.
    %(overwrite)s

        .. versionadded:: 1.0
    %(verbose)s

        .. versionadded:: 1.0
    """
    from ..transforms import _to_const

    fname = _check_fname(fname, overwrite=overwrite)
    if coord_frame is not None:
        coord_frame = _to_const(coord_frame)
        pts_frames = {pt.get("coord_frame", coord_frame) for pt in pts}
        bad_frames = pts_frames - {coord_frame}
        if len(bad_frames) > 0:
            raise ValueError(
                "Points have coord_frame entries that are incompatible with "
                f"coord_frame={coord_frame}: {tuple(bad_frames)}."
            )

    with start_and_end_file(fname) as fid:
        write_dig_points(fid, pts, block=True, coord_frame=coord_frame)


_cardinal_ident_mapping = {
    FIFF.FIFFV_POINT_NASION: "nasion",
    FIFF.FIFFV_POINT_LPA: "lpa",
    FIFF.FIFFV_POINT_RPA: "rpa",
}


def _ensure_fiducials_head(dig):
    # Ensure that there are all three fiducials in the head coord frame
    fids = dict()
    for d in dig:
        if d["kind"] == FIFF.FIFFV_POINT_CARDINAL:
            name = _cardinal_ident_mapping.get(d["ident"], None)
            if name is not None:
                fids[name] = d
    radius = None
    mults = dict(
        lpa=[-1, 0, 0],
        rpa=[1, 0, 0],
        nasion=[0, 1, 0],
    )
    for ident, name in _cardinal_ident_mapping.items():
        if name not in fids:
            if radius is None:
                radius = [
                    np.linalg.norm(d["r"])
                    for d in dig
                    if d["coord_frame"] == FIFF.FIFFV_COORD_HEAD
                    and not np.isnan(d["r"]).any()
                ]
                if not radius:
                    return  # can't complete, no head points
                radius = np.mean(radius)
            dig.append(
                DigPoint(
                    kind=FIFF.FIFFV_POINT_CARDINAL,
                    ident=ident,
                    r=np.array(mults[name], float) * radius,
                    coord_frame=FIFF.FIFFV_COORD_HEAD,
                )
            )


# XXXX:
# This does something really similar to _read_dig_montage_fif but:
# - does not check coord_frame
# - does not do any operation that implies assumptions with the names
def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True):
    """Obtain coordinate data from a Dig.

    Parameters
    ----------
    dig : list of dicts
        A container of DigPoints to be added to the info['dig'].

    Returns
    -------
    ch_pos : dict
        The container of all relevant channel positions inside dig.
    """
    # Split up the dig points by category
    hsp, hpi, elp = list(), list(), list()
    fids, dig_ch_pos_location = dict(), list()
    dig = [] if dig is None else dig

    for d in dig:
        if d["kind"] == FIFF.FIFFV_POINT_CARDINAL:
            fids[_cardinal_ident_mapping[d["ident"]]] = d["r"]
        elif d["kind"] == FIFF.FIFFV_POINT_HPI:
            hpi.append(d["r"])
            elp.append(d["r"])
        elif d["kind"] == FIFF.FIFFV_POINT_EXTRA:
            hsp.append(d["r"])
        elif d["kind"] == FIFF.FIFFV_POINT_EEG:
            if d["ident"] != 0 or not exclude_ref_channel:
                dig_ch_pos_location.append(d["r"])

    dig_coord_frames = set([d["coord_frame"] for d in dig])
    if len(dig_coord_frames) == 0:
        dig_coord_frames = set([FIFF.FIFFV_COORD_HEAD])
    if len(dig_coord_frames) != 1:
        raise RuntimeError(
            "Only single coordinate frame in dig is supported, "
            f"got {dig_coord_frames}"
        )
    dig_ch_pos_location = np.array(dig_ch_pos_location)
    dig_ch_pos_location.shape = (-1, 3)  # empty will be (0, 3)
    return Bunch(
        nasion=fids.get("nasion", None),
        lpa=fids.get("lpa", None),
        rpa=fids.get("rpa", None),
        hsp=np.array(hsp) if len(hsp) else None,
        hpi=np.array(hpi) if len(hpi) else None,
        elp=np.array(elp) if len(elp) else None,
        dig_ch_pos_location=dig_ch_pos_location,
        coord_frame=dig_coord_frames.pop(),
    )


def _get_fid_coords(dig, raise_error=True):
    fid_coords = Bunch(nasion=None, lpa=None, rpa=None)
    fid_coord_frames = dict()

    for d in dig:
        if d["kind"] == FIFF.FIFFV_POINT_CARDINAL:
            key = _cardinal_ident_mapping[d["ident"]]
            fid_coords[key] = d["r"]
            fid_coord_frames[key] = d["coord_frame"]

    if len(fid_coord_frames) > 0 and raise_error:
        if set(fid_coord_frames.keys()) != set(["nasion", "lpa", "rpa"]):
            raise ValueError(
                f"Some fiducial points are missing (got {fid_coord_frames.keys()})."
            )

        if len(set(fid_coord_frames.values())) > 1:
            raise ValueError(
                "All fiducial points must be in the same coordinate system "
                f"(got {len(fid_coord_frames)})"
            )

    coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None

    return fid_coords, coord_frame


def _coord_frame_const(coord_frame):
    from ..transforms import _str_to_frame

    if not isinstance(coord_frame, str) or coord_frame not in _str_to_frame:
        raise ValueError(
            f"coord_frame must be one of {sorted(_str_to_frame.keys())}, got "
            f"{coord_frame}"
        )
    return _str_to_frame[coord_frame]


def _make_dig_points(
    nasion=None,
    lpa=None,
    rpa=None,
    hpi=None,
    extra_points=None,
    dig_ch_pos=None,
    *,
    coord_frame="head",
    add_missing_fiducials=False,
):
    """Construct digitizer info for the info.

    Parameters
    ----------
    nasion : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the nasion point.
    lpa : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the left auricular point.
    rpa : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the right auricular point.
    hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
        Points designated as head position indicator points.
    extra_points : array-like | numpy.ndarray, shape (n_points, 3)
        Points designed as the headshape points.
    dig_ch_pos : dict
        Dict of EEG channel positions.
    coord_frame : str
        The coordinate frame of the points. Usually this is "unknown"
        for native digitizer space. Defaults to "head".
    add_missing_fiducials : bool
        If True, add fiducials to the dig points if they are not present.
        Requires that coord_frame='head' and that lpa, nasion, and rpa are all
        None.

    Returns
    -------
    dig : list of dicts
        A container of DigPoints to be added to the info['dig'].
    """
    coord_frame = _coord_frame_const(coord_frame)

    dig = []
    if lpa is not None:
        lpa = np.asarray(lpa)
        if lpa.shape != (3,):
            raise ValueError(f"LPA should have the shape (3,) instead of {lpa.shape}")
        dig.append(
            {
                "r": lpa,
                "ident": FIFF.FIFFV_POINT_LPA,
                "kind": FIFF.FIFFV_POINT_CARDINAL,
                "coord_frame": coord_frame,
            }
        )
    if nasion is not None:
        nasion = np.asarray(nasion)
        if nasion.shape != (3,):
            raise ValueError(
                f"Nasion should have the shape (3,) instead of {nasion.shape}"
            )
        dig.append(
            {
                "r": nasion,
                "ident": FIFF.FIFFV_POINT_NASION,
                "kind": FIFF.FIFFV_POINT_CARDINAL,
                "coord_frame": coord_frame,
            }
        )
    if rpa is not None:
        rpa = np.asarray(rpa)
        if rpa.shape != (3,):
            raise ValueError(f"RPA should have the shape (3,) instead of {rpa.shape}")
        dig.append(
            {
                "r": rpa,
                "ident": FIFF.FIFFV_POINT_RPA,
                "kind": FIFF.FIFFV_POINT_CARDINAL,
                "coord_frame": coord_frame,
            }
        )
    if hpi is not None:
        hpi = np.asarray(hpi)
        if hpi.ndim != 2 or hpi.shape[1] != 3:
            raise ValueError(
                f"HPI should have the shape (n_points, 3) instead of {hpi.shape}"
            )
        for idx, point in enumerate(hpi):
            dig.append(
                {
                    "r": point,
                    "ident": idx + 1,
                    "kind": FIFF.FIFFV_POINT_HPI,
                    "coord_frame": coord_frame,
                }
            )
    if extra_points is not None:
        extra_points = np.asarray(extra_points)
        if len(extra_points) and extra_points.shape[1] != 3:
            raise ValueError(
                "Points should have the shape (n_points, 3) instead of "
                f"{extra_points.shape}"
            )
        for idx, point in enumerate(extra_points):
            dig.append(
                {
                    "r": point,
                    "ident": idx + 1,
                    "kind": FIFF.FIFFV_POINT_EXTRA,
                    "coord_frame": coord_frame,
                }
            )
    if dig_ch_pos is not None:
        idents = []
        use_arange = False
        for key, value in dig_ch_pos.items():
            _validate_type(key, str, "dig_ch_pos")
            try:
                idents.append(int(key[-3:]))
            except ValueError:
                use_arange = True
            _validate_type(value, (np.ndarray, list, tuple), "dig_ch_pos")
            value = np.array(value, dtype=float)
            dig_ch_pos[key] = value
            if value.shape != (3,):
                raise RuntimeError(
                    "The position should be a 1D array of 3 floats. "
                    f"Provided shape {value.shape}."
                )
        if use_arange:
            idents = np.arange(1, len(dig_ch_pos) + 1)
        for key, ident in zip(dig_ch_pos, idents):
            dig.append(
                {
                    "r": dig_ch_pos[key],
                    "ident": int(ident),
                    "kind": FIFF.FIFFV_POINT_EEG,
                    "coord_frame": coord_frame,
                }
            )
    if add_missing_fiducials:
        assert coord_frame == FIFF.FIFFV_COORD_HEAD
        # These being none is really an assumption that if you have one you
        # should have all three. But we can relax this later if necessary.
        assert lpa is None
        assert rpa is None
        assert nasion is None
        _ensure_fiducials_head(dig)

    return _format_dig_points(dig)


def _call_make_dig_points(nasion, lpa, rpa, hpi, extra, convert=True):
    from ..transforms import (
        Transform,
        apply_trans,
        get_ras_to_neuromag_trans,
    )

    if convert:
        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
        nasion = apply_trans(neuromag_trans, nasion)
        lpa = apply_trans(neuromag_trans, lpa)
        rpa = apply_trans(neuromag_trans, rpa)

        if hpi is not None:
            hpi = apply_trans(neuromag_trans, hpi)

        extra = apply_trans(neuromag_trans, extra).astype(np.float32)
    else:
        neuromag_trans = None

    ctf_head_t = Transform(fro="ctf_head", to="head", trans=neuromag_trans)

    info_dig = _make_dig_points(
        nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=extra
    )

    return info_dig, ctf_head_t


##############################################################################
# From artemis123 (we have modified the function a bit)
def _artemis123_read_pos(nas, lpa, rpa, hpi, extra):
    # move into MNE head coords
    dig_points, _ = _call_make_dig_points(nas, lpa, rpa, hpi, extra)
    return dig_points


##############################################################################
# From bti
def _make_bti_dig_points(
    nasion,
    lpa,
    rpa,
    hpi,
    extra,
    convert=False,
    use_hpi=False,
    bti_dev_t=False,
    dev_ctf_t=False,
):
    from ..transforms import (
        Transform,
        combine_transforms,
        invert_transform,
    )

    _hpi = hpi if use_hpi else None
    info_dig, ctf_head_t = _call_make_dig_points(nasion, lpa, rpa, _hpi, extra, convert)

    if convert:
        t = combine_transforms(
            invert_transform(bti_dev_t), dev_ctf_t, "meg", "ctf_head"
        )
        dev_head_t = combine_transforms(t, ctf_head_t, "meg", "head")
    else:
        dev_head_t = Transform("meg", "head", trans=None)

    return info_dig, dev_head_t, ctf_head_t  # ctf_head_t should not be needed
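The module above builds a list of DigPoint dictionaries from fiducial, HPI, head-shape, and EEG coordinates and can round-trip them through a FIF file. A minimal usage sketch, assuming the vendored mne package under dist/client is importable and using made-up coordinates and a hypothetical output file name:

    # Hedged illustration only; coordinates and file name are placeholders.
    from mne._fiff._digitization import _make_dig_points, write_dig

    dig = _make_dig_points(
        nasion=[0.0, 0.10, 0.0],            # metres, head coordinate frame
        lpa=[-0.08, 0.0, 0.0],
        rpa=[0.08, 0.0, 0.0],
        extra_points=[[0.0, 0.05, 0.08], [0.02, 0.02, 0.09]],
        coord_frame="head",
    )
    print(dig[0])                            # readable DigPoint repr
    write_dig("digitization-dig.fif", dig, coord_frame="head", overwrite=True)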
168
dist/client/mne/_fiff/compensator.py
vendored
Normal file
@@ -0,0 +1,168 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..utils import fill_doc
from .constants import FIFF


def get_current_comp(info):
    """Get the current compensation in effect in the data."""
    comp = None
    first_comp = -1
    for k, chan in enumerate(info["chs"]):
        if chan["kind"] == FIFF.FIFFV_MEG_CH:
            comp = int(chan["coil_type"]) >> 16
            if first_comp < 0:
                first_comp = comp
            elif comp != first_comp:
                raise ValueError("Compensation is not set equally on all MEG channels")
    return comp


def set_current_comp(info, comp):
    """Set the current compensation in effect in the data."""
    comp_now = get_current_comp(info)
    for k, chan in enumerate(info["chs"]):
        if chan["kind"] == FIFF.FIFFV_MEG_CH:
            rem = chan["coil_type"] - (comp_now << 16)
            chan["coil_type"] = int(rem + (comp << 16))


def _make_compensator(info, grade):
    """Auxiliary function for make_compensator."""
    for k in range(len(info["comps"])):
        if info["comps"][k]["kind"] == grade:
            this_data = info["comps"][k]["data"]

            # Create the preselector
            presel = np.zeros((this_data["ncol"], info["nchan"]))
            for col, col_name in enumerate(this_data["col_names"]):
                ind = [k for k, ch in enumerate(info["ch_names"]) if ch == col_name]
                if len(ind) == 0:
                    raise ValueError(f"Channel {col_name} is not available in data")
                elif len(ind) > 1:
                    raise ValueError(f"Ambiguous channel {col_name}")
                presel[col, ind[0]] = 1.0

            # Create the postselector (zero entries for channels not found)
            postsel = np.zeros((info["nchan"], this_data["nrow"]))
            for c, ch_name in enumerate(info["ch_names"]):
                ind = [
                    k for k, ch in enumerate(this_data["row_names"]) if ch == ch_name
                ]
                if len(ind) > 1:
                    raise ValueError(f"Ambiguous channel {ch_name}")
                elif len(ind) == 1:
                    postsel[c, ind[0]] = 1.0
                # else, don't use it at all (postsel[c, ?] = 0.0) by allocation
            this_comp = np.dot(postsel, np.dot(this_data["data"], presel))
            return this_comp

    raise ValueError(f"Desired compensation matrix (grade = {grade:d}) not found")


@fill_doc
def make_compensator(info, from_, to, exclude_comp_chs=False):
    """Return compensation matrix eg. for CTF system.

    Create a compensation matrix to bring the data from one compensation
    state to another.

    Parameters
    ----------
    %(info_not_none)s
    from_ : int
        Compensation in the input data.
    to : int
        Desired compensation in the output.
    exclude_comp_chs : bool
        Exclude compensation channels from the output.

    Returns
    -------
    comp : array | None.
        The compensation matrix. Might be None if no compensation
        is needed (from == to).
    """
    if from_ == to:
        return None

    # s_orig = s_from + C1*s_from = (I + C1)*s_from
    # s_to = s_orig - C2*s_orig = (I - C2)*s_orig
    # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from
    if from_ != 0:
        C1 = _make_compensator(info, from_)
        comp_from_0 = np.linalg.inv(np.eye(info["nchan"]) - C1)
    if to != 0:
        C2 = _make_compensator(info, to)
        comp_0_to = np.eye(info["nchan"]) - C2
    if from_ != 0:
        if to != 0:
            # This is mathematically equivalent, but has higher numerical
            # error than using the inverse to always go to zero and back
            # comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1)
            comp = np.dot(comp_0_to, comp_from_0)
        else:
            comp = comp_from_0
    else:
        # from == 0, to != 0 guaranteed here
        comp = comp_0_to

    if exclude_comp_chs:
        pick = [
            k for k, c in enumerate(info["chs"]) if c["kind"] != FIFF.FIFFV_REF_MEG_CH
        ]

        if len(pick) == 0:
            raise ValueError(
                "Nothing remains after excluding the compensation channels"
            )

        comp = comp[pick, :]

    return comp


# @verbose
# def compensate_to(data, to, verbose=None):
#     """
#     %
#     % [newdata] = mne_compensate_to(data,to)
#     %
#     % Apply compensation to the data as desired
#     %
#     """
#
#     newdata = data.copy()
#     now = get_current_comp(newdata['info'])
#
#     # Are we there already?
#     if now == to:
#         logger.info('Data are already compensated as desired')
#
#     # Make the compensator and apply it to all data sets
#     comp = make_compensator(newdata['info'], now, to)
#     for k in range(len(newdata['evoked'])):
#         newdata['evoked'][k]['epochs'] = np.dot(comp,
#                                                 newdata['evoked'][k]['epochs'])
#
#     # Update the compensation info in the channel descriptors
#     newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to)
#     return newdata


# def set_current_comp(chs, value):
#     """Set the current compensation value in the channel info structures
#     """
#     new_chs = chs
#
#     lower_half = int('FFFF', 16)  # hex2dec('FFFF')
#     for k in range(len(chs)):
#         if chs[k]['kind'] == FIFF.FIFFV_MEG_CH:
#             coil_type = float(chs[k]['coil_type']) & lower_half
#             new_chs[k]['coil_type'] = int(coil_type | (value << 16))
#
#     return new_chs
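make_compensator composes the grade-change matrix from the per-grade compensation matrices: data at grade from_ are first mapped back to the uncompensated state with the inverse of (I - C1), then brought to grade to with (I - C2). A hedged sketch of how this might be driven through the public MNE API (the CTF dataset path is hypothetical):

    # Illustrative only; "sample_ctf.ds" is a placeholder path.
    import mne
    from mne._fiff.compensator import get_current_comp, make_compensator

    raw = mne.io.read_raw_ctf("sample_ctf.ds", preload=True)
    current = get_current_comp(raw.info)          # grade currently applied
    comp = make_compensator(raw.info, from_=current, to=0)
    raw.apply_gradient_compensation(0)            # public wrapper that applies `comp`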
1218
dist/client/mne/_fiff/constants.py
vendored
Normal file
File diff suppressed because it is too large
189
dist/client/mne/_fiff/ctf_comp.py
vendored
Normal file
@@ -0,0 +1,189 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from copy import deepcopy

import numpy as np

from ..utils import _pl, logger, verbose
from .constants import FIFF
from .matrix import _read_named_matrix, write_named_matrix
from .tag import read_tag
from .tree import dir_tree_find
from .write import end_block, start_block, write_int


def _add_kind(one):
    """Convert CTF kind to MNE kind."""
    if one["ctfkind"] == int("47314252", 16):
        one["kind"] = 1
    elif one["ctfkind"] == int("47324252", 16):
        one["kind"] = 2
    elif one["ctfkind"] == int("47334252", 16):
        one["kind"] = 3
    else:
        one["kind"] = int(one["ctfkind"])


def _calibrate_comp(
    comp, chs, row_names, col_names, mult_keys=("range", "cal"), flip=False
):
    """Get row and column cals."""
    ch_names = [c["ch_name"] for c in chs]
    row_cals = np.zeros(len(row_names))
    col_cals = np.zeros(len(col_names))
    for names, cals, inv in zip(
        (row_names, col_names), (row_cals, col_cals), (False, True)
    ):
        for ii in range(len(cals)):
            p = ch_names.count(names[ii])
            if p != 1:
                raise RuntimeError(
                    f"Channel {names[ii]} does not appear exactly once "
                    f"in data, found {p:d} instance{_pl(p)}"
                )
            idx = ch_names.index(names[ii])
            val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]]
            val = float(1.0 / val) if inv else float(val)
            val = 1.0 / val if flip else val
            cals[ii] = val
    comp["rowcals"] = row_cals
    comp["colcals"] = col_cals
    comp["data"]["data"] = row_cals[:, None] * comp["data"]["data"] * col_cals[None, :]


@verbose
def read_ctf_comp(fid, node, chs, verbose=None):
    """Read the CTF software compensation data from the given node.

    Parameters
    ----------
    fid : file
        The file descriptor.
    node : dict
        The node in the FIF tree.
    chs : list
        The list of channels from info['chs'] to match with
        compensators that are read.
    %(verbose)s

    Returns
    -------
    compdata : list
        The compensation data
    """
    return _read_ctf_comp(fid, node, chs, None)


def _read_ctf_comp(fid, node, chs, ch_names_mapping):
    """Read the CTF software compensation data from the given node.

    Parameters
    ----------
    fid : file
        The file descriptor.
    node : dict
        The node in the FIF tree.
    chs : list
        The list of channels from info['chs'] to match with
        compensators that are read.
    ch_names_mapping : dict | None
        The channel renaming to use.
    %(verbose)s

    Returns
    -------
    compdata : list
        The compensation data
    """
    from .meas_info import _rename_comps

    ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping
    compdata = []
    comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA)

    for node in comps:
        # Read the data we need
        mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA)
        for p in range(node["nent"]):
            kind = node["directory"][p].kind
            pos = node["directory"][p].pos
            if kind == FIFF.FIFF_MNE_CTF_COMP_KIND:
                tag = read_tag(fid, pos)
                break
        else:
            raise Exception("Compensation type not found")

        # Get the compensation kind and map it to a simple number
        one = dict(ctfkind=tag.data.item())
        del tag
        _add_kind(one)
        for p in range(node["nent"]):
            kind = node["directory"][p].kind
            pos = node["directory"][p].pos
            if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED:
                tag = read_tag(fid, pos)
                calibrated = tag.data
                break
        else:
            calibrated = False

        one["save_calibrated"] = bool(calibrated)
        one["data"] = mat
        _rename_comps([one], ch_names_mapping)
        if not calibrated:
            # Calibrate...
            _calibrate_comp(one, chs, mat["row_names"], mat["col_names"])
        else:
            one["rowcals"] = np.ones(mat["data"].shape[0], dtype=np.float64)
            one["colcals"] = np.ones(mat["data"].shape[1], dtype=np.float64)

        compdata.append(one)

    if len(compdata) > 0:
        logger.info(f" Read {len(compdata)} compensation matrices")

    return compdata


###############################################################################
# Writing


def write_ctf_comp(fid, comps):
    """Write the CTF compensation data into a fif file.

    Parameters
    ----------
    fid : file
        The open FIF file descriptor

    comps : list
        The compensation data to write
    """
    if len(comps) <= 0:
        return

    # This is very simple in fact
    start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
    for comp in comps:
        start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
        # Write the compensation kind
        write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp["ctfkind"])
        if comp.get("save_calibrated", False):
            write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, comp["save_calibrated"])

        if not comp.get("save_calibrated", True):
            # Undo calibration
            comp = deepcopy(comp)
            data = (
                (1.0 / comp["rowcals"][:, None])
                * comp["data"]["data"]
                * (1.0 / comp["colcals"][None, :])
            )
            comp["data"]["data"] = data
        write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp["data"])
        end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)

    end_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
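The compensation entries parsed by _read_ctf_comp end up in info["comps"], each mirroring the dict assembled above (ctfkind, kind, save_calibrated, rowcals, colcals, and a named-matrix "data"). A hedged inspection sketch (dataset path is hypothetical):

    # Illustrative only; "sample_ctf.ds" is a placeholder path.
    import mne

    raw = mne.io.read_raw_ctf("sample_ctf.ds")
    for comp in raw.info["comps"]:
        mat = comp["data"]
        print(comp["kind"], mat["nrow"], "x", mat["ncol"], mat["row_names"][:2])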
138
dist/client/mne/_fiff/matrix.py
vendored
Normal file
@@ -0,0 +1,138 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from ..utils import logger
from .constants import FIFF
from .tag import find_tag, has_tag
from .write import (
    end_block,
    start_block,
    write_float_matrix,
    write_int,
    write_name_list,
)


def _transpose_named_matrix(mat):
    """Transpose mat inplace (no copy)."""
    mat["nrow"], mat["ncol"] = mat["ncol"], mat["nrow"]
    mat["row_names"], mat["col_names"] = mat["col_names"], mat["row_names"]
    mat["data"] = mat["data"].T


def _read_named_matrix(fid, node, matkind, indent=" ", transpose=False):
    """Read named matrix from the given node.

    Parameters
    ----------
    fid : file
        The opened file descriptor.
    node : dict
        The node in the tree.
    matkind : int
        The type of matrix.
    transpose : bool
        If True, transpose the matrix. Default is False.
    %(verbose)s

    Returns
    -------
    mat: dict
        The matrix data
    """
    # Descend one level if necessary
    if node["block"] != FIFF.FIFFB_MNE_NAMED_MATRIX:
        for k in range(node["nchild"]):
            if node["children"][k]["block"] == FIFF.FIFFB_MNE_NAMED_MATRIX:
                if has_tag(node["children"][k], matkind):
                    node = node["children"][k]
                    break
        else:
            logger.info(
                f"{indent}Desired named matrix (kind = {matkind}) not available"
            )
            return None
    else:
        if not has_tag(node, matkind):
            logger.info(
                f"{indent}Desired named matrix (kind = {matkind}) not available"
            )
            return None

    # Read everything we need
    tag = find_tag(fid, node, matkind)
    if tag is None:
        raise ValueError("Matrix data missing")
    else:
        data = tag.data

    nrow, ncol = data.shape
    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
    if tag is not None and tag.data != nrow:
        raise ValueError(
            "Number of rows in matrix data and FIFF_MNE_NROW tag do not match"
        )

    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
    if tag is not None and tag.data != ncol:
        raise ValueError(
            "Number of columns in matrix data and FIFF_MNE_NCOL tag do not match"
        )

    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
    row_names = tag.data.split(":") if tag is not None else []

    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
    col_names = tag.data.split(":") if tag is not None else []

    mat = dict(
        nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, data=data
    )
    if transpose:
        _transpose_named_matrix(mat)
    return mat


def write_named_matrix(fid, kind, mat):
    """Write named matrix from the given node.

    Parameters
    ----------
    fid : file
        The opened file descriptor.
    kind : int
        The kind of the matrix.
    matkind : int
        The type of matrix.
    """
    # let's save ourselves from disaster
    n_tot = mat["nrow"] * mat["ncol"]
    if mat["data"].size != n_tot:
        ratio = n_tot / float(mat["data"].size)
        if n_tot < mat["data"].size and ratio > 0:
            ratio = 1 / ratio
        raise ValueError(
            "Cannot write matrix: row (%i) and column (%i) "
            "total element (%i) mismatch with data size (%i), "
            "appears to be off by a factor of %gx"
            % (mat["nrow"], mat["ncol"], n_tot, mat["data"].size, ratio)
        )
    start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
    write_int(fid, FIFF.FIFF_MNE_NROW, mat["nrow"])
    write_int(fid, FIFF.FIFF_MNE_NCOL, mat["ncol"])

    if len(mat["row_names"]) > 0:
        # let's prevent unintentional stupidity
        if len(mat["row_names"]) != mat["nrow"]:
            raise ValueError('len(mat["row_names"]) != mat["nrow"]')
        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat["row_names"])

    if len(mat["col_names"]) > 0:
        # let's prevent unintentional stupidity
        if len(mat["col_names"]) != mat["ncol"]:
            raise ValueError('len(mat["col_names"]) != mat["ncol"]')
        write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat["col_names"])

    write_float_matrix(fid, kind, mat["data"])
    end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
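A named matrix is just a dict of nrow/ncol, row/column name lists, and a data array; write_named_matrix serializes it inside a FIFFB_MNE_NAMED_MATRIX block. A hedged round-trip sketch (file name and channel names are made up):

    # Illustrative only; "named-matrix.fif" and the channel names are placeholders.
    import numpy as np
    from mne._fiff.constants import FIFF
    from mne._fiff.matrix import write_named_matrix
    from mne._fiff.write import start_and_end_file

    mat = dict(
        nrow=2, ncol=3,
        row_names=["MEG 0111", "MEG 0112"],
        col_names=["EEG 001", "EEG 002", "EEG 003"],
        data=np.arange(6, dtype=np.float64).reshape(2, 3),
    )
    with start_and_end_file("named-matrix.fif") as fid:
        write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, mat)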
3698
dist/client/mne/_fiff/meas_info.py
vendored
Normal file
File diff suppressed because it is too large
385
dist/client/mne/_fiff/open.py
vendored
Normal file
@@ -0,0 +1,385 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op
from gzip import GzipFile
from io import SEEK_SET, BytesIO

import numpy as np
from scipy.sparse import issparse

from ..utils import _file_like, logger, verbose, warn
from .constants import FIFF
from .tag import (
    Tag,
    _call_dict_names,
    _matrix_info,
    _read_tag_header,
    read_tag,
)
from .tree import dir_tree_find, make_dir_tree


class _NoCloseRead:
    """Create a wrapper that will not close when used as a context manager."""

    def __init__(self, fid):
        self.fid = fid

    def __enter__(self):
        return self.fid

    def __exit__(self, type_, value, traceback):
        return

    def close(self):
        return

    def seek(self, offset, whence=SEEK_SET):
        return self.fid.seek(offset, whence)

    def read(self, size=-1):
        return self.fid.read(size)


def _fiff_get_fid(fname):
    """Open a FIF file with no additional parsing."""
    if _file_like(fname):
        fid = _NoCloseRead(fname)
        fid.seek(0)
    else:
        fname = str(fname)
        if op.splitext(fname)[1].lower() == ".gz":
            logger.debug("Using gzip")
            fid = GzipFile(fname, "rb")  # Open in binary mode
        else:
            logger.debug("Using normal I/O")
            fid = open(fname, "rb")  # Open in binary mode
    return fid


def _get_next_fname(fid, fname, tree):
    """Get the next filename in split files."""
    nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF)
    next_fname = None
    for nodes in nodes_list:
        next_fname = None
        for ent in nodes["directory"]:
            if ent.kind == FIFF.FIFF_REF_ROLE:
                tag = read_tag(fid, ent.pos)
                role = int(tag.data.item())
                if role != FIFF.FIFFV_ROLE_NEXT_FILE:
                    next_fname = None
                    break
            if ent.kind == FIFF.FIFF_REF_FILE_NAME:
                tag = read_tag(fid, ent.pos)
                next_fname = op.join(op.dirname(fname), tag.data)
            if ent.kind == FIFF.FIFF_REF_FILE_NUM:
                # Some files don't have the name, just the number. So
                # we construct the name from the current name.
                if next_fname is not None:
                    continue
                next_num = read_tag(fid, ent.pos).data.item()
                path, base = op.split(fname)
                idx = base.find(".")
                idx2 = base.rfind("-")
                num_str = base[idx2 + 1 : idx]
                if not num_str.isdigit():
                    idx2 = -1

                if idx2 < 0 and next_num == 1:
                    # this is the first file, which may not be numbered
                    next_fname = op.join(
                        path,
                        f"{base[:idx]}-{next_num:d}.{base[idx + 1 :]}",
                    )
                    continue

                next_fname = op.join(
                    path, f"{base[:idx2]}-{next_num:d}.{base[idx + 1 :]}"
                )
        if next_fname is not None:
            break
    return next_fname


@verbose
def fiff_open(fname, preload=False, verbose=None):
    """Open a FIF file.

    Parameters
    ----------
    fname : path-like | fid
        Name of the fif file, or an opened file (will seek back to 0).
    preload : bool
        If True, all data from the file is read into a memory buffer. This
        requires more memory, but can be faster for I/O operations that require
        frequent seeks.
    %(verbose)s

    Returns
    -------
    fid : file
        The file descriptor of the open file.
    tree : fif tree
        The tree is a complex structure filled with dictionaries,
        lists and tags.
    directory : list
        A list of tags.
    """
    fid = _fiff_get_fid(fname)
    try:
        return _fiff_open(fname, fid, preload)
    except Exception:
        fid.close()
        raise


def _fiff_open(fname, fid, preload):
    # do preloading of entire file
    if preload:
        # note that StringIO objects instantiated this way are read-only,
        # but that's okay here since we are using mode "rb" anyway
        with fid as fid_old:
            fid = BytesIO(fid_old.read())

    tag = _read_tag_header(fid, 0)

    # Check that this looks like a fif file
    prefix = f"file {repr(fname)} does not"
    if tag.kind != FIFF.FIFF_FILE_ID:
        raise ValueError(f"{prefix} start with a file id tag")

    if tag.type != FIFF.FIFFT_ID_STRUCT:
        raise ValueError(f"{prefix} start with a file id tag")

    if tag.size != 20:
        raise ValueError(f"{prefix} start with a file id tag")

    tag = read_tag(fid, tag.next_pos)

    if tag.kind != FIFF.FIFF_DIR_POINTER:
        raise ValueError(f"{prefix} have a directory pointer")

    # Read or create the directory tree
    logger.debug(f" Creating tag directory for {fname}...")

    dirpos = int(tag.data.item())
    read_slow = True
    if dirpos > 0:
        dir_tag = read_tag(fid, dirpos)
        if dir_tag is None or dir_tag.data is None:
            fid.seek(0, 2)  # move to end of file
            size = fid.tell()
            extra = "" if size > dirpos else f" > file size {size}"
            warn(
                "FIF tag directory missing at the end of the file "
                f"(at byte {dirpos}{extra}), possibly corrupted file: {fname}"
            )
        else:
            directory = dir_tag.data
            read_slow = False
    if read_slow:
        pos = 0
        fid.seek(pos, 0)
        directory = list()
        while pos is not None:
            tag = _read_tag_header(fid, pos)
            if tag is None:
                break  # HACK : to fix file ending with empty tag...
            pos = tag.next_pos
            directory.append(tag)

    tree, _ = make_dir_tree(fid, directory)

    logger.debug("[done]")

    # Back to the beginning
    fid.seek(0)

    return fid, tree, directory


@verbose
def show_fiff(
    fname,
    indent=" ",
    read_limit=np.inf,
    max_str=30,
    output=str,
    tag=None,
    *,
    show_bytes=False,
    verbose=None,
):
    """Show FIFF information.

    This function is similar to mne_show_fiff.

    Parameters
    ----------
    fname : path-like
        Filename to evaluate.
    indent : str
        How to indent the lines.
    read_limit : int
        Max number of bytes of data to read from a tag. Can be np.inf
        to always read all data (helps test read completion).
    max_str : int
        Max number of characters of string representation to print for
        each tag's data.
    output : type
        Either str or list. str is a convenience output for printing.
    tag : int | None
        Provide information about this tag. If None (default), all information
        is shown.
    show_bytes : bool
        If True (default False), print the byte offsets of each tag.
    %(verbose)s

    Returns
    -------
    contents : str
        The contents of the file.
    """
    if output not in [list, str]:
        raise ValueError("output must be list or str")
    if isinstance(tag, str):  # command mne show_fiff passes string
        tag = int(tag)
    f, tree, directory = fiff_open(fname)
    # This gets set to 0 (unknown) by fiff_open, but FIFFB_ROOT probably
    # makes more sense for display
    tree["block"] = FIFF.FIFFB_ROOT
    with f as fid:
        out = _show_tree(
            fid,
            tree,
            indent=indent,
            level=0,
            read_limit=read_limit,
            max_str=max_str,
            tag_id=tag,
            show_bytes=show_bytes,
        )
    if output is str:
        out = "\n".join(out)
    return out


def _find_type(value, fmts=("FIFF_",), exclude=("FIFF_UNIT",)):
    """Find matching values."""
    value = int(value)
    vals = [
        k
        for k, v in FIFF.items()
        if v == value
        and any(fmt in k for fmt in fmts)
        and not any(exc in k for exc in exclude)
    ]
    if len(vals) == 0:
        vals = ["???"]
    return vals


def _show_tree(
    fid,
    tree,
    indent,
    level,
    read_limit,
    max_str,
    tag_id,
    *,
    show_bytes=False,
):
    """Show FIFF tree."""
    this_idt = indent * level
    next_idt = indent * (level + 1)
    # print block-level information
    found_types = "/".join(_find_type(tree["block"], fmts=["FIFFB_"]))
    out = [f"{this_idt}{str(int(tree['block'])).ljust(4)} = {found_types}"]
    tag_found = False
    if tag_id is None or out[0].strip().startswith(str(tag_id)):
        tag_found = True

    if tree["directory"] is not None:
        kinds = [ent.kind for ent in tree["directory"]] + [-1]
        types = [ent.type for ent in tree["directory"]]
        sizes = [ent.size for ent in tree["directory"]]
        poss = [ent.pos for ent in tree["directory"]]
        counter = 0
        good = True
        for k, kn, size, pos, type_ in zip(kinds[:-1], kinds[1:], sizes, poss, types):
            if not tag_found and k != tag_id:
                continue
            tag = Tag(kind=k, type=type_, size=size, next=FIFF.FIFFV_NEXT_NONE, pos=pos)
            if read_limit is None or size <= read_limit:
                try:
                    tag = read_tag(fid, pos)
                except Exception:
                    good = False

            if kn == k:
                # don't print if the next item is the same type (count 'em)
                counter += 1
            else:
                if show_bytes:
                    at = f" @{pos}"
                else:
                    at = ""
                # find the tag type
                this_type = _find_type(k, fmts=["FIFF_"])
                # prepend a count if necessary
                prepend = "x" + str(counter + 1) + ": " if counter > 0 else ""
                postpend = ""
                # print tag data nicely
                if tag.data is not None:
                    postpend = " = " + str(tag.data)[:max_str]
                    if isinstance(tag.data, np.ndarray):
                        if tag.data.size > 1:
                            postpend += " ... array size=" + str(tag.data.size)
                    elif isinstance(tag.data, dict):
                        postpend += " ... dict len=" + str(len(tag.data))
                    elif isinstance(tag.data, str):
                        postpend += " ... str len=" + str(len(tag.data))
                    elif isinstance(tag.data, (list, tuple)):
                        postpend += " ... list len=" + str(len(tag.data))
                    elif issparse(tag.data):
                        postpend += (
                            f" ... sparse ({tag.data.getformat()}) shape="
                            f"{tag.data.shape}"
                        )
                    else:
                        postpend += " ... type=" + str(type(tag.data))
                postpend = ">" * 20 + f"BAD @{pos}" if not good else postpend
                matrix_info = _matrix_info(tag)
                if matrix_info is not None:
                    _, type_, _, _ = matrix_info
                type_ = _call_dict_names.get(type_, f"?{type_}?")
                this_type = "/".join(this_type)
                out += [
                    f"{next_idt}{prepend}{str(k).ljust(4)} = "
                    f"{this_type}{at} ({size}b {type_}) {postpend}"
                ]
                out[-1] = out[-1].replace("\n", "¶")
                counter = 0
                good = True
        if tag_id in kinds:
            tag_found = True
    if not tag_found:
        out = [""]
        level = -1  # removes extra indent
    # deal with children
    for branch in tree["children"]:
        out += _show_tree(
            fid,
            branch,
            indent,
            level + 1,
            read_limit,
            max_str,
            tag_id,
            show_bytes=show_bytes,
        )
    return out
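fiff_open returns the open file handle, the parsed block tree, and the flat tag directory, and show_fiff renders that tree as text. A hedged sketch (the FIF file name is hypothetical):

    # Illustrative only; "sample_raw.fif" is a placeholder path.
    from mne._fiff.open import fiff_open, show_fiff

    fid, tree, directory = fiff_open("sample_raw.fif", preload=True)
    with fid:
        print(len(directory), "tags in file")
    print(show_fiff("sample_raw.fif")[:500])   # first part of the tag listing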
1413
dist/client/mne/_fiff/pick.py
vendored
Normal file
File diff suppressed because it is too large
331
dist/client/mne/_fiff/proc_history.py
vendored
Normal file
@@ -0,0 +1,331 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..fixes import _csc_array_cast
|
||||
from ..utils import _check_fname, warn
|
||||
from .constants import FIFF
|
||||
from .open import fiff_open, read_tag
|
||||
from .tag import _float_item, _int_item, find_tag
|
||||
from .tree import dir_tree_find
|
||||
from .write import (
|
||||
_safe_name_list,
|
||||
end_block,
|
||||
start_block,
|
||||
write_float,
|
||||
write_float_matrix,
|
||||
write_float_sparse,
|
||||
write_id,
|
||||
write_int,
|
||||
write_int_matrix,
|
||||
write_name_list_sanitized,
|
||||
write_string,
|
||||
)
|
||||
|
||||
_proc_keys = [
|
||||
"parent_file_id",
|
||||
"block_id",
|
||||
"parent_block_id",
|
||||
"date",
|
||||
"experimenter",
|
||||
"creator",
|
||||
]
|
||||
_proc_ids = [
|
||||
FIFF.FIFF_PARENT_FILE_ID,
|
||||
FIFF.FIFF_BLOCK_ID,
|
||||
FIFF.FIFF_PARENT_BLOCK_ID,
|
||||
FIFF.FIFF_MEAS_DATE,
|
||||
FIFF.FIFF_EXPERIMENTER,
|
||||
FIFF.FIFF_CREATOR,
|
||||
]
|
||||
_proc_writers = [write_id, write_id, write_id, write_int, write_string, write_string]
|
||||
_proc_casters = [dict, dict, dict, np.array, str, str]
|
||||
|
||||
|
||||
def _read_proc_history(fid, tree):
|
||||
"""Read processing history from fiff file.
|
||||
|
||||
This function reads the SSS info, the CTC correction and the
|
||||
calibaraions from the SSS processing logs inside af a raw file
|
||||
(C.f. Maxfilter v2.2 manual (October 2010), page 21)::
|
||||
|
||||
104 = { 900 = proc. history
|
||||
104 = { 901 = proc. record
|
||||
103 = block ID
|
||||
204 = date
|
||||
212 = scientist
|
||||
113 = creator program
|
||||
104 = { 502 = SSS info
|
||||
264 = SSS task
|
||||
263 = SSS coord frame
|
||||
265 = SSS origin
|
||||
266 = SSS ins.order
|
||||
267 = SSS outs.order
|
||||
268 = SSS nr chnls
|
||||
269 = SSS components
|
||||
278 = SSS nfree
|
||||
243 = HPI g limit 0.98
|
||||
244 = HPI dist limit 0.005
|
||||
105 = } 502 = SSS info
|
||||
104 = { 504 = MaxST info
|
||||
264 = SSS task
|
||||
272 = SSST subspace correlation
|
||||
279 = SSST buffer length
|
||||
105 = }
|
||||
104 = { 501 = CTC correction
|
||||
103 = block ID
|
||||
204 = date
|
||||
113 = creator program
|
||||
800 = CTC matrix
|
||||
3417 = proj item chs
|
||||
105 = } 501 = CTC correction
|
||||
104 = { 503 = SSS finecalib.
|
||||
270 = SSS cal chnls
|
||||
271 = SSS cal coeff
|
||||
105 = } 503 = SSS finecalib.
|
||||
105 = } 901 = proc. record
|
||||
105 = } 900 = proc. history
|
||||
"""
|
||||
proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY)
|
||||
out = list()
|
||||
if len(proc_history) > 0:
|
||||
proc_history = proc_history[0]
|
||||
proc_records = dir_tree_find(proc_history, FIFF.FIFFB_PROCESSING_RECORD)
|
||||
for proc_record in proc_records:
|
||||
record = dict()
|
||||
for i_ent in range(proc_record["nent"]):
|
||||
kind = proc_record["directory"][i_ent].kind
|
||||
pos = proc_record["directory"][i_ent].pos
|
||||
for key, id_, cast in zip(_proc_keys, _proc_ids, _proc_casters):
|
||||
if kind == id_:
|
||||
tag = read_tag(fid, pos)
|
||||
record[key] = cast(tag.data)
|
||||
break
|
||||
else:
|
||||
warn(f"Unknown processing history item {kind}")
|
||||
record["max_info"] = _read_maxfilter_record(fid, proc_record)
|
||||
iass = dir_tree_find(proc_record, FIFF.FIFFB_IAS)
|
||||
if len(iass) > 0:
|
||||
# XXX should eventually populate this
|
||||
ss = [dict() for _ in range(len(iass))]
|
||||
record["ias"] = ss
|
||||
if len(record["max_info"]) > 0:
|
||||
out.append(record)
|
||||
return out
|
||||
|
||||
|
||||
def _write_proc_history(fid, info):
|
||||
"""Write processing history to file."""
|
||||
if len(info["proc_history"]) > 0:
|
||||
start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
|
||||
for record in info["proc_history"]:
|
||||
start_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
|
||||
for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers):
|
||||
if key in record:
|
||||
writer(fid, id_, record[key])
|
||||
_write_maxfilter_record(fid, record["max_info"])
|
||||
if "ias" in record:
|
||||
for _ in record["ias"]:
|
||||
start_block(fid, FIFF.FIFFB_IAS)
|
||||
# XXX should eventually populate this
|
||||
end_block(fid, FIFF.FIFFB_IAS)
|
||||
end_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
|
||||
end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
|
||||
|
||||
|
||||
_sss_info_keys = (
|
||||
"job",
|
||||
"frame",
|
||||
"origin",
|
||||
"in_order",
|
||||
"out_order",
|
||||
"nchan",
|
||||
"components",
|
||||
"nfree",
|
||||
"hpi_g_limit",
|
||||
"hpi_dist_limit",
|
||||
)
|
||||
_sss_info_ids = (
|
||||
FIFF.FIFF_SSS_JOB,
|
||||
FIFF.FIFF_SSS_FRAME,
|
||||
FIFF.FIFF_SSS_ORIGIN,
|
||||
FIFF.FIFF_SSS_ORD_IN,
|
||||
FIFF.FIFF_SSS_ORD_OUT,
|
||||
FIFF.FIFF_SSS_NMAG,
|
||||
FIFF.FIFF_SSS_COMPONENTS,
|
||||
FIFF.FIFF_SSS_NFREE,
|
||||
FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
|
||||
FIFF.FIFF_HPI_FIT_DIST_LIMIT,
|
||||
)
|
||||
_sss_info_writers = (
|
||||
write_int,
|
||||
write_int,
|
||||
write_float,
|
||||
write_int,
|
||||
write_int,
|
||||
write_int,
|
||||
write_int,
|
||||
write_int,
|
||||
write_float,
|
||||
write_float,
|
||||
)
|
||||
_sss_info_casters = (
|
||||
_int_item,
|
||||
_int_item,
|
||||
np.array,
|
||||
_int_item,
|
||||
_int_item,
|
||||
_int_item,
|
||||
np.array,
|
||||
_int_item,
|
||||
_float_item,
|
||||
_float_item,
|
||||
)
|
||||
|
||||
_max_st_keys = ("job", "subspcorr", "buflen")
|
||||
_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR, FIFF.FIFF_SSS_ST_LENGTH)
|
||||
_max_st_writers = (write_int, write_float, write_float)
|
||||
_max_st_casters = (_int_item, _float_item, _float_item)
|
||||
|
||||
_sss_ctc_keys = ("block_id", "date", "creator", "decoupler")
|
||||
_sss_ctc_ids = (
|
||||
FIFF.FIFF_BLOCK_ID,
|
||||
FIFF.FIFF_MEAS_DATE,
|
||||
FIFF.FIFF_CREATOR,
|
||||
FIFF.FIFF_DECOUPLER_MATRIX,
|
||||
)
|
||||
_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse)
|
||||
_sss_ctc_casters = (dict, np.array, str, _csc_array_cast)
|
||||
|
||||
_sss_cal_keys = ("cal_chans", "cal_corrs")
|
||||
_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS)
|
||||
_sss_cal_writers = (write_int_matrix, write_float_matrix)
|
||||
_sss_cal_casters = (np.array, np.array)
|
||||
|
||||
|
||||
def _read_ctc(fname):
|
||||
"""Read cross-talk correction matrix."""
|
||||
fname = _check_fname(fname, overwrite="read", must_exist=True)
|
||||
f, tree, _ = fiff_open(fname)
|
||||
with f as fid:
|
||||
sss_ctc = _read_maxfilter_record(fid, tree)["sss_ctc"]
|
||||
bad_str = f"Invalid cross-talk FIF: {fname}"
|
||||
if len(sss_ctc) == 0:
|
||||
raise ValueError(bad_str)
|
||||
node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0]
|
||||
comment = find_tag(fid, node, FIFF.FIFF_COMMENT).data
|
||||
if comment != "cross-talk compensation matrix":
|
||||
raise ValueError(bad_str)
|
||||
sss_ctc["creator"] = find_tag(fid, node, FIFF.FIFF_CREATOR).data
|
||||
sss_ctc["date"] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data
|
||||
return sss_ctc
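
# Hedged usage sketch (not part of the module): what ``_read_ctc`` returns for
# a MaxFilter cross-talk file. The path below is a placeholder, not a file
# shipped with MNE.
def _example_read_ctc():
    sss_ctc = _read_ctc("ct_sparse.fif")  # placeholder path
    # typical keys: 'block_id', 'date', 'creator', 'decoupler' (sparse matrix)
    # and 'proj_items_chs' (channel names the matrix applies to)
    return sss_ctc["decoupler"].shape  # e.g. (306, 306) on a Neuromag system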
|
||||
|
||||
|
||||
def _read_maxfilter_record(fid, tree):
|
||||
"""Read maxfilter processing record from file."""
|
||||
sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502
|
||||
sss_info = dict()
|
||||
if len(sss_info_block) > 0:
|
||||
sss_info_block = sss_info_block[0]
|
||||
for i_ent in range(sss_info_block["nent"]):
|
||||
kind = sss_info_block["directory"][i_ent].kind
|
||||
pos = sss_info_block["directory"][i_ent].pos
|
||||
for key, id_, cast in zip(_sss_info_keys, _sss_info_ids, _sss_info_casters):
|
||||
if kind == id_:
|
||||
tag = read_tag(fid, pos)
|
||||
sss_info[key] = cast(tag.data)
|
||||
break
|
||||
|
||||
max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504
|
||||
max_st = dict()
|
||||
if len(max_st_block) > 0:
|
||||
max_st_block = max_st_block[0]
|
||||
for i_ent in range(max_st_block["nent"]):
|
||||
kind = max_st_block["directory"][i_ent].kind
|
||||
pos = max_st_block["directory"][i_ent].pos
|
||||
for key, id_, cast in zip(_max_st_keys, _max_st_ids, _max_st_casters):
|
||||
if kind == id_:
|
||||
tag = read_tag(fid, pos)
|
||||
max_st[key] = cast(tag.data)
|
||||
break
|
||||
|
||||
sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501
|
||||
sss_ctc = dict()
|
||||
if len(sss_ctc_block) > 0:
|
||||
sss_ctc_block = sss_ctc_block[0]
|
||||
for i_ent in range(sss_ctc_block["nent"]):
|
||||
kind = sss_ctc_block["directory"][i_ent].kind
|
||||
pos = sss_ctc_block["directory"][i_ent].pos
|
||||
for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_casters):
|
||||
if kind == id_:
|
||||
tag = read_tag(fid, pos)
|
||||
sss_ctc[key] = cast(tag.data)
|
||||
break
|
||||
else:
|
||||
if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST:
|
||||
tag = read_tag(fid, pos)
|
||||
chs = _safe_name_list(tag.data, "read", "proj_items_chs")
|
||||
# This list can have null chars in the last entry, e.g.:
|
||||
# [..., 'MEG2642', 'MEG2643', 'MEG2641\x00 ... \x00']
|
||||
chs[-1] = chs[-1].split("\x00")[0]
|
||||
sss_ctc["proj_items_chs"] = chs
|
||||
|
||||
sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503
|
||||
sss_cal = dict()
|
||||
if len(sss_cal_block) > 0:
|
||||
sss_cal_block = sss_cal_block[0]
|
||||
for i_ent in range(sss_cal_block["nent"]):
|
||||
kind = sss_cal_block["directory"][i_ent].kind
|
||||
pos = sss_cal_block["directory"][i_ent].pos
|
||||
for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_casters):
|
||||
if kind == id_:
|
||||
tag = read_tag(fid, pos)
|
||||
sss_cal[key] = cast(tag.data)
|
||||
break
|
||||
|
||||
max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc, sss_cal=sss_cal, max_st=max_st)
|
||||
return max_info
|
||||
|
||||
|
||||
def _write_maxfilter_record(fid, record):
|
||||
"""Write maxfilter processing record to file."""
|
||||
sss_info = record["sss_info"]
|
||||
if len(sss_info) > 0:
|
||||
start_block(fid, FIFF.FIFFB_SSS_INFO)
|
||||
for key, id_, writer in zip(_sss_info_keys, _sss_info_ids, _sss_info_writers):
|
||||
if key in sss_info:
|
||||
writer(fid, id_, sss_info[key])
|
||||
end_block(fid, FIFF.FIFFB_SSS_INFO)
|
||||
|
||||
max_st = record["max_st"]
|
||||
if len(max_st) > 0:
|
||||
start_block(fid, FIFF.FIFFB_SSS_ST_INFO)
|
||||
for key, id_, writer in zip(_max_st_keys, _max_st_ids, _max_st_writers):
|
||||
if key in max_st:
|
||||
writer(fid, id_, max_st[key])
|
||||
end_block(fid, FIFF.FIFFB_SSS_ST_INFO)
|
||||
|
||||
sss_ctc = record["sss_ctc"]
|
||||
if len(sss_ctc) > 0: # dict has entries
|
||||
start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
|
||||
for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_writers):
|
||||
if key in sss_ctc:
|
||||
writer(fid, id_, sss_ctc[key])
|
||||
if "proj_items_chs" in sss_ctc:
|
||||
write_name_list_sanitized(
|
||||
fid,
|
||||
FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
|
||||
sss_ctc["proj_items_chs"],
|
||||
"proj_items_chs",
|
||||
)
|
||||
end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
|
||||
|
||||
sss_cal = record["sss_cal"]
|
||||
if len(sss_cal) > 0:
|
||||
start_block(fid, FIFF.FIFFB_SSS_CAL)
|
||||
for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_writers):
|
||||
if key in sss_cal:
|
||||
writer(fid, id_, sss_cal[key])
|
||||
end_block(fid, FIFF.FIFFB_SSS_CAL)
|
||||
1189
dist/client/mne/_fiff/proj.py
vendored
Normal file
File diff suppressed because it is too large
652
dist/client/mne/_fiff/reference.py
vendored
Normal file
@@ -0,0 +1,652 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..defaults import DEFAULTS
|
||||
from ..utils import (
|
||||
_check_option,
|
||||
_check_preload,
|
||||
_on_missing,
|
||||
_validate_type,
|
||||
fill_doc,
|
||||
logger,
|
||||
pinv,
|
||||
verbose,
|
||||
warn,
|
||||
)
|
||||
from .constants import FIFF
|
||||
from .meas_info import _check_ch_keys
|
||||
from .pick import _ELECTRODE_CH_TYPES, pick_channels, pick_channels_forward, pick_types
|
||||
from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj, setup_proj
|
||||
|
||||
|
||||
def _check_before_reference(inst, ref_from, ref_to, ch_type):
|
||||
"""Prepare instance for referencing."""
|
||||
# Check to see that data is preloaded
|
||||
_check_preload(inst, "Applying a reference")
|
||||
|
||||
ch_type = _get_ch_type(inst, ch_type)
|
||||
ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False}
|
||||
eeg_idx = pick_types(inst.info, **ch_dict)
|
||||
|
||||
if ref_to is None:
|
||||
ref_to = [inst.ch_names[i] for i in eeg_idx]
|
||||
extra = "EEG channels found"
|
||||
else:
|
||||
extra = "channels supplied"
|
||||
if len(ref_to) == 0:
|
||||
raise ValueError(f"No {extra} to apply the reference to")
|
||||
|
||||
# After referencing, existing SSPs might not be valid anymore.
|
||||
projs_to_remove = []
|
||||
for i, proj in enumerate(inst.info["projs"]):
|
||||
# Remove any average reference projections
|
||||
if (
|
||||
proj["desc"] == "Average EEG reference"
|
||||
or proj["kind"] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF
|
||||
):
|
||||
logger.info("Removing existing average EEG reference projection.")
|
||||
# Don't remove the projection right away, but do this at the end of
|
||||
# this loop.
|
||||
projs_to_remove.append(i)
|
||||
|
||||
# Inactive SSPs may block re-referencing
|
||||
elif (
|
||||
not proj["active"]
|
||||
and len(
|
||||
[ch for ch in (ref_from + ref_to) if ch in proj["data"]["col_names"]]
|
||||
)
|
||||
> 0
|
||||
):
|
||||
raise RuntimeError(
|
||||
"Inactive signal space projection (SSP) operators are "
|
||||
"present that operate on sensors involved in the desired "
|
||||
"referencing scheme. These projectors need to be applied "
|
||||
"using the apply_proj() method function before the desired "
|
||||
"reference can be set."
|
||||
)
|
||||
|
||||
for i in projs_to_remove:
|
||||
del inst.info["projs"][i]
|
||||
|
||||
# Need to call setup_proj after changing the projs:
|
||||
inst._projector, _ = setup_proj(inst.info, add_eeg_ref=False, activate=False)
|
||||
|
||||
# If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the
|
||||
# info that a non-CAR (custom) reference has been applied.
|
||||
ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True)
|
||||
if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0:
|
||||
with inst.info._unlock():
|
||||
inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON
|
||||
|
||||
return ref_to
|
||||
|
||||
|
||||
def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type="auto"):
|
||||
"""Apply a custom EEG referencing scheme."""
|
||||
ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type)
|
||||
|
||||
# Compute reference
|
||||
if len(ref_from) > 0:
|
||||
# this is guaranteed below, but we should avoid the crazy pick_channels
|
||||
# behavior that [] gives all. Also use ordered=True just to make sure
|
||||
# that all supplied channels actually exist.
|
||||
assert len(ref_to) > 0
|
||||
ref_names = ref_from
|
||||
ref_from = pick_channels(inst.ch_names, ref_from, ordered=True)
|
||||
ref_to = pick_channels(inst.ch_names, ref_to, ordered=True)
|
||||
|
||||
data = inst._data
|
||||
ref_data = data[..., ref_from, :].mean(-2, keepdims=True)
|
||||
data[..., ref_to, :] -= ref_data
|
||||
ref_data = ref_data[..., 0, :]
|
||||
|
||||
# REST
|
||||
if forward is not None:
|
||||
# use ch_sel and the given forward
|
||||
forward = pick_channels_forward(forward, ref_names, ordered=True)
|
||||
# 1-3. Compute a forward (G) and avg-ref'ed data (done above)
|
||||
G = forward["sol"]["data"]
|
||||
assert G.shape[0] == len(ref_names)
|
||||
# 4. Compute the forward (G) and average-reference it (Ga):
|
||||
Ga = G - np.mean(G, axis=0, keepdims=True)
|
||||
# 5. Compute the Ga_inv by SVD
|
||||
Ga_inv = pinv(Ga, rtol=1e-6)
|
||||
# 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv
|
||||
Ra = G @ Ga_inv
|
||||
# 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp)
|
||||
Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True)
|
||||
data[..., ref_to, :] += Vpa
|
||||
else:
|
||||
ref_data = None
|
||||
|
||||
return inst, ref_data
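
# Minimal numeric sketch (synthetic data, not part of the module) of the core
# re-referencing step above: the mean over the ``ref_from`` channels is
# subtracted from every ``ref_to`` channel.
def _example_average_reference_math():
    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.standard_normal((4, 10))      # 4 channels x 10 samples
    ref_data = data.mean(0, keepdims=True)   # common average over all channels
    referenced = data - ref_data
    # after common-average referencing, the channel mean is 0 at every sample
    assert np.allclose(referenced.mean(0), 0.0)
    return referenced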
|
||||
|
||||
|
||||
@fill_doc
|
||||
def add_reference_channels(inst, ref_channels, copy=True):
|
||||
"""Add reference channels to data that consists of all zeros.
|
||||
|
||||
Adds reference channels to data that were not included during recording.
|
||||
This is useful when you need to re-reference your data to different
|
||||
channels. These added channels will consist of all zeros.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Instance of Raw or Epochs with EEG channels and reference channel(s).
|
||||
%(ref_channels)s
|
||||
copy : bool
|
||||
Specifies whether the data will be copied (True) or modified in-place
|
||||
(False). Defaults to True.
|
||||
|
||||
Returns
|
||||
-------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Data with added EEG reference channels.
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. warning::
|
||||
When :ref:`re-referencing <tut-set-eeg-ref>`,
|
||||
make sure to apply the montage using :meth:`mne.io.Raw.set_montage`
|
||||
only after calling this function. Applying a montage will only set
|
||||
locations of channels that exist at the time it is applied.
|
||||
"""
|
||||
from ..epochs import BaseEpochs
|
||||
from ..evoked import Evoked
|
||||
from ..io import BaseRaw
|
||||
|
||||
# Check to see that data is preloaded
|
||||
_check_preload(inst, "add_reference_channels")
|
||||
_validate_type(ref_channels, (list, tuple, str), "ref_channels")
|
||||
if isinstance(ref_channels, str):
|
||||
ref_channels = [ref_channels]
|
||||
for ch in ref_channels:
|
||||
if ch in inst.info["ch_names"]:
|
||||
raise ValueError(f"Channel {ch} already specified in inst.")
|
||||
|
||||
# Once CAR is applied (active), don't allow adding channels
|
||||
if _has_eeg_average_ref_proj(inst.info, check_active=True):
|
||||
raise RuntimeError("Average reference already applied to data.")
|
||||
|
||||
if copy:
|
||||
inst = inst.copy()
|
||||
|
||||
if isinstance(inst, (BaseRaw, Evoked)):
|
||||
data = inst._data
|
||||
refs = np.zeros((len(ref_channels), data.shape[1]))
|
||||
data = np.vstack((data, refs))
|
||||
inst._data = data
|
||||
elif isinstance(inst, BaseEpochs):
|
||||
data = inst._data
|
||||
x, y, z = data.shape
|
||||
refs = np.zeros((x * len(ref_channels), z))
|
||||
data = np.vstack((data.reshape((x * y, z), order="F"), refs))
|
||||
data = data.reshape(x, y + len(ref_channels), z, order="F")
|
||||
inst._data = data
|
||||
else:
|
||||
raise TypeError(
|
||||
f"inst should be Raw, Epochs, or Evoked instead of {type(inst)}."
|
||||
)
|
||||
nchan = len(inst.info["ch_names"])
|
||||
|
||||
# only do this if we actually have digitisation points
|
||||
if inst.info.get("dig", None) is not None:
|
||||
# "zeroth" EEG electrode dig points is reference
|
||||
ref_dig_loc = [
|
||||
dl
|
||||
for dl in inst.info["dig"]
|
||||
if (dl["kind"] == FIFF.FIFFV_POINT_EEG and dl["ident"] == 0)
|
||||
]
|
||||
if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels):
|
||||
ref_dig_array = np.full(12, np.nan)
|
||||
warn("The locations of multiple reference channels are ignored.")
|
||||
else: # n_ref_channels == 1 and a single ref digitization exists
|
||||
ref_dig_array = np.concatenate(
|
||||
(ref_dig_loc[0]["r"], ref_dig_loc[0]["r"], np.zeros(6))
|
||||
)
|
||||
# Replace the (possibly new) Ref location for each channel
|
||||
for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]):
|
||||
inst.info["chs"][idx]["loc"][3:6] = ref_dig_loc[0]["r"]
|
||||
else:
|
||||
# Ideally we'd fall back on getting the location from a montage, but
|
||||
# locations for non-present channels aren't stored, so location is
|
||||
# unknown. Users can call set_montage() again if needed.
|
||||
ref_dig_array = np.full(12, np.nan)
|
||||
logger.info(
|
||||
"Location for this channel is unknown; consider calling "
|
||||
"set_montage() after adding new reference channels if needed. "
|
||||
"Applying a montage will only set locations of channels that "
|
||||
"exist at the time it is applied."
|
||||
)
|
||||
|
||||
for ch in ref_channels:
|
||||
chan_info = {
|
||||
"ch_name": ch,
|
||||
"coil_type": FIFF.FIFFV_COIL_EEG,
|
||||
"kind": FIFF.FIFFV_EEG_CH,
|
||||
"logno": nchan + 1,
|
||||
"scanno": nchan + 1,
|
||||
"cal": 1,
|
||||
"range": 1.0,
|
||||
"unit_mul": FIFF.FIFF_UNITM_NONE,
|
||||
"unit": FIFF.FIFF_UNIT_V,
|
||||
"coord_frame": FIFF.FIFFV_COORD_HEAD,
|
||||
"loc": ref_dig_array,
|
||||
}
|
||||
inst.info["chs"].append(chan_info)
|
||||
inst.info._update_redundant()
|
||||
range_ = np.arange(1, len(ref_channels) + 1)
|
||||
if isinstance(inst, BaseRaw):
|
||||
inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
|
||||
for pi, picks in enumerate(inst._read_picks):
|
||||
inst._read_picks[pi] = np.concatenate([picks, np.max(picks) + range_])
|
||||
elif isinstance(inst, BaseEpochs):
|
||||
picks = inst.picks
|
||||
inst.picks = np.concatenate([picks, np.max(picks) + range_])
|
||||
inst.info._check_consistency()
|
||||
set_eeg_reference(inst, ref_channels=ref_channels, copy=False, verbose=False)
|
||||
return inst
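
# Hedged usage sketch for ``add_reference_channels`` on a small synthetic Raw
# (channel names and the reference name 'REF' are made up for illustration).
def _example_add_reference_channels():
    import numpy as np

    import mne

    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.random.randn(2, 100), info)
    raw = add_reference_channels(raw, "REF", copy=True)
    # the added 'REF' channel consists of all zeros
    return raw.ch_names  # ['EEG 001', 'EEG 002', 'REF']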
|
||||
|
||||
|
||||
_ref_dict = {
|
||||
FIFF.FIFFV_MNE_CUSTOM_REF_ON: "on",
|
||||
FIFF.FIFFV_MNE_CUSTOM_REF_OFF: "off",
|
||||
FIFF.FIFFV_MNE_CUSTOM_REF_CSD: "CSD",
|
||||
}
|
||||
|
||||
|
||||
def _check_can_reref(inst):
|
||||
from ..epochs import BaseEpochs
|
||||
from ..evoked import Evoked
|
||||
from ..io import BaseRaw
|
||||
|
||||
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance")
|
||||
current_custom = inst.info["custom_ref_applied"]
|
||||
if current_custom not in (
|
||||
FIFF.FIFFV_MNE_CUSTOM_REF_ON,
|
||||
FIFF.FIFFV_MNE_CUSTOM_REF_OFF,
|
||||
):
|
||||
raise RuntimeError(
|
||||
"Cannot set new reference on data with custom reference type "
|
||||
f"{_ref_dict[current_custom]!r}"
|
||||
)
|
||||
|
||||
|
||||
@verbose
|
||||
def set_eeg_reference(
|
||||
inst,
|
||||
ref_channels="average",
|
||||
copy=True,
|
||||
projection=False,
|
||||
ch_type="auto",
|
||||
forward=None,
|
||||
*,
|
||||
joint=False,
|
||||
verbose=None,
|
||||
):
|
||||
"""Specify which reference to use for EEG data.
|
||||
|
||||
Use this function to explicitly specify the desired reference for EEG.
|
||||
This can be either an existing electrode or a new virtual channel.
|
||||
This function will re-reference the data according to the desired
|
||||
reference.
|
||||
|
||||
Note that it is also possible to re-reference the signal using a
|
||||
Laplacian (LAP) "reference-free" transformation using the
|
||||
:func:`.compute_current_source_density` function.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Instance of Raw or Epochs with EEG channels and reference channel(s).
|
||||
%(ref_channels_set_eeg_reference)s
|
||||
copy : bool
|
||||
Specifies whether the data will be copied (True) or modified in-place
|
||||
(False). Defaults to True.
|
||||
%(projection_set_eeg_reference)s
|
||||
%(ch_type_set_eeg_reference)s
|
||||
%(forward_set_eeg_reference)s
|
||||
%(joint_set_eeg_reference)s
|
||||
%(verbose)s
|
||||
|
||||
Returns
|
||||
-------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Data with EEG channels re-referenced. If ``ref_channels='average'`` and
|
||||
``projection=True`` a projection will be added instead of directly
|
||||
re-referencing the data.
|
||||
ref_data : array
|
||||
Array of reference data subtracted from EEG channels. This will be
|
||||
``None`` if ``projection=True`` or ``ref_channels='REST'``.
|
||||
%(set_eeg_reference_see_also_notes)s
|
||||
"""
|
||||
from ..forward import Forward
|
||||
|
||||
_check_can_reref(inst)
|
||||
|
||||
ch_type = _get_ch_type(inst, ch_type)
|
||||
|
||||
if projection: # average reference projector
|
||||
if ref_channels != "average":
|
||||
raise ValueError(
|
||||
'Setting projection=True is only supported for ref_channels="average", '
|
||||
f"got {ref_channels!r}."
|
||||
)
|
||||
# We need verbose='error' here in case we add projs sequentially
|
||||
if _has_eeg_average_ref_proj(inst.info, ch_type=ch_type, verbose="error"):
|
||||
warn(
|
||||
"An average reference projection was already added. The data "
|
||||
"has been left untouched."
|
||||
)
|
||||
else:
|
||||
# Creating an average reference may fail. In this case, make
|
||||
# sure that the custom_ref_applied flag is left untouched.
|
||||
custom_ref_applied = inst.info["custom_ref_applied"]
|
||||
|
||||
try:
|
||||
with inst.info._unlock():
|
||||
inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF
|
||||
if joint:
|
||||
inst.add_proj(
|
||||
make_eeg_average_ref_proj(
|
||||
inst.info, ch_type=ch_type, activate=False
|
||||
)
|
||||
)
|
||||
else:
|
||||
for this_ch_type in ch_type:
|
||||
inst.add_proj(
|
||||
make_eeg_average_ref_proj(
|
||||
inst.info, ch_type=this_ch_type, activate=False
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
with inst.info._unlock():
|
||||
inst.info["custom_ref_applied"] = custom_ref_applied
|
||||
raise
|
||||
# If the data has been preloaded, projections will no
|
||||
# longer be automatically applied.
|
||||
if inst.preload:
|
||||
logger.info(
|
||||
"Average reference projection was added, "
|
||||
"but has not been applied yet. Use the "
|
||||
"apply_proj method to apply it."
|
||||
)
|
||||
return inst, None
|
||||
del projection # not used anymore
|
||||
|
||||
inst = inst.copy() if copy else inst
|
||||
ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False}
|
||||
ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)]
|
||||
|
||||
if ref_channels == "REST":
|
||||
_validate_type(forward, Forward, 'forward when ref_channels="REST"')
|
||||
else:
|
||||
forward = None # signal to _apply_reference not to do REST
|
||||
|
||||
if ref_channels in ("average", "REST"):
|
||||
logger.info(f"Applying {ref_channels} reference.")
|
||||
ref_channels = ch_sel
|
||||
|
||||
if ref_channels == []:
|
||||
logger.info("EEG data marked as already having the desired reference.")
|
||||
else:
|
||||
logger.info(
|
||||
"Applying a custom "
|
||||
f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} "
|
||||
"reference."
|
||||
)
|
||||
|
||||
return _apply_reference(inst, ref_channels, ch_sel, forward, ch_type=ch_type)
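
# Hedged usage sketch for ``set_eeg_reference`` (``raw`` is assumed to be a
# preloaded Raw instance with EEG channels).
def _example_set_eeg_reference(raw):
    # re-reference the data in a copy to the common average
    raw_avg, _ = set_eeg_reference(raw.copy(), ref_channels="average")
    # or add an average-reference projector without modifying the data yet;
    # it is applied later with raw_proj.apply_proj()
    raw_proj, _ = set_eeg_reference(
        raw.copy(), ref_channels="average", projection=True
    )
    return raw_avg, raw_proj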
|
||||
|
||||
|
||||
def _get_ch_type(inst, ch_type):
|
||||
_validate_type(ch_type, (str, list, tuple), "ch_type")
|
||||
valid_ch_types = ("auto",) + _ELECTRODE_CH_TYPES
|
||||
if isinstance(ch_type, str):
|
||||
_check_option("ch_type", ch_type, valid_ch_types)
|
||||
if ch_type != "auto":
|
||||
ch_type = [ch_type]
|
||||
elif isinstance(ch_type, (list, tuple)):
|
||||
for type_ in ch_type:
|
||||
_validate_type(type_, str, "ch_type")
|
||||
_check_option("ch_type", type_, valid_ch_types[1:])
|
||||
ch_type = list(ch_type)
|
||||
|
||||
# if ch_type is 'auto', search through list to find first reasonable
|
||||
# reference-able channel type.
|
||||
if ch_type == "auto":
|
||||
for type_ in _ELECTRODE_CH_TYPES:
|
||||
if type_ in inst:
|
||||
ch_type = [type_]
|
||||
logger.info(
|
||||
f"{DEFAULTS['titles'][type_]} channel type selected for "
|
||||
"re-referencing"
|
||||
)
|
||||
break
|
||||
# if auto comes up empty, or the user specifies a bad ch_type.
|
||||
else:
|
||||
raise ValueError("No EEG, ECoG, sEEG or DBS channels found to rereference.")
|
||||
return ch_type
|
||||
|
||||
|
||||
@verbose
|
||||
def set_bipolar_reference(
|
||||
inst,
|
||||
anode,
|
||||
cathode,
|
||||
ch_name=None,
|
||||
ch_info=None,
|
||||
drop_refs=True,
|
||||
copy=True,
|
||||
on_bad="warn",
|
||||
verbose=None,
|
||||
):
|
||||
"""Re-reference selected channels using a bipolar referencing scheme.
|
||||
|
||||
A bipolar reference takes the difference between two channels (the anode
|
||||
minus the cathode) and adds it as a new virtual channel. The original
|
||||
channels will be dropped by default.
|
||||
|
||||
Multiple anodes and cathodes can be specified, in which case multiple
|
||||
virtual channels will be created. The 1st cathode will be subtracted
|
||||
from the 1st anode, the 2nd cathode from the 2nd anode, etc.
|
||||
|
||||
By default, the virtual channels will be annotated with the channel info and
|
||||
location of the anodes, and their coil types will be set to EEG_BIPOLAR.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Data containing the unreferenced channels.
|
||||
anode : str | list of str
|
||||
The name(s) of the channel(s) to use as anode in the bipolar reference.
|
||||
cathode : str | list of str
|
||||
The name(s) of the channel(s) to use as cathode in the bipolar
|
||||
reference.
|
||||
ch_name : str | list of str | None
|
||||
The channel name(s) for the virtual channel(s) containing the resulting
|
||||
signal. By default, bipolar channels are named after the anode and
|
||||
cathode, but it is recommended to supply a more meaningful name.
|
||||
ch_info : dict | list of dict | None
|
||||
This parameter can be used to supply a dictionary (or a dictionary for
|
||||
each bipolar channel) containing channel information to merge in,
|
||||
overwriting the default values. Defaults to None.
|
||||
drop_refs : bool
|
||||
Whether to drop the anode/cathode channels from the instance.
|
||||
copy : bool
|
||||
Whether to operate on a copy of the data (True) or modify it in-place
|
||||
(False). Defaults to True.
|
||||
on_bad : str
|
||||
If a bipolar channel is created from a bad anode or a bad cathode, mne
|
||||
warns if on_bad="warn", raises ValueError if on_bad="raise", and does
|
||||
nothing if on_bad="ignore". For "warn" and "ignore", the new bipolar
|
||||
channel will be marked as bad. Defaults to on_bad="warn".
|
||||
%(verbose)s
|
||||
|
||||
Returns
|
||||
-------
|
||||
inst : instance of Raw | Epochs | Evoked
|
||||
Data with the specified channels re-referenced.
|
||||
|
||||
See Also
|
||||
--------
|
||||
set_eeg_reference : Convenience function for creating an EEG reference.
|
||||
|
||||
Notes
|
||||
-----
|
||||
1. If the anodes contain any EEG channels, this function removes
|
||||
any pre-existing average reference projections.
|
||||
|
||||
2. During source localization, the EEG signal should have an average
|
||||
reference.
|
||||
|
||||
3. The data must be preloaded.
|
||||
|
||||
.. versionadded:: 0.9.0
|
||||
"""
|
||||
from ..epochs import BaseEpochs, EpochsArray
|
||||
from ..evoked import EvokedArray
|
||||
from ..io import BaseRaw, RawArray
|
||||
from .meas_info import create_info
|
||||
|
||||
_check_can_reref(inst)
|
||||
if not isinstance(anode, list):
|
||||
anode = [anode]
|
||||
|
||||
if not isinstance(cathode, list):
|
||||
cathode = [cathode]
|
||||
|
||||
if len(anode) != len(cathode):
|
||||
raise ValueError(
|
||||
f"Number of anodes (got {len(anode)}) must equal the number "
|
||||
f"of cathodes (got {len(cathode)})."
|
||||
)
|
||||
|
||||
if ch_name is None:
|
||||
ch_name = [f"{a}-{c}" for (a, c) in zip(anode, cathode)]
|
||||
elif not isinstance(ch_name, list):
|
||||
ch_name = [ch_name]
|
||||
if len(ch_name) != len(anode):
|
||||
raise ValueError(
|
||||
"Number of channel names must equal the number of "
|
||||
f"anodes/cathodes (got {len(ch_name)})."
|
||||
)
|
||||
|
||||
# Check for duplicate channel names (it is allowed to give the name of the
|
||||
# anode or cathode channel, as they will be replaced).
|
||||
for ch, a, c in zip(ch_name, anode, cathode):
|
||||
if ch not in [a, c] and ch in inst.ch_names:
|
||||
raise ValueError(
|
||||
f'There is already a channel named "{ch}", please '
|
||||
"specify a different name for the bipolar "
|
||||
"channel using the ch_name parameter."
|
||||
)
|
||||
|
||||
if ch_info is None:
|
||||
ch_info = [{} for _ in anode]
|
||||
elif not isinstance(ch_info, list):
|
||||
ch_info = [ch_info]
|
||||
if len(ch_info) != len(anode):
|
||||
raise ValueError(
|
||||
"Number of channel info dictionaries must equal the "
|
||||
"number of anodes/cathodes."
|
||||
)
|
||||
|
||||
if copy:
|
||||
inst = inst.copy()
|
||||
|
||||
anode = _check_before_reference(
|
||||
inst, ref_from=cathode, ref_to=anode, ch_type="auto"
|
||||
)
|
||||
|
||||
# Create bipolar reference channels by multiplying the data
|
||||
# (channels x time) with a matrix (n_virtual_channels x channels)
|
||||
# and add them to the instance.
|
||||
multiplier = np.zeros((len(anode), len(inst.ch_names)))
|
||||
for idx, (a, c) in enumerate(zip(anode, cathode)):
|
||||
multiplier[idx, inst.ch_names.index(a)] = 1
|
||||
multiplier[idx, inst.ch_names.index(c)] = -1
|
||||
|
||||
ref_info = create_info(
|
||||
ch_names=ch_name,
|
||||
sfreq=inst.info["sfreq"],
|
||||
ch_types=inst.get_channel_types(picks=anode),
|
||||
)
|
||||
|
||||
# Update "chs" in Reference-Info.
|
||||
for ch_idx, (an, info) in enumerate(zip(anode, ch_info)):
|
||||
_check_ch_keys(info, ch_idx, name="ch_info", check_min=False)
|
||||
an_idx = inst.ch_names.index(an)
|
||||
# Copy everything from anode (except ch_name).
|
||||
an_chs = {k: v for k, v in inst.info["chs"][an_idx].items() if k != "ch_name"}
|
||||
ref_info["chs"][ch_idx].update(an_chs)
|
||||
# Set coil-type to bipolar.
|
||||
ref_info["chs"][ch_idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
|
||||
# Update with info from ch_info-parameter.
|
||||
ref_info["chs"][ch_idx].update(info)
|
||||
|
||||
# Set other info-keys from original instance.
|
||||
pick_info = {
|
||||
k: v
|
||||
for k, v in inst.info.items()
|
||||
if k not in ["chs", "ch_names", "bads", "nchan", "sfreq"]
|
||||
}
|
||||
|
||||
with ref_info._unlock():
|
||||
ref_info.update(pick_info)
|
||||
|
||||
# Rereferencing of data.
|
||||
ref_data = multiplier @ inst._data
|
||||
|
||||
if isinstance(inst, BaseRaw):
|
||||
ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp, copy=None)
|
||||
elif isinstance(inst, BaseEpochs):
|
||||
ref_inst = EpochsArray(
|
||||
ref_data,
|
||||
ref_info,
|
||||
events=inst.events,
|
||||
tmin=inst.tmin,
|
||||
event_id=inst.event_id,
|
||||
metadata=inst.metadata,
|
||||
)
|
||||
else:
|
||||
ref_inst = EvokedArray(
|
||||
ref_data,
|
||||
ref_info,
|
||||
tmin=inst.tmin,
|
||||
comment=inst.comment,
|
||||
nave=inst.nave,
|
||||
kind="average",
|
||||
)
|
||||
|
||||
# Add referenced instance to original instance.
|
||||
inst.add_channels([ref_inst], force_update_info=True)
|
||||
|
||||
# Handle bad channels.
|
||||
bad_bipolar_chs = []
|
||||
for ch_idx, (a, c) in enumerate(zip(anode, cathode)):
|
||||
if a in inst.info["bads"] or c in inst.info["bads"]:
|
||||
bad_bipolar_chs.append(ch_name[ch_idx])
|
||||
|
||||
# Add warnings if bad channels are present.
|
||||
if bad_bipolar_chs:
|
||||
msg = f"Bipolar channels are based on bad channels: {bad_bipolar_chs}."
|
||||
_on_missing(on_bad, msg)
|
||||
inst.info["bads"] += bad_bipolar_chs
|
||||
|
||||
added_channels = ", ".join([name for name in ch_name])
|
||||
logger.info(f"Added the following bipolar channels:\n{added_channels}")
|
||||
|
||||
for attr_name in ["picks", "_projector"]:
|
||||
setattr(inst, attr_name, None)
|
||||
|
||||
# Drop remaining channels.
|
||||
if drop_refs:
|
||||
drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
|
||||
inst.drop_channels(drop_channels)
|
||||
|
||||
return inst
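
# Hedged usage sketch for ``set_bipolar_reference``; 'EEG 001' and 'EEG 002'
# are placeholders for channel names actually present in ``raw``.
def _example_set_bipolar_reference(raw):
    raw_bip = set_bipolar_reference(
        raw, anode="EEG 001", cathode="EEG 002", ch_name="EEG 001-002"
    )
    # the anode and cathode are dropped and 'EEG 001-002' (anode minus cathode)
    # is added as a new virtual channel
    return raw_bip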
|
||||
523
dist/client/mne/_fiff/tag.py
vendored
Normal file
@@ -0,0 +1,523 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import html
|
||||
import re
|
||||
import struct
|
||||
from dataclasses import dataclass
|
||||
from functools import partial
|
||||
from typing import Any
|
||||
|
||||
import numpy as np
|
||||
from scipy.sparse import csc_array, csr_array
|
||||
|
||||
from ..utils import _check_option, warn
|
||||
from ..utils.numerics import _julian_to_date
|
||||
from .constants import (
|
||||
FIFF,
|
||||
_ch_coil_type_named,
|
||||
_ch_kind_named,
|
||||
_ch_unit_mul_named,
|
||||
_ch_unit_named,
|
||||
_dig_cardinal_named,
|
||||
_dig_kind_named,
|
||||
)
|
||||
|
||||
##############################################################################
|
||||
# HELPERS
|
||||
|
||||
|
||||
@dataclass
|
||||
class Tag:
|
||||
"""Tag in FIF tree structure."""
|
||||
|
||||
kind: int
|
||||
type: int
|
||||
size: int
|
||||
next: int
|
||||
pos: int
|
||||
data: Any = None
|
||||
|
||||
def __eq__(self, tag): # noqa: D105
|
||||
return int(
|
||||
self.kind == tag.kind
|
||||
and self.type == tag.type
|
||||
and self.size == tag.size
|
||||
and self.next == tag.next
|
||||
and self.pos == tag.pos
|
||||
and self.data == tag.data
|
||||
)
|
||||
|
||||
@property
|
||||
def next_pos(self):
|
||||
"""The next tag position."""
|
||||
if self.next == FIFF.FIFFV_NEXT_SEQ: # 0
|
||||
return self.pos + 16 + self.size
|
||||
elif self.next > 0:
|
||||
return self.next
|
||||
else: # self.next should be -1 if we get here
|
||||
return None # safest to return None so that things like fid.seek die
|
||||
|
||||
|
||||
def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
|
||||
"""Get a range of rows from a large tag."""
|
||||
if shape is not None:
|
||||
item_size = np.dtype(dtype).itemsize
|
||||
if not len(shape) == 2:
|
||||
raise ValueError("Only implemented for 2D matrices")
|
||||
want_shape = np.prod(shape)
|
||||
have_shape = tag_size // item_size
|
||||
if want_shape != have_shape:
|
||||
raise ValueError(
|
||||
f"Wrong shape specified, requested {want_shape} but got "
|
||||
f"{have_shape}"
|
||||
)
|
||||
if not len(rlims) == 2:
|
||||
raise ValueError("rlims must have two elements")
|
||||
n_row_out = rlims[1] - rlims[0]
|
||||
if n_row_out <= 0:
|
||||
raise ValueError("rlims must yield at least one output")
|
||||
row_size = item_size * shape[1]
|
||||
# # of bytes to skip at the beginning, # to read, where to end
|
||||
start_skip = int(rlims[0] * row_size)
|
||||
read_size = int(n_row_out * row_size)
|
||||
end_pos = int(fid.tell() + tag_size)
|
||||
# Move the pointer ahead to the read point
|
||||
fid.seek(start_skip, 1)
|
||||
# Do the reading
|
||||
out = np.frombuffer(fid.read(read_size), dtype=dtype)
|
||||
# Move the pointer ahead to the end of the tag
|
||||
fid.seek(end_pos)
|
||||
else:
|
||||
out = np.frombuffer(fid.read(tag_size), dtype=dtype)
|
||||
return out
|
||||
|
||||
|
||||
def _loc_to_coil_trans(loc):
|
||||
"""Convert loc vector to coil_trans."""
|
||||
assert loc.shape[-1] == 12
|
||||
coil_trans = np.zeros(loc.shape[:-1] + (4, 4))
|
||||
coil_trans[..., :3, 3] = loc[..., :3]
|
||||
coil_trans[..., :3, :3] = np.reshape(
|
||||
loc[..., 3:], loc.shape[:-1] + (3, 3)
|
||||
).swapaxes(-1, -2)
|
||||
coil_trans[..., -1, -1] = 1.0
|
||||
return coil_trans
|
||||
|
||||
|
||||
def _coil_trans_to_loc(coil_trans):
|
||||
"""Convert coil_trans to loc."""
|
||||
coil_trans = coil_trans.astype(np.float64)
|
||||
return np.roll(coil_trans.T[:, :3], 1, 0).flatten()
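
# Round-trip sketch (not part of the module) of the 12-element ``loc`` layout
# handled above: the first three values are the position, the remaining nine
# encode the 3x3 orientation folded into a 4x4 homogeneous transform.
def _example_loc_roundtrip():
    import numpy as np

    loc = np.arange(12, dtype=np.float64)
    trans = _loc_to_coil_trans(loc)
    assert trans.shape == (4, 4) and trans[3, 3] == 1.0
    assert np.allclose(_coil_trans_to_loc(trans), loc)
    return trans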
|
||||
|
||||
|
||||
def _loc_to_eeg_loc(loc):
|
||||
"""Convert a loc to an EEG loc."""
|
||||
if not np.isfinite(loc[:3]).all():
|
||||
raise RuntimeError("Missing EEG channel location")
|
||||
if np.isfinite(loc[3:6]).all() and (loc[3:6]).any():
|
||||
return np.array([loc[0:3], loc[3:6]]).T
|
||||
else:
|
||||
return loc[0:3][:, np.newaxis].copy()
|
||||
|
||||
|
||||
##############################################################################
|
||||
# READING FUNCTIONS
|
||||
|
||||
# None of these functions have full docstrings because it's more compact that way,
|
||||
# and hopefully it's clear what they do by their names and variable values.
|
||||
# See ``read_tag`` for variable descriptions. Return values are implied
|
||||
# by the function names.
|
||||
|
||||
|
||||
def _read_tag_header(fid, pos):
|
||||
"""Read only the header of a Tag."""
|
||||
fid.seek(pos, 0)
|
||||
s = fid.read(16)
|
||||
if len(s) != 16:
|
||||
where = fid.tell() - len(s)
|
||||
extra = f" in file {fid.name}" if hasattr(fid, "name") else ""
|
||||
warn(f"Invalid tag with only {len(s)}/16 bytes at position {where}{extra}")
|
||||
return None
|
||||
# struct.unpack faster than np.frombuffer, saves ~10% of time some places
|
||||
kind, type_, size, next_ = struct.unpack(">iIii", s)
|
||||
return Tag(kind, type_, size, next_, pos)
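
# Minimal sketch of the 16-byte tag header layout parsed above (">iIii": kind,
# type, size, next), using an in-memory buffer instead of a FIF file.
def _example_tag_header_roundtrip():
    import struct
    from io import BytesIO

    header = struct.pack(
        ">iIii", FIFF.FIFF_COMMENT, FIFF.FIFFT_STRING, 5, FIFF.FIFFV_NEXT_SEQ
    )
    tag = _read_tag_header(BytesIO(header), pos=0)
    # kind/type/size/next round-trip; tag.data stays None until read_tag()
    return tag.kind, tag.type, tag.size, tag.next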
|
||||
|
||||
|
||||
def _read_matrix(fid, tag, shape, rlims):
|
||||
"""Read a matrix (dense or sparse) tag."""
|
||||
# This should be easy to implement (see _frombuffer_rows)
|
||||
# if we need it, but for now, it's not...
|
||||
if shape is not None or rlims is not None:
|
||||
raise ValueError("Row reading not implemented for matrices yet")
|
||||
|
||||
matrix_coding, matrix_type, bit, dtype = _matrix_info(tag)
|
||||
|
||||
pos = tag.pos + 16
|
||||
fid.seek(pos + tag.size - 4, 0)
|
||||
if matrix_coding == "dense":
|
||||
# Find dimensions and return to the beginning of tag data
|
||||
ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
fid.seek(-(ndim + 1) * 4, 1)
|
||||
dims = np.frombuffer(fid.read(4 * ndim), dtype=">i4")[::-1]
|
||||
#
|
||||
# Back to where the data start
|
||||
#
|
||||
fid.seek(pos, 0)
|
||||
|
||||
if ndim > 3:
|
||||
raise Exception(
|
||||
"Only 2 or 3-dimensional matrices are supported at this time"
|
||||
)
|
||||
|
||||
data = fid.read(int(bit * dims.prod()))
|
||||
data = np.frombuffer(data, dtype=dtype)
|
||||
# Note: we need the non-conjugate transpose here
|
||||
if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:
|
||||
data = data.view(">c8")
|
||||
elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:
|
||||
data = data.view(">c16")
|
||||
data.shape = dims
|
||||
else:
|
||||
# Find dimensions and return to the beginning of tag data
|
||||
ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
fid.seek(-(ndim + 2) * 4, 1)
|
||||
dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype=">i4")
|
||||
if ndim != 2:
|
||||
raise Exception("Only two-dimensional matrices are supported at this time")
|
||||
|
||||
# Back to where the data start
|
||||
fid.seek(pos, 0)
|
||||
nnz = int(dims[0])
|
||||
nrow = int(dims[1])
|
||||
ncol = int(dims[2])
|
||||
# We need to make a copy so that we can own the data, otherwise we get:
|
||||
# _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
|
||||
# E ValueError: WRITEBACKIFCOPY base is read-only
|
||||
data = np.frombuffer(fid.read(bit * nnz), dtype=dtype).astype(np.float32)
|
||||
shape = (dims[1], dims[2])
|
||||
if matrix_coding == "sparse CCS":
|
||||
tmp_indices = fid.read(4 * nnz)
|
||||
indices = np.frombuffer(tmp_indices, dtype=">i4")
|
||||
tmp_ptr = fid.read(4 * (ncol + 1))
|
||||
indptr = np.frombuffer(tmp_ptr, dtype=">i4")
|
||||
swap = nrow
|
||||
klass = csc_array
|
||||
else:
|
||||
assert matrix_coding == "sparse RCS", matrix_coding
|
||||
tmp_indices = fid.read(4 * nnz)
|
||||
indices = np.frombuffer(tmp_indices, dtype=">i4")
|
||||
tmp_ptr = fid.read(4 * (nrow + 1))
|
||||
indptr = np.frombuffer(tmp_ptr, dtype=">i4")
|
||||
swap = ncol
|
||||
klass = csr_array
|
||||
if indptr[-1] > len(indices) or np.any(indptr < 0):
|
||||
# There was a bug in MNE-C that caused some data to be
|
||||
# stored without byte swapping
|
||||
indices = np.concatenate(
|
||||
(
|
||||
np.frombuffer(tmp_indices[: 4 * (swap + 1)], dtype=">i4"),
|
||||
np.frombuffer(tmp_indices[4 * (swap + 1) :], dtype="<i4"),
|
||||
)
|
||||
)
|
||||
indptr = np.frombuffer(tmp_ptr, dtype="<i4")
|
||||
data = klass((data, indices, indptr), shape=shape)
|
||||
return data
|
||||
|
||||
|
||||
def _read_simple(fid, tag, shape, rlims, dtype):
|
||||
"""Read simple datatypes from tag (typically used with partial)."""
|
||||
return _frombuffer_rows(fid, tag.size, dtype=dtype, shape=shape, rlims=rlims)
|
||||
|
||||
|
||||
def _read_string(fid, tag, shape, rlims):
|
||||
"""Read a string tag."""
|
||||
# Always decode to ISO 8859-1 / latin1 (FIFF standard).
|
||||
d = _frombuffer_rows(fid, tag.size, dtype=">c", shape=shape, rlims=rlims)
|
||||
string = str(d.tobytes().decode("latin1", "ignore"))
|
||||
if re.search(r"&#[0-9a-fA-F]{6};", string):
|
||||
string = html.unescape(string)
|
||||
return string
|
||||
|
||||
|
||||
def _read_complex_float(fid, tag, shape, rlims):
|
||||
"""Read complex float tag."""
|
||||
# data gets stored twice as large
|
||||
if shape is not None:
|
||||
shape = (shape[0], shape[1] * 2)
|
||||
d = _frombuffer_rows(fid, tag.size, dtype=">f4", shape=shape, rlims=rlims)
|
||||
d = d.view(">c8")
|
||||
return d
|
||||
|
||||
|
||||
def _read_complex_double(fid, tag, shape, rlims):
|
||||
"""Read complex double tag."""
|
||||
# data gets stored twice as large
|
||||
if shape is not None:
|
||||
shape = (shape[0], shape[1] * 2)
|
||||
d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims)
|
||||
d = d.view(">c16")
|
||||
return d
|
||||
|
||||
|
||||
def _read_id_struct(fid, tag, shape, rlims):
|
||||
"""Read ID struct tag."""
|
||||
return dict(
|
||||
version=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
machid=np.frombuffer(fid.read(8), dtype=">i4"),
|
||||
secs=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
usecs=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
)
|
||||
|
||||
|
||||
def _read_dig_point_struct(fid, tag, shape, rlims):
|
||||
"""Read dig point struct tag."""
|
||||
kind = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
kind = _dig_kind_named.get(kind, kind)
|
||||
ident = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
if kind == FIFF.FIFFV_POINT_CARDINAL:
|
||||
ident = _dig_cardinal_named.get(ident, ident)
|
||||
return dict(
|
||||
kind=kind,
|
||||
ident=ident,
|
||||
r=np.frombuffer(fid.read(12), dtype=">f4"),
|
||||
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
|
||||
)
|
||||
|
||||
|
||||
def _read_coord_trans_struct(fid, tag, shape, rlims):
|
||||
"""Read coord trans struct tag."""
|
||||
from ..transforms import Transform
|
||||
|
||||
fro = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
to = int(np.frombuffer(fid.read(4), dtype=">i4").item())
|
||||
rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3)
|
||||
move = np.frombuffer(fid.read(12), dtype=">f4")
|
||||
trans = np.r_[np.c_[rot, move], np.array([[0], [0], [0], [1]]).T]
|
||||
data = Transform(fro, to, trans)
|
||||
fid.seek(48, 1) # Skip over the inverse transformation
|
||||
return data
|
||||
|
||||
|
||||
_ch_coord_dict = {
|
||||
FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE,
|
||||
FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE,
|
||||
FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD,
|
||||
FIFF.FIFFV_ECOG_CH: FIFF.FIFFV_COORD_HEAD,
|
||||
FIFF.FIFFV_SEEG_CH: FIFF.FIFFV_COORD_HEAD,
|
||||
FIFF.FIFFV_DBS_CH: FIFF.FIFFV_COORD_HEAD,
|
||||
FIFF.FIFFV_FNIRS_CH: FIFF.FIFFV_COORD_HEAD,
|
||||
}
|
||||
|
||||
|
||||
def _read_ch_info_struct(fid, tag, shape, rlims):
|
||||
"""Read channel info struct tag."""
|
||||
d = dict(
|
||||
scanno=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
logno=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
kind=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
range=float(np.frombuffer(fid.read(4), dtype=">f4").item()),
|
||||
cal=float(np.frombuffer(fid.read(4), dtype=">f4").item()),
|
||||
coil_type=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
# deal with really old OSX Anaconda bug by casting to float64
|
||||
loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64),
|
||||
# unit and exponent
|
||||
unit=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4").item()),
|
||||
)
|
||||
# channel name
|
||||
ch_name = np.frombuffer(fid.read(16), dtype=">c")
|
||||
ch_name = ch_name[: np.argmax(ch_name == b"")].tobytes()
|
||||
d["ch_name"] = ch_name.decode()
|
||||
# coil coordinate system definition
|
||||
_update_ch_info_named(d)
|
||||
return d
|
||||
|
||||
|
||||
def _update_ch_info_named(d):
|
||||
d["coord_frame"] = _ch_coord_dict.get(d["kind"], FIFF.FIFFV_COORD_UNKNOWN)
|
||||
d["kind"] = _ch_kind_named.get(d["kind"], d["kind"])
|
||||
d["coil_type"] = _ch_coil_type_named.get(d["coil_type"], d["coil_type"])
|
||||
d["unit"] = _ch_unit_named.get(d["unit"], d["unit"])
|
||||
d["unit_mul"] = _ch_unit_mul_named.get(d["unit_mul"], d["unit_mul"])
|
||||
|
||||
|
||||
def _read_old_pack(fid, tag, shape, rlims):
|
||||
"""Read old pack tag."""
|
||||
offset = float(np.frombuffer(fid.read(4), dtype=">f4").item())
|
||||
scale = float(np.frombuffer(fid.read(4), dtype=">f4").item())
|
||||
data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2")
|
||||
data = data * scale # to float64
|
||||
data += offset
|
||||
return data
|
||||
|
||||
|
||||
def _read_dir_entry_struct(fid, tag, shape, rlims):
|
||||
"""Read dir entry struct tag."""
|
||||
pos = tag.pos + 16
|
||||
entries = list()
|
||||
for offset in range(1, tag.size // 16):
|
||||
ent = _read_tag_header(fid, pos + offset * 16)
|
||||
# The position of the real tag on disk is stored in the "next" entry within the
|
||||
# directory, so we need to overwrite ent.pos. For safety let's also overwrite
|
||||
# ent.next to point nowhere
|
||||
ent.pos, ent.next = ent.next, FIFF.FIFFV_NEXT_NONE
|
||||
entries.append(ent)
|
||||
return entries
|
||||
|
||||
|
||||
def _read_julian(fid, tag, shape, rlims):
|
||||
"""Read julian tag."""
|
||||
return _julian_to_date(int(np.frombuffer(fid.read(4), dtype=">i4").item()))
|
||||
|
||||
|
||||
# Read types call dict
|
||||
_call_dict = {
|
||||
FIFF.FIFFT_STRING: _read_string,
|
||||
FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float,
|
||||
FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double,
|
||||
FIFF.FIFFT_ID_STRUCT: _read_id_struct,
|
||||
FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct,
|
||||
FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct,
|
||||
FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct,
|
||||
FIFF.FIFFT_OLD_PACK: _read_old_pack,
|
||||
FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct,
|
||||
FIFF.FIFFT_JULIAN: _read_julian,
|
||||
}
|
||||
_call_dict_names = {
|
||||
FIFF.FIFFT_STRING: "str",
|
||||
FIFF.FIFFT_COMPLEX_FLOAT: "c8",
|
||||
FIFF.FIFFT_COMPLEX_DOUBLE: "c16",
|
||||
FIFF.FIFFT_ID_STRUCT: "ids",
|
||||
FIFF.FIFFT_DIG_POINT_STRUCT: "dps",
|
||||
FIFF.FIFFT_COORD_TRANS_STRUCT: "cts",
|
||||
FIFF.FIFFT_CH_INFO_STRUCT: "cis",
|
||||
FIFF.FIFFT_OLD_PACK: "op_",
|
||||
FIFF.FIFFT_DIR_ENTRY_STRUCT: "dir",
|
||||
FIFF.FIFFT_JULIAN: "jul",
|
||||
FIFF.FIFFT_VOID: "nul", # 0
|
||||
}
|
||||
|
||||
# Append the simple types
|
||||
_simple_dict = {
|
||||
FIFF.FIFFT_BYTE: ">B",
|
||||
FIFF.FIFFT_SHORT: ">i2",
|
||||
FIFF.FIFFT_INT: ">i4",
|
||||
FIFF.FIFFT_USHORT: ">u2",
|
||||
FIFF.FIFFT_UINT: ">u4",
|
||||
FIFF.FIFFT_FLOAT: ">f4",
|
||||
FIFF.FIFFT_DOUBLE: ">f8",
|
||||
FIFF.FIFFT_DAU_PACK16: ">i2",
|
||||
}
|
||||
for key, dtype in _simple_dict.items():
|
||||
_call_dict[key] = partial(_read_simple, dtype=dtype)
|
||||
_call_dict_names[key] = dtype
|
||||
|
||||
|
||||
def read_tag(fid, pos, shape=None, rlims=None):
|
||||
"""Read a Tag from a file at a given position.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fid : file
|
||||
The open FIF file descriptor.
|
||||
pos : int
|
||||
The position of the Tag in the file.
|
||||
shape : tuple | None
|
||||
If tuple, the shape of the stored matrix. Only to be used with
|
||||
data stored as a vector (not implemented for matrices yet).
|
||||
rlims : tuple | None
|
||||
If tuple, the first (inclusive) and last (exclusive) rows to retrieve.
|
||||
Note that data are assumed to be stored row-major in the file. Only to
|
||||
be used with data stored as a vector (not implemented for matrices
|
||||
yet).
|
||||
|
||||
Returns
|
||||
-------
|
||||
tag : Tag
|
||||
The Tag read.
|
||||
"""
|
||||
tag = _read_tag_header(fid, pos)
|
||||
if tag is None:
|
||||
return tag
|
||||
if tag.size > 0:
|
||||
if _matrix_info(tag) is not None:
|
||||
tag.data = _read_matrix(fid, tag, shape, rlims)
|
||||
else:
|
||||
# All other data types
|
||||
try:
|
||||
fun = _call_dict[tag.type]
|
||||
except KeyError:
|
||||
raise Exception(f"Unimplemented tag data type {tag.type}") from None
|
||||
tag.data = fun(fid, tag, shape, rlims)
|
||||
return tag
|
||||
|
||||
|
||||
def find_tag(fid, node, findkind):
|
||||
"""Find Tag in an open FIF file descriptor.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fid : file-like
|
||||
Open file.
|
||||
node : dict
|
||||
Node to search.
|
||||
findkind : int
|
||||
Tag kind to find.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tag : instance of Tag
|
||||
The first tag found.
|
||||
"""
|
||||
if node["directory"] is not None:
|
||||
for subnode in node["directory"]:
|
||||
if subnode.kind == findkind:
|
||||
return read_tag(fid, subnode.pos)
|
||||
return None
|
||||
|
||||
|
||||
def has_tag(node, kind):
|
||||
"""Check if the node contains a Tag of a given kind."""
|
||||
for d in node["directory"]:
|
||||
if d.kind == kind:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _rename_list(bads, ch_names_mapping):
|
||||
return [ch_names_mapping.get(bad, bad) for bad in bads]
|
||||
|
||||
|
||||
def _int_item(x):
|
||||
return int(x.item())
|
||||
|
||||
|
||||
def _float_item(x):
|
||||
return float(x.item())
|
||||
|
||||
|
||||
def _matrix_info(tag):
|
||||
matrix_coding = tag.type & 0xFFFF0000
|
||||
if matrix_coding == 0 or tag.size == 0:
|
||||
return None
|
||||
matrix_type = tag.type & 0x0000FFFF
|
||||
matrix_coding_dict = {
|
||||
FIFF.FIFFT_MATRIX: "dense",
|
||||
FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_CCS_MATRIX: "sparse CCS",
|
||||
FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_RCS_MATRIX: "sparse RCS",
|
||||
}
|
||||
_check_option("matrix_coding", matrix_coding, list(matrix_coding_dict))
|
||||
matrix_coding = matrix_coding_dict[matrix_coding]
|
||||
matrix_bit_dtype = {
|
||||
FIFF.FIFFT_INT: (4, ">i4"),
|
||||
FIFF.FIFFT_JULIAN: (4, ">i4"),
|
||||
FIFF.FIFFT_FLOAT: (4, ">f4"),
|
||||
FIFF.FIFFT_DOUBLE: (8, ">f8"),
|
||||
FIFF.FIFFT_COMPLEX_FLOAT: (8, ">f4"),
|
||||
FIFF.FIFFT_COMPLEX_DOUBLE: (16, ">f8"),
|
||||
}
|
||||
_check_option("matrix_type", matrix_type, list(matrix_bit_dtype))
|
||||
bit, dtype = matrix_bit_dtype[matrix_type]
|
||||
return matrix_coding, matrix_type, bit, dtype
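
# Small sketch of the bit layout decoded by ``_matrix_info``: the upper 16 bits
# of ``tag.type`` carry the matrix coding, the lower 16 bits the element type.
def _example_matrix_info():
    dense_float = Tag(
        kind=FIFF.FIFF_DATA_BUFFER,  # arbitrary kind, for illustration only
        type=FIFF.FIFFT_MATRIX | FIFF.FIFFT_FLOAT,
        size=16,
        next=FIFF.FIFFV_NEXT_SEQ,
        pos=0,
    )
    return _matrix_info(dense_float)  # ('dense', FIFFT_FLOAT, 4, '>f4')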
|
||||
105
dist/client/mne/_fiff/tree.py
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
|
||||
from ..utils import logger, verbose
|
||||
from .constants import FIFF
|
||||
from .tag import read_tag
|
||||
|
||||
|
||||
def dir_tree_find(tree, kind):
|
||||
"""Find nodes of the given kind from a directory tree structure.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tree : dict
|
||||
Directory tree.
|
||||
kind : int
|
||||
Kind to find.
|
||||
|
||||
Returns
|
||||
-------
|
||||
nodes : list
|
||||
List of matching nodes.
|
||||
"""
|
||||
nodes = []
|
||||
|
||||
if isinstance(tree, list):
|
||||
for t in tree:
|
||||
nodes += dir_tree_find(t, kind)
|
||||
else:
|
||||
# Am I desirable myself?
|
||||
if tree["block"] == kind:
|
||||
nodes.append(tree)
|
||||
|
||||
# Search the subtrees
|
||||
for child in tree["children"]:
|
||||
nodes += dir_tree_find(child, kind)
|
||||
return nodes
|
||||
|
||||
|
||||
@verbose
|
||||
def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
|
||||
"""Create the directory tree structure."""
|
||||
if directory[start].kind == FIFF.FIFF_BLOCK_START:
|
||||
tag = read_tag(fid, directory[start].pos)
|
||||
block = tag.data.item()
|
||||
else:
|
||||
block = 0
|
||||
|
||||
logger.debug(" " * indent + f"start {{ {block}")
|
||||
|
||||
this = start
|
||||
|
||||
tree = dict()
|
||||
tree["block"] = block
|
||||
tree["id"] = None
|
||||
tree["parent_id"] = None
|
||||
tree["nent"] = 0
|
||||
tree["nchild"] = 0
|
||||
tree["directory"] = directory[this]
|
||||
tree["children"] = []
|
||||
|
||||
while this < len(directory):
|
||||
if directory[this].kind == FIFF.FIFF_BLOCK_START:
|
||||
if this != start:
|
||||
child, this = make_dir_tree(fid, directory, this, indent + 1)
|
||||
tree["nchild"] += 1
|
||||
tree["children"].append(child)
|
||||
elif directory[this].kind == FIFF.FIFF_BLOCK_END:
|
||||
tag = read_tag(fid, directory[start].pos)
|
||||
if tag.data == block:
|
||||
break
|
||||
else:
|
||||
tree["nent"] += 1
|
||||
if tree["nent"] == 1:
|
||||
tree["directory"] = list()
|
||||
tree["directory"].append(directory[this])
|
||||
|
||||
# Add the id information if available
|
||||
if block == 0:
|
||||
if directory[this].kind == FIFF.FIFF_FILE_ID:
|
||||
tag = read_tag(fid, directory[this].pos)
|
||||
tree["id"] = tag.data
|
||||
else:
|
||||
if directory[this].kind == FIFF.FIFF_BLOCK_ID:
|
||||
tag = read_tag(fid, directory[this].pos)
|
||||
tree["id"] = tag.data
|
||||
elif directory[this].kind == FIFF.FIFF_PARENT_BLOCK_ID:
|
||||
tag = read_tag(fid, directory[this].pos)
|
||||
tree["parent_id"] = tag.data
|
||||
|
||||
this += 1
|
||||
|
||||
# Eliminate the empty directory
|
||||
if tree["nent"] == 0:
|
||||
tree["directory"] = None
|
||||
|
||||
logger.debug(
|
||||
" " * (indent + 1)
|
||||
+ f"block = {tree['block']} nent = {tree['nent']} nchild = {tree['nchild']}"
|
||||
)
|
||||
logger.debug(" " * indent + f"end }} {block:d}")
|
||||
last = this
|
||||
return tree, last
|
||||
327
dist/client/mne/_fiff/utils.py
vendored
Normal file
@@ -0,0 +1,327 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import os
|
||||
import os.path as op
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .constants import FIFF
|
||||
from .meas_info import _get_valid_units
|
||||
|
||||
|
||||
def _check_orig_units(orig_units):
|
||||
"""Check original units from a raw file.
|
||||
|
||||
Units that are close to a valid_unit but not equal can be remapped to fit
|
||||
into the valid_units. All other units that are not valid will be replaced
|
||||
with "n/a".
|
||||
|
||||
Parameters
|
||||
----------
|
||||
orig_units : dict
|
||||
Dictionary mapping channel names to their units as specified in
|
||||
the header file. Example: {'FC1': 'nV'}
|
||||
|
||||
Returns
|
||||
-------
|
||||
orig_units_remapped : dict
|
||||
Dictionary mapping channel names to their VALID units as specified in
|
||||
the header file. Invalid units are now labeled "n/a".
|
||||
Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'}
|
||||
"""
|
||||
if orig_units is None:
|
||||
return
|
||||
valid_units = _get_valid_units()
|
||||
valid_units_lowered = [unit.lower() for unit in valid_units]
|
||||
orig_units_remapped = dict(orig_units)
|
||||
for ch_name, unit in orig_units.items():
|
||||
# Be lenient: we ignore case for now.
|
||||
if unit.lower() in valid_units_lowered:
|
||||
continue
|
||||
|
||||
# Common "invalid units" can be remapped to their valid equivalent
|
||||
remap_dict = dict()
|
||||
remap_dict["uv"] = "µV"
|
||||
remap_dict["μv"] = "µV" # greek letter mu vs micro sign. use micro
|
||||
remap_dict["\x83\xeav"] = "µV" # for shift-jis mu, use micro
|
||||
if unit.lower() in remap_dict:
|
||||
orig_units_remapped[ch_name] = remap_dict[unit.lower()]
|
||||
continue
|
||||
|
||||
# Some units cannot be saved, they are invalid: assign "n/a"
|
||||
orig_units_remapped[ch_name] = "n/a"
|
||||
|
||||
return orig_units_remapped
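
# Quick illustration of the remapping described above (channel names made up;
# assumes 'µV' is among the valid units returned by _get_valid_units).
def _example_check_orig_units():
    units = {"FC1": "µV", "FC2": "uV", "Misc": "parsec"}
    # expected: {'FC1': 'µV', 'FC2': 'µV', 'Misc': 'n/a'}
    return _check_orig_units(units)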
|
||||
|
||||
|
||||
def _find_channels(ch_names, ch_type="EOG"):
|
||||
"""Find EOG channel."""
|
||||
substrings = (ch_type,)
|
||||
substrings = [s.upper() for s in substrings]
|
||||
if ch_type == "EOG":
|
||||
substrings = ("EOG", "EYE")
|
||||
eog_idx = [
|
||||
idx
|
||||
for idx, ch in enumerate(ch_names)
|
||||
if any(substring in ch.upper() for substring in substrings)
|
||||
]
|
||||
return eog_idx
|
||||
|
||||
|
||||
def _mult_cal_one(data_view, one, idx, cals, mult):
|
||||
"""Take a chunk of raw data, multiply by mult or cals, and store."""
|
||||
one = np.asarray(one, dtype=data_view.dtype)
|
||||
assert data_view.shape[1] == one.shape[1], (
|
||||
data_view.shape[1],
|
||||
one.shape[1],
|
||||
) # noqa: E501
|
||||
if mult is not None:
|
||||
assert mult.ndim == one.ndim == 2
|
||||
data_view[:] = mult @ one[idx]
|
||||
else:
|
||||
assert cals is not None
|
||||
if isinstance(idx, slice):
|
||||
data_view[:] = one[idx]
|
||||
else:
|
||||
# faster than doing one = one[idx]
|
||||
np.take(one, idx, axis=0, out=data_view)
|
||||
data_view *= cals
|
||||
|
||||
|
||||
def _blk_read_lims(start, stop, buf_len):
|
||||
"""Deal with indexing in the middle of a data block.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
start : int
|
||||
Starting index.
|
||||
stop : int
|
||||
Ending index (exclusive).
|
||||
buf_len : int
|
||||
Buffer size in samples.
|
||||
|
||||
Returns
|
||||
-------
|
||||
block_start_idx : int
|
||||
The first block to start reading from.
|
||||
r_lims : list
|
||||
The read limits.
|
||||
d_lims : list
|
||||
The write limits.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Consider this example::
|
||||
|
||||
>>> start, stop, buf_len = 2, 27, 10
|
||||
|
||||
+---------+---------+---------
|
||||
File structure: | buf0 | buf1 | buf2 |
|
||||
+---------+---------+---------
|
||||
File time: 0 10 20 30
|
||||
+---------+---------+---------
|
||||
Requested time: 2 27
|
||||
|
||||
| |
|
||||
blockstart blockstop
|
||||
| |
|
||||
start stop
|
||||
|
||||
We need 27 - 2 = 25 samples (per channel) to store our data, and
|
||||
we need to read from 3 buffers (30 samples) to get all of our data.
|
||||
|
||||
On all reads but the first, the data we read starts at
|
||||
the first sample of the buffer. On all reads but the last,
|
||||
the data we read ends on the last sample of the buffer.
|
||||
|
||||
We call ``this_data`` the variable that stores the current buffer's data,
|
||||
and ``data`` the variable that stores the total output.
|
||||
|
||||
On the first read, we need to do this::
|
||||
|
||||
>>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP
|
||||
|
||||
On the second read, we need to do::
|
||||
|
||||
>>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP
|
||||
|
||||
On the final read, we need to do::
|
||||
|
||||
>>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP
|
||||
|
||||
This function encapsulates this logic to allow a loop over blocks, where
|
||||
data is stored using the following limits::
|
||||
|
||||
>>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP
|
||||
|
||||
""" # noqa: E501
|
||||
# this is used to deal with indexing in the middle of a sampling period
|
||||
assert all(isinstance(x, int) for x in (start, stop, buf_len))
|
||||
block_start_idx = start // buf_len
|
||||
block_start = block_start_idx * buf_len
|
||||
last_used_samp = stop - 1
|
||||
block_stop = last_used_samp - last_used_samp % buf_len + buf_len
|
||||
read_size = block_stop - block_start
|
||||
n_blk = read_size // buf_len + (read_size % buf_len != 0)
|
||||
start_offset = start - block_start
|
||||
end_offset = block_stop - stop
|
||||
d_lims = np.empty((n_blk, 2), int)
|
||||
r_lims = np.empty((n_blk, 2), int)
|
||||
for bi in range(n_blk):
|
||||
# Triage start (sidx) and end (eidx) indices for
|
||||
# data (d) and read (r)
|
||||
if bi == 0:
|
||||
d_sidx = 0
|
||||
r_sidx = start_offset
|
||||
else:
|
||||
d_sidx = bi * buf_len - start_offset
|
||||
r_sidx = 0
|
||||
if bi == n_blk - 1:
|
||||
d_eidx = stop - start
|
||||
r_eidx = buf_len - end_offset
|
||||
else:
|
||||
d_eidx = (bi + 1) * buf_len - start_offset
|
||||
r_eidx = buf_len
|
||||
d_lims[bi] = [d_sidx, d_eidx]
|
||||
r_lims[bi] = [r_sidx, r_eidx]
|
||||
return block_start_idx, r_lims, d_lims
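# Running the docstring example yields the limits spelled out above:
# >>> bsi, r_lims, d_lims = _blk_read_lims(2, 27, 10)  # doctest: +SKIP
# >>> bsi, d_lims.tolist(), r_lims.tolist()  # doctest: +SKIP
# (0, [[0, 8], [8, 18], [18, 25]], [[2, 10], [0, 10], [0, 7]])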
|
||||
|
||||
|
||||
def _file_size(fname):
|
||||
"""Get the file size in bytes."""
|
||||
with open(fname, "rb") as f:
|
||||
f.seek(0, os.SEEK_END)
|
||||
return f.tell()
|
||||
|
||||
|
||||
def _read_segments_file(
|
||||
raw,
|
||||
data,
|
||||
idx,
|
||||
fi,
|
||||
start,
|
||||
stop,
|
||||
cals,
|
||||
mult,
|
||||
dtype,
|
||||
n_channels=None,
|
||||
offset=0,
|
||||
trigger_ch=None,
|
||||
):
|
||||
"""Read a chunk of raw data."""
|
||||
if n_channels is None:
|
||||
n_channels = raw._raw_extras[fi]["orig_nchan"]
|
||||
|
||||
n_bytes = np.dtype(dtype).itemsize
|
||||
# data_offset is a byte offset into the file; data_left counts the remaining
# data samples (channels x time points), not bytes.
|
||||
data_offset = n_channels * start * n_bytes + offset
|
||||
data_left = (stop - start) * n_channels
|
||||
|
||||
# Read up to 100 MB of data at a time, block_size is in data samples
|
||||
block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels
|
||||
block_size = min(data_left, block_size)
|
||||
with open(raw._filenames[fi], "rb", buffering=0) as fid:
|
||||
fid.seek(data_offset)
|
||||
# extract data in chunks
|
||||
for sample_start in np.arange(0, data_left, block_size) // n_channels:
|
||||
count = min(block_size, data_left - sample_start * n_channels)
|
||||
block = np.fromfile(fid, dtype, count)
|
||||
if block.size != count:
|
||||
raise RuntimeError(
|
||||
f"Incorrect number of samples ({block.size} != {count}), please "
|
||||
"report this error to MNE-Python developers"
|
||||
)
|
||||
block = block.reshape(n_channels, -1, order="F")
|
||||
n_samples = block.shape[1] # = count // n_channels
|
||||
sample_stop = sample_start + n_samples
|
||||
if trigger_ch is not None:
|
||||
stim_ch = trigger_ch[start:stop][sample_start:sample_stop]
|
||||
block = np.vstack((block, stim_ch))
|
||||
data_view = data[:, sample_start:sample_stop]
|
||||
_mult_cal_one(data_view, block, idx, cals, mult)
|
||||
|
||||
|
||||
def read_str(fid, count=1):
|
||||
"""Read string from a binary file in a python version compatible way."""
|
||||
dtype = np.dtype(f">S{count}")
|
||||
string = fid.read(dtype.itemsize)
|
||||
data = np.frombuffer(string, dtype=dtype)[0]
|
||||
bytestr = b"".join([data[0 : data.index(b"\x00") if b"\x00" in data else count]])
|
||||
|
||||
return str(bytestr.decode("ascii")) # Return native str type for Py2/3
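# For instance, reading a fixed-width, NUL-padded channel name (made-up bytes):
# >>> from io import BytesIO
# >>> read_str(BytesIO(b"MEG 0113\x00\x00\x00\x00"), count=12)  # doctest: +SKIP
# 'MEG 0113'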
|
||||
|
||||
|
||||
def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc):
|
||||
"""Initialize info['chs'] for eeg channels."""
|
||||
chs = list()
|
||||
for idx, ch_name in enumerate(ch_names):
|
||||
if ch_name in eog or idx in eog:
|
||||
coil_type = FIFF.FIFFV_COIL_NONE
|
||||
kind = FIFF.FIFFV_EOG_CH
|
||||
elif ch_name in ecg or idx in ecg:
|
||||
coil_type = FIFF.FIFFV_COIL_NONE
|
||||
kind = FIFF.FIFFV_ECG_CH
|
||||
elif ch_name in emg or idx in emg:
|
||||
coil_type = FIFF.FIFFV_COIL_NONE
|
||||
kind = FIFF.FIFFV_EMG_CH
|
||||
elif ch_name in misc or idx in misc:
|
||||
coil_type = FIFF.FIFFV_COIL_NONE
|
||||
kind = FIFF.FIFFV_MISC_CH
|
||||
else:
|
||||
coil_type = ch_coil
|
||||
kind = ch_kind
|
||||
|
||||
chan_info = {
|
||||
"cal": cals[idx],
|
||||
"logno": idx + 1,
|
||||
"scanno": idx + 1,
|
||||
"range": 1.0,
|
||||
"unit_mul": FIFF.FIFF_UNITM_NONE,
|
||||
"ch_name": ch_name,
|
||||
"unit": FIFF.FIFF_UNIT_V,
|
||||
"coord_frame": FIFF.FIFFV_COORD_HEAD,
|
||||
"coil_type": coil_type,
|
||||
"kind": kind,
|
||||
"loc": np.zeros(12),
|
||||
}
|
||||
if coil_type == FIFF.FIFFV_COIL_EEG:
|
||||
chan_info["loc"][:3] = np.nan
|
||||
chs.append(chan_info)
|
||||
return chs
|
||||
|
||||
|
||||
def _construct_bids_filename(base, ext, part_idx, validate=True):
|
||||
"""Construct a BIDS compatible filename for split files."""
|
||||
# insert index in filename
|
||||
dirname = op.dirname(base)
|
||||
base = op.basename(base)
|
||||
deconstructed_base = base.split("_")
|
||||
if len(deconstructed_base) < 2 and validate:
|
||||
raise ValueError(
|
||||
"Filename base must end with an underscore followed "
|
||||
f"by the modality (e.g., _eeg or _meg), got {base}"
|
||||
)
|
||||
suffix = deconstructed_base[-1]
|
||||
base = "_".join(deconstructed_base[:-1])
|
||||
use_fname = f"{base}_split-{part_idx + 1:02}_{suffix}{ext}"
|
||||
if dirname:
|
||||
use_fname = op.join(dirname, use_fname)
|
||||
return use_fname
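# For example (hypothetical BIDS basename), the split index is inserted just
# before the modality suffix:
# >>> _construct_bids_filename("sub-01_task-rest_eeg", ".fif", 0)  # doctest: +SKIP
# 'sub-01_task-rest_split-01_eeg.fif'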
|
||||
|
||||
|
||||
def _make_split_fnames(fname, n_splits, split_naming):
|
||||
"""Make a list of split filenames."""
|
||||
if n_splits == 1:
|
||||
return [fname]
|
||||
res = []
|
||||
base, ext = op.splitext(fname)
|
||||
for i in range(n_splits):
|
||||
if split_naming == "neuromag":
|
||||
res.append(f"{base}-{i:d}{ext}" if i else fname)
|
||||
else:
|
||||
assert split_naming == "bids"
|
||||
res.append(_construct_bids_filename(base, ext, i))
|
||||
return res
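# For example (hypothetical filename), neuromag naming keeps the first split
# unchanged and numbers the rest:
# >>> _make_split_fnames("sample_raw.fif", 3, "neuromag")  # doctest: +SKIP
# ['sample_raw.fif', 'sample_raw-1.fif', 'sample_raw-2.fif']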
|
||||
70
dist/client/mne/_fiff/what.py
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from collections import OrderedDict
|
||||
from inspect import signature
|
||||
|
||||
from ..utils import _check_fname, logger
|
||||
|
||||
|
||||
def what(fname):
|
||||
"""Try to determine the type of the FIF file.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : path-like
|
||||
The filename. Should end in ``.fif`` or ``.fif.gz``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
what : str
|
||||
The type of the file. Will be 'unknown' if it could not be determined.
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. versionadded:: 0.19
|
||||
"""
|
||||
from ..bem import read_bem_solution, read_bem_surfaces
|
||||
from ..cov import read_cov
|
||||
from ..epochs import read_epochs
|
||||
from ..event import read_events
|
||||
from ..evoked import read_evokeds
|
||||
from ..forward import read_forward_solution
|
||||
from ..io import read_raw_fif
|
||||
from ..minimum_norm import read_inverse_operator
|
||||
from ..preprocessing import read_ica
|
||||
from ..proj import read_proj
|
||||
from ..source_space import read_source_spaces
|
||||
from ..transforms import read_trans
|
||||
from .meas_info import read_fiducials
|
||||
|
||||
_check_fname(fname, overwrite="read", must_exist=True)
|
||||
checks = OrderedDict()
|
||||
checks["raw"] = read_raw_fif
|
||||
checks["ica"] = read_ica
|
||||
checks["epochs"] = read_epochs
|
||||
checks["evoked"] = read_evokeds
|
||||
checks["forward"] = read_forward_solution
|
||||
checks["inverse"] = read_inverse_operator
|
||||
checks["src"] = read_source_spaces
|
||||
checks["bem solution"] = read_bem_solution
|
||||
checks["bem surfaces"] = read_bem_surfaces
|
||||
checks["cov"] = read_cov
|
||||
checks["transform"] = read_trans
|
||||
checks["events"] = read_events
|
||||
checks["fiducials"] = read_fiducials
|
||||
checks["proj"] = read_proj
|
||||
for what, func in checks.items():
|
||||
args = signature(func).parameters
|
||||
assert "verbose" in args, func
|
||||
kwargs = dict(verbose="error")
|
||||
if "preload" in args:
|
||||
kwargs["preload"] = False
|
||||
try:
|
||||
func(fname, **kwargs)
|
||||
except Exception as exp:
|
||||
logger.debug(f"Not {what}: {exp}")
|
||||
else:
|
||||
return what
|
||||
return "unknown"
|
||||
448
dist/client/mne/_fiff/write.py
vendored
Normal file
@@ -0,0 +1,448 @@
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import datetime
|
||||
import os.path as op
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
from contextlib import contextmanager
|
||||
from gzip import GzipFile
|
||||
|
||||
import numpy as np
|
||||
from scipy.sparse import csc_array, csr_array
|
||||
|
||||
from ..utils import _file_like, _validate_type, logger
|
||||
from ..utils.numerics import _date_to_julian
|
||||
from .constants import FIFF
|
||||
|
||||
# We choose a "magic" date to store (because meas_date is obligatory)
|
||||
# to treat as meas_date=None. This one should be impossible for systems
|
||||
# to write -- the second field is microseconds, so anything >= 1e6
|
||||
# should be moved into the first field (seconds).
|
||||
DATE_NONE = (0, 2**31 - 1)
|
||||
|
||||
|
||||
def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
|
||||
"""Write data."""
|
||||
if isinstance(data, np.ndarray):
|
||||
data_size *= data.size
|
||||
|
||||
# XXX for string types the data size is used as
|
||||
# computed in ``write_string``.
|
||||
|
||||
fid.write(np.array(kind, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFFT_TYPE, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data, dtype=dtype).tobytes())
|
||||
|
||||
|
||||
def _get_split_size(split_size):
|
||||
"""Convert human-readable bytes to machine-readable bytes."""
|
||||
if isinstance(split_size, str):
|
||||
exp = dict(MB=20, GB=30).get(split_size[-2:], None)
|
||||
if exp is None:
|
||||
raise ValueError('split_size has to end with either "MB" or "GB"')
|
||||
split_size = int(float(split_size[:-2]) * 2**exp)
|
||||
|
||||
if split_size > 2147483648:
|
||||
raise ValueError("split_size cannot be larger than 2GB")
|
||||
return split_size
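# "MB" and "GB" are interpreted as binary multiples (2**20 and 2**30):
# >>> _get_split_size("10MB")  # doctest: +SKIP
# 10485760
# >>> _get_split_size("2GB")  # doctest: +SKIP
# 2147483648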
|
||||
|
||||
|
||||
_NEXT_FILE_BUFFER = 1048576 # 2 ** 20 extra cushion for last post-data tags
|
||||
|
||||
|
||||
def write_nop(fid, last=False):
|
||||
"""Write a FIFF_NOP."""
|
||||
fid.write(np.array(FIFF.FIFF_NOP, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFT_VOID, dtype=">i4").tobytes())
|
||||
fid.write(np.array(0, dtype=">i4").tobytes())
|
||||
next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ
|
||||
fid.write(np.array(next_, dtype=">i4").tobytes())
|
||||
|
||||
|
||||
INT32_MAX = 2147483647
|
||||
|
||||
|
||||
def write_int(fid, kind, data):
|
||||
"""Write a 32-bit integer tag to a fif file."""
|
||||
data_size = 4
|
||||
data = np.asarray(data)
|
||||
if data.dtype.kind not in "uib" and data.size > 0:
|
||||
raise TypeError(f"Cannot safely write data with dtype {data.dtype} as int")
|
||||
max_val = data.max() if data.size > 0 else 0
|
||||
if max_val > INT32_MAX:
|
||||
raise TypeError(
|
||||
f"Value {max_val} exceeds maximum allowed ({INT32_MAX}) for tag {kind}"
|
||||
)
|
||||
data = data.astype(">i4").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_INT, ">i4")
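# A round-trip sketch of the on-disk tag layout (kind, type, size, next, then
# the payload, all big-endian int32); FIFF_NCHAN and the value 306 are used
# only for illustration.
# >>> from io import BytesIO
# >>> buf = BytesIO()
# >>> write_int(buf, FIFF.FIFF_NCHAN, [306])
# >>> np.frombuffer(buf.getvalue(), dtype=">i4").tolist()  # doctest: +SKIP
# [200, 3, 4, 0, 306]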
|
||||
|
||||
|
||||
def write_double(fid, kind, data):
|
||||
"""Write a double-precision floating point tag to a fif file."""
|
||||
data_size = 8
|
||||
data = np.array(data, dtype=">f8").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, ">f8")
|
||||
|
||||
|
||||
def write_float(fid, kind, data):
|
||||
"""Write a single-precision floating point tag to a fif file."""
|
||||
data_size = 4
|
||||
data = np.array(data, dtype=">f4").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, ">f4")
|
||||
|
||||
|
||||
def write_dau_pack16(fid, kind, data):
|
||||
"""Write a dau_pack16 tag to a fif file."""
|
||||
data_size = 2
|
||||
data = np.array(data, dtype=">i2").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, ">i2")
|
||||
|
||||
|
||||
def write_complex64(fid, kind, data):
|
||||
"""Write a 64 bit complex floating point tag to a fif file."""
|
||||
data_size = 8
|
||||
data = np.array(data, dtype=">c8").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c8")
|
||||
|
||||
|
||||
def write_complex128(fid, kind, data):
|
||||
"""Write a 128 bit complex floating point tag to a fif file."""
|
||||
data_size = 16
|
||||
data = np.array(data, dtype=">c16").T
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c16")
|
||||
|
||||
|
||||
def write_julian(fid, kind, data):
|
||||
"""Write a Julian-formatted date to a FIF file."""
|
||||
assert isinstance(data, datetime.date), type(data)
|
||||
data_size = 4
|
||||
jd = _date_to_julian(data)
|
||||
data = np.array(jd, dtype=">i4")
|
||||
_write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, ">i4")
|
||||
|
||||
|
||||
def write_string(fid, kind, data):
|
||||
"""Write a string tag."""
|
||||
try:
|
||||
str_data = str(data).encode("latin1")
|
||||
except UnicodeEncodeError:
|
||||
str_data = str(data).encode("latin1", errors="xmlcharrefreplace")
|
||||
data_size = len(str_data) # therefore compute size here
|
||||
if data_size > 0:
|
||||
_write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, ">S")
|
||||
|
||||
|
||||
def write_name_list(fid, kind, data):
|
||||
"""Write a colon-separated list of names.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
data : list of strings
|
||||
"""
|
||||
write_string(fid, kind, ":".join(data))
|
||||
|
||||
|
||||
def write_name_list_sanitized(fid, kind, lst, name):
|
||||
"""Write a sanitized, colon-separated list of names."""
|
||||
write_string(fid, kind, _safe_name_list(lst, "write", name))
|
||||
|
||||
|
||||
def _safe_name_list(lst, operation, name):
|
||||
if operation == "write":
|
||||
assert isinstance(lst, (list, tuple, np.ndarray)), type(lst)
|
||||
if any("{COLON}" in val for val in lst):
|
||||
raise ValueError(f'The substring "{{COLON}}" in {name} is not supported.')
|
||||
return ":".join(val.replace(":", "{COLON}") for val in lst)
|
||||
else:
|
||||
# take a sanitized string and return a list of strings
|
||||
assert operation == "read"
|
||||
assert lst is None or isinstance(lst, str)
|
||||
if not lst: # None or empty string
|
||||
return []
|
||||
return [val.replace("{COLON}", ":") for val in lst.split(":")]
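# Round-trip sketch: colons inside names are escaped on write and restored on
# read.
# >>> _safe_name_list(["EEG 001", "EEG:002"], "write", "bads")  # doctest: +SKIP
# 'EEG 001:EEG{COLON}002'
# >>> _safe_name_list("EEG 001:EEG{COLON}002", "read", "bads")  # doctest: +SKIP
# ['EEG 001', 'EEG:002']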
|
||||
|
||||
|
||||
def write_float_matrix(fid, kind, mat):
|
||||
"""Write a single-precision floating-point matrix tag."""
|
||||
_write_matrix_data(fid, kind, mat, FIFF.FIFFT_FLOAT)
|
||||
|
||||
|
||||
def write_double_matrix(fid, kind, mat):
|
||||
"""Write a double-precision floating-point matrix tag."""
|
||||
_write_matrix_data(fid, kind, mat, FIFF.FIFFT_DOUBLE)
|
||||
|
||||
|
||||
def write_int_matrix(fid, kind, mat):
|
||||
"""Write integer 32 matrix tag."""
|
||||
_write_matrix_data(fid, kind, mat, FIFF.FIFFT_INT)
|
||||
|
||||
|
||||
def write_complex_float_matrix(fid, kind, mat):
|
||||
"""Write complex 64 matrix tag."""
|
||||
_write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_FLOAT)
|
||||
|
||||
|
||||
def write_complex_double_matrix(fid, kind, mat):
|
||||
"""Write complex 128 matrix tag."""
|
||||
_write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_DOUBLE)
|
||||
|
||||
|
||||
def _write_matrix_data(fid, kind, mat, data_type):
|
||||
dtype = {
|
||||
FIFF.FIFFT_FLOAT: ">f4",
|
||||
FIFF.FIFFT_DOUBLE: ">f8",
|
||||
FIFF.FIFFT_COMPLEX_FLOAT: ">c8",
|
||||
FIFF.FIFFT_COMPLEX_DOUBLE: ">c16",
|
||||
FIFF.FIFFT_INT: ">i4",
|
||||
}[data_type]
|
||||
dtype = np.dtype(dtype)
|
||||
data_size = dtype.itemsize * mat.size + 4 * (mat.ndim + 1)
|
||||
matrix_type = data_type | FIFF.FIFFT_MATRIX
|
||||
fid.write(np.array(kind, dtype=">i4").tobytes())
|
||||
fid.write(np.array(matrix_type, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
fid.write(np.array(mat, dtype=dtype).tobytes())
|
||||
dims = np.empty(mat.ndim + 1, dtype=np.int32)
|
||||
dims[: mat.ndim] = mat.shape[::-1]
|
||||
dims[-1] = mat.ndim
|
||||
fid.write(np.array(dims, dtype=">i4").tobytes())
|
||||
check_fiff_length(fid)
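# Worked size example: for a 2 x 3 float32 matrix the payload is
# 4 * 6 = 24 data bytes plus 4 * (2 + 1) = 12 bytes for the trailing dims
# block (shape written reversed, then ndim), so data_size == 36.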
|
||||
|
||||
|
||||
def get_machid():
|
||||
"""Get (mostly) unique machine ID.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ids : array (length 2, int32)
|
||||
The machine identifier used in MNE.
|
||||
"""
|
||||
mac = f"{uuid.getnode():012x}".encode() # byte conversion for Py3
|
||||
mac = re.findall(b"..", mac) # split string
|
||||
mac += [b"00", b"00"] # add two more fields
|
||||
|
||||
# Convert to integer in reverse-order (for some reason)
|
||||
from codecs import encode
|
||||
|
||||
mac = b"".join([encode(h, "hex_codec") for h in mac[::-1]])
|
||||
ids = np.flipud(np.frombuffer(mac, np.int32, count=2))
|
||||
return ids
|
||||
|
||||
|
||||
def get_new_file_id():
|
||||
"""Create a new file ID tag."""
|
||||
secs, usecs = divmod(time.time(), 1.0)
|
||||
secs, usecs = int(secs), int(usecs * 1e6)
|
||||
return {
|
||||
"machid": get_machid(),
|
||||
"version": FIFF.FIFFC_VERSION,
|
||||
"secs": secs,
|
||||
"usecs": usecs,
|
||||
}
|
||||
|
||||
|
||||
def write_id(fid, kind, id_=None):
|
||||
"""Write fiff id."""
|
||||
id_ = _generate_meas_id() if id_ is None else id_
|
||||
|
||||
data_size = 5 * 4 # The id comprises five integers
|
||||
fid.write(np.array(kind, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
|
||||
# Collect the bits together for one write
|
||||
arr = np.array(
|
||||
[id_["version"], id_["machid"][0], id_["machid"][1], id_["secs"], id_["usecs"]],
|
||||
dtype=">i4",
|
||||
)
|
||||
fid.write(arr.tobytes())
|
||||
|
||||
|
||||
def start_block(fid, kind):
|
||||
"""Write a FIFF_BLOCK_START tag."""
|
||||
write_int(fid, FIFF.FIFF_BLOCK_START, kind)
|
||||
|
||||
|
||||
def end_block(fid, kind):
|
||||
"""Write a FIFF_BLOCK_END tag."""
|
||||
write_int(fid, FIFF.FIFF_BLOCK_END, kind)
|
||||
|
||||
|
||||
def start_file(fname, id_=None):
|
||||
"""Open a fif file for writing and writes the compulsory header tags.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : path-like | fid
|
||||
The name of the file to open. It is recommended
|
||||
that the name ends with .fif or .fif.gz. Can also be an
|
||||
already opened file.
|
||||
id_ : dict | None
|
||||
ID to use for the FIFF_FILE_ID.
|
||||
"""
|
||||
if _file_like(fname):
|
||||
logger.debug(f"Writing using {type(fname)} I/O")
|
||||
fid = fname
|
||||
fid.seek(0)
|
||||
else:
|
||||
fname = str(fname)
|
||||
if op.splitext(fname)[1].lower() == ".gz":
|
||||
logger.debug("Writing using gzip")
|
||||
# defaults to compression level 9, which is barely smaller but much
|
||||
# slower. 2 offers a good compromise.
|
||||
fid = GzipFile(fname, "wb", compresslevel=2)
|
||||
else:
|
||||
logger.debug("Writing using normal I/O")
|
||||
fid = open(fname, "wb")
|
||||
# Write the compulsory items
|
||||
write_id(fid, FIFF.FIFF_FILE_ID, id_)
|
||||
write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
|
||||
write_int(fid, FIFF.FIFF_FREE_LIST, -1)
|
||||
return fid
|
||||
|
||||
|
||||
@contextmanager
|
||||
def start_and_end_file(fname, id_=None):
|
||||
"""Start and (if successfully written) close the file."""
|
||||
with start_file(fname, id_=id_) as fid:
|
||||
yield fid
|
||||
end_file(fid) # we only hit this line if the yield does not err
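# A minimal writing sketch (hypothetical path; tags chosen only for
# illustration): the context manager writes the closing NOP tag and closes
# the file only if the body completes without raising.
# >>> with start_and_end_file("empty.fif") as fid:  # doctest: +SKIP
# ...     start_block(fid, FIFF.FIFFB_MEAS)
# ...     write_int(fid, FIFF.FIFF_NCHAN, 0)
# ...     end_block(fid, FIFF.FIFFB_MEAS)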
|
||||
|
||||
|
||||
def check_fiff_length(fid, close=True):
|
||||
"""Ensure our file hasn't grown too large to work properly."""
|
||||
if fid.tell() > 2147483648: # 2 ** 31, FIFF uses signed 32-bit locations
|
||||
if close:
|
||||
fid.close()
|
||||
raise OSError(
|
||||
"FIFF file exceeded 2GB limit, please split file, reduce"
|
||||
" split_size (if possible), or save to a different "
|
||||
"format"
|
||||
)
|
||||
|
||||
|
||||
def end_file(fid):
|
||||
"""Write the closing tags to a fif file and closes the file."""
|
||||
write_nop(fid, last=True)
|
||||
check_fiff_length(fid)
|
||||
fid.close()
|
||||
|
||||
|
||||
def write_coord_trans(fid, trans):
|
||||
"""Write a coordinate transformation structure."""
|
||||
data_size = 4 * 2 * 12 + 4 * 2
|
||||
fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
fid.write(np.array(trans["from"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(trans["to"], dtype=">i4").tobytes())
|
||||
|
||||
# The transform...
|
||||
rot = trans["trans"][:3, :3]
|
||||
move = trans["trans"][:3, 3]
|
||||
fid.write(np.array(rot, dtype=">f4").tobytes())
|
||||
fid.write(np.array(move, dtype=">f4").tobytes())
|
||||
|
||||
# ...and its inverse
|
||||
trans_inv = np.linalg.inv(trans["trans"])
|
||||
rot = trans_inv[:3, :3]
|
||||
move = trans_inv[:3, 3]
|
||||
fid.write(np.array(rot, dtype=">f4").tobytes())
|
||||
fid.write(np.array(move, dtype=">f4").tobytes())
|
||||
|
||||
|
||||
def write_ch_info(fid, ch):
|
||||
"""Write a channel information record to a fif file."""
|
||||
data_size = 4 * 13 + 4 * 7 + 16
|
||||
|
||||
fid.write(np.array(FIFF.FIFF_CH_INFO, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
|
||||
# Start writing fiffChInfoRec
|
||||
fid.write(np.array(ch["scanno"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(ch["logno"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(ch["kind"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(ch["range"], dtype=">f4").tobytes())
|
||||
fid.write(np.array(ch["cal"], dtype=">f4").tobytes())
|
||||
fid.write(np.array(ch["coil_type"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(ch["loc"], dtype=">f4").tobytes()) # writing 12 values
|
||||
|
||||
# unit and unit multiplier
|
||||
fid.write(np.array(ch["unit"], dtype=">i4").tobytes())
|
||||
fid.write(np.array(ch["unit_mul"], dtype=">i4").tobytes())
|
||||
|
||||
# Finally channel name
|
||||
ch_name = ch["ch_name"][:15]
|
||||
fid.write(np.array(ch_name, dtype=">c").tobytes())
|
||||
fid.write(b"\0" * (16 - len(ch_name)))
|
||||
|
||||
|
||||
def write_dig_points(fid, dig, block=False, coord_frame=None):
|
||||
"""Write a set of digitizer data points into a fif file."""
|
||||
if dig is not None:
|
||||
data_size = 5 * 4
|
||||
if block:
|
||||
start_block(fid, FIFF.FIFFB_ISOTRAK)
|
||||
if coord_frame is not None:
|
||||
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
|
||||
for d in dig:
|
||||
fid.write(np.array(FIFF.FIFF_DIG_POINT, ">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, ">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, ">i4").tobytes())
|
||||
# Start writing fiffDigPointRec
|
||||
fid.write(np.array(d["kind"], ">i4").tobytes())
|
||||
fid.write(np.array(d["ident"], ">i4").tobytes())
|
||||
fid.write(np.array(d["r"][:3], ">f4").tobytes())
|
||||
if block:
|
||||
end_block(fid, FIFF.FIFFB_ISOTRAK)
|
||||
|
||||
|
||||
def write_float_sparse_rcs(fid, kind, mat):
|
||||
"""Write a single-precision sparse compressed row matrix tag."""
|
||||
return write_float_sparse(fid, kind, mat, fmt="csr")
|
||||
|
||||
|
||||
def write_float_sparse(fid, kind, mat, fmt="auto"):
|
||||
"""Write a single-precision floating-point sparse matrix tag."""
|
||||
if fmt == "auto":
|
||||
fmt = "csr" if isinstance(mat, csr_array) else "csc"
|
||||
need = csr_array if fmt == "csr" else csc_array
|
||||
matrix_type = getattr(FIFF, f"FIFFT_SPARSE_{fmt[-1].upper()}CS_MATRIX")
|
||||
_validate_type(mat, need, "sparse")
|
||||
matrix_type = matrix_type | FIFF.FIFFT_MATRIX | FIFF.FIFFT_FLOAT
|
||||
nnzm = mat.nnz
|
||||
nrow = mat.shape[0]
|
||||
data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4
|
||||
|
||||
fid.write(np.array(kind, dtype=">i4").tobytes())
|
||||
fid.write(np.array(matrix_type, dtype=">i4").tobytes())
|
||||
fid.write(np.array(data_size, dtype=">i4").tobytes())
|
||||
fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes())
|
||||
|
||||
fid.write(np.array(mat.data, dtype=">f4").tobytes())
|
||||
fid.write(np.array(mat.indices, dtype=">i4").tobytes())
|
||||
fid.write(np.array(mat.indptr, dtype=">i4").tobytes())
|
||||
|
||||
dims = [nnzm, mat.shape[0], mat.shape[1], 2]
|
||||
fid.write(np.array(dims, dtype=">i4").tobytes())
|
||||
check_fiff_length(fid)
|
||||
|
||||
|
||||
def _generate_meas_id():
|
||||
"""Generate a new meas_id dict."""
|
||||
id_ = dict()
|
||||
id_["version"] = FIFF.FIFFC_VERSION
|
||||
id_["machid"] = get_machid()
|
||||
id_["secs"], id_["usecs"] = DATE_NONE
|
||||
return id_
|
||||