diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000..fd85925
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,25 @@
+coverage:
+  round: up
+  precision: 2
+  status:
+    patch:
+      default:
+        # basic
+        target: 90%
+        threshold: 2%
+        base: auto
+        flags:
+          - unit
+        # advanced
+        branches:
+          - master
+          - dev
+        if_no_uploads: error
+        if_not_found: error
+        if_ci_failed: error
+        only_pulls: false
+
+# Files to ignore
+ignore:
+  - "data"
+  - "notebooks"
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..94a9d5a
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,3 @@
+[report]
+fail_under = 90
+show_missing = True
diff --git a/.deepsource.toml b/.deepsource.toml
index 93a9d6f..b4f754a 100644
--- a/.deepsource.toml
+++ b/.deepsource.toml
@@ -1,6 +1,6 @@
 version = 1
 
-test_patterns = ["test/**"]
+test_patterns = ["tests/**"]
 
 exclude_patterns = ["README.md"]
 
@@ -8,17 +8,11 @@ exclude_patterns = ["README.md"]
 name = "python"
 enabled = true
 
-  [analyzers.meta]
-    runtime_version = "3.x.x"
-
 [[analyzers]]
 name = "test-coverage"
 enabled = true
 
-[[transformers]]
-name = "isort"
-enabled = true
-
 [[transformers]]
 name = "black"
 enabled = true
+
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..ece9ae4
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,6 @@
+[flake8]
+docstring-convention = numpy
+import_order_style = smarkets
+max-line-length = 88
+extend-ignore = E203,I202,I100
+exclude = reconstructSPI/__init__.py,reconstructSPI/iterative_refinement/__init__.py,tests/__init__.py
diff --git a/.github/auto-assign.yml b/.github/auto-assign.yml
index 115af98..a0a74eb 100644
--- a/.github/auto-assign.yml
+++ b/.github/auto-assign.yml
@@ -5,10 +5,10 @@
 addReviewers: true
 addAssignees: false
 
 # A list of reviewers to be added to pull requests (GitHub user name)
-reviewers: 
+reviewers:
   - fredericpoitevin
 
-# A list of keywords to be skipped the process that add reviewers if pull requests include it 
+# A list of keywords that, when present in a pull request, skip adding reviewers
 skipKeywords:
   - wip
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..38ca19a
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,54 @@
+name: "Lint"
+
+on:
+  push:
+    branches: [master,github-actions-test,dev]
+    paths-ignore:
+      - 'README.md'
+      - '.deepsource.toml'
+      - '.gitignore'
+      - 'setup.py'
+
+
+  pull_request:
+    branches: [master,dev]
+    paths-ignore:
+      - 'README.md'
+      - '.deepsource.toml'
+      - '.gitignore'
+      - 'setup.py'
+
+
+jobs:
+  build:
+
+    runs-on: ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        python-version: [3.9]
+        test-folder: ['tests']
+      fail-fast: false
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Build using Python ${{matrix.python-version}}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{matrix.python-version}}
+      - name: install dependencies [pip]
+        run: |
+          pip install --upgrade pip setuptools wheel
+          for req in dev-requirements.txt; do
+            pip install -q -r $req
+          done
+          pip install -e .
+      - name: linting [black and isort]
+        run: |
+          black . --check
+          isort --profile black --check .
+      - name: linting [flake8]
+        run: |
+          flake8 reconstructSPI tests
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..0d86279
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,62 @@
+name: "Test"
+
+on:
+  push:
+    branches: [master,github-actions-test,dev]
+    paths-ignore:
+      - 'README.md'
+      - '.deepsource.toml'
+      - '.gitignore'
+      - 'setup.py'
+
+
+  pull_request:
+    branches: [master,dev]
+    paths-ignore:
+      - 'README.md'
+      - '.deepsource.toml'
+      - '.gitignore'
+      - 'setup.py'
+
+
+jobs:
+  build:
+
+    runs-on: ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04]
+        python-version: [3.7,3.8,3.9]
+        test-folder: ['tests']
+      fail-fast: false
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Build using Python ${{matrix.python-version}}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: cache conda
+        uses: actions/cache@v1
+        with:
+          path: $CONDA
+          key: ${{ runner.os }}-conda-${{ hashFiles('environment.yml') }}
+          restore-keys: |
+            ${{ runner.os }}-conda-
+      - name: install dependencies [conda]
+        run: |
+          # $CONDA is an environment variable pointing to the root of the miniconda directory
+          $CONDA/bin/conda env update --file environment.yml --name base
+          $CONDA/bin/pip install -e .
+      - name: unit testing [pytest]
+        env:
+          TEST_TOKEN: ${{ secrets.TEST_TOKEN }}
+        run: |
+          $CONDA/bin/pytest --cov-report term --cov-report xml:coverage.xml --cov=reconstructSPI ${{matrix.test-folder}}
+      - name: uploading code coverage [codecov]
+        if: ${{matrix.python-version == 3.7}}
+        run: |
+          bash <(curl -s https://codecov.io/bash) -c
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..e0b31c6
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,42 @@
+default_language_version:
+  python: python3
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.1.0
+    hooks:
+      - id: check-byte-order-marker
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: mixed-line-ending
+        args:
+          - --fix=no
+      - id: no-commit-to-branch
+        args:
+          - --branch=master
+      - id: check-added-large-files
+        args:
+          - --maxkb=2048
+      - id: trailing-whitespace
+  - repo: https://github.com/psf/black
+    rev: 21.8b0
+    hooks:
+      - id: black
+  - repo: https://github.com/pycqa/isort
+    rev: 5.9.3
+    hooks:
+      - id: isort
+        args: ["--profile", "black", "--filter-files"]
+  - repo: https://github.com/asottile/blacken-docs
+    rev: v1.8.0
+    hooks:
+      - id: blacken-docs
+        additional_dependencies: [black==20.8b0]
+  - repo: https://gitlab.com/pycqa/flake8
+    rev: 3.7.9
+    hooks:
+      - id: flake8
+        additional_dependencies:
+          - flake8-docstrings
+          - flake8-import-order
+        exclude: reconstructSPI/__init__.py,tests/__init__.py
\ No newline at end of file
diff --git a/dev-requirements.txt b/dev-requirements.txt
new file mode 100644
index 0000000..663cad4
--- /dev/null
+++ b/dev-requirements.txt
@@ -0,0 +1,5 @@
+black
+flake8
+flake8-docstrings
+isort
+pre-commit
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000..e52e0d7
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,18 @@
+name: reconstructSPI
+channels:
+  - conda-forge
+  - defaults
+dependencies:
+  - codecov
+  - coverage
+  - gemmi
+  - numpy
+  - numba
+  - pillow>=8.2.0
+  - pip
+  - pytest
+  - pytest-cov
+  - pytorch
+  - pip:
+      - git+https://github.com/compSPI/simSPI.git
+
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..cf93d60
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+testpaths = tests
+python_files = *.py
+python_functions = test_*
\ No newline at end of file
diff --git a/reconstructSPI/__init__.py b/reconstructSPI/__init__.py
new file mode 100644
index 0000000..a16d370
--- /dev/null
+++ b/reconstructSPI/__init__.py
@@ -0,0 +1 @@
+"""Various particle reconstruction methods."""
diff --git a/reconstructSPI/iterative_refinement/__init__.py b/reconstructSPI/iterative_refinement/__init__.py
new file mode 100644
index 0000000..5500bfe
--- /dev/null
+++ b/reconstructSPI/iterative_refinement/__init__.py
@@ -0,0 +1 @@
+"""Iterative refinement reconstruction methods."""
diff --git a/reconstructSPI/iterative_refinement/expectation_maximization.py b/reconstructSPI/iterative_refinement/expectation_maximization.py
new file mode 100644
index 0000000..14c5010
--- /dev/null
+++ b/reconstructSPI/iterative_refinement/expectation_maximization.py
@@ -0,0 +1,533 @@
+"""Iterative refinement with Bayesian expectation maximization."""
+
+import numpy as np
+from simSPI.transfer import eval_ctf
+
+
+class IterativeRefinement:
+    """Iterative refinement with max likelihood estimation.
+
+    Parameters
+    ----------
+    map_3d_init : arr
+        Initial particle map.
+        Shape (n_pix, n_pix, n_pix)
+    particles : arr
+        Particles to be reconstructed.
+        Shape (n_particles, n_pix, n_pix)
+    ctf_info : list of dicts
+        Each dict contains CTF k,v pairs per particle.
+        Shape (n_particles,)
+    max_itr : int
+        Maximum number of refinement iterations.
+
+    References
+    ----------
+    1. Nelson, P. C. (2021). Physical Models of Living Systems new
+    chapter: Single Particle Reconstruction in Cryo-electron
+    Microscopy.
+    https://repository.upenn.edu/physics_papers/656
+    2. Scheres, S. H. W. (2012). RELION: Implementation of a
+    Bayesian approach to cryo-EM structure determination.
+    Journal of Structural Biology, 180(3), 519–530.
+    http://doi.org/10.1016/j.jsb.2012.09.006
+    3. Sigworth, F. J., Doerschuk, P. C., Carazo, J.-M., & Scheres,
+    S. H. W. (2010).
+    An Introduction to Maximum-Likelihood Methods in Cryo-EM.
+    In Methods in Enzymology (1st ed., Vol. 482,
+    pp. 263–294). Elsevier Inc.
+    http://doi.org/10.1016/S0076-6879(10)82011-7
+    """
+
+    def __init__(self, map_3d_init, particles, ctf_info, max_itr=7):
+        self.map_3d_init = map_3d_init
+        self.particles = particles
+        self.ctf_info = ctf_info
+        self.max_itr = max_itr
+
+    def iterative_refinement(self, wiener_small_number=0.01, count_norm_const=1):
+        """Perform iterative refinement.
+
+        Acts in a Bayesian expectation maximization setting,
+        i.e. maximum a posteriori estimation.
+
+        Parameters
+        ----------
+        wiener_small_number : float
+            Used to tune Wiener filter.
+        count_norm_const : float
+            Used to tune normalization of slice inserting.
+
+        Returns
+        -------
+        map_3d_final : arr
+            Final updated map.
+            Shape (n_pix, n_pix, n_pix)
+        half_map_3d_final_1 : arr
+            Shape (n_pix, n_pix, n_pix)
+        half_map_3d_final_2 : arr
+            Shape (n_pix, n_pix, n_pix)
+        fsc_1d : arr
+            Final one dimensional Fourier shell correlation.
+            Shape (n_pix // 2,)
+        """
+        particles_1, particles_2 = IterativeRefinement.split_array(self.particles)
+
+        ctfs = self.build_ctf_array()
+        ctfs_1, ctfs_2 = IterativeRefinement.split_array(ctfs)
+
+        particles_f_1 = IterativeRefinement.fft_3d(particles_1)
+        particles_f_2 = IterativeRefinement.fft_3d(particles_2)
+
+        n_pix = self.map_3d_init.shape[0]
+
+        n_rotations = self.particles.shape[0]
+
+        half_map_3d_r_1, half_map_3d_r_2 = (
+            self.map_3d_init.copy(),
+            self.map_3d_init.copy(),
+        )
+
+        half_map_3d_f_1 = IterativeRefinement.fft_3d(half_map_3d_r_1)
+        half_map_3d_f_2 = IterativeRefinement.fft_3d(half_map_3d_r_2)
+
+        for _ in range(self.max_itr):
+
+            half_map_3d_f_1 = IterativeRefinement.fft_3d(half_map_3d_r_1)
+            half_map_3d_f_2 = IterativeRefinement.fft_3d(half_map_3d_r_2)
+
+            rots = IterativeRefinement.grid_SO3_uniform(n_rotations)
+
+            xy0_plane = IterativeRefinement.generate_xy_plane(n_pix)
+
+            slices_1, xyz_rotated = IterativeRefinement.generate_slices(
+                half_map_3d_f_1, xy0_plane, n_pix, rots
+            )
+
+            slices_2, xyz_rotated = IterativeRefinement.generate_slices(
+                half_map_3d_f_2, xy0_plane, n_pix, rots
+            )
+
+            map_3d_f_updated_1 = np.zeros_like(half_map_3d_f_1)
+            map_3d_f_updated_2 = np.zeros_like(half_map_3d_f_2)
+            map_3d_f_norm_1 = np.zeros_like(half_map_3d_f_1)
+            map_3d_f_norm_2 = np.zeros_like(half_map_3d_f_2)
+            counts_3d_updated_1 = np.zeros_like(half_map_3d_r_1)
+            counts_3d_updated_2 = np.zeros_like(half_map_3d_r_2)
+
+            for particle_idx in range(particles_f_1.shape[0]):
+                ctf_1 = ctfs_1[particle_idx]
+                ctf_2 = ctfs_2[particle_idx]
+
+                particle_f_deconv_1 = IterativeRefinement.apply_wiener_filter(
+                    particles_f_1[particle_idx], ctf_1, wiener_small_number
+                )
+                particle_f_deconv_2 = IterativeRefinement.apply_wiener_filter(
+                    particles_f_2[particle_idx], ctf_2, wiener_small_number
+                )
+
+                ctf_vectorized = np.vectorize(IterativeRefinement.apply_ctf_to_slice)
+
+                slices_conv_ctfs_1 = ctf_vectorized(slices_1, ctf_1)
+                slices_conv_ctfs_2 = ctf_vectorized(slices_2, ctf_2)
+
+                bayes_factors_1 = IterativeRefinement.compute_bayesian_weights(
+                    particles_f_1[particle_idx], slices_conv_ctfs_1
+                )
+                bayes_factors_2 = IterativeRefinement.compute_bayesian_weights(
+                    particles_f_2[particle_idx], slices_conv_ctfs_2
+                )
+
+                for one_slice_idx in range(bayes_factors_1.shape[0]):
+                    xyz = xyz_rotated[one_slice_idx]
+                    inserted_slice_3d_r, count_3d_r = IterativeRefinement.insert_slice(
+                        particle_f_deconv_1.real, xyz, n_pix
+                    )
+                    inserted_slice_3d_i, count_3d_i = IterativeRefinement.insert_slice(
+                        particle_f_deconv_1.imag, xyz, n_pix
+                    )
+                    map_3d_f_updated_1 += inserted_slice_3d_r + 1j * inserted_slice_3d_i
+                    counts_3d_updated_1 += count_3d_r + count_3d_i
+
+                for one_slice_idx in range(bayes_factors_2.shape[0]):
+                    xyz = xyz_rotated[one_slice_idx]
+                    inserted_slice_3d_r, count_3d_r = IterativeRefinement.insert_slice(
+                        particle_f_deconv_2.real, xyz, n_pix
+                    )
+                    inserted_slice_3d_i, count_3d_i = IterativeRefinement.insert_slice(
+                        particle_f_deconv_2.imag, xyz, n_pix
+                    )
+                    map_3d_f_updated_2 += inserted_slice_3d_r + 1j * inserted_slice_3d_i
+                    counts_3d_updated_2 += count_3d_r + count_3d_i
+
+            map_3d_f_norm_1 = IterativeRefinement.normalize_map(
+                map_3d_f_updated_1, counts_3d_updated_1, count_norm_const
+            )
+            map_3d_f_norm_2 = IterativeRefinement.normalize_map(
+                map_3d_f_updated_2, counts_3d_updated_2, count_norm_const
+            )
+
+            half_map_3d_f_1, half_map_3d_f_2 = IterativeRefinement.apply_noise_model(
+                map_3d_f_norm_1, map_3d_f_norm_2
+            )
+
+            fsc_1d = IterativeRefinement.compute_fsc(half_map_3d_f_1, half_map_3d_f_2)
+            fsc_3d = IterativeRefinement.expand_1d_to_3d(fsc_1d)
+            map_3d_f_final = (half_map_3d_f_1 + half_map_3d_f_2) / 2 * fsc_3d
+            map_3d_r_final = IterativeRefinement.ifft_3d(map_3d_f_final)
+            half_map_3d_r_1 = IterativeRefinement.ifft_3d(half_map_3d_f_1)
+            half_map_3d_r_2 = IterativeRefinement.ifft_3d(half_map_3d_f_2)
+
+        return map_3d_r_final, half_map_3d_r_1, half_map_3d_r_2, fsc_1d
+
+    @staticmethod
+    def normalize_map(map_3d, counts, norm_const):
+        """Normalize map by slice counts per voxel.
+
+        Parameters
+        ----------
+        map_3d : arr
+            Shape (n_pix, n_pix, n_pix)
+            The map to be normalized.
+        counts : arr
+            Shape (n_pix, n_pix, n_pix)
+            The number of slices that were added within each voxel.
+        norm_const : float
+            A small number used as part of the wiener-filter-like
+            normalization.
+
+        Returns
+        -------
+        norm_map : arr
+            Shape (n_pix, n_pix, n_pix)
+            Map normalized by counts.
+        """
+        return map_3d * counts / (norm_const + counts**2)
+
+    @staticmethod
+    def apply_noise_model(map_3d_f_norm_1, map_3d_f_norm_2):
+        """Apply noise model to normalized maps in Fourier space.
+
+        Parameters
+        ----------
+        map_3d_f_norm_1 : arr
+            Shape (n_pix, n_pix, n_pix)
+            Normalized Fourier space half-map 1.
+        map_3d_f_norm_2 : arr
+            Shape (n_pix, n_pix, n_pix)
+            Normalized Fourier space half-map 2.
+
+        Returns
+        -------
+        (map_3d_f_filtered_1, map_3d_f_filtered_2) : (arr, arr)
+            Shapes (n_pix, n_pix, n_pix)
+            Half-maps with FSC noise filtering applied.
+        """
+        fsc_1d = IterativeRefinement.compute_fsc(map_3d_f_norm_1, map_3d_f_norm_2)
+
+        fsc_3d = IterativeRefinement.expand_1d_to_3d(fsc_1d)
+
+        map_3d_f_filtered_1 = map_3d_f_norm_1 * fsc_3d
+        map_3d_f_filtered_2 = map_3d_f_norm_2 * fsc_3d
+
+        return (map_3d_f_filtered_1, map_3d_f_filtered_2)
+
+    @staticmethod
+    def split_array(arr):
+        """Split array into two halves along 0th axis.
+
+        Parameters
+        ----------
+        arr : arr
+            Shape (n_particles, ...)
+
+        Returns
+        -------
+        arr1 : arr
+            Shape (n_particles // 2, ...)
+        arr2 : arr
+            Shape (n_particles // 2, ...)
+        """
+        idx_half = len(arr) // 2
+        arr_1, arr_2 = arr[:idx_half], arr[idx_half:]
+
+        if len(arr_1) != len(arr_2):
+            arr_2 = arr[idx_half : 2 * idx_half]
+
+        return arr_1, arr_2
+
+    def build_ctf_array(self):
+        """Build 2D array of evaluated CTFs.
+
+        Evaluate the CTF from the parameters given for each particle.
+
+        Returns
+        -------
+        ctfs : list of arr
+            Length n_ctfs; each evaluated CTF has shape (n_pix, n_pix).
+        """
+        n_ctfs = len(self.ctf_info)
+        ctfs = []
+
+        for i in range(n_ctfs):
+            ctfs.append(eval_ctf(**self.ctf_info[i]))
+
+        return ctfs
+
+    @staticmethod
+    def grid_SO3_uniform(n_rotations):
+        """Generate uniformly distributed rotations in SO(3).
+
+        Parameters
+        ----------
+        n_rotations : int
+            Number of rotations.
+
+        Returns
+        -------
+        rots : arr
+            Array describing rotations.
+            Shape (n_rotations, 3, 3)
+        """
+        rots = np.ones((n_rotations, 3, 3))
+        return rots
+
+    @staticmethod
+    def generate_xy_plane(n_pix):
+        """Generate xy plane.
+
+        Parameters
+        ----------
+        n_pix : int
+            Number of pixels along one edge of the plane.
+
+        Returns
+        -------
+        xy_plane : arr
+            Array describing xy plane in space.
+            Shape (n_pix**2, 3)
+        """
+        # See how meshgrid and generate coordinates functions are used in
+        # https://github.com/geoffwoollard/compSPI/blob/stash_simulate/src/simulate.py#L96
+
+        xy_plane = np.ones((n_pix * n_pix, 3))
+        return xy_plane
+
+    @staticmethod
+    def generate_slices(map_3d_f, xy_plane, n_pix, rots):
+        """Generate slice coordinates by rotating xy plane.
+
+        Interpolate values from map_3d_f onto 3D coordinates.
+
+        See how scipy interpolation is used in
+        https://github.com/geoffwoollard/compSPI/blob/stash_simulate/src/simulate.py#L111
+
+        Parameters
+        ----------
+        map_3d_f : arr
+            Shape (n_pix, n_pix, n_pix)
+        xy_plane : arr
+            Array describing xy plane in space.
+            Shape (n_pix**2, 3)
+        n_pix : int
+            Number of pixels along one edge of the plane.
+        rots : arr
+            Array describing rotations.
+            Shape (n_rotations, 3, 3)
+
+        Returns
+        -------
+        slices : arr
+            Slice of map_3d_f. Corresponds to Fourier transform
+            of projection of rotated map_3d_f.
+            Shape (n_rotations, n_pix, n_pix)
+        xyz_rotated : arr
+            Rotated xy plane.
+            Shape (n_pix**2, 3)
+        """
+        n_rotations = rots.shape[0]
+        # Placeholder: interpolate values from map_3d_f at the rotated coordinates.
+        map_3d_f = np.ones_like(map_3d_f)
+        xyz_rotated = np.ones_like(xy_plane)
+
+        size = n_rotations * n_pix**2
+        slices = np.random.normal(size=size)
+        slices = slices.reshape((n_rotations, n_pix, n_pix))
+        return slices, xyz_rotated
+
+    @staticmethod
+    def apply_ctf_to_slice(particle_slice, ctf):
+        """Apply CTF to projected slice by convolution.
+
+        Parameters
+        ----------
+        particle_slice : arr
+            Slice of map_3d_f. Corresponds to Fourier transform
+            of projection of rotated map_3d_r.
+            Shape (n_pix, n_pix)
+        ctf : arr
+            CTF evaluated for the particle.
+            Shape (n_pix, n_pix)
+
+        Returns
+        -------
+        projection_f_conv_ctf : arr
+            Slice multiplied by the CTF in Fourier space.
+            Shape (n_pix, n_pix)
+        """
+        # Convolution in real space is multiplication in Fourier space.
+        projection_f_conv_ctf = ctf * particle_slice
+        return projection_f_conv_ctf
+
+    @staticmethod
+    def compute_bayesian_weights(particle, slices):
+        """Compute Bayesian weights of particle to slice.
+
+        Use Gaussian white noise model.
+
+        Parameters
+        ----------
+        particle : arr
+            Shape (n_pix, n_pix)
+        slices : complex64 arr
+            Shape (n_slices, n_pix, n_pix)
+
+        Returns
+        -------
+        bayesian_weights : float64 arr
+            Shape (n_slices,)
+        """
+        n_slices = slices.shape[0]
+        particle = np.ones_like(particle)
+        bayes_factors = np.random.normal(size=n_slices)
+        return bayes_factors
+
+    @staticmethod
+    def apply_wiener_filter(projection, ctf, small_number):
+        """Apply Wiener filter to particle projection.
+
+        Parameters
+        ----------
+        projection : arr
+            Shape (n_pix, n_pix)
+        ctf : arr
+            Shape (n_pix, n_pix)
+        small_number : float
+            Used for tuning Wiener filter.
+
+        Returns
+        -------
+        projection_wfilter_f : arr
+            Shape (n_pix, n_pix) the filtered projection.
+        """
+        wfilter = ctf / (ctf * ctf + small_number)
+        projection_wfilter_f = projection * wfilter
+        return projection_wfilter_f
+
+    @staticmethod
+    def insert_slice(slice_real, xyz, n_pix):
+        """Rotate slice and interpolate onto a 3D grid.
+
+        Parameters
+        ----------
+        slice_real : float64 arr
+            Shape (n_pix, n_pix) the slice of interest.
+        xyz : arr
+            Shape (n_pix**2, 3) plane corresponding to slice rotation.
+        n_pix : int
+            Number of pixels.
+
+        Returns
+        -------
+        inserted_slice_3d : float64 arr
+            Rotated slice in 3D voxel array.
+            Shape (n_pix, n_pix, n_pix)
+        count_3d : arr
+            Voxel array to count slice presence: 1 if slice present,
+            otherwise 0.
+            Shape (n_pix, n_pix, n_pix)
+        """
+        shape = xyz.shape[0]
+        count_3d = np.ones((n_pix, n_pix, n_pix))
+        count_3d[0, 0, 0] *= shape
+        inserted_slice_3d = np.ones((n_pix, n_pix, n_pix))
+        return inserted_slice_3d, count_3d
+
+    @staticmethod
+    def compute_fsc(map_3d_f_1, map_3d_f_2):
+        """Compute the Fourier shell correlation.
+
+        Estimate noise from half maps.
+
+        Parameters
+        ----------
+        map_3d_f_1 : arr
+            Shape (n_pix, n_pix, n_pix)
+        map_3d_f_2 : arr
+            Shape (n_pix, n_pix, n_pix)
+
+        Returns
+        -------
+        noise_estimates : arr
+            Noise estimates from half maps.
+            Shape (n_pix // 2,)
+        """
+        # Write fast vectorized fsc from code snippets in
+        # https://github.com/geoffwoollard/learn_cryoem_math/blob/master/nb/fsc.ipynb
+        # https://github.com/geoffwoollard/learn_cryoem_math/blob/master/nb/mFSC.ipynb
+        # https://github.com/geoffwoollard/learn_cryoem_math/blob/master/nb/guinier_fsc_sharpen.ipynb
+        n_pix_1 = map_3d_f_1.shape[0]
+        n_pix_2 = map_3d_f_2.shape[0]
+        fsc_1d_1 = np.ones(n_pix_1 // 2)
+        fsc_1d_2 = np.ones(n_pix_2 // 2)
+        noise_estimates = fsc_1d_1 * fsc_1d_2
+        return noise_estimates
+
+    @staticmethod
+    def expand_1d_to_3d(arr_1d):
+        """Expand 1D array data into spherical shells.
+
+        Parameters
+        ----------
+        arr_1d : arr
+            Shape (n_pix // 2,)
+
+        Returns
+        -------
+        arr_3d : arr
+            Shape (n_pix, n_pix, n_pix)
+            1D array data expanded onto spherical shells.
+        """
+        n_pix = arr_1d.shape[0] * 2
+        arr_3d = np.ones((n_pix, n_pix, n_pix))
+        # Placeholder: map arr_1d (e.g. fsc_1d) onto 3D spherical shells.
+        return arr_3d
+
+    @staticmethod
+    def fft_3d(array):
+        """3D Fast Fourier Transform.
+
+        Parameters
+        ----------
+        array : arr
+            Input array.
+            Shape (n_pix, n_pix, n_pix)
+
+        Returns
+        -------
+        fft_array : arr
+            Fourier transform of array.
+            Shape (n_pix, n_pix, n_pix)
+        """
+        return np.zeros(array.shape, dtype=np.cdouble)
+
+    @staticmethod
+    def ifft_3d(fft_array):
+        """3D Inverse Fast Fourier Transform.
+
+        Parameters
+        ----------
+        fft_array : arr
+            Fourier transform of array.
+            Shape (n_pix, n_pix, n_pix)
+
+        Returns
+        -------
+        array : arr
+            Original array.
+            Shape (n_pix, n_pix, n_pix)
+        """
+        return np.zeros(fft_array.shape)
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..9631ff3
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,23 @@
+"""Create instructions to build the reconstructSPI package."""
+
+import setuptools
+
+requirements = []
+
+setuptools.setup(
+    name="reconstructSPI",
+    maintainer="Frederic Poitevin",
+    version="0.0.1",
+    maintainer_email="frederic.poitevin@stanford.edu",
+    description="Reconstruction methods and tools for SPI",
+    long_description=open("README.md", encoding="utf8").read(),
+    long_description_content_type="text/markdown",
+    url="https://github.com/compSPI/reconstructSPI.git",
+    packages=setuptools.find_packages(),
+    install_requires=requirements,
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "Operating System :: OS Independent",
+    ],
+    zip_safe=False,
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_expectation_maximization.py b/tests/test_expectation_maximization.py
new file mode 100644
index 0000000..37ee222
--- /dev/null
+++ b/tests/test_expectation_maximization.py
@@ -0,0 +1,179 @@
+"""Test Bayesian expectation maximization process."""
+
+import numpy as np
+import pytest
+
+from reconstructSPI.iterative_refinement import expectation_maximization as em
+
+
+@pytest.fixture
+def n_pix():
+    """Get test pixel count for consistency."""
+    return 2
+
+
+@pytest.fixture
+def n_particles():
+    """Get test particle count for consistency."""
+    return 2
+
+
+@pytest.fixture
+def test_ir(n_pix, n_particles):
+    """Instantiate IterativeRefinement class for testing."""
+    ex_ctf = {
+        "s": np.ones((n_pix, n_pix)),
+        "a": np.ones((n_pix, n_pix)),
+        "def1": 1.0,
+        "def2": 1.0,
+        "angast": 0.1,
+        "kv": 0.1,
+        "cs": 0.1,
+        "bf": 0.1,
+        "lp": 0.1,
+    }
+    map_3d = np.zeros((n_pix, n_pix, n_pix))
+    particles = np.zeros((n_particles, n_pix, n_pix))
+    ctf_info = [
+        ex_ctf,
+    ] * n_particles
+    itr = 2
+    ir = em.IterativeRefinement(map_3d, particles, ctf_info, itr)
+    return ir
+
+
+def test_split_array(test_ir, n_particles):
+    """Test splitting of array in two halves."""
+    arr = np.zeros(n_particles)
+    arr1, arr2 = test_ir.split_array(arr)
+    assert arr1.shape == (n_particles // 2,)
+    assert arr2.shape == (n_particles // 2,)
+
+    arr = np.zeros(n_particles + 1)
+    arr1, arr2 = test_ir.split_array(arr)
+    assert arr1.shape == ((n_particles + 1) // 2,)
+    assert arr2.shape == ((n_particles + 1) // 2,)
+
+    arr = ["a", "b"]
+    arr1, arr2 = test_ir.split_array(arr)
+    assert len(arr1) == 1
+    assert len(arr2) == 1
+
+
+def test_fft_3d(test_ir, n_pix):
+    """Test 3D Fourier transform."""
+    arr = np.zeros((n_pix, n_pix, n_pix))
+    fft_arr = test_ir.fft_3d(arr)
+    assert fft_arr.shape == arr.shape
+
+
+def test_ifft_3d(test_ir, n_pix):
+    """Test 3D inverse Fourier transform."""
+    fft_arr = np.zeros((n_pix, n_pix, n_pix))
+    arr = test_ir.ifft_3d(fft_arr)
+    assert fft_arr.shape == arr.shape
+
+
+def test_build_ctf_array(test_ir, n_particles, n_pix):
+    """Test building arbitrary CTF array."""
+    ctfs = test_ir.build_ctf_array()
+    assert len(ctfs) == n_particles
+    assert ctfs[0].shape == (n_pix, n_pix)
+
+
+def test_grid_SO3_uniform(test_ir, n_particles):
+    """Test generation of rotations in SO(3)."""
+    rots = test_ir.grid_SO3_uniform(n_particles)
+    assert rots.shape == (n_particles, 3, 3)
+
+
+def test_generate_xy_plane(test_ir, n_pix):
+    """Test generation of xy plane."""
+    xy_plane = test_ir.generate_xy_plane(n_pix)
+    assert xy_plane.shape == (n_pix**2, 3)
+
+
+def test_generate_slices(test_ir, n_particles, n_pix):
+    """Test generation of slices."""
+    map_3d = np.ones((n_pix, n_pix, n_pix))
+    rots = test_ir.grid_SO3_uniform(n_particles)
+    xy_plane = test_ir.generate_xy_plane(n_pix)
+
+    slices, xyz_rotated = test_ir.generate_slices(map_3d, xy_plane, n_pix, rots)
+
+    assert xy_plane.shape == (n_pix**2, 3)
+    assert slices.shape == (n_particles, n_pix, n_pix)
+    assert xyz_rotated.shape == (n_pix**2, 3)
+
+
+def test_apply_ctf_to_slice(test_ir, n_pix):
+    """Test convolution of particle slice with CTF."""
+    particle_slice = np.ones((n_pix, n_pix))
+    ctf = np.ones((n_pix, n_pix))
+    convolved = test_ir.apply_ctf_to_slice(particle_slice, ctf)
+
+    assert convolved.shape == (n_pix, n_pix)
+
+
+def test_compute_bayesian_weights(test_ir, n_particles, n_pix):
+    """Test computation of Bayesian weights.
+
+    For use under Gaussian white noise model.
+ """ + particle = np.ones((n_pix, n_pix)) + slices = np.ones((n_particles, n_pix, n_pix)) + bayesian_weights = test_ir.compute_bayesian_weights(particle, slices) + + assert bayesian_weights.shape == (n_particles,) + + +def test_apply_wiener_filter(test_ir, n_pix): + """Test application of Wiener filter to particle projection.""" + projection = np.ones((n_pix, n_pix)) + ctf = np.zeros((n_pix, n_pix)) + small_number = 0.01 + + projection_wfilter_f = test_ir.apply_wiener_filter(projection, ctf, small_number) + assert projection_wfilter_f.shape == (n_pix, n_pix) + + +def test_insert_slice(test_ir, n_pix): + """Test insertion of particle slice.""" + particle_slice = np.ones((n_pix, n_pix)) + xyz = test_ir.generate_xy_plane(n_pix) + + inserted, count = test_ir.insert_slice(particle_slice, xyz, n_pix) + assert inserted.shape == (n_pix, n_pix, n_pix) + assert count.shape == (n_pix, n_pix, n_pix) + + +def test_compute_fsc(test_ir, n_pix): + """Test computation of FSC.""" + map_1 = np.ones((n_pix, n_pix, n_pix)) + map_2 = np.ones((n_pix, n_pix, n_pix)) + + fsc_1 = test_ir.compute_fsc(map_1, map_2) + assert fsc_1.shape == (n_pix // 2,) + + +def test_expand_1d_to_3d(test_ir, n_pix): + """Test expansion of 1D array into spherical shell.""" + arr1d = np.ones(n_pix // 2) + spherical = test_ir.expand_1d_to_3d(arr1d) + + assert spherical.shape == (n_pix, n_pix, n_pix) + + +def test_iterative_refinement(test_ir, n_pix): + """Test complete iterative refinement algorithm.""" + ( + map_3d_r_final, + half_map_3d_r_1, + half_map_3d_r_2, + fsc_1d, + ) = test_ir.iterative_refinement() + + assert map_3d_r_final.shape == (n_pix, n_pix, n_pix) + assert half_map_3d_r_1.shape == (n_pix, n_pix, n_pix) + assert half_map_3d_r_2.shape == (n_pix, n_pix, n_pix) + assert fsc_1d.shape == (n_pix // 2,)