Unit Tests for calculate_MDI
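This notebook exercises eco.calculate_MDI on synthetic spatial data for two samples, checking that the result is a DataFrame indexed by sample with one column per scale plus a 'Slope' column, under both random_patch=False and random_patch=True.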

[1]:
import unittest
import numpy as np
import pandas as pd
import anndata as ad
from unittest.mock import patch
import sys

# Make the repository root importable so that the mesa package resolves from the source tree
sys.path.append('../../../')

from mesa import ecospatial as eco
/opt/miniconda3/envs/mesa/lib/python3.11/site-packages/geopandas/_compat.py:106: UserWarning: The Shapely GEOS version (3.8.0-CAPI-1.13.1) is incompatible with the GEOS version PyGEOS was compiled with (3.10.4-CAPI-1.16.2). Conversions between both will be slow.
  warnings.warn(
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
/opt/miniconda3/envs/mesa/lib/python3.11/site-packages/spaghetti/network.py:41: FutureWarning: The next major release of pysal/spaghetti (2.0.0) will drop support for all ``libpysal.cg`` geometries. This change is a first step in refactoring ``spaghetti`` that is expected to result in dramatically reduced runtimes for network instantiation and operations. Users currently requiring network and point pattern input as ``libpysal.cg`` geometries should prepare for this simply by converting to ``shapely`` geometries.
  warnings.warn(dep_msg, FutureWarning, stacklevel=1)
[2]:
class TestCalculateMDI(unittest.TestCase):
    def setUp(self):
        # Set up test data
        self.spatial_data = pd.DataFrame({
            'x': 1000 * np.random.rand(5000),
            'y': 1000 * np.random.rand(5000),
            'library_key': ['sample_1'] * 2500 + ['sample_2'] * 2500,
            'cluster_key': np.random.randint(0, 10, size=5000)
        })
        self.scales = [1., 2., 4., 8.]
        self.library_key = 'library_key'
        self.library_id = ['sample_1', 'sample_2']
        self.spatial_key = ['x', 'y']
        self.cluster_key = 'cluster_key'
        self.expected_index = self.library_id
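        # One expected column per scale, plus the 'Slope' column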
        self.expected_columns = [scale for scale in self.scales] + ['Slope']

    def test_calculate_MDI_with_random_patch_false(self):
        # Call the function under test with random_patch=False
        result = eco.calculate_MDI(
            spatial_data=self.spatial_data,
            scales=self.scales,
            library_key=self.library_key,
            library_id=self.library_id,
            spatial_key=self.spatial_key,
            cluster_key=self.cluster_key,
            random_patch=False,
            plotfigs=False,
            savefigs=False,
            patch_kwargs={},
            other_kwargs={}
        )
        # Check that result is a DataFrame
        self.assertIsInstance(result, pd.DataFrame)

        # Check that the DataFrame has the expected index and columns
        self.assertListEqual(list(result.columns), self.expected_columns)
        self.assertListEqual(list(result.index), self.expected_index)

        # Verify that 'Slope' column exists and is numeric
        self.assertIn('Slope', result.columns)
        self.assertTrue(np.issubdtype(result['Slope'].dtype, np.number))

    def test_calculate_MDI_with_random_patch_true(self):
        # Call the function under test with random_patch=True
        result_random = eco.calculate_MDI(
            spatial_data=self.spatial_data,
            scales=self.scales,
            library_key=self.library_key,
            library_id=self.library_id,
            spatial_key=self.spatial_key,
            cluster_key=self.cluster_key,
            random_patch=True,
            plotfigs=False,
            savefigs=False,
            patch_kwargs={},
            other_kwargs={}
        )

        # Check the results as before
        self.assertIsInstance(result_random, pd.DataFrame)
        self.assertListEqual(list(result_random.columns), self.expected_columns)
        self.assertListEqual(list(result_random.index), self.expected_index)

        # Verify that 'Slope' column exists and is numeric
        self.assertIn('Slope', result_random.columns)
        self.assertTrue(np.issubdtype(result_random['Slope'].dtype, np.number))
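The synthetic coordinates and cluster labels above come from the unseeded global NumPy RNG, so the diversity values printed when the suite runs (see the output below) differ slightly between executions. A minimal sketch of a deterministic variant, assuming the global RNG also drives any internal randomness such as the random patch offsets (this subclass is not defined in this session, so it is not collected by the runner below):

class TestCalculateMDISeeded(TestCalculateMDI):
    def setUp(self):
        # Fix the global NumPy RNG before the synthetic data is generated;
        # the seed value 0 is arbitrary.
        np.random.seed(0)
        super().setUp()

Because it inherits both test methods, defining this subclass before the runner cell would add two more, now reproducible, test runs.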
[3]:
# Run the tests in the notebook
unittest.main(argv=['first-arg-is-ignored'], exit=False)
Processing region: sample_1 at scale 1.0
0.000 per cent patches are empty
sample_1 at scale 1.0 has 0 patches with zero diveristy
sample_1 at scale 1.0 diversity is 3.320548430536853
Processing region: sample_2 at scale 1.0
0.000 per cent patches are empty
sample_2 at scale 1.0 has 0 patches with zero diveristy
sample_2 at scale 1.0 diversity is 3.318675527712075
Processing region: sample_1 at scale 2.0
0.000 per cent patches are empty
sample_1 at scale 2.0 has 0 patches with zero diveristy
sample_1 at scale 2.0 diversity is 3.3157615698139944
Processing region: sample_2 at scale 2.0
0.000 per cent patches are empty
sample_2 at scale 2.0 has 0 patches with zero diveristy
sample_2 at scale 2.0 diversity is 3.3098586262794134
Processing region: sample_1 at scale 4.0
0.000 per cent patches are empty
sample_1 at scale 4.0 has 0 patches with zero diveristy
sample_1 at scale 4.0 diversity is 3.2738724660974885
Processing region: sample_2 at scale 4.0
0.000 per cent patches are empty
sample_2 at scale 4.0 has 0 patches with zero diveristy
sample_2 at scale 4.0 diversity is 3.2712741280351403
Processing region: sample_1 at scale 8.0
0.000 per cent patches are empty
sample_1 at scale 8.0 has 0 patches with zero diveristy
sample_1 at scale 8.0 diversity is 3.1357805949294244
Processing region: sample_2 at scale 8.0
0.000 per cent patches are empty
sample_2 at scale 8.0 has 0 patches with zero diveristy
sample_2 at scale 8.0 diversity is 3.136249663683922
.
[0.5, 0.5571428571428572, 0.6714285714285715, 0.9]
Processing region: sample_1 at scale 1.0
0.000 per cent patches are empty
sample_1 at scale 1.0 has 0 patches with zero diveristy
sample_1 at scale 1.0 diversity is 3.319397130221288
Processing region: sample_2 at scale 1.0
0.000 per cent patches are empty
sample_2 at scale 1.0 has 0 patches with zero diveristy
sample_2 at scale 1.0 diversity is 3.3198048602087438
Processing region: sample_1 at scale 2.0
0.000 per cent patches are empty
sample_1 at scale 2.0 has 0 patches with zero diveristy
sample_1 at scale 2.0 diversity is 3.311824991920736
Processing region: sample_2 at scale 2.0
0.000 per cent patches are empty
sample_2 at scale 2.0 has 0 patches with zero diveristy
sample_2 at scale 2.0 diversity is 3.3127017488287462
Processing region: sample_1 at scale 4.0
0.000 per cent patches are empty
sample_1 at scale 4.0 has 0 patches with zero diveristy
sample_1 at scale 4.0 diversity is 3.2742115498481206
Processing region: sample_2 at scale 4.0
0.000 per cent patches are empty
sample_2 at scale 4.0 has 0 patches with zero diveristy
sample_2 at scale 4.0 diversity is 3.280909902805943
Processing region: sample_1 at scale 8.0
0.000 per cent patches are empty
sample_1 at scale 8.0 has 0 patches with zero diveristy
sample_1 at scale 8.0 diversity is 3.1625338119527555
Processing region: sample_2 at scale 8.0
0.000 per cent patches are empty
sample_2 at scale 8.0 has 0 patches with zero diveristy
sample_2 at scale 8.0 diversity is 3.153896959644078
.
----------------------------------------------------------------------
Ran 2 tests in 1.135s

OK
[3]:
<unittest.main.TestProgram at 0x150b6c9d0>
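When iterating on a single behaviour, rerunning the whole suite is unnecessary; a small sketch that builds a one-test suite from the class defined earlier and runs it with a verbose text runner:

# Run only the random_patch=False check, reusing the class defined above
suite = unittest.TestSuite()
suite.addTest(TestCalculateMDI('test_calculate_MDI_with_random_patch_false'))
unittest.TextTestRunner(verbosity=2).run(suite)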