In [ ]:
import scanpy as sc

from continuousvi.trajectory import ContinuousTrajectory, ContinuousVI, TrainedContinuousVI
In [ ]:
INPUTS = ["./data/trajectory/GSE123335/adata.h5ad", "./data/trajectory/GSE161690/adata.h5ad"]
In [ ]:
adata = sc.concat([sc.read_h5ad(f) for f in INPUTS])
In [ ]:
adata.obs
Out[ ]:
project_id time
p0-WT2_AAAACATGAACC GSE123335 P0
p0-WT2_AAAAGACACCAC GSE123335 P0
p0-WT2_AAAAGGCTCGTG GSE123335 P0
p0-WT2_AAAATACGCAAG GSE123335 P0
p0-WT2_AAACACCGGATA GSE123335 P0
... ... ...
E18_2_CGCGTTAGAGGC GSE161690 E18
E18_2_ACCCTGATCTGG GSE161690 E18
E18_2_TCGCCTGTGGAC GSE161690 E18
E18_2_GAGCTTCTCCGC GSE161690 E18
E18_2_ATCGTGGCGTTC GSE161690 E18

29018 rows × 2 columns

In [ ]:
import pandas as pd

adata.obs = pd.read_excel("./data/trajectory/cell_info.xlsx")
In [ ]:
adata.obs
Out[ ]:
index project_id time converted_time _scvi_batch _scvi_labels leiden annotation
0 p0-WT2_AAAACATGAACC GSE123335 P0 19 0 0 15 OPC
1 p0-WT2_AAAAGACACCAC GSE123335 P0 19 0 0 11 Other
2 p0-WT2_AAAAGGCTCGTG GSE123335 P0 19 0 0 16 EC
3 p0-WT2_AAAATACGCAAG GSE123335 P0 19 0 0 4 IN
4 p0-WT2_AAACACCGGATA GSE123335 P0 19 0 0 4 IN
... ... ... ... ... ... ... ... ...
29013 E18_2_CGCGTTAGAGGC GSE161690 E18 18 1 0 2 Other
29014 E18_2_ACCCTGATCTGG GSE161690 E18 18 1 0 9 oRG
29015 E18_2_TCGCCTGTGGAC GSE161690 E18 18 1 0 2 Other
29016 E18_2_GAGCTTCTCCGC GSE161690 E18 18 1 0 7 Other
29017 E18_2_ATCGTGGCGTTC GSE161690 E18 18 1 0 2 Other

29018 rows × 8 columns

In [ ]:
TRAGET_CELLTYPES = ["NSC", "IPC", "EX"]
adata_sub = adata[adata.obs["annotation"].isin(TRAGET_CELLTYPES)].copy()
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/anndata/_core/aligned_df.py:68: ImplicitModificationWarning: Transforming to str index.
  warnings.warn("Transforming to str index.", ImplicitModificationWarning)
In [ ]:
trained = ContinuousTrajectory(adata_sub).train(batch_key="project_id", label_key="annotation", continuous_key=None, n_train=1)
Epoch 91/800:  11%|█▏        | 90/800 [14:46<1:58:18, 10.00s/it, v_num=1, train_loss_step=4.76e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 92/800:  11%|█▏        | 91/800 [14:56<1:57:48,  9.97s/it, v_num=1, train_loss_step=4.6e+3, train_loss_epoch=4.92e+3] 
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 93/800:  12%|█▏        | 92/800 [15:06<1:57:36,  9.97s/it, v_num=1, train_loss_step=4.79e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 94/800:  12%|█▏        | 93/800 [15:16<1:57:53, 10.01s/it, v_num=1, train_loss_step=4.71e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 95/800:  12%|█▏        | 94/800 [15:26<1:57:57, 10.02s/it, v_num=1, train_loss_step=5.05e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 96/800:  12%|█▏        | 95/800 [15:36<1:57:40, 10.02s/it, v_num=1, train_loss_step=5.13e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 97/800:  12%|█▏        | 96/800 [15:46<1:57:53, 10.05s/it, v_num=1, train_loss_step=5.38e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 98/800:  12%|█▏        | 97/800 [15:56<1:57:14, 10.01s/it, v_num=1, train_loss_step=4.57e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 99/800:  12%|█▏        | 98/800 [16:06<1:57:01, 10.00s/it, v_num=1, train_loss_step=5.02e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 100/800:  12%|█▏        | 99/800 [16:16<1:56:50, 10.00s/it, v_num=1, train_loss_step=4.61e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 101/800:  12%|█▎        | 100/800 [16:26<1:56:50, 10.02s/it, v_num=1, train_loss_step=4.77e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 102/800:  13%|█▎        | 101/800 [16:36<1:57:19, 10.07s/it, v_num=1, train_loss_step=5.29e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 103/800:  13%|█▎        | 102/800 [16:46<1:57:04, 10.06s/it, v_num=1, train_loss_step=5e+3, train_loss_epoch=4.92e+3]   
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 104/800:  13%|█▎        | 103/800 [16:56<1:57:49, 10.14s/it, v_num=1, train_loss_step=4.92e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 105/800:  13%|█▎        | 104/800 [17:07<1:58:15, 10.19s/it, v_num=1, train_loss_step=5.03e+3, train_loss_epoch=4.92e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 106/800:  13%|█▎        | 105/800 [17:17<1:57:06, 10.11s/it, v_num=1, train_loss_step=5.04e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 107/800:  13%|█▎        | 106/800 [17:27<1:56:21, 10.06s/it, v_num=1, train_loss_step=4.79e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 108/800:  13%|█▎        | 107/800 [17:37<1:55:47, 10.03s/it, v_num=1, train_loss_step=4.55e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 109/800:  14%|█▎        | 108/800 [17:47<1:55:20, 10.00s/it, v_num=1, train_loss_step=5.03e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 110/800:  14%|█▎        | 109/800 [17:57<1:57:52, 10.23s/it, v_num=1, train_loss_step=5.29e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 111/800:  14%|█▍        | 110/800 [18:07<1:57:24, 10.21s/it, v_num=1, train_loss_step=4.68e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 112/800:  14%|█▍        | 111/800 [18:18<1:57:10, 10.20s/it, v_num=1, train_loss_step=4.85e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 113/800:  14%|█▍        | 112/800 [18:28<1:56:49, 10.19s/it, v_num=1, train_loss_step=4.84e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 114/800:  14%|█▍        | 113/800 [18:38<1:56:34, 10.18s/it, v_num=1, train_loss_step=5.08e+3, train_loss_epoch=4.91e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Epoch 114/800:  14%|█▍        | 114/800 [18:48<1:53:11,  9.90s/it, v_num=1, train_loss_step=4.92e+3, train_loss_epoch=4.91e+3]
                                                                              
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5055.361. Signaling Trainer to stop.

WARNING: Package 'fa2-modified' is not installed, falling back to layout 'fr'.To use the faster and better ForceAtlas2 layout, install package 'fa2-modified' (`pip install fa2-modified`).
/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/trajectory.py:86: FutureWarning: In the future, the default backend for leiden will be igraph instead of leidenalg.

 To achieve the future defaults please pass: flavor="igraph" and n_iterations=2.  directed must also be False to work with igraph's implementation.
  sc.tl.leiden(
In [ ]:
trained.trainedVI.save("./dist/trajectory/models", overwrite=True)
Saving trained model.: 100%|██████████| 1/1 [00:00<00:00,  1.04it/s]
Out[ ]:
<continuousvi.continuousVI.TrainedContinuousVI at 0x7fc1f438a0e0>
In [ ]:
trained = ContinuousTrajectory(adata_sub)
In [ ]:
trained.trainedVI = TrainedContinuousVI(adata=adata_sub, batch_key="project_id", label_key="annotation", continuous_key=None, trained_model_path="./dist/trajectory/models")
Loading pre-trained models:   0%|          | 0/1 [00:00<?, ?it/s]
INFO     File dist/trajectory/models/model_0/model.pt already downloaded                                           
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/torch/cuda/__init__.py:734: UserWarning: Can't initialize NVML
  warnings.warn("Can't initialize NVML")
Loading pre-trained models: 100%|██████████| 1/1 [00:00<00:00,  5.50it/s]
In [2]:
trained.trainedVI.calc_embeddings(resolution=0.3)
/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/continuousVI.py:165: FutureWarning: In the future, the default backend for leiden will be igraph instead of leidenalg.

 To achieve the future defaults please pass: flavor="igraph" and n_iterations=2.  directed must also be False to work with igraph's implementation.
  sc.tl.leiden(self.adata, key_added=KEY_CLUSTER, resolution=resolution, directed=False)
Out[2]:
<continuousvi.continuousVI.TrainedContinuousVI at 0x724c7d7cf190>
In [3]:
trained.trainedVI.embeddings.umap(color_by=["Sox2", "annotation", "clusters"])
Sampling expression: 100%|██████████| 1/1 [00:24<00:00, 24.55s/it]
Out[3]:
<continuousvi.continuousVI.TrainedContinuousVI.Plots at 0x724c7d0f4ca0>
In [4]:
lineages: dict[str, list[int]] = {
    "NSC --> IPC": [1, 3],
    "IPC --> EX": [0, 2],
}
trained.adata = trained.trainedVI.adata.copy()
In [ ]:
trained = trained.define_lineages(lineages)
In [6]:
trained = trained.calculate_pseudotime()
Calculating the pseudotime.:   0%|          | 0/2 [00:00<?, ?it/s]
WARNING: Trying to run `tl.dpt` without prior call of `tl.diffmap`. Falling back to `tl.diffmap` with default parameters.
/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/trajectory.py:170: ImplicitModificationWarning: Trying to modify attribute `._uns` of view, initializing view as actual.
  _adata_sub.uns["iroot"] = ContinuousTrajectory._calc_iroot_idx(
Calculating the pseudotime.:  50%|█████     | 1/2 [00:00<00:00,  6.98it/s]/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/trajectory.py:170: ImplicitModificationWarning: Trying to modify attribute `._uns` of view, initializing view as actual.
  _adata_sub.uns["iroot"] = ContinuousTrajectory._calc_iroot_idx(
WARNING: Trying to run `tl.dpt` without prior call of `tl.diffmap`. Falling back to `tl.diffmap` with default parameters.
Calculating the pseudotime.: 100%|██████████| 2/2 [00:00<00:00,  7.43it/s]
Calculating the pseudotime.: 100%|██████████| 2/2 [00:00<00:00,  7.43it/s]
In [7]:
import warnings

warnings.filterwarnings("ignore", message="Can't initialize NVML")

regression_result = trained.regression_lineage(batch_key="project_id", label_key="annotation", n_train=5)
Calculating the regression.:   0%|          | 0/2 [00:00<?, ?it/s]/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/trajectory.py:221: ImplicitModificationWarning: Trying to modify attribute `.obs` of view, initializing view as actual.
  _adata_sub.obs["pseudotime"] = self.pseudotimes[name]["pseudotime"]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Epoch 130/800:  16%|█▋        | 130/800 [10:46<55:33,  4.98s/it, v_num=1, train_loss_step=4.36e+3, train_loss_epoch=4.85e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5128.705. Signaling Trainer to stop.
Epoch 142/800:  18%|█▊        | 142/800 [11:29<53:15,  4.86s/it, v_num=1, train_loss_step=5.16e+3, train_loss_epoch=4.84e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5074.546. Signaling Trainer to stop.
Epoch 142/800:  18%|█▊        | 142/800 [11:44<54:23,  4.96s/it, v_num=1, train_loss_step=5.02e+3, train_loss_epoch=4.84e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5057.078. Signaling Trainer to stop.
Epoch 124/800:  16%|█▌        | 124/800 [10:12<55:36,  4.94s/it, v_num=1, train_loss_step=4.91e+3, train_loss_epoch=4.86e+3]
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5048.759. Signaling Trainer to stop.
Epoch 134/800:  17%|█▋        | 134/800 [10:58<54:35,  4.92s/it, v_num=1, train_loss_step=4.23e+3, train_loss_epoch=4.83e+3]

Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5260.245. Signaling Trainer to stop.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/scvi/data/fields/_dataframe_field.py:187: UserWarning: Category 0 in adata.obs['_scvi_labels'] has fewer than 3 cells. Models may not train properly.
  categorical_mapping = _make_column_categorical(
sampling px distribution: 100%|██████████| 25/25 [00:16<00:00,  1.49it/s]
sampling px distribution: 100%|██████████| 25/25 [00:16<00:00,  1.50it/s]
sampling px distribution: 100%|██████████| 25/25 [00:16<00:00,  1.47it/s]
sampling px distribution: 100%|██████████| 25/25 [00:17<00:00,  1.46it/s]
sampling px distribution: 100%|██████████| 25/25 [00:16<00:00,  1.50it/s]
100%|██████████| 5/5 [02:21<00:00, 28.29s/it]
Calculating each genes: 100%|██████████| 19124/19124 [00:09<00:00, 1926.15it/s]
Calculating the regression.:  50%|█████     | 1/2 [57:46<57:46, 3466.15s/it]/mnt/work3/yuyasato/libs/ContinuousVI/src/continuousvi/trajectory.py:221: ImplicitModificationWarning: Trying to modify attribute `.obs` of view, initializing view as actual.
  _adata_sub.obs["pseudotime"] = self.pseudotimes[name]["pseudotime"]
Training multiple scVI models:   0%|          | 0/5 [00:00<?, ?it/s]GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Epoch 133/800:  17%|█▋        | 133/800 [12:22<1:02:02,  5.58s/it, v_num=1, train_loss_step=4.43e+3, train_loss_epoch=4.7e+3] 
Training multiple scVI models:  20%|██        | 1/5 [12:22<49:29, 742.42s/it]GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5094.408. Signaling Trainer to stop.
Epoch 117/800:  15%|█▍        | 117/800 [11:04<1:04:37,  5.68s/it, v_num=1, train_loss_step=5.51e+3, train_loss_epoch=4.74e+3]
Training multiple scVI models:  40%|████      | 2/5 [23:26<34:49, 696.51s/it]GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 4890.971. Signaling Trainer to stop.
Epoch 140/800:  18%|█▊        | 140/800 [13:37<1:04:12,  5.84s/it, v_num=1, train_loss_step=3.91e+3, train_loss_epoch=4.71e+3]
Training multiple scVI models:  60%|██████    | 3/5 [37:04<25:03, 751.69s/it]GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 4989.429. Signaling Trainer to stop.
Epoch 142/800:  18%|█▊        | 142/800 [18:01<1:23:30,  7.61s/it, v_num=1, train_loss_step=4.4e+3, train_loss_epoch=4.71e+3] 
Training multiple scVI models:  80%|████████  | 4/5 [55:05<14:41, 881.83s/it]GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
/home/yuyasato/work3/libs/ContinuousVI/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:425: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=23` in the `DataLoader` to improve performance.
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 4973.798. Signaling Trainer to stop.
Epoch 127/800:  16%|█▌        | 127/800 [13:00<1:08:56,  6.15s/it, v_num=1, train_loss_step=4.9e+3, train_loss_epoch=4.71e+3] 
                                                                               
Monitored metric elbo_validation did not improve in the last 45 records. Best score: 5097.752. Signaling Trainer to stop.
sampling px distribution: 100%|██████████| 25/25 [00:18<00:00,  1.32it/s]
sampling px distribution: 100%|██████████| 25/25 [00:20<00:00,  1.22it/s]
sampling px distribution: 100%|██████████| 25/25 [00:20<00:00,  1.21it/s]
sampling px distribution: 100%|██████████| 25/25 [00:20<00:00,  1.21it/s]
sampling px distribution: 100%|██████████| 25/25 [00:20<00:00,  1.20it/s]
100%|██████████| 5/5 [02:46<00:00, 33.34s/it]
Calculating each genes: 100%|██████████| 19124/19124 [00:10<00:00, 1810.69it/s]
Calculating the regression.: 100%|██████████| 2/2 [2:08:52<00:00, 3866.26s/it]
In [8]:
for name, reg in regression_result.items():
    reg.to_csv(f"./dist/trajectory/regression_{name}.tsv", sep="\t")
In [ ]:
# Up
#  Eomes: IPC's marker (https://arc.net/l/quote/uznkomhu)
# Down
#  Nes: Neural stem cell's marker (https://arc.net/l/quote/genbnnhm)
target_lineage = list(trained.defined_lineages.keys())[0]
print(f"@Lineage: {target_lineage}")
regression_result[target_lineage]
@Lineage: NSC --> IPC
Out[ ]:
gene slope intercept r2
9774 Igfbpl1 3.919986 0.042765 0.576759
5396 Eomes 3.663179 -0.265159 0.691716
16120 Sox4 2.803581 0.561999 0.494562
12101 Neurod6 2.788903 -0.131581 0.585619
6286 Gadd45g 2.788765 0.007986 0.438869
... ... ... ... ...
15330 Sfrp1 -1.881385 1.432907 0.180729
1714 Aldoc -1.922470 1.132011 0.226049
17751 Tuba1b -1.939113 1.377589 0.182468
13951 Ptn -2.352473 1.612374 0.231150
5601 Fabp7 -3.261634 2.394049 0.279647

19124 rows × 4 columns

In [ ]:
# Up:
#  Stmn2: Excitatory marker (https://arc.net/l/quote/vdbdmvqc)
# Down:
#  Neurod1: Progenitor marker (https://arc.net/l/quote/bkxwdxco)

target_lineage = list(trained.defined_lineages.keys())[1]
print(f"@Lineage: {target_lineage}")
regression_result[target_lineage]
@Lineage: IPC --> EX
Out[ ]:
gene slope intercept r2
11194 Meg3 4.257950 0.155017 0.575140
11032 Mapt 4.246047 0.186789 0.460254
16434 Stmn2 3.787384 0.823437 0.267230
9872 Ina 3.707868 0.180680 0.400880
17758 Tubb2a 3.465068 0.190151 0.423433
... ... ... ... ...
9774 Igfbpl1 -1.278309 1.897858 0.074115
12098 Neurod1 -1.284228 0.803582 0.109375
14709 Robo2 -1.461415 0.719719 0.256030
14612 Rnd2 -1.474085 0.735223 0.264990
12379 Nrp1 -1.560335 0.818908 0.158502

19124 rows × 4 columns

In [22]:
trained = ContinuousTrajectory(adata_sub)
trained.trainedVI = TrainedContinuousVI(adata=adata_sub, batch_key="project_id", label_key="annotation", continuous_key=None, trained_model_path="./dist/trajectory/models")
trained.trainedVI.calc_embeddings(resolution=0.3)

INFO     File dist/trajectory/models/model_0/model.pt already downloaded                                           
Loading pre-trained models: 100%|██████████| 1/1 [00:00<00:00,  8.56it/s]
Out[22]:
<continuousvi.continuousVI.TrainedContinuousVI at 0x724c7a3964a0>

Additional data¶

In [25]:
trained.trainedVI.embeddings.umap(color_by=["Nes", "Eomes", "annotation"])
Sampling expression: 100%|██████████| 1/1 [00:27<00:00, 27.68s/it]
Out[25]:
<continuousvi.continuousVI.TrainedContinuousVI.Plots at 0x724c7963d600>
In [26]:
trained.trainedVI.embeddings.umap(color_by=["Neurod1", "Stmn2", "annotation"])
Sampling expression: 100%|██████████| 1/1 [00:28<00:00, 28.42s/it]
Out[26]:
<continuousvi.continuousVI.TrainedContinuousVI.Plots at 0x724c799895a0>
In [40]:
sc.pl.draw_graph(trained.adata, color=["annotation"], legend_loc="on data")
In [39]:
sc.pl.paga(trained.adata, color=["annotation"])
In [ ]:
adata_sub
Out[ ]:
AnnData object with n_obs × n_vars = 29018 × 19124
    obs: 'index', 'project_id', 'time', 'converted_time', '_scvi_batch', '_scvi_labels', 'leiden', 'annotation'

Raw data (2 data)¶

In [42]:
sc.pp.pca(adata_sub, n_comps=30)
sc.pp.neighbors(adata_sub)
sc.tl.umap(adata_sub)
In [46]:
sc.pl.umap(adata_sub, color=["project_id", "annotation"])
In [ ]: