MLflow¶
We show how LaminDB can be integrated with MLflow to track the training process and associate datasets & parameters with models.
# !pip install 'lamindb[jupyter]' torchvision lightning mlflow
!lamin init --storage ./lamin-mlops
import lamindb as ln
import mlflow
import lightning
from torch import utils
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from autoencoder import LitAutoEncoder
ln.track()
Define a model¶
We use a basic PyTorch Lightning autoencoder as an example model.
Code of LitAutoEncoder
import torch
import lightning
from torch import optim, nn


class LitAutoEncoder(lightning.LightningModule):
    def __init__(self, hidden_size: int, bottleneck_size: int) -> None:
        super().__init__()
        # encoder: compress a flattened 28x28 image down to bottleneck_size
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, bottleneck_size),
        )
        # decoder: reconstruct the flattened image from the bottleneck
        self.decoder = nn.Sequential(
            nn.Linear(bottleneck_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 28 * 28),
        )
        # store hidden_size & bottleneck_size so they are checkpointed and logged
        self.save_hyperparameters()

    def training_step(
        self, batch: tuple[torch.Tensor, torch.Tensor], batch_idx: int
    ) -> torch.Tensor:
        x, y = batch
        x = x.view(x.size(0), -1)  # flatten the images to vectors
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = nn.functional.mse_loss(x_hat, x)  # reconstruction loss
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self) -> optim.Optimizer:
        optimizer = optim.Adam(self.parameters(), lr=1e-3)
        return optimizer
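To make the shapes concrete, here is a minimal sketch (not part of the original notebook) that instantiates the module and pushes a dummy batch through the encoder and decoder; it assumes the 28×28 MNIST input used above.

import torch

# a quick, hypothetical shape check for the autoencoder defined above
model = LitAutoEncoder(hidden_size=32, bottleneck_size=16)
dummy = torch.randn(4, 28 * 28)  # a fake batch of 4 flattened images
z = model.encoder(dummy)         # bottleneck of shape (4, 16)
x_hat = model.decoder(z)         # reconstruction of shape (4, 784)
print(z.shape, x_hat.shape)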
Query & download the MNIST dataset¶
We saved the MNIST dataset in the curation notebook, and it now shows up in the Artifact registry:
ln.Artifact.filter(kind="dataset").df()
You can also find it on lamin.ai if you have connected your instance.

Let’s get the dataset:
artifact = ln.Artifact.get(key="testdata/mnist")
artifact
And download it to a local cache:
path = artifact.cache()
path
Create a PyTorch-compatible dataset:
dataset = MNIST(path.as_posix(), transform=ToTensor())
dataset
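To sanity-check what the model will consume, you can inspect a single sample; this short sketch is an addition and assumes the standard torchvision MNIST interface, where each item is an (image, label) pair.

image, label = dataset[0]
print(image.shape, label)  # torch.Size([1, 28, 28]) and an integer class label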
Monitor training with MLflow¶
Train our example model and track the training progress with MLflow.
mlflow.pytorch.autolog()

MODEL_CONFIG = {"hidden_size": 32, "bottleneck_size": 16, "batch_size": 32}

# Start MLflow run
with mlflow.start_run() as run:
    train_dataset = MNIST(
        root="./data", train=True, download=True, transform=ToTensor()
    )
    train_loader = utils.data.DataLoader(
        train_dataset, batch_size=MODEL_CONFIG["batch_size"]
    )

    # Initialize model
    autoencoder = LitAutoEncoder(
        MODEL_CONFIG["hidden_size"], MODEL_CONFIG["bottleneck_size"]
    )

    # Create checkpoint callback
    from lightning.pytorch.callbacks import ModelCheckpoint

    checkpoint_callback = ModelCheckpoint(
        dirpath="model_checkpoints",
        filename=f"{run.info.run_id}_last_epoch",
        save_top_k=1,
        monitor="train_loss",
    )

    # Train model
    trainer = lightning.Trainer(
        accelerator="cpu",
        limit_train_batches=3,
        max_epochs=2,
        callbacks=[checkpoint_callback],
    )
    trainer.fit(model=autoencoder, train_dataloaders=train_loader)

    # Get run information
    run_id = run.info.run_id
    metrics = mlflow.get_run(run_id).data.metrics
    params = mlflow.get_run(run_id).data.params

    # Access model artifacts path
    model_uri = f"runs:/{run_id}/model"
    artifacts_path = run.info.artifact_uri
See the training progress in the MLflow UI:

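If you prefer to inspect the run programmatically instead of in the UI, a minimal sketch using the run_id, metrics, params, and model_uri variables captured above could look like this:

# print what MLflow autologging captured for this run
print(f"run_id: {run_id}")
print(f"metrics: {metrics}")      # e.g. train_loss logged via self.log(...)
print(f"params: {params}")        # hyperparameters captured by save_hyperparameters()
print(f"model_uri: {model_uri}")  # URI under which MLflow logged the model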
Save model in LaminDB¶
# save the checkpoint as a model in LaminDB
artifact = ln.Artifact(
    f"model_checkpoints/{run_id}_last_epoch.ckpt",
    key="testmodels/mlflow/litautoencoder.ckpt",  # is automatically versioned
    kind="model",
).save()

# create a label with the mlflow run name
mlflow_run_name = mlflow.get_run(run_id).data.tags.get(
    "mlflow.runName", f"run_{run_id}"
)
run_label = ln.ULabel(name=mlflow_run_name, description="mlflow run name").save()

# annotate the model Artifact
artifact.ulabels.add(run_label)

# define the associated model hyperparameters in ln.Param
for k, v in MODEL_CONFIG.items():
    ln.Param(name=k, dtype=type(v).__name__).save()
artifact.params.add_values(MODEL_CONFIG)
# look at Artifact annotations
artifact.describe()
artifact.params
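With the label and params in place, you can query the registry for this model again; the following sketch is an addition and assumes LaminDB's Django-style filtering on the ulabels relation.

# query model artifacts annotated with this run's label
ln.Artifact.filter(ulabels=run_label, kind="model").df()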
See the checkpoints:

If you want to re-use the checkpoint later on, you can download it like so:
ln.Artifact.get(key="testmodels/mlflow/litautoencoder.ckpt").cache()
Or on the CLI:
lamin get artifact --key 'testmodels/mlflow/litautoencoder.ckpt'
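To continue training or run inference from that checkpoint, a minimal sketch, relying on Lightning's standard load_from_checkpoint and the hyperparameters stored via save_hyperparameters(), could look like this:

# load the cached checkpoint back into the Lightning module
ckpt_path = ln.Artifact.get(key="testmodels/mlflow/litautoencoder.ckpt").cache()
model = LitAutoEncoder.load_from_checkpoint(ckpt_path.as_posix())
model.eval()  # switch to inference mode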
ln.finish()