# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "marimo",
#     "datasets",
#     "transformers",
#     "torch",
#     "torchvision",
#     "huggingface-hub",
#     "evaluate",
#     "accelerate",
#     "scikit-learn",
# ]
# ///
"""
Train an Image Classifier

This marimo notebook fine-tunes a Vision Transformer (ViT) for image classification.

Two ways to run:
- Tutorial: uvx marimo edit --sandbox train-image-classifier.py
- Script:   uv run train-image-classifier.py --dataset beans --output-repo user/my-model

On HF Jobs (GPU):
    hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \
        https://huggingface.co/datasets/uv-scripts/marimo/raw/main/train-image-classifier.py \
        -- --dataset beans --output-repo user/beans-vit --epochs 5
"""

import marimo

__generated_with = "0.19.6"
app = marimo.App(width="medium")


@app.cell
def _():
    import marimo as mo

    return (mo,)


@app.cell
def _(mo):
    mo.md("""
    # Train an Image Classifier

    This notebook fine-tunes a Vision Transformer (ViT) for image classification.

    **Two ways to run:**

    - **Tutorial**: `uvx marimo edit --sandbox train-image-classifier.py`
    - **Script**: `uv run train-image-classifier.py --dataset beans --output-repo user/my-model`

    The same code powers both experiences!
    """)
    return


@app.cell
def _(mo):
    mo.md("""
    ## Running on HF Jobs (GPU)

    This notebook can run on [Hugging Face Jobs](https://huggingface.co/docs/hub/jobs)
    for GPU training. No local GPU needed - just run:

    ```bash
    hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \\
        https://huggingface.co/datasets/uv-scripts/marimo/raw/main/train-image-classifier.py \\
        -- --dataset beans --output-repo your-username/beans-vit --epochs 5 --push-to-hub
    ```

    **GPU Flavors:**

    | Flavor | GPU | VRAM | Best for |
    |--------|-----|------|----------|
    | `l4x1` | L4 | 24GB | Most fine-tuning tasks |
    | `a10gx1` | A10G | 24GB | Slightly faster than L4 |
    | `a100x1` | A100 | 40GB | Large models, big batches |

    **Key flags:**

    - `--secrets HF_TOKEN` - passes your HF token for pushing models
    - `--` - separates `hf jobs` args from script args
    - `--push-to-hub` - actually pushes the model (otherwise it is only saved locally)

    **Tip:** Start with the `beans` dataset and 1-3 epochs to test, then scale up!
    """)
    return
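@app.cell
def _(mo):
    # Not part of the original notebook: a minimal device-check sketch, added
    # for illustration. The HF Jobs flavors above are all GPU, but this script
    # also runs locally, where training silently falls back to CPU, so it is
    # worth surfacing the device before kicking off training. `torch` is
    # imported under a cell-local alias to avoid clashing with any later cell
    # that defines `torch` globally.
    import torch as _torch

    _device = "cuda" if _torch.cuda.is_available() else "cpu"
    mo.md(f"**Detected device:** `{_device}`")
    return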
""") return @app.cell def _(mo): import argparse # Parse CLI args (works in both modes) parser = argparse.ArgumentParser(description="Fine-tune ViT for image classification") parser.add_argument( "--dataset", default="beans", help="HF dataset name (must be image classification dataset)", ) parser.add_argument( "--model", default="google/vit-base-patch16-224-in21k", help="Pretrained model to fine-tune", ) parser.add_argument( "--output-repo", default=None, help="Where to push trained model (e.g., user/my-model)", ) parser.add_argument("--epochs", type=int, default=3, help="Number of training epochs") parser.add_argument("--batch-size", type=int, default=16, help="Batch size") parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate") parser.add_argument( "--push-to-hub", action="store_true", default=False, help="Push model to Hub after training", ) args, _ = parser.parse_known_args() # Interactive controls (shown in notebook mode) dataset_input = mo.ui.text(value=args.dataset, label="Dataset") model_input = mo.ui.text(value=args.model, label="Model") output_input = mo.ui.text(value=args.output_repo or "", label="Output Repo") epochs_input = mo.ui.slider(1, 20, value=args.epochs, label="Epochs") batch_size_input = mo.ui.dropdown( options=["8", "16", "32", "64"], value=str(args.batch_size), label="Batch Size" ) lr_input = mo.ui.dropdown( options=["1e-5", "2e-5", "5e-5", "1e-4"], value=f"{args.lr:.0e}".replace("e-0", "e-"), label="Learning Rate", ) mo.vstack( [ mo.hstack([dataset_input, model_input]), mo.hstack([output_input]), mo.hstack([epochs_input, batch_size_input, lr_input]), ] ) return ( args, batch_size_input, dataset_input, epochs_input, lr_input, model_input, output_input, ) @app.cell def _( args, batch_size_input, dataset_input, epochs_input, lr_input, model_input, output_input, ): # Resolve values (interactive takes precedence) dataset_name = dataset_input.value or args.dataset model_name = model_input.value or args.model output_repo = output_input.value or args.output_repo num_epochs = epochs_input.value or args.epochs batch_size = int(batch_size_input.value) if batch_size_input.value else args.batch_size learning_rate = float(lr_input.value) if lr_input.value else args.lr print("Configuration:") print(f" Dataset: {dataset_name}") print(f" Model: {model_name}") print(f" Output: {output_repo or '(not pushing to Hub)'}") print(f" Epochs: {num_epochs}, Batch Size: {batch_size}, LR: {learning_rate}") return ( batch_size, dataset_name, learning_rate, model_name, num_epochs, output_repo, ) @app.cell def _(mo): mo.md(""" ## Step 2: Load Dataset We'll load an image classification dataset from the Hub. The `beans` dataset is small (~1000 images) and trains quickly - perfect for learning! 
""") return @app.cell def _(dataset_name, mo): from datasets import load_dataset print(f"Loading dataset: {dataset_name}...") dataset = load_dataset(dataset_name) print(f"Train: {len(dataset['train']):,} samples") print(f"Test: {len(dataset['test']):,} samples") # Get label column name (datasets use 'label' or 'labels') _features = dataset["train"].features label_column = "label" if "label" in _features else "labels" label_feature = _features[label_column] labels = label_feature.names if hasattr(label_feature, "names") else None num_labels = label_feature.num_classes if hasattr(label_feature, "num_classes") else len(set(dataset["train"][label_column])) print(f"Label column: '{label_column}'") print(f"Labels ({num_labels}): {labels}") mo.md(f"**Loaded {len(dataset['train']):,} training samples with {num_labels} classes**") return dataset, label_column, labels, num_labels @app.cell def _(dataset, label_column, labels, mo): # Show sample images (notebook mode only) import base64 as _base64 from io import BytesIO as _BytesIO def _image_to_base64(img, max_size=150): """Convert PIL image to base64 for HTML display.""" _img_copy = img.copy() _img_copy.thumbnail((max_size, max_size)) _buffered = _BytesIO() _img_copy.save(_buffered, format="PNG") return _base64.b64encode(_buffered.getvalue()).decode() # Get 6 sample images with different labels _samples = dataset["train"].shuffle(seed=42).select(range(6)) _images_html = [] for _sample in _samples: _img_b64 = _image_to_base64(_sample["image"]) _label_name = labels[_sample[label_column]] if labels else _sample[label_column] _images_html.append( f"""