Commit
·
2a3a1cc
1
Parent(s):
14544c7
Handle both 'label' and 'labels' column names in datasets
Browse files- train-image-classifier.py +16 -12
train-image-classifier.py
CHANGED
|
@@ -210,23 +210,26 @@ def _(dataset_name, mo):
|
|
| 210 |
from datasets import load_dataset
|
| 211 |
|
| 212 |
print(f"Loading dataset: {dataset_name}...")
|
| 213 |
-
dataset = load_dataset(dataset_name)
|
| 214 |
print(f"Train: {len(dataset['train']):,} samples")
|
| 215 |
print(f"Test: {len(dataset['test']):,} samples")
|
| 216 |
|
| 217 |
-
# Get label
|
| 218 |
-
label_feature = dataset["train"].features["label"]
|
| 219 |
labels = label_feature.names if hasattr(label_feature, "names") else None
|
| 220 |
-
num_labels = label_feature.num_classes if hasattr(label_feature, "num_classes") else len(set(dataset["train"]["label"]))
|
| 221 |
|
|
|
|
| 222 |
print(f"Labels ({num_labels}): {labels}")
|
| 223 |
|
| 224 |
mo.md(f"**Loaded {len(dataset['train']):,} training samples with {num_labels} classes**")
|
| 225 |
-
return dataset, labels, num_labels
|
| 226 |
|
| 227 |
|
| 228 |
@app.cell
|
| 229 |
-
def _(dataset, labels, mo):
|
| 230 |
# Show sample images (notebook mode only)
|
| 231 |
import base64 as _base64
|
| 232 |
from io import BytesIO as _BytesIO
|
|
@@ -245,7 +248,7 @@ def _(dataset, labels, mo):
|
|
| 245 |
_images_html = []
|
| 246 |
for _sample in _samples:
|
| 247 |
_img_b64 = _image_to_base64(_sample["image"])
|
| 248 |
-
_label_name = labels[_sample["label"]] if labels else _sample["label"]
|
| 249 |
_images_html.append(
|
| 250 |
f"""
|
| 251 |
<div style="text-align: center; margin: 5px;">
|
|
@@ -311,12 +314,12 @@ def _(mo):
|
|
| 311 |
|
| 312 |
|
| 313 |
@app.cell
|
| 314 |
-
def _(dataset, image_processor):
|
| 315 |
def preprocess(examples):
|
| 316 |
"""Apply image processor to batch of images."""
|
| 317 |
images = [img.convert("RGB") for img in examples["image"]]
|
| 318 |
inputs = image_processor(images, return_tensors="pt")
|
| 319 |
-
inputs["labels"] = examples["label"]
|
| 320 |
return inputs
|
| 321 |
|
| 322 |
print("Preprocessing dataset...")
|
|
@@ -418,7 +421,7 @@ def _(trainer):
|
|
| 418 |
|
| 419 |
|
| 420 |
@app.cell
|
| 421 |
-
def _(dataset, id2label, image_processor, mo, model):
|
| 422 |
import torch
|
| 423 |
import base64 as _b64
|
| 424 |
from io import BytesIO as _BIO
|
|
@@ -436,9 +439,10 @@ def _(dataset, id2label, image_processor, mo, model):
|
|
| 436 |
_outputs = model(**_inputs)
|
| 437 |
_pred_idx = _outputs.logits.argmax(-1).item()
|
| 438 |
|
| 439 |
-
|
|
|
|
| 440 |
_pred_label = id2label[_pred_idx] if id2label else _pred_idx
|
| 441 |
-
_correct = "correct" if _pred_idx == _sample["label"] else "wrong"
|
| 442 |
|
| 443 |
# Convert image for display
|
| 444 |
_img_copy = _img.copy()
|
|
|
|
| 210 |
from datasets import load_dataset
|
| 211 |
|
| 212 |
print(f"Loading dataset: {dataset_name}...")
|
| 213 |
+
dataset = load_dataset(dataset_name)
|
| 214 |
print(f"Train: {len(dataset['train']):,} samples")
|
| 215 |
print(f"Test: {len(dataset['test']):,} samples")
|
| 216 |
|
| 217 |
+
# Get label column name (datasets use 'label' or 'labels')
|
| 218 |
+
_features = dataset["train"].features
|
| 219 |
+
label_column = "label" if "label" in _features else "labels"
|
| 220 |
+
label_feature = _features[label_column]
|
| 221 |
labels = label_feature.names if hasattr(label_feature, "names") else None
|
| 222 |
+
num_labels = label_feature.num_classes if hasattr(label_feature, "num_classes") else len(set(dataset["train"][label_column]))
|
| 223 |
|
| 224 |
+
print(f"Label column: '{label_column}'")
|
| 225 |
print(f"Labels ({num_labels}): {labels}")
|
| 226 |
|
| 227 |
mo.md(f"**Loaded {len(dataset['train']):,} training samples with {num_labels} classes**")
|
| 228 |
+
return dataset, label_column, labels, num_labels
|
| 229 |
|
| 230 |
|
| 231 |
@app.cell
|
| 232 |
+
def _(dataset, label_column, labels, mo):
|
| 233 |
# Show sample images (notebook mode only)
|
| 234 |
import base64 as _base64
|
| 235 |
from io import BytesIO as _BytesIO
|
|
|
|
| 248 |
_images_html = []
|
| 249 |
for _sample in _samples:
|
| 250 |
_img_b64 = _image_to_base64(_sample["image"])
|
| 251 |
+
_label_name = labels[_sample[label_column]] if labels else _sample[label_column]
|
| 252 |
_images_html.append(
|
| 253 |
f"""
|
| 254 |
<div style="text-align: center; margin: 5px;">
|
|
|
|
| 314 |
|
| 315 |
|
| 316 |
@app.cell
|
| 317 |
+
def _(dataset, image_processor, label_column):
|
| 318 |
def preprocess(examples):
|
| 319 |
"""Apply image processor to batch of images."""
|
| 320 |
images = [img.convert("RGB") for img in examples["image"]]
|
| 321 |
inputs = image_processor(images, return_tensors="pt")
|
| 322 |
+
inputs["labels"] = examples[label_column] # Trainer expects 'labels'
|
| 323 |
return inputs
|
| 324 |
|
| 325 |
print("Preprocessing dataset...")
|
|
|
|
| 421 |
|
| 422 |
|
| 423 |
@app.cell
|
| 424 |
+
def _(dataset, id2label, image_processor, label_column, mo, model):
|
| 425 |
import torch
|
| 426 |
import base64 as _b64
|
| 427 |
from io import BytesIO as _BIO
|
|
|
|
| 439 |
_outputs = model(**_inputs)
|
| 440 |
_pred_idx = _outputs.logits.argmax(-1).item()
|
| 441 |
|
| 442 |
+
_true_idx = _sample[label_column]
|
| 443 |
+
_true_label = id2label[_true_idx] if id2label else _true_idx
|
| 444 |
_pred_label = id2label[_pred_idx] if id2label else _pred_idx
|
| 445 |
+
_correct = "correct" if _pred_idx == _true_idx else "wrong"
|
| 446 |
|
| 447 |
# Convert image for display
|
| 448 |
_img_copy = _img.copy()
|