"""FastAPI service that classifies problematics with a fine-tuned
Hugging Face sequence-classification model."""

import os
import logging

import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Dict, Union
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Logger for model-loading diagnostics.
logger = logging.getLogger(__name__)

class ProblematicItem(BaseModel):
    text: str


class ProblematicList(BaseModel):
    problematics: List[str]


class PredictionResponse(BaseModel):
    predicted_class: str
    score: float


class PredictionsResponse(BaseModel):
    results: List[Dict[str, Union[str, float]]]

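# Illustrative payload shapes for the endpoints below (field values are made up):
#   POST /predict        body {"text": "..."}
#                        ->   {"predicted_class": "Class A", "score": 0.97}
#   POST /predict-batch  body {"problematics": ["...", "..."]}
#                        ->   {"results": [{"text": "...", "class": "Class B", "score": 0.88}, ...]}
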
app = FastAPI(
    title="Problematic Specificity Classification API",
    description="This API classifies problematics using a fine-tuned model hosted on Hugging Face.",
    version="1.0.0",
)

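# Once the app is running, interactive API docs are served at /docs (FastAPI default).
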
# Configuration via environment variables (the defaults are placeholders).
MODEL_NAME = os.getenv("MODEL_NAME", "your-account/your-model")
LABEL_0 = os.getenv("LABEL_0", "Class A")
LABEL_1 = os.getenv("LABEL_1", "Class B")

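# Example overrides (hypothetical values):
#   export MODEL_NAME="your-account/problematic-classifier"
#   export LABEL_0="Generic"
#   export LABEL_1="Specific"
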
# Tokenizer and model are loaded lazily: at startup, or on first use if startup loading failed.
tokenizer = None
model = None

def load_model():
    """Fetch the tokenizer and model from the Hugging Face Hub."""
    global tokenizer, model
    try:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
        logger.info("Model loaded successfully")
        return True
    except Exception as e:
        logger.error(f"Error loading model: {e}")
        return False

| @app.get("/") |
| def read_root(): |
| return {"status": "ok", "model": MODEL_NAME} |
|
|
| |
| @app.get("/health") |
| def health_check(): |
| global model, tokenizer |
| if model is None or tokenizer is None: |
| success = load_model() |
| logger.info("Success") |
| if not success: |
| raise HTTPException(status_code=503, detail="Model not available") |
| return {"status": "ok", "model": MODEL_NAME} |
|
|
| |
| @app.post("/predict", response_model=PredictionResponse) |
| def predict_single(item: ProblematicItem): |
| global model, tokenizer |
| |
| if model is None or tokenizer is None: |
| success = load_model() |
| if not success: |
| raise HTTPException(status_code=503, detail="Model not available") |
| |
| try: |
| |
| inputs = tokenizer(item.text, padding=True, truncation=True, return_tensors="pt") |
| |
| |
| with torch.no_grad(): |
| outputs = model(**inputs) |
| probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1) |
| predicted_class = torch.argmax(probabilities, dim=1).item() |
| confidence_score = probabilities[0][predicted_class].item() |
| |
| |
| predicted_label = LABEL_0 if predicted_class == 0 else LABEL_1 |
| |
| return PredictionResponse(predicted_class=predicted_label, score=confidence_score) |
| |
| except Exception as e: |
| raise HTTPException(status_code=500, detail=f"Error during prediction: {str(e)}") |
|
|
| |
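# Example request (assuming a local instance on the default port configured below):
#   curl -X POST http://localhost:7860/predict \
#        -H 'Content-Type: application/json' \
#        -d '{"text": "..."}'
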
| @app.post("/predict-batch", response_model=PredictionsResponse) |
| def predict_batch(items: ProblematicList): |
| global model, tokenizer |
| |
| if model is None or tokenizer is None: |
| success = load_model() |
| if not success: |
| raise HTTPException(status_code=503, detail="Model not available") |
| |
| try: |
| results = [] |
| |
| |
| batch_size = 8 |
| for i in range(0, len(items.problematics), batch_size): |
| batch_texts = items.problematics[i:i+batch_size] |
| |
| |
| inputs = tokenizer(batch_texts, padding=True, truncation=True, return_tensors="pt") |
| |
| |
| with torch.no_grad(): |
| outputs = model(**inputs) |
| probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1) |
| predicted_classes = torch.argmax(probabilities, dim=1).tolist() |
| confidence_scores = [probabilities[j][predicted_classes[j]].item() for j in range(len(predicted_classes))] |
| |
| |
| for j, (pred_class, score) in enumerate(zip(predicted_classes, confidence_scores)): |
| predicted_label = LABEL_0 if pred_class == 0 else LABEL_1 |
| results.append({ |
| "text": batch_texts[j], |
| "class": predicted_label, |
| "score": score |
| }) |
| |
| return PredictionsResponse(results=results) |
| |
| except Exception as e: |
| raise HTTPException(status_code=500, detail=f"Error during prediction: {str(e)}") |
|
|
| |
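# Example request (illustrative):
#   curl -X POST http://localhost:7860/predict-batch \
#        -H 'Content-Type: application/json' \
#        -d '{"problematics": ["...", "..."]}'
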
@app.on_event("startup")
async def startup_event():
    # Load the model when the server starts so the first request is not penalized.
    load_model()

if __name__ == "__main__":
    # reload=True is convenient for development; disable it in production.
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)
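
# Assumed dependencies: fastapi, uvicorn, torch, transformers (pydantic ships with fastapi).
# Run locally with: python app.py
# or, equivalently: uvicorn app:app --host 0.0.0.0 --port 7860 --reload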