Commit

fix: Sort import and other pre-commit fix
FrancoisLbrn committed Oct 24, 2024
1 parent 28f26dc commit 10e4343
Showing 3 changed files with 15 additions and 11 deletions.
2 changes: 1 addition & 1 deletion PR_4.md
@@ -13,7 +13,7 @@ The goal of this PR is to:
- [ ] Create a FastAPI API to make predictions on new data
- [ ] Dockerize the API

Files to be modified:
Files to be modified:
- [ ] the `src/web_service` folder
- [ ] the `bin/run_services.sh` file
- [ ] `Dockerfile.app`
7 changes: 3 additions & 4 deletions src/web_service/lib/inference.py
@@ -2,13 +2,12 @@

import numpy as np
import pandas as pd
from loguru import logger
from lib.models import InputData
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer

from lib.predicting import predict
from lib.preprocessing import preprocess_data
from loguru import logger
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer


def run_inference(
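
Read as a whole, the hunk above leaves inference.py with the import block sketched below (a reconstruction of the hunk, since the added/removed gutter is not reproduced here). The `from lib...` imports end up in the same group as the third-party `from` imports (loguru, sklearn), sorted alphabetically, which is what isort does when `lib` is not configured as a first-party package; that last point is an assumption about this repository's isort settings, not something shown in the commit.

# Reconstructed import block of src/web_service/lib/inference.py after this commit
# (a reading of the hunk above, not copied verbatim from the repository).
import numpy as np
import pandas as pd
from lib.models import InputData
from lib.predicting import predict
from lib.preprocessing import preprocess_data
from loguru import logger
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer
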
17 changes: 11 additions & 6 deletions src/web_service/main.py
@@ -1,10 +1,15 @@
from app_config import APP_DESCRIPTION, APP_TITLE, APP_VERSION, MODEL_VERSION
from app_config import (
APP_DESCRIPTION,
APP_TITLE,
APP_VERSION,
MODEL_VERSION,
PATH_TO_MODEL,
PATH_TO_PREPROCESSOR,
)
from fastapi import FastAPI
from lib.inference import run_inference
from lib.utils import load_pickle
from lib.models import InputData, PredictionOut
from app_config import PATH_TO_MODEL, PATH_TO_PREPROCESSOR

from lib.utils import load_pickle

app = FastAPI(
title=APP_TITLE, description=APP_DESCRIPTION, version=APP_VERSION
@@ -23,5 +28,5 @@ def home() -> dict:
def predict(value: InputData) -> dict:
model = load_pickle(PATH_TO_MODEL)
preprocessor = load_pickle(PATH_TO_PREPROCESSOR)
y = run_inference([value], preprocessor, model)
return {"abalone_age_prediction": y}
y = run_inference([value], preprocessor, model)
return {"abalone_age_prediction": y}
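
As a usage illustration (not part of the commit), here is a hedged sketch of how the prediction endpoint defined in main.py might be called once the service is running. The route path and the payload field names are assumptions, since neither the route decorator nor the InputData schema appears in this hunk.

# Hypothetical client call against the FastAPI service in src/web_service/main.py.
# Assumes the app is served locally (e.g. `uvicorn main:app --port 8000`) and that
# the endpoint is mounted at /predict; the payload keys below are placeholders,
# not field names taken from the repository's InputData model.
import requests

payload = {
    "length": 0.455,    # placeholder abalone feature
    "diameter": 0.365,  # placeholder abalone feature
    "height": 0.095,    # placeholder abalone feature
}
response = requests.post("http://localhost:8000/predict", json=payload)
print(response.json())  # expected shape: {"abalone_age_prediction": ...}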
