python, image, deepface

How can I run DeepFace in a Docker container on a Mac?


I am trying to install DeepFace in a Docker container to do image comparison, and I am close to getting it to run. However, I think I am probably missing dependencies, but I cannot figure out which ones I need.

In my main.py, if I comment out from deepface import DeepFace, the app starts up and runs; when I uncomment it, the app hangs.

NOTE: when I say "run the application", I mean I start the app with: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload

System:

Hardware:

running uname -m gives arm64

MacBook Air


Software:

Docker desktop

Server: Docker Desktop 4.39.0 (184744)
 Engine:
  Version:          28.0.1
  API version:      1.48 (minimum version 1.24)
  Go version:       go1.23.6
  Git commit:       bbd0a17
  Built:            Wed Feb 26 10:40:57 2025
  OS/Arch:          linux/arm64
  Experimental:     false
 containerd:
  Version:          1.7.25
  GitCommit:        bcc810d6b9066471b0b6fa75f557a15a1cbf31bb
 runc:
  Version:          1.2.4
  GitCommit:        v1.2.4-0-g6c52b3f
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0

VS Code - latest version

Files in project

.devcontainer/devcontainer.json

{
  "name": "AI Devcontainer",
  "dockerComposeFile": "../docker-compose.yml",
  "service": "rp",
  "workspaceFolder": "/usr/src/app",
  "postCreateCommand": "pip install -r requirements.txt"
}

./docker-compose.yml

services:
  rp:
    container_name: ai
    platform: linux/amd64
    build: .
    command: sleep infinity
    networks:
      - RP
    volumes:
      - .:/usr/src/app
    ports:
      - '8000:8000'

networks:
  RP:
    external: true

./Dockerfile

# Use the official Python image from the DockerHub
FROM python:3.9-slim

# Set the working directory in the container
WORKDIR /app

# Install system dependencies required for DeepFace
# Update package list and install dependencies
RUN apt update && apt install -y \
    tzdata \
    libgl1-mesa-glx \
    libegl1-mesa \
    libxrandr2 \
    libxss1 \
    libxcursor1 \
    libxcomposite1 \
    libasound2 \
    libxi6 \
    libxtst6 \
    curl \
    ffmpeg \
    git \
    nano \
    gnupg2 \
    libsm6 \
    wget \
    unzip \
    libxcb-icccm4 \
    libxkbcommon-x11-0 \
    libxcb-keysyms1 \
    libxcb-render0 \
    libxcb-render-util0 \
    libxcb-image0 \
    python3 \
    python3-pip

# Install required Qt and X11 libraries
RUN apt update && apt install -y \
    libx11-xcb1 \
    libxcb1 \
    libxcomposite1 \
    libxkbcommon-x11-0 \
    libxkbcommon0 \
    libxcb-cursor0 \
    libxcb-shape0 \
    libxcb-shm0 \
    libxcb-sync1 \
    libxcb-xfixes0 \
    libxcb-xinerama0 \
    libxcb-xinput0 \
    libxcb-xkb1


# Upgrade pip and install required Python packages
RUN python -m pip install --upgrade pip
RUN python -m pip install \
    onnxruntime==1.15.1 \
    numpy==1.21.6 \
    h5py \
    numexpr \
    protobuf==3.20.2 \
    opencv-python==4.8.0.74 \
    opencv-contrib-python==4.8.0.74 \
    pyqt6==6.5.1 \
    onnx==1.14.0 \
    torch==1.13.1 \
    torchvision==0.14.1
    

# Install dependencies
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code into the container
COPY . .

# Expose port 8000 for the FastAPI app
EXPOSE 8000

# Keep the container running; the app is started manually with uvicorn (see NOTE above)
CMD ["sleep", "infinity"]

./requirements.txt

fastapi
uvicorn
requests

deepface 
flask 
numpy 
pandas 
tensorflow-cpu
gunicorn 
pillow 
opencv-python

I load the devcontainer in VS Code, the container builds, and then I run the application. It prints this and then hangs:

root@1f6928173e34:/usr/src/app# uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
INFO:     Will watch for changes in these directories: ['/usr/src/app']
INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO:     Started reloader process [1104] using StatReload
INFO:numexpr.utils:NumExpr defaulting to 8 threads.

main.py

import logging
logging.basicConfig(level=logging.DEBUG)

from fastapi import FastAPI, HTTPException
import uvicorn
import requests

import numpy as np

import os
import socket
import base64
from typing import List, Dict, Union

#
# THIS LINE CAUSES THE ERROR
#
from deepface import DeepFace


app = FastAPI(title="RP AI Documentation", docs_url="/api-docs")

@app.on_event("startup")
async def startup_event():
    print("Starting up...")
    # Simulate any heavy lifting during startup if needed
    print("Startup finished!")

if __name__ == "__main__":
    print("Starting app...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
    print("App started!")


# Function to perform DeepFace comparison
def compare_faces_with_deepface(img_path1: str, img_path2: str) -> bool:
    try:
        result = DeepFace.verify(img_path1, img_path2)
        return result["verified"]
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during face comparison: {str(e)}")


# Function to decode images and save them as temporary files
def get_temp_image_paths(images: List[Dict[str, Union[str, int]]]) -> List[str]:
    temp_image_paths = []

    for img in images:
        try:
            print(f"Processing img[{img['id']}], type: {img['imagetype']}")

            img_data = base64.b64decode(img["filedata"])
            temp_path = f"/tmp/{img['id']}.jpg"
            
            with open(temp_path, "wb") as f:
                f.write(img_data)

            temp_image_paths.append(temp_path)

        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Error processing image: {str(e)}")

    return temp_image_paths


# Function to process the images and perform the comparison
def process_and_compare_images(images: List[Dict[str, Union[str, int]]]) -> bool:
    if len(images) < 2:
        raise HTTPException(status_code=400, detail="At least two images are required for comparison.")

    temp_image_paths = get_temp_image_paths(images)

    if len(temp_image_paths) < 2:
        raise HTTPException(status_code=400, detail="Not enough valid faces found for comparison.")

    match = compare_faces_with_deepface(temp_image_paths[0], temp_image_paths[1])

    # Clean up temporary files
    for path in temp_image_paths:
        os.remove(path)

    return match

@app.post("/compare", tags=["AI"])
def compare(images: List[Dict[str, Union[str, int]]]):
    try:
        # Call the function to process images and compare
        match_result = process_and_compare_images(images)
        
        # If match is None or an unexpected outcome, handle it gracefully
        if match_result is None:
            return {"match_results": "No comparison was made."}
        return {"match_results": match_result}

    except HTTPException as e:
        raise e
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")

I also looked up this:

Use the Latest TensorFlow Version with the tensorflow-cpu Package

Ensure you’re installing the latest TensorFlow CPU version, which should automatically be compiled without AVX support.
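For reference, a minimal probe I can run inside the container (this is just a diagnostic sketch, not part of the app) to see whether the TensorFlow import itself is what aborts or hangs, independent of uvicorn:

# Probe (assumption: run with `python` inside the running container).
# If the installed build needs AVX, the import below is where it aborts or hangs.
import tensorflow as tf

print("tensorflow version:", tf.__version__)
print("CPU devices:", tf.config.list_physical_devices("CPU"))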

TRY 1

I put this in the Dockerfile:

# Use the official Python image from the DockerHub
FROM python:3.9-slim
FROM serengil/deepface

Ran the application and got this:

INFO:     Will watch for changes in these directories: ['/usr/src/app']
INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO:     Started reloader process [521] using StatReload
INFO:     Started server process [523]
INFO:     Waiting for application startup.
Starting up...
Startup finished!

Uncommented the line from deepface import DeepFace in main.py and

got this:

WARNING:  StatReload detected changes in 'app/main.py'. Reloading...
INFO:     Shutting down
INFO:     Waiting for application shutdown.
INFO:     Application shutdown complete.
INFO:     Finished server process [523]
The TensorFlow library was compiled to use AVX instructions, but these aren't available on your machine.

TRY 2

After reading comments from Gwang-Jin Kim, I modified the following files:

Dockerfile

# Use only this docker image
FROM serengil/deepface

# Set the working directory in the container
WORKDIR /app

# Install dependencies
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code into the container
COPY . .

# Expose port 8000 for the FastAPI app
EXPOSE 8000

# Keep the container running; the app is started manually with uvicorn (see NOTE above)
CMD ["sleep", "infinity"]

I removed the platform: linux/amd64 line from docker-compose.yml, as I thought that might be causing the problem since my computer has an M3 chip.

docker-compose.yml

services:
  rp:
    container_name: ai
    build: .
    command: sleep infinity
    networks:
      - RP
    volumes:
      - .:/usr/src/app
    ports:
      - '8000:8000'

networks:
  RP:
    external: true

requirements.txt

fastapi
uvicorn
requests
numpy

Rebuilt the container, ran the application, and am still getting this:

root@0dd0c7ae8862:/usr/src/app# uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
INFO:     Will watch for changes in these directories: ['/usr/src/app']
INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO:     Started reloader process [540] using StatReload
The TensorFlow library was compiled to use AVX instructions, but these aren't available on your machine.

TRY 3

I re-read the answer provided and then cloned the repo: git clone https://github.com/serengil/deepface.git

I made the following changes to these files:

Dockerfile

# base image
FROM python:3.8.12
LABEL org.opencontainers.image.source https://github.com/serengil/deepface

# -----------------------------------
# create required folder
RUN mkdir -p /app && chown -R 1001:0 /app
RUN mkdir /app/deepface

# -----------------------------------
# switch to application directory
WORKDIR /app

# -----------------------------------
# update image os
# Install system dependencies
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libsm6 \
    libxext6 \
    libhdf5-dev \
    && rm -rf /var/lib/apt/lists/*

# -----------------------------------
# Copy required files from repo into image
COPY ./deepface /app/deepface
# even though we will use local requirements, this one is required to perform install deepface from source code
COPY ./requirements.txt /app/requirements.txt
COPY ./requirements_local /app/requirements_local.txt
COPY ./package_info.json /app/
COPY ./setup.py /app/
COPY ./README.md /app/

#
# ***** NOT USING THIS ******
#
#COPY ./entrypoint.sh /app/deepface/api/src/entrypoint.sh

# -----------------------------------
# if you plan to use a GPU, you should install the 'tensorflow-gpu' package
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org tensorflow-gpu

# if you plan to use face anti-spoofing, then activate this line
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org torch==2.1.2
# -----------------------------------
# install deepface from pypi release (might be out-of-date)
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org deepface
# -----------------------------------
# install dependencies - deepface with these dependency versions is working
RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -r /app/requirements_local.txt
# install deepface from source code (always up-to-date)
RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -e .

# -----------------------------------
# some packages are optional in deepface. activate if your task depends on one.
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org cmake==3.24.1.1
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org dlib==19.20.0
# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org lightgbm==2.3.1

# -----------------------------------
# environment variables
ENV PYTHONUNBUFFERED=1

# -----------------------------------
# run the app (re-configure port if necessary)
WORKDIR /app/deepface/api/src


# Expose port 8000 for the FastAPI app
EXPOSE 8000

# I am not using the entrypoint.sh file... instead I run the app
# as I described at the top of this question
# ENTRYPOINT [ "sh", "entrypoint.sh" ]
CMD ["sleep", "infinity"]

requirements_additional.txt

opencv-contrib-python>=4.3.0.36
mediapipe>=0.8.7.3
dlib>=19.20.0
ultralytics>=8.0.122
facenet-pytorch>=2.5.3
torch>=2.1.2
insightface>=0.7.3
onnxruntime>=1.9.0
tf-keras
typing-extensions
pydantic
albumentations

requirements_local

numpy==1.22.3
pandas==2.0.3
Pillow==9.0.0
opencv-python==4.9.0.80
tensorflow==2.13.1
keras==2.13.1

requirements.txt

fastapi
uvicorn

requests>=2.27.1
numpy>=1.14.0
pandas>=0.23.4
gdown>=3.10.1
tqdm>=4.30.0
Pillow>=5.2.0
opencv-python>=4.5.5.64

# tensorflow>=1.9.0     # I tried this
tensorflow-cpu>=1.9.0   # and this; both hang

keras>=2.2.0
Flask>=1.1.2
flask_cors>=4.0.1
mtcnn>=0.1.0
retina-face>=0.0.14
fire>=0.4.0
gunicorn>=20.1.0

I copied all the deepface source code into my project


Rebuilt the container and ran the application with from deepface import DeepFace commented out in main.py:

uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload

INFO:     Will watch for changes in these directories: ['/usr/src/app']
INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO:     Started reloader process [3983] using StatReload
INFO:     Started server process [3985]
INFO:     Waiting for application startup.
Starting up...
Startup finished!
INFO:     Application startup complete.

It loads in Swagger (and I can hit the endpoint).


Then I uncommented from deepface import DeepFace in main.py. Uvicorn detected the change, and then nothing...

WARNING:  StatReload detected changes in 'app/main.py'. Reloading...
INFO:     Shutting down
INFO:     Waiting for application shutdown.
INFO:     Application shutdown complete.
INFO:     Finished server process [3985]

When I CTRL-C the process and restart it, it also hangs:

^CINFO:     Stopping reloader process [3983]

uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload

INFO:     Will watch for changes in these directories: ['/usr/src/app']
INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO:     Started reloader process [5188] using StatReload

I do see AMD64 in Docker Desktop - could that be an issue?



Solution

  • Why are you not using the Docker image of deepface that is already there? serengil/deepface

    Use in your Dockerfile:

    FROM serengil/deepface:latest
    

    and try to find out what other packages you have to install.

    Since you get problems with serengil/deepface (your machine does not have AVX), I would recommend going one step back to the GitHub repo from which he produced the Docker image: https://github.com/serengil/deepface

    This is its Dockerfile:

    https://github.com/serengil/deepface/blob/master/Dockerfile

    # base image
    FROM python:3.8.12
    LABEL org.opencontainers.image.source https://github.com/serengil/deepface
    
    # -----------------------------------
    # create required folder
    RUN mkdir -p /app && chown -R 1001:0 /app
    RUN mkdir /app/deepface
    
    
    
    # -----------------------------------
    # switch to application directory
    WORKDIR /app
    
    # -----------------------------------
    # update image os
    # Install system dependencies
    RUN apt-get update && apt-get install -y \
        ffmpeg \
        libsm6 \
        libxext6 \
        libhdf5-dev \
        && rm -rf /var/lib/apt/lists/*
    
    # -----------------------------------
    # Copy required files from repo into image
    COPY ./deepface /app/deepface
    # even though we will use local requirements, this one is required to perform install deepface from source code
    COPY ./requirements.txt /app/requirements.txt
    COPY ./requirements_local /app/requirements_local.txt
    COPY ./package_info.json /app/
    COPY ./setup.py /app/
    COPY ./README.md /app/
    COPY ./entrypoint.sh /app/deepface/api/src/entrypoint.sh
    
    # -----------------------------------
    # if you plan to use a GPU, you should install the 'tensorflow-gpu' package
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org tensorflow-gpu
    
    # if you plan to use face anti-spoofing, then activate this line
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org torch==2.1.2
    # -----------------------------------
    # install deepface from pypi release (might be out-of-date)
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org deepface
    # -----------------------------------
    # install dependencies - deepface with these dependency versions is working
    RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -r /app/requirements_local.txt
    # install deepface from source code (always up-to-date)
    RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -e .
    
    # -----------------------------------
    # some packages are optional in deepface. activate if your task depends on one.
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org cmake==3.24.1.1
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org dlib==19.20.0
    # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org lightgbm==2.3.1
    
    # -----------------------------------
    # environment variables
    ENV PYTHONUNBUFFERED=1
    
    # -----------------------------------
    # run the app (re-configure port if necessary)
    WORKDIR /app/deepface/api/src
    EXPOSE 5000
    # CMD ["gunicorn", "--workers=1", "--timeout=3600", "--bind=0.0.0.0:5000", "app:create_app()"]
    ENTRYPOINT [ "sh", "entrypoint.sh" ]
    

    Clone his GitHub repo:

    git clone git@github.com:serengil/deepface.git
    cd deepface
    

    The problem came from tensorflow, and it is specified in requirements.txt:

    requests>=2.27.1
    numpy>=1.14.0
    pandas>=0.23.4
    gdown>=3.10.1
    tqdm>=4.30.0
    Pillow>=5.2.0
    opencv-python>=4.5.5.64
    tensorflow>=1.9.0
    keras>=2.2.0
    Flask>=1.1.2
    flask_cors>=4.0.1
    mtcnn>=0.1.0
    retina-face>=0.0.14
    fire>=0.4.0
    gunicorn>=20.1.0
    

    There are Stack Overflow questions and answers to this problem already - and even better, a GitHub issue in the original tensorflow repo:

    The GitHub issue https://github.com/tensorflow/tensorflow/issues/24548 about the error you got says:

    I uninstall tensorflow,and install tensorflow-cpu as the same version as the original tensorflow,it can work normal

    So if you change tensorflow>=1.9.0 to tensorflow-cpu>=1.9.0, this AVX error should disappear.
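
    If you want to double-check which of the two wheels actually ended up in the image (a small sketch; both packages import as tensorflow, so you have to look at the installed distribution names):

    # Sketch: list installed distributions whose name starts with "tensorflow";
    # you want to see 'tensorflow-cpu' here rather than plain 'tensorflow'.
    from importlib.metadata import distributions

    names = sorted(
        d.metadata["Name"]
        for d in distributions()
        if (d.metadata["Name"] or "").lower().startswith("tensorflow")
    )
    print(names)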

    Then build and run with commands similar to the ones you used to build and run your Docker image before.

    macOS

    If you are on a Mac, the question is whether you are on an Apple silicon CPU (M1-M4; these have an arm64 architecture) or on an Intel-based Mac (amd64 architecture).

    In the case of an amd64 machine, everything will be very similar to Linux.

    But in the case of an arm64 machine, there is the possibility of running amd64 in a Docker container (https://blog.devgenius.io/how-to-run-ubuntu-amd64-in-macbook-pro-arm64-with-docker-97f0c1e32e25?sk=38397ed7ed89b3a4118d821ecf61703d),

    for which you need an emulator (e.g. QEMU).

    Or you run an arm64 Ubuntu/Linux Docker container and use a TensorFlow build for arm64.

    This will also mean adjusting your TensorFlow installation: https://github.com/deganza/Install-TensorFlow-on-Mac-M1-GPU/blob/main/Install-TensorFlow-on-Mac-M1-GPU.ipynb

    So probably the modification described with tensorflow-cpu would be sufficient.
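
    To check which architecture the container is actually running as, Python can tell you from inside the container (a small sketch):

    # Sketch: 'aarch64' means a native arm64 image; 'x86_64' means the container
    # runs as amd64 (emulated on an Apple-silicon host).
    import platform

    print(platform.machine())
    print(platform.platform())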

    Installing TensorFlow on Apple M1-M4

    This answer contains all the details - it is not so trivial because version numbers have to be locked: https://python.plainenglish.io/uv-the-python-package-manager-that-actually-respects-your-time-cdde36e74fd4?sk=1f1ec89e3631ea64d77a648b6ceac0a6

    I once had a similar problem on an HPC cluster where CUDA was old (version 8 or so). Pip tries to upgrade all your packages, regardless of what you actually need.

    I used conda and poetry to lock versions. Today there is something much better: uv. I wrote a blog article about it: https://python.plainenglish.io/uv-the-python-package-manager-that-actually-respects-your-time-cdde36e74fd4?sk=1f1ec89e3631ea64d77a648b6ceac0a6 . uv is about 100x faster than pip and at the same time more precise (it locks version numbers). So from now on I will always use uv for this kind of thing.

    Install uv by:

    curl -LsSf https://astral.sh/uv/install.sh | sh
    

    and from then on, replace pip with uv pip in all your pip commands; they will then run through uv (the uv developers very cleverly offer this pip-like interface).

    AVX problem with macOS

    The final conclusion was that one cannot run TensorFlow within a Docker container on macOS with an arm64 architecture, because TensorFlow is built with AVX instructions, which are not available there.

    So even if you run an amd64 Docker container, the AVX problem stays.

    If one installed TensorFlow directly on macOS, using tensorflow-macos, this would work, because that build does not use those special instructions.

    Our "solution" was to use Pytorch face recognition models instead of Tensorflow ones. By that, you can make the Docker container truly platform independent.

    One running example:

    nano Dockerfile

    FROM python:3.11-slim
    COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
    
    WORKDIR /app
    
    RUN apt-get update && apt-get install -y \
    ffmpeg libsm6 libxext6 curl \
    && rm -rf /var/lib/apt/lists/*
    
    COPY requirements.txt .
    RUN uv pip install --system -r requirements.txt
    
    COPY . .
    
    EXPOSE 8000
    
    CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
    

    So we have it with uv (for slightly more speed). The --system flag is important: it means no extra virtual environment is created; the packages are installed globally into the system, but via uv.

    nano requirements.txt

    fastapi
    uvicorn
    facenet-pytorch
    torch==2.1.2
    torchvision==0.16.2
    Pillow
    numpy==1.26.4 # last 1.x numpy
    python-multipart
    

    Later, one can open an interactive session, look at the lock file uv generates, and use that in the future.

    nano app.py

    from fastapi import FastAPI, File, UploadFile, HTTPException
    from facenet_pytorch import MTCNN, InceptionResnetV1
    from PIL import Image
    import numpy as np
    import torch
    import io
    
    app = FastAPI(title="Face Verification API", docs_url="/docs", redoc_url="/redoc")
    
    # Device: GPU, M1 (MPS), or CPU
    if torch.cuda.is_available():
        device = 'cuda'
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        device = 'mps'
    else:
        device = 'cpu'
    
    # Load models
    mtcnn = MTCNN(keep_all=False, device=device)
    resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
    
    def extract_embedding(image):
        face = mtcnn(image)
        if face is None:
            return None
        return resnet(face.unsqueeze(0).to(device)).detach().cpu().numpy()
    
    @app.get("/")
    def root():
        return {"message": "Face Verification API is running!"}
    
    @app.post("/verify")
    async def verify_faces(img1: UploadFile = File(...), img2: UploadFile = File(...)):
        try:
            img1_bytes = await img1.read()
            img2_bytes = await img2.read()
    
            img1_pil = Image.open(io.BytesIO(img1_bytes)).convert("RGB")
            img2_pil = Image.open(io.BytesIO(img2_bytes)).convert("RGB")
    
            emb1 = extract_embedding(img1_pil)
            emb2 = extract_embedding(img2_pil)
    
            if emb1 is None or emb2 is None:
                raise HTTPException(status_code=400, detail="Face not detected in one or both images.")
    
            distance = np.linalg.norm(emb1 - emb2)
            match = distance < 0.8
    
            return {"match": bool(match), "distance": float(distance)}
    
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))
    

    docker build -t facenet-api .
    docker run --rm -p 8000:8000 facenet-api

    deepface % curl -X POST localhost:8000/verify \
    -F "img1=@tests/dataset/img1.jpg" \
    -F "img2=@tests/dataset/img7.jpg"
    {"match":true,"distance":0.7869675755500793}