I'm trying to figure out what the best `conf` and `iou` values are for `model.predict`.
from ultralytics import YOLO
import pandas as pd
import numpy as np

# Exhaustive grid search: for every saved epoch checkpoint, validate the
# model across a grid of confidence and IoU thresholds and record mAP50.
rows = []
for epoch in range(1, 105):
    print('epoch: ', epoch)
    try:
        # Load the checkpoint for this epoch.
        model = YOLO(f'/content/weights/epoch{epoch}.pt')
        for confidence in np.arange(0.1, 0.4, 0.02):
            for inter in np.arange(0.1, 0.8, 0.05):
                # Customize validation settings
                validation_results = model.val(data='/content/myproject/data.yaml',
                                               imgsz=640,
                                               batch=16,
                                               conf=confidence,
                                               iou=inter,
                                               device='cpu')
                rows.append({"epoch": epoch,
                             "conf": confidence,
                             "iou": inter,
                             "map50": validation_results.box.map50})
    except Exception as err:
        # A missing checkpoint file is expected (not every epoch was saved),
        # but report the failure instead of silently swallowing all errors.
        print(f'epoch {epoch} skipped: {err}')

# DataFrame.append was removed in pandas 2.0 (and was O(n^2) anyway);
# build the frame once from the collected rows instead.
df = pd.DataFrame(rows)
print(df.sort_values(by=['map50'], ascending=False).head(3))
Above is my attempt at trying every combination and sorting the results by the highest mean average precision (mAP).
This is slow because it tries every combination exhaustively. Could a package like Optuna or another Bayesian optimization library be used? What would you do to optimize this scenario?
Here is an answer using Optuna. The following sets up an objective that trials various `iou` and `conf` values until it finds the maximum mAP50 score. This also gives a better understanding of the `iou` and `conf` values and how they affect the mAP score.
!pip install optuna
from ultralytics import YOLO
import pandas as pd
import numpy as np
class Objective:
    """Optuna objective maximizing YOLO validation mAP50 over the
    (epoch checkpoint, confidence threshold, IoU threshold) search space."""

    def __init__(self):
        # Best mAP50 observed so far; updated by `callback` on a new best trial.
        self.best_map = 0.0

    def __call__(self, trial):
        """Evaluate one sampled configuration and return its mAP50."""
        epoch = trial.suggest_int("epoch", 1, 104)
        confidence = trial.suggest_float("confidence", 0.05, 0.5)
        inter = trial.suggest_float("iou", 0.1, 0.8)
        model = YOLO(f'/content/weights/epoch{epoch}.pt')
        validation_results = model.val(data='/content/myproject/data.yaml',
                                       imgsz=640,
                                       batch=16,
                                       conf=confidence,
                                       iou=inter,
                                       device='cpu')
        # Convert once; `map50` avoids shadowing the builtin `map`.
        map50 = float(validation_results.box.map50)
        print(map50)
        # Stash the score so `callback` can report it if this trial wins.
        self._map = map50
        return map50

    def callback(self, study, trial):
        """Optuna callback: announce and record a new best score."""
        if study.best_trial == trial:
            self.best_map = self._map
            print('NEW BEST MAP: ', self._map)
import warnings

import optuna
from optuna.samplers import TPESampler

# Silence RuntimeWarnings raised during sampling (log-related noise).
warnings.filterwarnings("ignore", category=RuntimeWarning)  # for log error

objective = Objective()

# Seed the TPE sampler so the search is reproducible run to run.
sampler = TPESampler(seed=10)
study = optuna.create_study(
    direction="maximize",
    sampler=sampler,
    pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),
)
study.optimize(objective, n_trials=1000, callbacks=[objective.callback])

# Report the winning trial's hyperparameters.
print("Best trial:")
trial = study.best_trial
print(" Params: ")
for key, value in trial.params.items():
    print(f" {key}: {value}")