python · mathematical-optimization · scipy-optimize · weibull

Should I optimize all three parameters of a Weibull distribution function at the same time in a Python script?


I want to optimize the coefficients of the scale, shape, and location functions for a Weibull PDF.

this is the code:

import numpy as np
from scipy.optimize import minimize
from scipy.stats import weibull_min

# Synthetic "ground truth" Weibull parameters for num_sets data sets.
num_sets = 7
shape_params = np.random.uniform(1.5, 9, num_sets)  # shape parameter (k)
loc_params = np.random.uniform(10, 100, num_sets)    # location parameter
scale_params = np.random.uniform(500, 1500, num_sets) # scale parameter (lambda)

# Operating conditions for the num_sets sets: temperature, pH value, humidity, CO2 concentration
conditions = np.random.uniform([80, 6, 30, 400], [95, 8, 80, 600], (num_sets, 4))

# Linear models mapping the 4 operating conditions to the Weibull parameters.
def weibull_params(coeffs, conditions):
    """Predict (shape, loc, scale) arrays from the condition matrix.

    `coeffs` holds three consecutive groups of 5 coefficients — an
    intercept followed by one weight per condition column — for the
    shape, loc and scale models respectively.
    """
    def _linear(offset):
        # Accumulate intercept + w_j * condition_j strictly left to right
        # so the floating-point result matches the original formulation.
        value = coeffs[offset]
        for j in range(4):
            value = value + coeffs[offset + 1 + j] * conditions[:, j]
        return value

    return _linear(0), _linear(5), _linear(10)

# Joint objective: fits all three parameter models simultaneously.
def objective_function(coeffs, conditions, shape_params, loc_params, scale_params):
    """Sum of squared errors between predicted and target Weibull parameters."""
    pred_shape, pred_loc, pred_scale = weibull_params(coeffs, conditions)
    per_set_error = (pred_shape - shape_params) ** 2 + (pred_loc - loc_params) ** 2 + (pred_scale - scale_params) ** 2
    return np.sum(per_set_error)

# Joint fit: one BFGS run over all 15 coefficients at once.
initial_guess = np.random.uniform(0, 1, 15)
result = minimize(objective_function, initial_guess, args=(conditions, shape_params, loc_params, scale_params), method='BFGS')

optimized_coeffs = result.x

# Report the target parameters next to the values recovered by the joint fit.
optimized_shape, optimized_loc, optimized_scale = weibull_params(optimized_coeffs, conditions)
print("\nOriginal Weibull Parameters:")
print("Shape:", shape_params)
print("Loc:", loc_params)
print("Scale:", scale_params)

print("\nOptimized Weibull Parameters:")
print("Shape:", optimized_shape)
print("Loc:", optimized_loc)
print("Scale:", optimized_scale)

Would it make more sense to optimize the coefficients with three separate objective functions, like this approach:

import numpy as np
from scipy.optimize import minimize
from scipy.stats import weibull_min

# Synthetic "ground truth" Weibull parameters for num_sets data sets.
num_sets = 7
shape_params = np.random.uniform(1.5, 9, num_sets)  # shape parameter (k)
loc_params = np.random.uniform(10, 100, num_sets)   # location parameter
scale_params = np.random.uniform(500, 1500, num_sets)  # scale parameter (lambda)

# Operating conditions for the num_sets sets: temperature, pH value, humidity, CO2 concentration
conditions = np.random.uniform([80, 6, 30, 400], [95, 8, 80, 600], (num_sets, 4))


def weibull_params(coeffs, conditions):
    """Evaluate the three linear condition->parameter models.

    The 15 coefficients are consumed in blocks of 5 (intercept plus one
    weight per condition column) for shape, loc and scale in turn.
    Returns a (shape, loc, scale) tuple of per-set arrays.
    """
    outputs = []
    for base in (0, 5, 10):
        # Strict left-to-right accumulation keeps float results identical
        # to the original expanded expression.
        acc = coeffs[base]
        for col in range(4):
            acc = acc + coeffs[base + 1 + col] * conditions[:, col]
        outputs.append(acc)
    return tuple(outputs)


def shape_objective(coeffs, conditions, shape_params):
    """Sum of squared shape-prediction errors (loc/scale outputs are ignored)."""
    predicted_shape = weibull_params(coeffs, conditions)[0]
    residuals = predicted_shape - shape_params
    return np.sum(residuals ** 2)


def loc_objective(coeffs, conditions, loc_params):
    """Sum of squared loc-prediction errors (shape/scale outputs are ignored)."""
    predicted_loc = weibull_params(coeffs, conditions)[1]
    residuals = predicted_loc - loc_params
    return np.sum(residuals ** 2)


def scale_objective(coeffs, conditions, scale_params):
    """Sum of squared scale-prediction errors (shape/loc outputs are ignored)."""
    predicted_scale = weibull_params(coeffs, conditions)[2]
    residuals = predicted_scale - scale_params
    return np.sum(residuals ** 2)


# Separate fits: each parameter family gets its own BFGS run, all starting
# from the same initial guess.
initial_guess = np.random.uniform(0, 1, 15)
result_shape = minimize(shape_objective, initial_guess, args=(conditions, shape_params), method='BFGS')
result_loc = minimize(loc_objective, initial_guess, args=(conditions, loc_params), method='BFGS')
result_scale = minimize(scale_objective, initial_guess, args=(conditions, scale_params), method='BFGS')

# Extract optimized coefficients from each result
optimized_coeffs_shape = result_shape.x
optimized_coeffs_loc = result_loc.x
optimized_coeffs_scale = result_scale.x

# BUG FIX: the original concatenated the three full 15-element vectors,
# producing a 45-element array — and weibull_params only reads indices
# 0..14, so the loc and scale predictions were silently computed from the
# *shape* fit's coefficients. Each fit only optimizes its own 5-coefficient
# slice meaningfully, so take exactly that slice from each result.
combined_coeffs = np.concatenate((optimized_coeffs_shape[:5],
                                  optimized_coeffs_loc[5:10],
                                  optimized_coeffs_scale[10:15]))
optimized_shape, optimized_loc, optimized_scale = weibull_params(combined_coeffs, conditions)

# Report the target parameters next to the values recovered by the separate fits.
print("\nOriginal Weibull Parameters:")
print("Shape:", shape_params)
print("Loc:", loc_params)
print("Scale:", scale_params)
print("\nOptimized Weibull Parameters:")
print("Shape:", optimized_shape)
print("Loc:", optimized_loc)
print("Scale:", optimized_scale)

I tried both ways, and I believe that optimizing all three parameters at the same time makes more sense.


Solution

  • It is better to optimise all of the parameters simultaneously.

    Just try and make sure the initial guess is reasonably close to getting the maximum peak value and position right and the scale about right and you shouldn't have too much difficulty.

    If you try to do them one at a time you can get stuck in a diagonal valley, bouncing from side to side. In other words, each time you alter one parameter in isolation it is unable to move very far because it hits the valley wall.

    Allowing it to use any linear combination of the parameters gives the fitting code the opportunity to find the direction of steepest descent and so improve the fit much faster. Good fitting routines often use conjugate gradients picking the next search direction to be orthogonal to the one they have used most recently.