Question
I'm translating some code from MATLAB to Python. The code simulates the behaviour of a model, and I want to estimate the model parameters from data. The problem is that the results obtained with Python and with MATLAB are very different. I thought it was related to the difference between MATLAB's fmincon and Python's scipy.optimize.minimize, but according to a tutorial I found on YouTube (https://www.youtube.com/watch?v=SwogAa1719M) the two give almost the same results, so the problem must be in my code, but I can't find it.
I report a minimal example of my code:
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from scipy.optimize import minimize, Bounds

g = 0.07  # fixed rate, not estimated (same value as in the MATLAB code below)

def cost_function(parameters, measured, t, x0, total_active):
    # Compare the relative increments of the simulated third state with those of the data
    Tp = simulate(parameters, t, x0, total_active)
    measured = np.squeeze(measured)
    Tp = Tp[:, 2]
    return np.sum((np.diff(measured) / measured[1:] - np.diff(Tp) / Tp[1:]) ** 2)
def SIR_model(x, t, params, total_active):
    S0, _, R0 = x
    v, tau, I0 = params
    dSdt = -v * S0 * I0 / total_active(t)
    dIdt = v * S0 * I0 / total_active(t) - g * I0 - tau * I0
    dCdt = tau * I0
    return [dSdt, dIdt, dCdt]
def simulate(p, t, x0, total_active):
    # Integrate one sub-interval at a time, restarting from the last state
    # (see the single-call cross-check after the results below)
    T = np.zeros((len(t), 3))
    T[0, :] = x0
    for i in range(len(t) - 1):
        ts = [t[i], t[i + 1]]
        y = odeint(SIR_model, x0, ts, args=(p, total_active))
        x0 = y[-1]
        T[i + 1, :] = x0
    return T
def identify_model(data, initial_guess, t, N, total_active):
    # Set the initial condition of the model
    x0 = []
    Q0 = data[0]
    x0.append(N - initial_guess['It0'] - Q0)
    x0.append(initial_guess['It0'])
    x0.append(Q0)
    It0 = initial_guess['It0']
    v = initial_guess['v']
    tau = initial_guess['tau']
    lim_sup = [v * 10, tau * 1.5, It0 * 1.3]
    lim_inf = [v * 0, tau * 0.9, It0 * 0.7]
    bounds = Bounds(lim_inf, lim_sup)
    options = {"maxiter": 1000, "ftol": 1e-08}
    return minimize(cost_function,
                    np.asarray([v, tau, It0]),
                    args=(data, t, x0, total_active),
                    bounds=bounds, options=options,
                    tol=1e-08, method="SLSQP")['x']
data=[275.5,317.,457.33333333,646.,888.66666667,1236.66666667,1619.33333333,2077.33333333,2542.33333333]
times = [i for i in range(len(data))]
total_active_data=[59999725.,59999684.33333334,59999558.66666666,59999385.33333334,59999158.33333333,59998823.,59998474.66666666,59998053.33333333,59997652.66666666]
total_active = interp1d([i for i in range(len(total_active_data))], total_active_data, fill_value="extrapolate")
initial_guess = {"v": 0.97, "tau": 0.066, "It0": 100}
print(identify_model(data,initial_guess,times,60e6,total_active))
This snippet gives, as a result, [0.97099097, 0.099, 130.].
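As a side check (a minimal sketch on my part, assuming the per-step loop in simulate is only there for convenience), the loop should be equivalent to a single odeint call over the whole time grid; the helper simulate_single_call below exists only for this illustration:
# Illustration-only cross-check: integrating interval by interval and restarting
# from the last state should match one odeint call over the full grid
def simulate_single_call(p, t, x0, total_active):
    return odeint(SIR_model, x0, t, args=(p, total_active))

p0 = [initial_guess['v'], initial_guess['tau'], initial_guess['It0']]
x0_chk = [60e6 - initial_guess['It0'] - data[0], initial_guess['It0'], data[0]]
print(np.max(np.abs(simulate(p0, times, x0_chk, total_active)
                    - simulate_single_call(p0, times, x0_chk, total_active))))
If the printed difference is essentially zero, the stepping loop itself is not the source of the discrepancy.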
The (I hope) equivalent MATLAB code is:
function [pars] = Identify_Model(data,initial_guess,lim_inf,lim_sup,times,N,total_active)
    scalefac = 100;
    % Initialize the initial guess for each parameter
    v = initial_guess.v;
    It0 = initial_guess.It0;
    tau = initial_guess.tau;
    g = 0.07;
    lim_inf(3) = lim_inf(3)/scalefac;
    lim_sup(3) = lim_sup(3)/scalefac;
    % Identify the model parameters
    options = optimset('MaxIter',100,'TolFun',1e-6,'TolX',1e-6);
    parmin = fmincon(@(pars) error_SIR(data,[pars(1),pars(2),g,scalefac*pars(3)],times,N,total_active), ...
                     [v,tau,It0],[],[],[],[],lim_inf,lim_sup,[],options);
    pars = num2cell([parmin(1:2),scalefac*parmin(3)]);
    pars = [pars(1),pars(2),g,pars(3)];
end
function [costo] = error_SIR(data,pars,tempi,N,totale_attivi)
    % Assign the parameters
    pars = num2cell(pars);
    [v,tau,g,I0] = pars{:};
    % Simulate the model
    Q0 = data(1,1);
    S0 = N-I0-Q0;
    [~,y] = ode45(@(t,x) SIR(t,x,v,tau,g,totale_attivi),tempi,[S0;I0;Q0]);
    if length(tempi)==2
        y = [y(1,:);y(end,:)];
    end
    % Identify on the normalized data (Data/mean data)
    costo = sum(((diff(sum(data,1))./sum(data(:,2:end),1))-(diff(sum(y(:,3),2))./sum(y(2:end,3),2))').^2);
end
function y = SIR(t,x,v,tau,g,total_active)
    y = zeros(3,1);
    y(1) = -v*x(1)*x(2)/total_active(t);              % S
    y(2) = v*x(1)*x(2)/total_active(t)-(tau+g)*x(2);  % I
    y(3) = tau*x(2);                                  % C
end
total_active_data=[59999725.,59999684.333,59999558.666,59999385.333,59999158.33,59998823.,59998474.66,59998053.333,59997652.66666666]
total_active = @(t) interp1(1:9,total_active_data,t);
initial_guess.It0=100;
initial_guess.v=0.97;
initial_guess.g=0.07;
initial_guess.tau=0.066;
g=initial_guess.g;
It0=initial_guess.It0;
v=initial_guess.v;
tau=initial_guess.tau;
N=60e6
%Define the constraints for the identification
lim_sup=[v*10, tau*1.5, It0*1.3];
lim_inf=[v*0, tau*0.9, It0*0.7];
data=[275.5,317.,457.33333333,646.,888.66666667,1236.66666667,1619.33333333,2077.33333333,2542.33333333]
times=1:9;
%identify the model parameters
pars=Identify_Model(data,initial_guess,lim_inf,lim_sup,times,N,total_active)
And the result is {[0.643004812025865]} {[0.0989999761533351]} {[0.07]} {[129.9999687237]} (ignore the value 0.07, it is fixed). I thought the problem could be linked to the fact that I am minimizing a non-convex function, and that maybe fmincon is simply more powerful than scipy.optimize.minimize?
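To check whether this is really an optimizer issue rather than a difference in the cost functions, a minimal sanity check I could run (assuming the Python definitions above; scipy_sol, fmincon_sol and x0_py are names used only for this illustration) is to evaluate the Python cost function at both reported solutions:
# Illustration-only sanity check: score both reported solutions with the Python
# cost function. If the fmincon solution is also cheaper here, both tools see the
# same objective and fmincon simply found a better minimum; if not, the two cost
# functions are probably not equivalent.
scipy_sol = np.asarray([0.97099097, 0.099, 130.0])                    # scipy SLSQP result
fmincon_sol = np.asarray([0.643004812025865, 0.099, 129.9999687237])  # fmincon result (without g)
x0_py = [60e6 - initial_guess['It0'] - data[0], initial_guess['It0'], data[0]]  # same x0 as identify_model
for name, p in [("scipy", scipy_sol), ("fmincon", fmincon_sol)]:
    print(name, cost_function(p, data, times, x0_py, total_active))
Whichever parameter set gives the lower value here is the better minimum of the Python objective, so this would at least tell me whether the two codes agree on what is being minimized.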
Source: https://stackoverflow.com/questions/64790692/different-behaviour-between-matlab-fmincon-and-scipy-optimize-minimize