问题
I am trying to solve the KMV–Merton model for default prediction (based on the Black–Scholes model) in Python. I used the following code as a jumping-off point for my own.
My code is as follows:
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 15:42:24 2018
@author: CAFRAL
"""
from io import StringIO
import time, boto3
import pandas as pd
import numpy as np
import scipy.optimize as sco
from scipy.stats import norm
import datetime
# --- Synthetic test data -------------------------------------------------
# One trading year (252 rows) of made-up inputs so the solver below can be
# exercised without a real data feed.
r = np.random.uniform(low=0.2, high=0.9, size=252)        # risk-free rate
px = np.random.uniform(low=10, high=20, size=252)         # share price
cso = np.random.uniform(low=10000, high=40000, size=252)  # shares outstanding
cud = np.random.uniform(low=200000, high=800000, size=252)  # current debt
ltd = np.random.uniform(low=400000, high=900000, size=252)  # long-term debt
d = pd.DataFrame({'r': r, 'px': px, 'cso': cso, 'cud': cud, 'ltd': ltd})
# Ensure the balance-sheet and market columns are floats.
for _col in ('cud', 'ltd', 'px', 'cso'):
    d[_col] = d[_col].astype(float)
# --- Key model inputs ----------------------------------------------------
# f: default point = current debt + half of long-term debt
# e: market value of equity = |price| * shares outstanding
# a: initial guess for asset value = f + e
d['f'] = d['cud'] + 0.5 * d['ltd']
d['e'] = d['px'].abs() * d['cso']
d['a'] = d['f'] + d['e']
# Defining a function for the Black-Scholes equation
def bseqn(a, debug=False):
    """Black-Scholes pricing residual, used as the root-finding objective.

    Returns e - BS_call(a), i.e. the gap between the observed equity value
    and the equity value implied by asset value `a`; a root of this function
    is the asset value consistent with the observed equity.

    NOTE: reads `f`, `r`, `e`, `sigma_a` and `T` from module globals, which
    the solver loop reassigns before each call.

    Parameters
    ----------
    a : float
        Candidate firm asset value.
    debug : bool, optional
        If True, print the intermediate d1/d2 terms and the residual.
    """
    d1 = (np.log(a/f) + (r + 0.5*sigma_a**2)*T)/(sigma_a*np.sqrt(T))
    d2 = d1 - sigma_a*np.sqrt(T)
    y1 = e - (a*norm.cdf(d1) - np.exp(-r*T)*f*norm.cdf(d2))
    if debug:
        print("d1 = {:.6f}".format(d1))
        print("d2 = {:.6f}".format(d2))
        # Bug fix: original formatted the string literal 'y1' with a float
        # spec ("{:.6f}".format('y1')), which raises ValueError. Format the
        # residual value itself.
        print("Error = {:.6f}".format(y1))
    return y1
# --- Solving the model ---------------------------------------------------
# Iteratively solve for the asset value a(t) and asset volatility sigma_a
# at each timestep, alternating root-finding (via bseqn) and a re-estimate
# of sigma_a from the log-returns of the solved asset values.
time_horizon=[1]
timesteps = range(1, len(d))
# One column of results per horizon; seeded below with the initial guess 'a'.
results = np.empty((d.shape[0],len(time_horizon)))
# Loop over horizons, then over timesteps within each horizon.
for i, years in enumerate(time_horizon):
    T = 252  # horizon expressed in trading days
    results[:,i] = d.loc[:,'a']  # initial asset-value guesses
    for i_t, t in enumerate(timesteps):
        # NOTE(review): for t < 10 the slice start t-10 is negative, so this
        # wraps around and takes a window from the END of the array (and for
        # t == 1 it is results[-9:1], an EMPTY slice). np.nanstd of an empty
        # window emits the "Degrees of freedom <= 0" warning and yields NaN,
        # which then makes newton() fail with "value is nan". fsolve merely
        # hides the same problem by returning the initial guess. Clamping the
        # window start at 0 (e.g. results[max(t-10,0):t,i]) should be
        # confirmed as the intended fix.
        a = results[t-10:t,i]
        # NOTE(review): np.roll pairs the first element with the LAST, so one
        # ratio in every window is spurious — presumably ra[1:] was intended.
        ra =np.log(a/np.roll(a,1))
        sigma_a = np.nanstd(ra) #gives initial value of sigma_a
        if i_t == 0:
            # First step: also (re)solve the previous point.
            subset_timesteps = range(t-1, t+1)
            print(subset_timesteps)
        else:
            subset_timesteps = [t]
        n_its = 0
        # Fixed-point iteration: cap at 10 alternations of solve/re-estimate.
        while n_its < 10:
            n_its += 1
            for t_sub in subset_timesteps:
                # Set the globals read by bseqn for this timestep.
                r = d.iloc[t_sub]['r']
                f = d.iloc[t_sub]['f']
                e = d.iloc[t_sub]['e']
                sol = sco.newton(bseqn, results[t_sub,i]) #if I replace newton with fsolve the code works properly
                results[t_sub,i] = sol # stores the new values of a
            # Update sigma_a based on new values of a
            last_sigma_a = sigma_a
            a = results[t-10:t,i]
            ra = np.log(a/np.roll(a,1))
            sigma_a = np.nanstd(ra) #new val of sigma
            diff = last_sigma_a - sigma_a
            # Converged when sigma_a stops moving; persist it and stop.
            if abs(diff) < 1e-3:
                d.loc[t_sub, 'sigma_a'] = sigma_a
                break
            else:
                pass
When I run my code using the fsolve method, it runs just fine. However, once I use the Newton method, I get the following error.
C:\Users\CAFRAL\Anaconda3\lib\site-packages\numpy\lib\nanfunctions.py:1434: RuntimeWarning: Degrees of freedom <= 0 for slice.
keepdims=keepdims)
C:\Users\CAFRAL\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:879: RuntimeWarning: invalid value encountered in greater
return (self.a < x) & (x < self.b)
C:\Users\CAFRAL\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:879: RuntimeWarning: invalid value encountered in less
return (self.a < x) & (x < self.b)
C:\Users\CAFRAL\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:1738: RuntimeWarning: invalid value encountered in greater_equal
cond2 = (x >= self.b) & cond0
Traceback (most recent call last):
File "<ipython-input-70-8382063cd297>", line 72, in <module>
sol = sco.newton(bseqn, results[t_sub,i])#results[t_sub,i] is the initial guess for a
File "C:\Users\CAFRAL\Anaconda3\lib\site-packages\scipy\optimize\zeros.py", line 206, in newton
raise RuntimeError(msg)
RuntimeError: Failed to converge after 50 iterations, value is nan
Could someone please help me understand the following: a) Why does the Newton method not converge? b) Is it possible to fix the convergence error in the Newton method? If yes, how?
来源:https://stackoverflow.com/questions/52270610/newton-method-not-converging-after-iteration-python-scipy