import tensorflow as tf  # TensorFlow 1.x API (use tf.compat.v1 under TensorFlow 2.x)

# Adadelta: per-parameter learning rates from decaying averages of squared gradients and updates
optimizer = tf.train.AdadeltaOptimizer(
    learning_rate=0.001,
    rho=0.95,
    epsilon=1e-8,
    use_locking=False,
    name="Adadelta")
# Adagrad: divides the learning rate by the accumulated sum of squared gradients
optimizer = tf.train.AdagradOptimizer(
    learning_rate=0.001,
    initial_accumulator_value=0.1,
    use_locking=False,
    name="Adagrad")
# Adam: bias-corrected first- and second-moment estimates of the gradients
optimizer = tf.train.AdamOptimizer(
    learning_rate=0.001,
    beta1=0.9,
    beta2=0.999,
    epsilon=1e-8,
    use_locking=False,
    name="Adam")
# FTRL (Follow-the-Regularized-Leader): supports L1/L2 regularization, common for sparse linear models
optimizer = tf.train.FtrlOptimizer(
    learning_rate=0.001,
    learning_rate_power=-0.5,
    initial_accumulator_value=0.1,
    l1_regularization_strength=0.0,
    l2_regularization_strength=0.0,
    use_locking=False,
    name="Ftrl",
    accum_name=None,
    linear_name=None,
    l2_shrinkage_regularization_strength=0.0)
# Momentum: SGD with momentum; set use_nesterov=True for Nesterov accelerated gradient
optimizer = tf.train.MomentumOptimizer(
    learning_rate=0.001,
    momentum=0.9,
    use_locking=False,
    name="Momentum",
    use_nesterov=False)
# RMSProp: scales the learning rate by a moving average of squared gradients;
# centered=True normalizes by the estimated gradient variance
optimizer = tf.train.RMSPropOptimizer(
    learning_rate=0.001,
    decay=0.9,
    momentum=0.0,
    epsilon=1e-10,
    use_locking=False,
    centered=False,
    name="RMSProp")
# SGD: plain gradient descent with a fixed learning rate
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.001,
    use_locking=False,
    name="GradientDescent")
Source: CSDN
Author: Phoenix hui
Link: https://blog.csdn.net/weixin_36836622/article/details/91487973