A Summary of Loss Function Optimizers in TensorFlow

Submitted by 一世执手 on 2020-02-28 00:54:28
import tensorflow as tf  # TensorFlow 1.x API; in TensorFlow 2.x use tf.compat.v1.train or tf.keras.optimizers

# Adadelta: per-parameter adaptive learning rates from a decaying window of squared gradients
optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.001,
                                       rho=0.95,
                                       epsilon=1e-8,
                                       use_locking=False,
                                       name="Adadelta")

# Adagrad: accumulates squared gradients, so frequently updated parameters take smaller steps
optimizer = tf.train.AdagradOptimizer(learning_rate=0.001,
                                      initial_accumulator_value=0.1,
                                      use_locking=False,
                                      name="Adagrad")

# Adam: momentum on the gradient (beta1) combined with an adaptive per-parameter scale (beta2)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-8,
                                   use_locking=False,
                                   name="Adam")

# FTRL (Follow The Regularized Leader): common for sparse/wide linear models; supports L1 and L2 regularization
optimizer = tf.train.FtrlOptimizer(learning_rate=0.001,
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0,
                                   use_locking=False,
                                   name="Ftrl",
                                   accum_name=None,
                                   linear_name=None,
                                   l2_shrinkage_regularization_strength=0.0)

# Momentum: accumulates a velocity term; set use_nesterov=True for Nesterov accelerated gradient
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                       momentum=0.9,
                                       use_locking=False,
                                       name="Momentum",
                                       use_nesterov=False)

# RMSProp: divides the learning rate by a moving average of squared gradients; centered=True also subtracts the mean gradient
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001,
                                      decay=0.9,
                                      momentum=0.0,
                                      epsilon=1e-10,
                                      use_locking=False,
                                      centered=False,
                                      name="RMSProp")

# SGD: plain gradient descent, each step moves parameters by learning_rate * gradient
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001,
                                              use_locking=False,
                                              name="GradientDescent")
