Summary of loss function optimizers
import tensorflow as tf

# Adadelta: adapts per-parameter learning rates using a decaying window of squared gradients (decay rate rho)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.001, rho=0.95, epsilon=1e-8,
                                        use_locking=False, name="Adadelta")

# Adagrad: scales each parameter's step by the inverse square root of its accumulated squared gradients
optimizer = tf.train.AdagradOptimizer(learning_rate=0.001, initial_accumulator_value=0.1,
                                      use_locking=False, name="Adagrad")

# Adam: combines momentum (beta1) with RMS scaling of the gradients (beta2)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                                   use_locking=False, name="Adam")

# FTRL: follow-the-regularized-leader, with optional L1/L2 regularization
optimizer = tf.train.FtrlOptimizer(learning_rate=0.001, learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0,
                                   use_locking=False, name="Ftrl")
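Any of the optimizers above is applied to a loss tensor through its minimize() method, which builds the gradient-update op that is then run in a session. Below is a minimal sketch using the same TF 1.x graph API; the loss (mean squared error on a tiny linear model) and all variable/placeholder names are illustrative assumptions, not part of the original note.

import tensorflow as tf

# Toy linear model: pred = x * w + b
x = tf.placeholder(tf.float32, shape=[None, 1], name="x")
y = tf.placeholder(tf.float32, shape=[None, 1], name="y")
w = tf.Variable(tf.zeros([1, 1]), name="w")
b = tf.Variable(tf.zeros([1]), name="b")

pred = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.square(pred - y))   # mean squared error loss

# Any optimizer from the list above can be swapped in here
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss)          # op that applies one gradient step

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One training step on a single toy example
    _, loss_val = sess.run([train_op, loss], feed_dict={x: [[1.0]], y: [[2.0]]})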