# Source code for mindspore.nn.optim.sgd

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""sgd"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from .optimizer import Optimizer

_sgd_opt = C.MultitypeFuncGraph("sgd_opt")
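# _sgd_opt is a MultitypeFuncGraph: the overloads registered on it are selected according to the
# types of its inputs. The single overload below handles the Tensor case; it is intended to be
# mapped over every (gradient, weight, accum, stat) tuple of the optimizer, typically via the
# HyperMap created in SGD.__init__ below (the construct method is not part of this excerpt).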

@_sgd_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, accum, stat):
"""Apply sgd optimizer to the weight parameter using Tensor."""
success = True
success = F.depend(success, opt(weight, gradient, learning_rate, accum, momentum, stat))
return success

class SGD(Optimizer):
"""
Implements stochastic gradient descent (optionally with momentum).

Introduction to SGD can be found at https://en.wikipedia.org/wiki/Stochastic_gradient_descent.
Nesterov momentum is based on the formula from paper On the importance of initialization and
momentum in deep learning <http://proceedings.mlr.press/v28/sutskever13.html>_.

    Note:
        When parameters are grouped, the weight decay of each group is applied to that group's parameters
        if the group's weight decay is positive. When parameters are not grouped, the weight_decay in the API
        is applied to the parameters whose names contain neither 'beta' nor 'gamma', provided weight_decay
        is positive.

        To improve performance when using parameter groups, a customized order of parameters is supported.

    .. math::
        v_{t+1} = u \ast v_{t} + gradient \ast (1 - dampening)

    If nesterov is True:

    .. math::
        p_{t+1} = p_{t} - lr \ast (gradient + u \ast v_{t+1})

    If nesterov is False:

    .. math::
        p_{t+1} = p_{t} - lr \ast v_{t+1}

    Note that for the first step, :math:`v_{t+1} = gradient`.

    Here, :math:`p`, :math:`v` and :math:`u` denote the parameters, accum and momentum respectively.
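
    For example, with illustrative values :math:`u = 0.9`, dampening :math:`0.0` and a constant gradient
    of :math:`1.0`, the accumulator is :math:`v_{1} = 1.0` after the first step and
    :math:`v_{2} = 0.9 \ast 1.0 + 1.0 = 1.9` after the second, so with nesterov set to False each step
    moves the parameter by :math:`lr \ast v_{t+1}`.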

    Args:
        params (Union[list[Parameter], list[dict]]): When the params is a list of Parameter which will be updated,
            the element in params should be class Parameter. When the params is a list of dict, the "params",
            "lr", "weight_decay" and "order_params" are the keys that can be parsed.

            - params: Required. The value should be a list of Parameter.

            - lr: Optional. If "lr" is in the keys, the value of the corresponding learning rate will be used.
              If not, the learning_rate in the API will be used.

            - weight_decay: Optional. If "weight_decay" is in the keys, the value of the corresponding weight decay
              will be used. If not, the weight_decay in the API will be used.

            - order_params: Optional. If "order_params" is in the keys, its value should be the order of parameters,
              and this order will be followed in the optimizer. There should be no other keys in this dict, and the
              parameters listed in the value of 'order_params' should be in one of the group parameters.

        learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning
            rate. When the learning_rate is an Iterable or a Tensor with a dimension of 1, the dynamic learning rate
            is used, and the i-th step takes the i-th value as the learning rate. When the learning_rate is a
            LearningRateSchedule, the dynamic learning rate is used, and the i-th learning rate is calculated during
            training according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor
            with a dimension of 0, a fixed learning rate is used. Other cases are not supported. The float learning
            rate should be equal to or greater than 0. If the type of learning_rate is int, it will be converted to
            float. Default: 0.1.
        momentum (float): A floating point value for the momentum. It should be at least 0.0. Default: 0.0.
        dampening (float): A floating point value of dampening for momentum. It should be at least 0.0. Default: 0.0.
        weight_decay (float): Weight decay (L2 penalty). It should be in the range [0.0, 1.0]. Default: 0.0.
        nesterov (bool): Enables Nesterov momentum. If nesterov is used, momentum must be positive
            and dampening must be equal to 0.0. Default: False.
        loss_scale (float): A floating point value for the loss scale. It should be not less than 1.0. Default: 1.0.

    Inputs:
        - **gradients** (tuple[Tensor]) - The gradients of params, the shape is the same as params.

    Outputs:
        Tensor[bool], the value is True.

    Raises:
        ValueError: If the momentum, dampening or weight_decay value is less than 0.0.

    Examples:
        >>> net = Net()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.SGD(params=net.trainable_params())
        >>>
        >>> #2) Use parameter groups and set different values
        >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
        >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
        >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
        >>>                 {'params': no_conv_params, 'lr': 0.01},
        >>>                 {'order_params': net.trainable_params()}]
        >>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
        >>> # The parameters in conv_params will use the default learning rate of 0.1 and a weight decay of 0.01.
        >>> # The parameters in no_conv_params will use a learning rate of 0.01 and the default weight decay of 0.0.
        >>> # The optimizer follows the order of parameters given by the value of 'order_params'.
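        >>>
        >>> #3) Momentum with Nesterov acceleration (illustrative values; nesterov requires a positive
        >>> #   momentum and a dampening of 0.0, as validated in __init__ below)
        >>> optim_nesterov = nn.SGD(params=net.trainable_params(), learning_rate=0.01, momentum=0.9, nesterov=True)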
        >>>
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
    """
    def __init__(self, params, learning_rate=0.1, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False,
                 loss_scale=1.0):

        super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale)

        if isinstance(momentum, int):
            momentum = float(momentum)
        if not isinstance(momentum, float):
            raise TypeError("momentum should be float number!")

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))

        if nesterov and (momentum <= 0.0 or dampening != 0.0):
            raise ValueError("If use nesterov, momentum must be positive and dampening must equal to 0.0,"
                             "but got momentum {}, dampening {}".format(momentum, dampening))

        if isinstance(dampening, int):
            dampening = float(dampening)
        if not isinstance(dampening, float):
            raise TypeError("dampening should be float number")

        if dampening < 0.0:
            raise ValueError("dampening should be at least 0.0, but got dampening {}".format(dampening))
        self.dampening = dampening

        if isinstance(weight_decay, int):
            weight_decay = float(weight_decay)

        validator.check_value_type("nesterov", nesterov, [bool], self.cls_name)
        self.nesterov = nesterov

        self.opt = P.SGD(dampening, weight_decay, nesterov)

        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.accum = self.parameters.clone(prefix="accum", init='zeros')
        self.stat = self.parameters.clone(prefix="stat", init='ones')
        self.hyper_map = C.HyperMap()
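
    # Note: the construct method of this optimizer is not included in this excerpt. As a rough,
    # illustrative sketch only (helper names such as decay_weight, scale_grad and get_lr are assumed
    # from the Optimizer base class and may differ between versions), it would apply weight decay and
    # loss scaling to the gradients and then map _sgd_opt over each (gradient, parameter, accum, stat)
    # tuple:
    #
    #     def construct(self, gradients):
    #         gradients = self.decay_weight(gradients)
    #         gradients = self.scale_grad(gradients)
    #         lr = self.get_lr()
    #         success = self.hyper_map(
    #             F.partial(_sgd_opt, self.opt, self.momentum, lr),
    #             gradients, self.parameters, self.accum, self.stat)
    #         return success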