ftorch_optim Module

Optimisers module for FTorch.

  • License: FTorch is released under the MIT license. See the LICENSE file for details.

Uses

  • ftorch_optim uses: iso_c_binding, ftorch_tensor, ftorch_types.
    (ftorch_tensor in turn uses iso_c_binding, ftorch_types, iso_fortran_env, and ftorch_devices;
    ftorch_types and ftorch_devices use iso_fortran_env.)

Used by

  • ftorch_optim is used by: ftorch.

Derived Types

type, public :: torch_optim

Type for holding a torch optimizer.

Components

Type Visibility Attributes Name Initial
type(c_ptr), public :: p = c_null_ptr

pointer to the optimizer in memory

Finalization Procedures

final :: torch_optim_delete

Type-Bound Procedures

procedure, public :: step => torch_optim_step
procedure, public :: zero_grad => torch_optim_zero_grad

Subroutines

public subroutine torch_optim_Adam(optim, parameters, learning_rate, beta_1, beta_2, eps, weight_decay, amsgrad)

Create an Adam optimizer

Arguments

Type Intent Optional Attributes Name
type(torch_optim), intent(out) :: optim

Optimizer we are creating

type(torch_tensor), intent(in), dimension(:) :: parameters

Array of parameter tensors

real(kind=real64), intent(in), optional :: learning_rate

learning rate for the optimization algorithm (default: 0.001)

real(kind=real64), intent(in), optional :: beta_1

beta 1 for the optimization algorithm (default: 0.9)

real(kind=real64), intent(in), optional :: beta_2

beta 2 for the optimization algorithm (default: 0.999)

real(kind=real64), intent(in), optional :: eps

eps for the optimization algorithm (default: 1.0e-8)

real(kind=real64), intent(in), optional :: weight_decay

weight_decay for the optimization algorithm (default: 0.0)

logical, intent(in), optional :: amsgrad

enable AMSGrad variant (default: .false.)

public subroutine torch_optim_AdamW(optim, parameters, learning_rate, beta_1, beta_2, eps, weight_decay, amsgrad)

Create an AdamW optimizer

Arguments

Type Intent Optional Attributes Name
type(torch_optim), intent(out) :: optim

Optimizer we are creating

type(torch_tensor), intent(in), dimension(:) :: parameters

Array of parameter tensors

real(kind=real64), intent(in), optional :: learning_rate

learning rate for the optimization algorithm (default: 0.001)

real(kind=real64), intent(in), optional :: beta_1

beta 1 for the optimization algorithm (default: 0.9)

real(kind=real64), intent(in), optional :: beta_2

beta 2 for the optimization algorithm (default: 0.999)

real(kind=real64), intent(in), optional :: eps

eps for the optimization algorithm (default: 1.0e-8)

real(kind=real64), intent(in), optional :: weight_decay

weight_decay for the optimization algorithm (default: 0.01)

logical, intent(in), optional :: amsgrad

enable AMSGrad variant (default: .false.)

public subroutine torch_optim_SGD(optim, parameters, learning_rate, momentum, weight_decay, dampening, nesterov)

Create an SGD optimizer

Arguments

Type Intent Optional Attributes Name
type(torch_optim), intent(out) :: optim

Optimizer we are creating

type(torch_tensor), intent(in), dimension(:) :: parameters

Array of parameter tensors

real(kind=real64), intent(in), optional :: learning_rate

learning rate for the optimization algorithm (default: 0.001)

real(kind=real64), intent(in), optional :: momentum

momentum for the optimization algorithm (default: 0.0)

real(kind=real64), intent(in), optional :: weight_decay

weight_decay for the optimization algorithm (default: 0.0)

real(kind=real64), intent(in), optional :: dampening

dampening for the optimization algorithm (default: 0.0)

logical, intent(in), optional :: nesterov

enable Nesterov momentum. Only applicable when momentum is non-zero. (default: .false.)

public subroutine torch_optim_delete(optim)

Deallocate a Torch optimizer

Arguments

Type Intent Optional Attributes Name
type(torch_optim), intent(inout) :: optim

Optimizer to deallocate

public subroutine torch_optim_step(optim)

Step a Torch optimizer

Arguments

Type Intent Optional Attributes Name
class(torch_optim), intent(in) :: optim

Optimizer to step

public subroutine torch_optim_zero_grad(optim)

Zero gradients on tensors associated with a Torch optimizer

Arguments

Type Intent Optional Attributes Name
class(torch_optim), intent(in) :: optim

Optimizer to zero gradients for