diff --git a/docs/neural-network/optimizers/adagrad.md b/docs/neural-network/optimizers/adagrad.md index 9cfddff25..2e55a9953 100644 --- a/docs/neural-network/optimizers/adagrad.md +++ b/docs/neural-network/optimizers/adagrad.md @@ -1,8 +1,24 @@ -[source] +[source] # AdaGrad Short for *Adaptive Gradient*, the AdaGrad Optimizer speeds up the learning of parameters that do not change often and slows down the learning of parameters that do enjoy heavy activity. Due to AdaGrad's infinitely decaying step size, training may be slow or fail to converge using a low learning rate. +## Mathematical formulation +Per step (element-wise), AdaGrad accumulates the sum of squared gradients and scales the update by the root of this sum: + +$$ +\begin{aligned} +\mathbf{n}_t &= \mathbf{n}_{t-1} + \mathbf{g}_t^{2} \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{g}_t}{\sqrt{\mathbf{n}_t} + \varepsilon} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\mathbf{g}_t$ is the current gradient, and $\mathbf{g}_t^{2}$ denotes element-wise square, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -10,10 +26,10 @@ Short for *Adaptive Gradient*, the AdaGrad Optimizer speeds up the learning of p ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\AdaGrad; +use Rubix\ML\NeuralNet\Optimizers\AdaGrad\AdaGrad; $optimizer = new AdaGrad(0.125); ``` ## References -[^1]: J. Duchi et al. (2011). Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. \ No newline at end of file +[^1]: J. Duchi et al. (2011). Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. diff --git a/docs/neural-network/optimizers/adam.md b/docs/neural-network/optimizers/adam.md index 3b9898649..d10a469f3 100644 --- a/docs/neural-network/optimizers/adam.md +++ b/docs/neural-network/optimizers/adam.md @@ -1,8 +1,27 @@ -[source] +[source] # Adam Short for *Adaptive Moment Estimation*, the Adam Optimizer combines both Momentum and RMS properties. In addition to storing an exponentially decaying average of past squared gradients like [RMSprop](rms-prop.md), Adam also keeps an exponentially decaying average of past gradients, similar to [Momentum](momentum.md). Whereas Momentum can be seen as a ball running down a slope, Adam behaves like a heavy ball with friction. +## Mathematical formulation +Per step (element-wise), Adam maintains exponentially decaying moving averages of the gradient and its element-wise square and uses them to scale the update: + +$$ +\begin{aligned} +\mathbf{v}_t &= (1 - \beta_1)\,\mathbf{v}_{t-1} + \beta_1\,\mathbf{g}_t \\ +\mathbf{n}_t &= (1 - \beta_2)\,\mathbf{n}_{t-1} + \beta_2\,\mathbf{g}_t^{2} \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{v}_t}{\sqrt{\mathbf{n}_t} + \varepsilon} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\beta_1$ is the momentum decay (`momentumDecay`), +- $\beta_2$ is the norm decay (`normDecay`), +- $\mathbf{g}_t$ is the current gradient, and $\mathbf{g}_t^{2}$ denotes element-wise square, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). 
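+
+As a quick sanity check with hypothetical values, assume zero-initialized moving averages, $\alpha = 0.001$, $\beta_1 = 0.1$, $\beta_2 = 0.001$, and a gradient component $g_1 = 0.01$. Then $\mathbf{v}_1 = 0.1 \cdot 0.01 = 10^{-3}$, $\mathbf{n}_1 = 0.001 \cdot 0.01^2 = 10^{-7}$, and the resulting step is roughly $0.001 \cdot 10^{-3} / \sqrt{10^{-7}} \approx 0.0032$.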
+ ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,10 +31,10 @@ Short for *Adaptive Moment Estimation*, the Adam Optimizer combines both Momentu ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\Adam; +use Rubix\ML\NeuralNet\Optimizers\Adam\Adam; $optimizer = new Adam(0.0001, 0.1, 0.001); ``` ## References -[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. \ No newline at end of file +[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. diff --git a/docs/neural-network/optimizers/adamax.md b/docs/neural-network/optimizers/adamax.md index 6b1d9ea05..ff02f925a 100644 --- a/docs/neural-network/optimizers/adamax.md +++ b/docs/neural-network/optimizers/adamax.md @@ -1,8 +1,27 @@ -[source] +[source] # AdaMax A version of the [Adam](adam.md) optimizer that replaces the RMS property with the infinity norm of the past gradients. As such, AdaMax is generally more suitable for sparse parameter updates and noisy gradients. +## Mathematical formulation +Per step (element-wise), AdaMax maintains an exponentially decaying moving average of the gradient (velocity) and an infinity-norm accumulator of past gradients, and uses them to scale the update: + +$$ +\begin{aligned} +\mathbf{v}_t &= (1 - \beta_1)\,\mathbf{v}_{t-1} + \beta_1\,\mathbf{g}_t \\ +\mathbf{u}_t &= \max\big(\beta_2\,\mathbf{u}_{t-1},\ |\mathbf{g}_t|\big) \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{v}_t}{\max(\mathbf{u}_t, \varepsilon)} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\beta_1$ is the momentum decay (`momentumDecay`), +- $\beta_2$ is the norm decay (`normDecay`), +- $\mathbf{g}_t$ is the current gradient and $|\mathbf{g}_t|$ denotes element-wise absolute value, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,10 +31,10 @@ A version of the [Adam](adam.md) optimizer that replaces the RMS property with t ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\AdaMax; +use Rubix\ML\NeuralNet\Optimizers\AdaMax\AdaMax; $optimizer = new AdaMax(0.0001, 0.1, 0.001); ``` ## References -[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. \ No newline at end of file +[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. diff --git a/docs/neural-network/optimizers/cyclical.md b/docs/neural-network/optimizers/cyclical.md index 9773004da..eed8b2779 100644 --- a/docs/neural-network/optimizers/cyclical.md +++ b/docs/neural-network/optimizers/cyclical.md @@ -1,8 +1,28 @@ -[source] +[source] # Cyclical The Cyclical optimizer uses a global learning rate that cycles between the lower and upper bound over a designated period while also decaying the upper bound by a factor at each step. Cyclical learning rates have been shown to help escape bad local minima and saddle points of the gradient. 
+## Mathematical formulation +Per step (element-wise), the cyclical learning rate and update are computed as: + +$$ +\begin{aligned} +\text{cycle} &= \left\lfloor 1 + \frac{t}{2\,\text{steps}} \right\rfloor \\ +x &= \left| \frac{t}{\text{steps}} - 2\,\text{cycle} + 1 \right| \\ +\text{scale} &= \text{decay}^{\,t} \\ +\eta_t &= \text{lower} + (\text{upper} - \text{lower})\,\max\bigl(0\,1 - x\bigr)\,\text{scale} \\ +\Delta\theta_t &= \eta_t\,g_t +\end{aligned} +$$ + +where: +- $t$ is the current step counter, +- $steps$ is the number of steps in every half cycle, +- $lower$ and $upper$ are the learning rate bounds, +- $decay$ is the multiplicative decay applied each step, +- $g_t$ is the current gradient. + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -13,10 +33,10 @@ The Cyclical optimizer uses a global learning rate that cycles between the lower ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\Cyclical; +use Rubix\ML\NeuralNet\Optimizers\Cyclical\Cyclical; $optimizer = new Cyclical(0.001, 0.005, 1000); ``` ## References -[^1]: L. N. Smith. (2017). Cyclical Learning Rates for Training Neural Networks. \ No newline at end of file +[^1]: L. N. Smith. (2017). Cyclical Learning Rates for Training Neural Networks. diff --git a/docs/neural-network/optimizers/momentum.md b/docs/neural-network/optimizers/momentum.md index 7556ca008..e9c787a2f 100644 --- a/docs/neural-network/optimizers/momentum.md +++ b/docs/neural-network/optimizers/momentum.md @@ -1,8 +1,33 @@ -[source] +[source] # Momentum Momentum accelerates each update step by accumulating velocity from past updates and adding a factor of the previous velocity to the current step. Momentum can help speed up training and escape bad local minima when compared with [Stochastic](stochastic.md) Gradient Descent. +## Mathematical formulation +Per step (element-wise), Momentum updates the velocity and applies it as the parameter step: + +$$ +\begin{aligned} +\beta &= 1 - \text{decay}, \quad \eta = \text{rate} \\ +\text{Velocity update:}\quad v_t &= \beta\,v_{t-1} + \eta\,g_t \\ +\text{Returned step:}\quad \Delta\theta_t &= v_t +\end{aligned} +$$ + +Nesterov lookahead (when `lookahead = true`) is approximated by applying the velocity update a second time: + +$$ +\begin{aligned} +v_t &\leftarrow \beta\,v_t + \eta\,g_t +\end{aligned} +$$ + +where: +- $g_t$ is the current gradient, +- $v_t$ is the velocity (accumulated update), +- $\beta$ is the momentum coefficient ($1 − decay$), +- $\eta$ is the learning rate ($rate$). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,7 +37,7 @@ Momentum accelerates each update step by accumulating velocity from past updates ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\Momentum; +use Rubix\ML\NeuralNet\Optimizers\Momentum\Momentum; $optimizer = new Momentum(0.01, 0.1, true); ``` diff --git a/docs/neural-network/optimizers/rms-prop.md b/docs/neural-network/optimizers/rms-prop.md index fdca6fd05..c531a863e 100644 --- a/docs/neural-network/optimizers/rms-prop.md +++ b/docs/neural-network/optimizers/rms-prop.md @@ -1,7 +1,25 @@ -[source] +[source] # RMS Prop -An adaptive gradient technique that divides the current gradient over a rolling window of the magnitudes of recent gradients. Unlike [AdaGrad](adagrad.md), RMS Prop does not suffer from an infinitely decaying step size. +An adaptive gradient technique that divides the current gradient over a rolling window of magnitudes of recent gradients. 
Unlike [AdaGrad](adagrad.md), RMS Prop does not suffer from an infinitely decaying step size. + +## Mathematical formulation +Per step (element-wise), RMSProp maintains a running average of squared gradients and scales the step by the root-mean-square: + +$$ +\begin{aligned} +\rho &= 1 - \text{decay}, \quad \eta = \text{rate} \\ +\text{Running average:}\quad v_t &= \rho\,v_{t-1} + (1 - \rho)\,g_t^{\,2} \\ +\text{Returned step:}\quad \Delta\theta_t &= \frac{\eta\,g_t}{\max\bigl(\sqrt{v_t},\,\varepsilon\bigr)} +\end{aligned} +$$ + +where: +- $g_t$ - is the current gradient, +- $v_t$ - is the running average of squared gradients, +- $\rho$ - is the averaging coefficient ($1 − decay$), +- $\eta$ - is the learning rate ($rate$), +- $\varepsilon$ - is a small constant to avoid division by zero (implemented by clipping $\sqrt{v_t}$ to $[ε, +∞)$). ## Parameters | # | Name | Default | Type | Description | @@ -11,10 +29,10 @@ An adaptive gradient technique that divides the current gradient over a rolling ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\RMSProp; +use Rubix\ML\NeuralNet\Optimizers\RMSProp\RMSProp; $optimizer = new RMSProp(0.01, 0.1); ``` ## References -[^1]: T. Tieleman et al. (2012). Lecture 6e rmsprop: Divide the gradient by a running average of its recent magnitude. \ No newline at end of file +[^1]: T. Tieleman et al. (2012). Lecture 6e rmsprop: Divide the gradient by a running average of its recent magnitude. diff --git a/docs/neural-network/optimizers/step-decay.md b/docs/neural-network/optimizers/step-decay.md index 1a21f0804..f5da99c8b 100644 --- a/docs/neural-network/optimizers/step-decay.md +++ b/docs/neural-network/optimizers/step-decay.md @@ -1,8 +1,26 @@ -[source] +[source] # Step Decay A learning rate decay optimizer that reduces the global learning rate by a factor whenever it reaches a new *floor*. The number of steps needed to reach a new floor is defined by the *steps* hyper-parameter. +## Mathematical formulation +Per step (element-wise), the Step Decay learning rate and update are: + +$$ +\begin{aligned} +\text{floor} &= \left\lfloor \frac{t}{k} \right\rfloor \\ +\eta_t &= \frac{\eta_0}{1 + \text{floor}\cdot \lambda} \\ +\Delta\theta_t &= \eta_t\,g_t +\end{aligned} +$$ + +where: +- $t$ is the current step number, +- $k$ is the number of steps per floor, +- $\eta_0$ is the initial learning rate ($rate$), +- $\lambda$ is the decay factor ($decay$), +- $g_t$ is the current gradient. + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,7 +30,7 @@ A learning rate decay optimizer that reduces the global learning rate by a facto ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\StepDecay; +use Rubix\ML\NeuralNet\Optimizers\StepDecay\StepDecay; $optimizer = new StepDecay(0.1, 50, 1e-3); -``` \ No newline at end of file +``` diff --git a/docs/neural-network/optimizers/stochastic.md b/docs/neural-network/optimizers/stochastic.md index 4422e0ddc..bb0096b87 100644 --- a/docs/neural-network/optimizers/stochastic.md +++ b/docs/neural-network/optimizers/stochastic.md @@ -3,6 +3,20 @@ # Stochastic A constant learning rate optimizer based on vanilla Stochastic Gradient Descent (SGD). +## Mathematical formulation +Per step (element-wise), the SGD update scales the gradient by a constant learning rate: + +$$ +\begin{aligned} +\eta &= \text{rate} \\ +\Delta\theta_t &= \eta\,g_t +\end{aligned} +$$ + +where: +- $g_t$ is the current gradient, +- $\eta$ is the learning rate ($rate$). 
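+
+For example, with hypothetical values $\eta = 0.001$ and a gradient component $g_t = 0.01$, the returned step is simply $0.001 \cdot 0.01 = 10^{-5}$.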
+ ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| diff --git a/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php b/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php new file mode 100644 index 000000000..b6c92bd56 --- /dev/null +++ b/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php @@ -0,0 +1,134 @@ + + */ +class AdaGrad implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The cache of sum of squared gradients. + * + * @var NDArray[] + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.01) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException("Learning rate must be greater than 0, $rate given."); + } + + $this->rate = $rate; + } + + /** + * Warm the parameter cache. + * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * AdaGrad update (element-wise): + * n_t = n_{t-1} + g_t^2 + * Δθ_t = η · g_t / max(√n_t, ε) + * + * where: + * - g_t is the current gradient, + * - n_t is the accumulated (running) sum of squared gradients, + * - η is the learning rate (rate), + * - ε is a small constant to avoid division by zero (implemented via clipping √n_t to [ε, +∞)). + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $norm = $this->cache[$param->id()]; + + // Update accumulated squared gradients: norm = norm + gradient^2 + $norm = NumPower::add($norm, NumPower::square($gradient)); + + $this->cache[$param->id()] = $norm; + + // denominator = max(sqrt(norm), EPSILON) + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + // return rate * gradient / denominator + return NumPower::divide( + NumPower::multiply($gradient, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "AdaGrad (rate: {$this->rate})"; + } +} diff --git a/src/NeuralNet/Optimizers/AdaMax/AdaMax.php b/src/NeuralNet/Optimizers/AdaMax/AdaMax.php new file mode 100644 index 000000000..ae13d2249 --- /dev/null +++ b/src/NeuralNet/Optimizers/AdaMax/AdaMax.php @@ -0,0 +1,90 @@ + + */ +class AdaMax extends Adam +{ + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + */ + public function __construct(float $rate = 0.001, float $momentumDecay = 0.1, float $normDecay = 0.001) + { + parent::__construct($rate, $momentumDecay, $normDecay); + } + + /** + * Take a step of gradient descent for a given parameter. 
+ * + * AdaMax update (element-wise): + * v_t = v_{t-1} + β1 · (g_t − v_{t-1}) + * u_t = max(β2 · u_{t-1}, |g_t|) + * Δθ_t = η · v_t / max(u_t, ε) + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + [$velocity, $norm] = $this->cache[$param->id()]; + + $vHat = NumPower::multiply( + NumPower::subtract($gradient, $velocity), + $this->momentumDecay + ); + + $velocity = NumPower::add($velocity, $vHat); + + // Infinity norm accumulator + $norm = NumPower::multiply($norm, 1.0 - $this->normDecay); + $absGrad = NumPower::abs($gradient); + $norm = NumPower::maximum($norm, $absGrad); + + $this->cache[$param->id()] = [$velocity, $norm]; + + $norm = NumPower::clip($norm, EPSILON, PHP_FLOAT_MAX); + + return NumPower::multiply( + NumPower::divide($velocity, $norm), + $this->rate + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "AdaMax (rate: {$this->rate}, momentum decay: {$this->momentumDecay}," + . " norm decay: {$this->normDecay})"; + } +} diff --git a/src/NeuralNet/Optimizers/Adam/Adam.php b/src/NeuralNet/Optimizers/Adam/Adam.php new file mode 100644 index 000000000..fad8ac1bf --- /dev/null +++ b/src/NeuralNet/Optimizers/Adam/Adam.php @@ -0,0 +1,181 @@ + + */ +class Adam implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The momentum decay rate. + * + * @var float + */ + protected float $momentumDecay; + + /** + * The decay rate of the previous norms. + * + * @var float + */ + protected float $normDecay; + + /** + * The parameter cache of running velocity and squared gradients. + * + * @var array{0: NDArray, 1: NDArray}[] + */ + protected array $cache = [ + // id => [velocity, norm] + ]; + + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $momentumDecay = 0.1, float $normDecay = 0.001) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($momentumDecay <= 0.0 or $momentumDecay >= 1.0) { + throw new InvalidArgumentException( + "Momentum decay must be between 0 and 1, $momentumDecay given." + ); + } + + if ($normDecay <= 0.0 or $normDecay >= 1.0) { + throw new InvalidArgumentException( + "Norm decay must be between 0 and 1, $normDecay given." + ); + } + + $this->rate = $rate; + $this->momentumDecay = $momentumDecay; + $this->normDecay = $normDecay; + } + + /** + * Warm the parameter cache. + * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $zeros = NumPower::zeros($param->param()->shape()); + + $this->cache[$param->id()] = [clone $zeros, $zeros]; + } + + /** + * Take a step of gradient descent for a given parameter. 
+ * + * Adam update (element-wise): + * v_t = v_{t-1} + β1 · (g_t − v_{t-1}) // exponential moving average of gradients + * n_t = n_{t-1} + β2 · (g_t^2 − n_{t-1}) // exponential moving average of squared gradients + * Δθ_t = η · v_t / max(√n_t, ε) + * + * where: + * - g_t is the current gradient, + * - v_t is the running average of gradients ("velocity"), β1 = momentumDecay, + * - n_t is the running average of squared gradients ("norm"), β2 = normDecay, + * - η is the learning rate (rate), ε is a small constant to avoid division by zero (implemented by clipping √n_t to [ε, +∞)). + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + [$velocity, $norm] = $this->cache[$param->id()]; + + $vHat = NumPower::multiply( + NumPower::subtract($gradient, $velocity), + $this->momentumDecay + ); + + $velocity = NumPower::add($velocity, $vHat); + + $nHat = NumPower::multiply( + NumPower::subtract(NumPower::square($gradient), $norm), + $this->normDecay + ); + + $norm = NumPower::add($norm, $nHat); + + $this->cache[$param->id()] = [$velocity, $norm]; + + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + return NumPower::divide( + NumPower::multiply($velocity, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Adam (rate: {$this->rate}, momentum decay: {$this->momentumDecay}," + . " norm decay: {$this->normDecay})"; + } +} diff --git a/src/NeuralNet/Optimizers/Base/Adaptive.php b/src/NeuralNet/Optimizers/Base/Adaptive.php new file mode 100644 index 000000000..35ee5323b --- /dev/null +++ b/src/NeuralNet/Optimizers/Base/Adaptive.php @@ -0,0 +1,25 @@ + + */ +interface Adaptive extends Optimizer +{ + /** + * Warm the parameter cache. + * + * @param Parameter $param + */ + public function warm(Parameter $param) : void; +} diff --git a/src/NeuralNet/Optimizers/Cyclical/Cyclical.php b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php new file mode 100644 index 000000000..ac22d9d52 --- /dev/null +++ b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php @@ -0,0 +1,166 @@ + + */ +class Cyclical implements Optimizer +{ + /** + * The lower bound on the learning rate. + * + * @var float + */ + protected float $lower; + + /** + * The upper bound on the learning rate. + * + * @var float + */ + protected float $upper; + + /** + * The range of the learning rate. + * + * @var float + */ + protected float $range; + + /** + * The number of steps in every cycle. + * + * @var int + */ + protected int $losses; + + /** + * The exponential scaling factor applied to each step as decay. + * + * @var float + */ + protected float $decay; + + /** + * The number of steps taken so far. + * + * @var int + */ + protected int $t = 0; + + /** + * @param float $lower + * @param float $upper + * @param int $losses + * @param float $decay + * @throws InvalidArgumentException + */ + public function __construct( + float $lower = 0.001, + float $upper = 0.006, + int $losses = 2000, + float $decay = 0.99994 + ) { + if ($lower <= 0.0) { + throw new InvalidArgumentException( + "Lower bound must be greater than 0, $lower given." + ); + } + + if ($lower > $upper) { + throw new InvalidArgumentException( + 'Lower bound cannot be reater than the upper bound.' 
+ ); + } + + if ($losses < 1) { + throw new InvalidArgumentException( + "The number of steps per cycle must be greater than 0, $losses given." + ); + } + + if ($decay <= 0.0 or $decay >= 1.0) { + throw new InvalidArgumentException( + "Decay must be between 0 and 1, $decay given." + ); + } + + $this->lower = $lower; + $this->upper = $upper; + $this->range = $upper - $lower; + $this->losses = $losses; + $this->decay = $decay; + } + + /** + * Take a step of gradient descent for a given parameter. + * + * Cyclical learning rate schedule (per-step, element-wise update): + * - Cycle index: cycle = floor(1 + t / (2 · losses)) + * - Triangular position: x = | t / losses − 2 · cycle + 1 | + * - Exponential decay: scale = decay^t + * - Learning rate at t: η_t = lower + (upper − lower) · max(0, 1 − x) · scale + * - Returned step: Δθ_t = η_t · g_t + * + * where: + * - t is the current step counter (incremented after computing η_t), + * - losses is the number of steps per cycle, + * - lower and upper are the learning rate bounds, + * - decay is the multiplicative decay applied each step, + * - g_t is the current gradient. + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $cycle = floor(1 + $this->t / (2 * $this->losses)); + + $x = abs($this->t / $this->losses - 2 * $cycle + 1); + + $scale = $this->decay ** $this->t; + + $rate = $this->lower + $this->range * max(0, 1 - $x) * $scale; + + ++$this->t; + + return NumPower::multiply($gradient, $rate); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Cyclical (lower: {$this->lower}, upper: {$this->upper}," + . " steps: {$this->losses}, decay: {$this->decay})"; + } +} diff --git a/src/NeuralNet/Optimizers/Momentum/Momentum.php b/src/NeuralNet/Optimizers/Momentum/Momentum.php new file mode 100644 index 000000000..05e62fa0b --- /dev/null +++ b/src/NeuralNet/Optimizers/Momentum/Momentum.php @@ -0,0 +1,164 @@ + + */ +class Momentum implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The rate at which the momentum force decays. + * + * @var float + */ + protected float $decay; + + /** + * Should we employ Nesterov's lookahead (NAG) when updating the parameters? + * + * @var bool + */ + protected bool $lookahead; + + /** + * The parameter cache of velocity NDArrays. + * + * @var NDArray[] + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @param float $decay + * @param bool $lookahead + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $decay = 0.1, bool $lookahead = false) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($decay <= 0.0 or $decay >= 1.0) { + throw new InvalidArgumentException( + "Decay must be between 0 and 1, $decay given." + ); + } + + $this->rate = $rate; + $this->decay = $decay; + $this->lookahead = $lookahead; + } + + /** + * Warm the cache. 
+ * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * Mathematical formulation (per-parameter element): + * - Velocity update: v_t = β · v_{t-1} + η · g_t + * where β = 1 − decay and η = rate, and g_t is the current gradient. + * - Returned step (the amount added to the parameter by the trainer): Δθ_t = v_t + * + * Nesterov lookahead (when lookahead = true): + * - We apply the same velocity update a second time to approximate NAG: + * v_t ← β · v_t + η · g_t + * + * Notes: + * - This method updates and caches the velocity tensor per Parameter id. + * - The actual parameter update is performed by the training loop using the returned velocity. + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $velocity = $this->cache[$param->id()]; + + // velocity = gradient * rate + velocity * (1 - decay) + $velocity = NumPower::add( + NumPower::multiply($gradient, $this->rate), + NumPower::multiply($velocity, 1.0 - $this->decay) + ); + + $this->cache[$param->id()] = $velocity; + + if ($this->lookahead) { + // Apply lookahead: velocity = gradient * rate + velocity * (1 - decay) + $velocity = NumPower::add( + NumPower::multiply($gradient, $this->rate), + NumPower::multiply($velocity, 1.0 - $this->decay) + ); + } + + return $velocity; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Momentum (rate: {$this->rate}, decay: {$this->decay}," + . ' lookahead: ' . Params::toString($this->lookahead) . ')'; + } +} diff --git a/src/NeuralNet/Optimizers/RMSProp/RMSProp.php b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php new file mode 100644 index 000000000..7c08aebb2 --- /dev/null +++ b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php @@ -0,0 +1,158 @@ + + */ +class RMSProp implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The rms decay rate. + * + * @var float + */ + protected float $decay; + + /** + * The opposite of the rms decay rate. + * + * @var float + */ + protected float $rho; + + /** + * The cache of running squared gradients. + * + * @var NDArray[] + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @param float $decay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $decay = 0.1) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($decay <= 0.0 or $decay >= 1.0) { + throw new InvalidArgumentException( + "Decay must be between 0 and 1, $decay given." + ); + } + + $this->rate = $rate; + $this->decay = $decay; + $this->rho = 1.0 - $decay; + } + + /** + * Warm the parameter cache. 
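+     *
+     * Allocates a zero-filled running average of squared gradients with the same shape as the parameter.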
+ * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * RMSProp update (element-wise): + * v_t = ρ · v_{t-1} + (1 − ρ) · g_t^2 + * Δθ_t = η · g_t / max(sqrt(v_t), ε) + * + * where: + * - g_t is the current gradient, + * - v_t is the running average of squared gradients, + * - ρ = 1 − decay, η is the learning rate, + * - ε is a small constant to avoid division by zero (implemented by clipping √v_t to [ε, +∞)). + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $norm = $this->cache[$param->id()]; + + $norm = NumPower::add( + NumPower::multiply($norm, $this->rho), + NumPower::multiply(NumPower::square($gradient), $this->decay) + ); + + $this->cache[$param->id()] = $norm; + + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + return NumPower::divide( + NumPower::multiply($gradient, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "RMS Prop (rate: {$this->rate}, decay: {$this->decay})"; + } +} diff --git a/src/NeuralNet/Optimizers/StepDecay/StepDecay.php b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php new file mode 100644 index 000000000..abfeb6f7e --- /dev/null +++ b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php @@ -0,0 +1,127 @@ + + */ +class StepDecay implements Optimizer +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The size of every floor in steps. i.e. the number of steps to take before applying another factor of decay. + * + * @var int + */ + protected int $losses; + + /** + * The factor to decrease the learning rate by over a period of k steps. + * + * @var float + */ + protected float $decay; + + /** + * The number of steps taken so far. + * + * @var int + */ + protected int $steps = 0; + + /** + * @param float $rate + * @param int $losses + * @param float $decay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.01, int $losses = 100, float $decay = 1e-3) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($losses < 1) { + throw new InvalidArgumentException( + "The number of steps per floor must be greater than 0, $losses given." + ); + } + + if ($decay < 0.0) { + throw new InvalidArgumentException( + "Decay rate must be positive, $decay given." + ); + } + + $this->rate = $rate; + $this->losses = $losses; + $this->decay = $decay; + } + + /** + * Take a step of gradient descent for a given parameter. + * + * Step Decay update (element-wise): + * floor = ⌊t / k⌋ + * η_t = η₀ / (1 + floor · λ) + * Δθ_t = η_t · g_t + * + * where: + * - t is the current step number, + * - k is the number of steps per floor, + * - η₀ is the initial learning rate, + * - λ is the decay factor, + * - g_t is the current gradient. 
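+     *
+     * Example with hypothetical values: for η₀ = 0.01, k = 100, λ = 0.001 and t = 250,
+     * floor = 2 and η_t = 0.01 / (1 + 2 · 0.001) ≈ 0.00998.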
+ * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $floor = floor($this->steps / $this->losses); + + $rate = $this->rate * (1.0 / (1.0 + $floor * $this->decay)); + + ++$this->steps; + + return NumPower::multiply($gradient, $rate); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Step Decay (rate: {$this->rate}, steps: {$this->losses}, decay: {$this->decay})"; + } +} diff --git a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php index ffd9daf30..b2cd6ebac 100644 --- a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php +++ b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php @@ -35,7 +35,9 @@ class Stochastic implements Optimizer public function __construct(float $rate = 0.01) { if ($rate <= 0.0) { - throw new InvalidArgumentException("Learning rate must be greater than 0, $rate given."); + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); } $this->rate = $rate; @@ -44,6 +46,13 @@ public function __construct(float $rate = 0.01) /** * Take a step of gradient descent for a given parameter. * + * SGD update (element-wise): + * Δθ_t = η · g_t + * + * where: + * - g_t is the current gradient, + * - η is the learning rate. + * * @internal * * @param Parameter $param diff --git a/src/NeuralNet/Parameters/Parameter.php b/src/NeuralNet/Parameters/Parameter.php index efa7cf88a..0cef2e87a 100644 --- a/src/NeuralNet/Parameters/Parameter.php +++ b/src/NeuralNet/Parameters/Parameter.php @@ -22,7 +22,6 @@ /** * Parameter - * */ class Parameter { @@ -61,7 +60,7 @@ public function __construct(NDArray $param) * * @return int */ - public function id(): int + public function id() : int { return $this->id; } @@ -71,7 +70,7 @@ public function id(): int * * @return NDArray */ - public function param(): NDArray + public function param() : NDArray { return $this->param; } @@ -79,10 +78,10 @@ public function param(): NDArray /** * Update the parameter with the gradient and optimizer. * - * @param NDArray $gradient - * @param Optimizer $optimizer + * @param NDArray $gradient + * @param Optimizer $optimizer */ - public function update(NDArray $gradient, Optimizer $optimizer): void + public function update(NDArray $gradient, Optimizer $optimizer) : void { $step = $optimizer->step($this, $gradient); @@ -92,7 +91,7 @@ public function update(NDArray $gradient, Optimizer $optimizer): void /** * Perform a deep copy of the object upon cloning. 
*/ - public function __clone(): void + public function __clone() : void { $this->param = clone $this->param; } diff --git a/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php b/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php new file mode 100644 index 000000000..44ff773f5 --- /dev/null +++ b/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php @@ -0,0 +1,94 @@ + [0.0]; + yield 'negative rate' => [-0.001]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.001, 0.001, -0.001], + [-0.001, 0.001, 0.001], + [0.001, -0.001, -0.001], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new AdaGrad(0.001); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertSame('AdaGrad (rate: 0.01)', (string) (new AdaGrad())); + } + + /** + * @param float $rate + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate) : void + { + $this->expectException(InvalidArgumentException::class); + + new AdaGrad(rate: $rate); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php new file mode 100644 index 000000000..0ca059561 --- /dev/null +++ b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php @@ -0,0 +1,108 @@ + [0.0, 0.1, 0.001]; + yield 'negative rate' => [-0.001, 0.1, 0.001]; + yield 'zero momentum decay' => [0.001, 0.0, 0.001]; + yield 'momentum decay == 1' => [0.001, 1.0, 0.001]; + yield 'momentum decay > 1' => [0.001, 1.5, 0.001]; + yield 'negative momentum decay' => [0.001, -0.1, 0.001]; + yield 'zero norm decay' => [0.001, 0.1, 0.0]; + yield 'norm decay == 1' => [0.001, 0.1, 1.0]; + yield 'norm decay > 1' => [0.001, 0.1, 1.5]; + yield 'negative norm decay' => [0.001, 0.1, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.0001, 0.0001, -0.0001], + [-0.0001, 0.0001, 0.0001], + [0.0001, -0.0001, -0.0001], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new AdaMax( + rate: 0.001, + momentumDecay: 0.1, + normDecay: 0.001 + ); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('AdaMax (rate: 0.001, momentum decay: 0.1, norm decay: 0.001)', (string) $this->optimizer); + } + + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float 
$rate, float $momentumDecay, float $normDecay) : void + { + $this->expectException(InvalidArgumentException::class); + + new AdaMax(rate: $rate, momentumDecay: $momentumDecay, normDecay: $normDecay); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/Adam/AdamTest.php b/tests/NeuralNet/Optimizers/Adam/AdamTest.php new file mode 100644 index 000000000..bcf19d344 --- /dev/null +++ b/tests/NeuralNet/Optimizers/Adam/AdamTest.php @@ -0,0 +1,141 @@ + [0.0, 0.1, 0.001]; + yield 'negative rate' => [-0.5, 0.1, 0.001]; + + // Invalid momentumDecay (<= 0 or >= 1) + yield 'zero momentumDecay' => [0.001, 0.0, 0.001]; + yield 'negative momentumDecay' => [0.001, -0.1, 0.001]; + yield 'momentumDecay == 1' => [0.001, 1.0, 0.001]; + yield 'momentumDecay > 1' => [0.001, 1.1, 0.001]; + + // Invalid normDecay (<= 0 or >= 1) + yield 'zero normDecay' => [0.001, 0.1, 0.0]; + yield 'negative normDecay' => [0.001, 0.1, -0.1]; + yield 'normDecay == 1' => [0.001, 0.1, 1.0]; + yield 'normDecay > 1' => [0.001, 0.1, 1.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.0031622, 0.0031622, -0.0031622], + [-0.0031622, 0.0031622, 0.0031622], + [0.0031622, -0.0031622, -0.0031622], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new Adam( + rate: 0.001, + momentumDecay: 0.1, + normDecay: 0.001 + ); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + $expected = 'Adam (rate: 0.001, momentum decay: 0.1, norm decay: 0.001)'; + self::assertSame($expected, (string) $this->optimizer); + } + + #[Test] + #[TestDox('Warm initializes zeroed velocity and norm caches with the parameter\'s shape')] + public function testWarmInitializesZeroedCache() : void + { + $param = new Parameter(NumPower::array([ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ])); + + // Warm the optimizer for this parameter + $this->optimizer->warm($param); + + // Inspect protected cache via reflection + $ref = new \ReflectionClass($this->optimizer); + $prop = $ref->getProperty('cache'); + $prop->setAccessible(true); + $cache = $prop->getValue($this->optimizer); + + self::assertArrayHasKey($param->id(), $cache); + + [$velocity, $norm] = $cache[$param->id()]; + + $zeros = NumPower::zeros($param->param()->shape()); + self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0); + self::assertEqualsWithDelta($zeros->toArray(), $norm->toArray(), 0.0); + } + + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, float $momentumDecay, float $normDecay) : void + { + $this->expectException(InvalidArgumentException::class); + new Adam(rate: $rate, momentumDecay: $momentumDecay, normDecay: $normDecay); + } 
+ + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php new file mode 100644 index 000000000..302b770be --- /dev/null +++ b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php @@ -0,0 +1,106 @@ + [0.0, 0.006, 2000, null]; + yield 'negative lower' => [-0.001, 0.006, 2000, null]; + yield 'lower > upper' => [0.01, 0.006, 2000, null]; + yield 'zero steps' => [0.001, 0.006, 0, null]; + yield 'negative steps' => [0.001, 0.006, -5, null]; + yield 'zero decay' => [0.001, 0.006, 2000, 0.0]; + yield 'decay == 1' => [0.001, 0.006, 2000, 1.0]; + yield 'decay > 1' => [0.001, 0.006, 2000, 1.5]; + yield 'negative decay' => [0.001, 0.006, 2000, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.00001, 0.00005, -0.00002], + [-0.00001, 0.00002, 0.00003], + [0.00004, -0.00001, -0.0005], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new Cyclical(lower: 0.001, upper: 0.006, losses: 2000); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Cyclical (lower: 0.001, upper: 0.006, steps: 2000, decay: 0.99994)', (string) $this->optimizer); + } + + /** + * @param float $lower + * @param float $upper + * @param int $losses + * @param float|null $decay + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testConstructorInvalidArgs(float $lower, float $upper, int $losses, ?float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + + if ($decay === null) { + new Cyclical(lower: $lower, upper: $upper, losses: $losses); + } else { + new Cyclical(lower: $lower, upper: $upper, losses: $losses, decay: $decay); + } + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php new file mode 100644 index 000000000..03b65f9a7 --- /dev/null +++ b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php @@ -0,0 +1,126 @@ + [0.0, 0.1]; + yield 'negative rate' => [-0.001, 0.1]; + yield 'zero decay' => [0.001, 0.0]; + yield 'decay == 1' => [0.001, 1.0]; + yield 'decay > 1' => [0.001, 1.5]; + yield 'negative decay' => [0.001, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + 
NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.00001, 0.00005, -0.00002], + [-0.00001, 0.00002, 0.00003], + [0.00004, -0.00001, -0.0005], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new Momentum(rate: 0.001, decay: 0.1, lookahead: false); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Momentum (rate: 0.001, decay: 0.1, lookahead: false)', (string) $this->optimizer); + } + + /** + * @param float $rate + * @param float $decay + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: $rate, decay: $decay); + } + + #[Test] + #[TestDox('Warm initializes a zeroed velocity cache with the parameter\'s shape')] + public function testWarmInitializesZeroedCache() : void + { + $param = new Parameter(NumPower::array([ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ])); + + // Warm the optimizer for this parameter + $this->optimizer->warm($param); + + // Use reflection to read the protected cache + $ref = new \ReflectionClass($this->optimizer); + $prop = $ref->getProperty('cache'); + $prop->setAccessible(true); + $cache = $prop->getValue($this->optimizer); + + self::assertArrayHasKey($param->id(), $cache); + + $velocity = $cache[$param->id()]; + + // Verify the velocity is an all-zeros tensor of the correct shape + $zeros = NumPower::zeros($param->param()->shape()); + self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php new file mode 100644 index 000000000..f47e4f2b3 --- /dev/null +++ b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php @@ -0,0 +1,122 @@ + [0.0, 0.1]; + yield 'negative rate' => [-0.001, 0.1]; + yield 'zero decay' => [0.001, 0.0]; + yield 'decay == 1' => [0.001, 1.0]; + yield 'decay > 1' => [0.001, 1.5]; + yield 'negative decay' => [0.001, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.0031622, 0.0031622, -0.0031622], + [-0.0031622, 0.0031622, 0.0031622], + [0.0031622, -0.0031622, -0.0031622], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new RMSProp(rate: 0.001, decay: 0.1); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('RMS Prop (rate: 0.001, decay: 0.1)', (string) $this->optimizer); + } + + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float 
$rate, float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: $rate, decay: $decay); + } + + #[Test] + #[TestDox('Warm initializes a zeroed velocity cache with the parameter\'s shape')] + public function testWarmInitializesZeroedCache() : void + { + $param = new Parameter(NumPower::array([ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ])); + + // Warm the optimizer for this parameter + $this->optimizer->warm($param); + + // Use reflection to read the protected cache + $ref = new \ReflectionClass($this->optimizer); + $prop = $ref->getProperty('cache'); + $prop->setAccessible(true); + $cache = $prop->getValue($this->optimizer); + + self::assertArrayHasKey($param->id(), $cache); + + $velocity = $cache[$param->id()]; + + // Verify the velocity is an all-zeros tensor of the correct shape + $zeros = NumPower::zeros($param->param()->shape()); + self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php new file mode 100644 index 000000000..ae7f78810 --- /dev/null +++ b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php @@ -0,0 +1,97 @@ + [0.0, 100, 0.001]; + yield 'negative rate' => [-0.001, 100, 0.001]; + yield 'zero losses' => [0.01, 0, 0.001]; + yield 'negative losses' => [0.01, -5, 0.001]; + yield 'negative decay' => [0.01, 100, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.00001, 0.00005, -0.00002], + [-0.00001, 0.00002, 0.00003], + [0.00004, -0.00001, -0.0005], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new StepDecay(rate: 0.001); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Step Decay (rate: 0.001, steps: 100, decay: 0.001)', (string) $this->optimizer); + } + + /** + * @param float $rate + * @param int $losses + * @param float $decay + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, int $losses, float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + + new StepDecay(rate: $rate, losses: $losses, decay: $decay); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php 
b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php index 57a50335f..c24b990f7 100644 --- a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php +++ b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php @@ -23,6 +23,12 @@ class StochasticTest extends TestCase { protected Stochastic $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero rate' => [0.0]; + yield 'negative rate' => [-0.001]; + } + public static function stepProvider() : Generator { yield [ @@ -50,19 +56,23 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with invalid learning rate')] - public function testConstructorWithInvalidRate() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->expectException(InvalidArgumentException::class); - - new Stochastic(0.0); + self::assertEquals('Stochastic (rate: 0.001)', (string) $this->optimizer); } + /** + * @param float $rate + */ #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate) : void { - self::assertEquals('Stochastic (rate: 0.001)', (string) $this->optimizer); + $this->expectException(InvalidArgumentException::class); + + new Stochastic($rate); } /** @@ -70,7 +80,9 @@ public function testToString() : void * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $step = $this->optimizer->step(param: $param, gradient: $gradient);