2017-02-16 22:23:55 +00:00
|
|
|
<?php
|
|
|
|
|
|
|
|
declare(strict_types=1);
|
|
|
|
|
|
|
|
namespace Phpml\Classification\Linear;
|
|
|
|
|
2018-03-06 22:26:36 +00:00
|
|
|
use Phpml\Exception\InvalidArgumentException;
|
2017-11-22 21:16:10 +00:00
|
|
|
|
2017-02-16 22:23:55 +00:00
|
|
|
class Adaline extends Perceptron
{
    /**
     * Batch training is the default Adaline training algorithm
     */
    public const BATCH_TRAINING = 1;

    /**
     * Online training: Stochastic gradient descent learning
     */
    public const ONLINE_TRAINING = 2;

    /**
     * Training type may be either self::BATCH_TRAINING or self::ONLINE_TRAINING;
     * the constructor only accepts these two int constants.
     *
     * @var int
     */
    protected $trainingType;

    /**
     * Initialize an Adaline (ADAptive LInear NEuron) classifier with given learning rate and maximum
     * number of iterations used while training the classifier <br>
     *
     * Learning rate should be a float value between 0.0(exclusive) and 1.0 (inclusive) <br>
     * Maximum number of iterations can be an integer value greater than 0 <br>
     * If normalizeInputs is set to true, then every input given to the algorithm will be standardized
     * by use of standard deviation and mean calculation
     *
     * @throws InvalidArgumentException when $trainingType is not one of the two supported constants
     */
    public function __construct(
        float $learningRate = 0.001,
        int $maxIterations = 1000,
        bool $normalizeInputs = true,
        int $trainingType = self::BATCH_TRAINING
    ) {
        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING], true)) {
            throw new InvalidArgumentException('Adaline can only be trained with batch and online/stochastic gradient descent algorithm');
        }

        $this->trainingType = $trainingType;

        parent::__construct($learningRate, $maxIterations, $normalizeInputs);
    }

    /**
     * Adapts the weights with respect to given samples and targets
     * by use of gradient descent learning rule
     */
    protected function runTraining(array $samples, array $targets): void
    {
        // The cost function is the sum of squares: the callback returns
        // [error, gradient] for one sample under the candidate weights.
        $callback = function ($weights, $sample, $target): array {
            $this->weights = $weights;

            $output = $this->output($sample);
            $gradient = $output - $target;
            $error = $gradient ** 2;

            return [$error, $gradient];
        };

        // Strict comparison: $trainingType is guaranteed to be an int constant
        // by the constructor, so === is both correct and intention-revealing.
        $isBatch = $this->trainingType === self::BATCH_TRAINING;

        parent::runGradientDescent($samples, $targets, $callback, $isBatch);
    }
}
|