<?php

declare(strict_types=1);

namespace Phpml\Classification\Linear;

use Phpml\Classification\Classifier;

class Adaline extends Perceptron
{
    /**
     * Batch training is the default Adaline training algorithm
     */
    const BATCH_TRAINING = 1;

    /**
     * Online training: Stochastic gradient descent learning
     */
    const ONLINE_TRAINING = 2;
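
    // Practical difference between the two modes (an assumption about how the
    // parent's runGradientDescent() interprets its batch flag): batch training
    // applies the update w := w - eta * sum((output - target) * x) once per
    // iteration over all samples, while online (stochastic) training applies
    // w := w - eta * (output - target) * x after every single sample.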

    /**
     * Training type may be either 'Batch' or 'Online' learning
     *
     * @var int
     */
    protected $trainingType;

    /**
     * Initialize an Adaline (ADAptive LInear NEuron) classifier with the given learning rate and maximum
     * number of iterations used while training the classifier <br>
     *
     * Learning rate should be a float value between 0.0 (exclusive) and 1.0 (inclusive) <br>
     * Maximum number of iterations can be an integer value greater than 0 <br>
     * If normalizeInputs is set to true, then every input given to the algorithm will be standardized
     * by use of standard deviation and mean calculation
     *
     * @param float $learningRate
     * @param int $maxIterations
     * @param bool $normalizeInputs
     * @param int $trainingType
     *
     * @throws \Exception
     */
    public function __construct(float $learningRate = 0.001, int $maxIterations = 1000,
        bool $normalizeInputs = true, int $trainingType = self::BATCH_TRAINING)
    {
        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
            throw new \Exception("Adaline can only be trained with the batch or online/stochastic gradient descent algorithms");
        }

        $this->trainingType = $trainingType;

        parent::__construct($learningRate, $maxIterations, $normalizeInputs);
    }

    /**
     * Adapts the weights with respect to given samples and targets
     * by use of gradient descent learning rule
     *
     * @param array $samples
     * @param array $targets
     */
    protected function runTraining(array $samples, array $targets)
    {
        // The cost function is the sum of squared errors; its derivative
        // (up to a constant factor absorbed by the learning rate) is the
        // difference between the neuron's output and the target
        $callback = function ($weights, $sample, $target) {
            $this->weights = $weights;

            $output = $this->output($sample);
            $gradient = $output - $target;
            $error = $gradient ** 2;

            // Each invocation reports the per-sample error and gradient
            // back to the gradient descent routine
            return [$error, $gradient];
        };

        $isBatch = $this->trainingType === self::BATCH_TRAINING;

        return parent::runGradientDescent($samples, $targets, $callback, $isBatch);
    }
}
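
// A minimal usage sketch, assuming the usual php-ml classifier API in which
// every classifier exposes train() and predict(); the sample data and the
// expected label below are purely illustrative:
//
//   $classifier = new Adaline(0.001, 1000, true, Adaline::ONLINE_TRAINING);
//   $classifier->train([[1, 1], [2, 1], [6, 5], [7, 6]], [0, 0, 1, 1]);
//   $classifier->predict([6, 5]); // expected to return the label 1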