<?php

declare(strict_types=1);

namespace tests\Phpml\FeatureExtraction;

use Phpml\FeatureExtraction\TokenCountVectorizer;
use Phpml\Tokenization\WhitespaceTokenizer;

class TokenCountVectorizerTest extends \PHPUnit_Framework_TestCase
{
    /**
     * fit() builds an index => token vocabulary from all samples;
     * transform() replaces each sample string, in place, with its
     * per-vocabulary-index token-count vector.
     */
    public function testTokenCountVectorizerWithWhitespaceTokenizer()
    {
        $texts = [
            'Lorem ipsum dolor sit amet dolor',
            'Mauris placerat ipsum dolor',
            'Mauris diam eros fringilla diam',
        ];

        // Tokens indexed in first-seen order across the samples.
        $expectedVocabulary = [
            'Lorem',
            'ipsum',
            'dolor',
            'sit',
            'amet',
            'Mauris',
            'placerat',
            'diam',
            'eros',
            'fringilla',
        ];

        // One row per sample; column i counts occurrences of vocabulary token i.
        $expectedCounts = [
            [1, 1, 2, 1, 1, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 2, 1, 1],
        ];

        $vectorizer = new TokenCountVectorizer(new WhitespaceTokenizer());

        $vectorizer->fit($texts);
        $this->assertEquals($expectedVocabulary, $vectorizer->getVocabulary());

        $vectorizer->transform($texts);
        $this->assertEquals($expectedCounts, $texts);
    }

    /**
     * The optional second constructor argument is a minimum document
     * frequency: tokens occurring in fewer than that fraction of the
     * samples keep a zero count column.
     */
    public function testMinimumDocumentTokenCountFrequency()
    {
        // Threshold 0.5: a token must appear in at least half the samples.
        $texts = [
            'Lorem ipsum dolor sit amet',
            'Lorem ipsum sit amet',
            'ipsum sit amet',
            'ipsum sit amet',
        ];

        $expectedVocabulary = [
            'Lorem',
            'ipsum',
            'dolor',
            'sit',
            'amet',
        ];

        // 'dolor' (index 2) occurs in only 1 of 4 samples, so its column is zeroed.
        $expectedCounts = [
            [1, 1, 0, 1, 1],
            [1, 1, 0, 1, 1],
            [0, 1, 0, 1, 1],
            [0, 1, 0, 1, 1],
        ];

        $vectorizer = new TokenCountVectorizer(new WhitespaceTokenizer(), 0.5);

        $vectorizer->fit($texts);
        $this->assertEquals($expectedVocabulary, $vectorizer->getVocabulary());

        $vectorizer->transform($texts);
        $this->assertEquals($expectedCounts, $texts);

        // Threshold 1: only tokens present in every single sample are counted.
        $texts = [
            'Lorem ipsum dolor sit amet',
            'Morbi quis sagittis Lorem',
            'eros Lorem',
        ];

        // 'Lorem' (index 0) is the only token shared by all three samples.
        $expectedCounts = [
            [1, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0, 0],
        ];

        $vectorizer = new TokenCountVectorizer(new WhitespaceTokenizer(), 1);

        $vectorizer->fit($texts);
        $vectorizer->transform($texts);

        $this->assertEquals($expectedCounts, $texts);
    }
}
|