documentation: add vectorizer->fit call, required to build the dictionary (#155)

This commit is contained in:
David Monllaó 2017-11-16 21:40:11 +01:00 committed by Arkadiusz Kondas
parent a11e3f69c3
commit f7537c049a
1 changed file with 8 additions and 3 deletions

View File

@ -26,13 +26,18 @@ $samples = [
];
$vectorizer = new TokenCountVectorizer(new WhitespaceTokenizer());
$vectorizer->transform($samples)
// return $vector = [
// Build the dictionary.
$vectorizer->fit($samples);
// Transform the provided text samples into a vectorized list.
$vectorizer->transform($samples);
// return $samples = [
// [0 => 1, 1 => 1, 2 => 2, 3 => 1, 4 => 1],
// [5 => 1, 6 => 1, 1 => 1, 2 => 1],
// [5 => 1, 7 => 2, 8 => 1, 9 => 1],
//];
```
### Vocabulary