
[DOCS] Abbreviate token filter titles (#50511)

James Rodewig committed 5 years ago
commit 18ee52a5b2
28 changed files with 112 additions and 28 deletions
  1. docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc (+4 -1)
  2. docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc (+4 -1)
  3. docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc (+4 -1)
  4. docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc (+4 -1)
  5. docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc (+4 -1)
  6. docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc (+4 -1)
  7. docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc (+4 -1)
  8. docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc (+4 -1)
  9. docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc (+4 -1)
  10. docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc (+4 -1)
  11. docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc (+4 -1)
  12. docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc (+4 -1)
  13. docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc (+4 -1)
  14. docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc (+4 -1)
  15. docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc (+4 -1)
  16. docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc (+4 -1)
  17. docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc (+4 -1)
  18. docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc (+4 -1)
  19. docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc (+4 -1)
  20. docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc (+4 -1)
  21. docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc (+4 -1)
  22. docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc (+4 -1)
  23. docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc (+4 -1)
  24. docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc (+4 -1)
  25. docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc (+4 -1)
  26. docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc (+4 -1)
  27. docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc (+4 -1)
  28. docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc (+4 -1)

+ 4 - 1
docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-flatten-graph-tokenfilter]]
-=== Flatten Graph Token Filter
+=== Flatten graph token filter
+++++
+<titleabbrev>Flatten graph</titleabbrev>
+++++
 
 experimental[This functionality is marked as experimental in Lucene]
 

+ 4 - 1
docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-hunspell-tokenfilter]]
-=== Hunspell Token Filter
+=== Hunspell token filter
+++++
+<titleabbrev>Hunspell</titleabbrev>
+++++
 
 Basic support for hunspell stemming. Hunspell dictionaries will be
 picked up from a dedicated hunspell directory on the filesystem

+ 4 - 1
docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-keyword-marker-tokenfilter]]
-=== Keyword Marker Token Filter
+=== Keyword marker token filter
+++++
+<titleabbrev>Keyword marker</titleabbrev>
+++++
 
 Protects words from being modified by stemmers. Must be placed before
 any stemming filters.

+ 4 - 1
docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-keyword-repeat-tokenfilter]]
-=== Keyword Repeat Token Filter
+=== Keyword repeat token filter
+++++
+<titleabbrev>Keyword repeat</titleabbrev>
+++++
 
 The `keyword_repeat` token filter emits each incoming token twice: once
 as a keyword and once as a non-keyword, to allow an unstemmed version of a
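
Aside: a minimal sketch of how `keyword_repeat` is typically paired with a
stemmer (the request below is illustrative, not part of this commit):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": ["keyword_repeat", "porter_stem"],
  "text": "running"
}
----

Because `keyword_repeat` marks the first copy as a keyword, the stemmer skips
it, yielding both `running` and `run` at the same position. A
`remove_duplicates` filter is often appended for cases where stemming is a
no-op.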

+ 4 - 1
docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-kstem-tokenfilter]]
-=== KStem Token Filter
+=== KStem token filter
+++++
+<titleabbrev>KStem</titleabbrev>
+++++
 
 The `kstem` token filter is a high-performance filter for English. All
 terms must already be lowercased (use the `lowercase` filter) for this

+ 4 - 1
docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-minhash-tokenfilter]]
-=== MinHash Token Filter
+=== MinHash token filter
+++++
+<titleabbrev>MinHash</titleabbrev>
+++++
 
 The `min_hash` token filter hashes each token of the token stream and divides
 the resulting hashes into buckets, keeping the lowest-valued hashes per
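
For context, a sketch of a typical `min_hash` setup, usually fed by a
`shingle` filter (the index and filter names here are made up for
illustration):

[source,console]
----
PUT /minhash-example
{
  "settings": {
    "analysis": {
      "filter": {
        "my_shingle": {
          "type": "shingle",
          "min_shingle_size": 5,
          "max_shingle_size": 5,
          "output_unigrams": false
        },
        "my_minhash": {
          "type": "min_hash",
          "hash_count": 1,
          "bucket_count": 512,
          "hash_set_size": 1,
          "with_rotation": true
        }
      },
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": ["my_shingle", "my_minhash"]
        }
      }
    }
  }
}
----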

+ 4 - 1
docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-multiplexer-tokenfilter]]
-=== Multiplexer Token Filter
+=== Multiplexer token filter
+++++
+<titleabbrev>Multiplexer</titleabbrev>
+++++
 
 A token filter of type `multiplexer` will emit multiple tokens at the same position,
 each version of the token having been run through a different filter.  Identical
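
For context, a minimal configuration sketch: each entry in `filters` is a
comma-separated chain applied to its own copy of the token (names are
illustrative):

[source,console]
----
PUT /multiplexer-example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": ["my_multiplexer"]
        }
      },
      "filter": {
        "my_multiplexer": {
          "type": "multiplexer",
          "filters": ["lowercase", "lowercase, porter_stem"]
        }
      }
    }
  }
}
----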

+ 4 - 1
docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-normalization-tokenfilter]]
-=== Normalization Token Filter
+=== Normalization token filters
+++++
+<titleabbrev>Normalization</titleabbrev>
+++++
 
 There are several token filters available which try to normalize special
 characters of a certain language.

+ 4 - 1
docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-pattern-capture-tokenfilter]]
-=== Pattern Capture Token Filter
+=== Pattern capture token filter
+++++
+<titleabbrev>Pattern capture</titleabbrev>
+++++
 
 The `pattern_capture` token filter, unlike the `pattern` tokenizer,
 emits a token for every capture group in the regular expression.
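
For context, a sketch of a `pattern_capture` filter that splits camelCase
identifiers into their parts (the filter name and pattern are illustrative):

[source,console]
----
PUT /pattern-capture-example
{
  "settings": {
    "analysis": {
      "filter": {
        "camel_case_parts": {
          "type": "pattern_capture",
          "preserve_original": true,
          "patterns": ["(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)"]
        }
      }
    }
  }
}
----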

+ 4 - 1
docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-pattern_replace-tokenfilter]]
-=== Pattern Replace Token Filter
+=== Pattern replace token filter
+++++
+<titleabbrev>Pattern replace</titleabbrev>
+++++
 
 The `pattern_replace` token filter makes it easy to handle string
 replacements based on a regular expression. The regular expression is
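
For context, a minimal sketch using the `pattern` and `replacement`
parameters (the sample text is illustrative):

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [
    {
      "type": "pattern_replace",
      "pattern": "(dog)",
      "replacement": "watch$1"
    }
  ],
  "text": "foxes jump lazy dogs"
}
----

This rewrites the token `dogs` to `watchdogs` and leaves the other tokens
untouched.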

+ 4 - 1
docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc

@@ -1,4 +1,7 @@
 [[analysis-phonetic-tokenfilter]]
-=== Phonetic Token Filter
+=== Phonetic token filter
+++++
+<titleabbrev>Phonetic</titleabbrev>
+++++
 
 The `phonetic` token filter is provided as the {plugins}/analysis-phonetic.html[`analysis-phonetic`] plugin.

+ 4 - 1
docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-porterstem-tokenfilter]]
-=== Porter Stem Token Filter
+=== Porter stem token filter
+++++
+<titleabbrev>Porter stem</titleabbrev>
+++++
 
 A token filter of type `porter_stem` that transforms the token stream as
 per the Porter stemming algorithm.

+ 4 - 1
docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-predicatefilter-tokenfilter]]
-=== Predicate Token Filter Script
+=== Predicate script token filter
+++++
+<titleabbrev>Predicate script</titleabbrev>
+++++
 
 The `predicate_token_filter` token filter takes a predicate script and removes tokens that do
 not match the predicate.
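
For context, a sketch that keeps only tokens longer than three characters,
assuming the Painless `token.term` property is available in this script
context:

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [
    {
      "type": "predicate_token_filter",
      "script": {
        "source": "token.term.length() > 3"
      }
    }
  ],
  "text": "the fox jumps the lazy dog"
}
----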

+ 4 - 1
docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-remove-duplicates-tokenfilter]]
-=== Remove Duplicates Token Filter
+=== Remove duplicates token filter
+++++
+<titleabbrev>Remove duplicates</titleabbrev>
+++++
 
 A token filter of type `remove_duplicates` that drops identical tokens at the
 same position.

+ 4 - 1
docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc

@@ -1,4 +1,7 @@
 [[analysis-reverse-tokenfilter]]
-=== Reverse Token Filter
+=== Reverse token filter
+++++
+<titleabbrev>Reverse</titleabbrev>
+++++
 
 A token filter of type `reverse` that simply reverses each token.

+ 4 - 1
docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-shingle-tokenfilter]]
-=== Shingle Token Filter
+=== Shingle token filter
+++++
+<titleabbrev>Shingle</titleabbrev>
+++++
 
 NOTE: Shingles are generally used to help speed up phrase queries.  Rather
 than building filter chains by hand, you may find it easier to use the

+ 4 - 1
docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-snowball-tokenfilter]]
-=== Snowball Token Filter
+=== Snowball token filter
+++++
+<titleabbrev>Snowball</titleabbrev>
+++++
 
 A filter that stems words using a Snowball-generated stemmer. The
 `language` parameter controls the stemmer with the following available
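
For context, a minimal configuration sketch (the filter name is
illustrative):

[source,console]
----
PUT /snowball-example
{
  "settings": {
    "analysis": {
      "filter": {
        "my_snowball": {
          "type": "snowball",
          "language": "English"
        }
      }
    }
  }
}
----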

+ 4 - 1
docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-stemmer-override-tokenfilter]]
-=== Stemmer Override Token Filter
+=== Stemmer override token filter
+++++
+<titleabbrev>Stemmer override</titleabbrev>
+++++
 
 Overrides stemming algorithms by applying a custom mapping, then
 protects these terms from being modified by stemmers. Must be placed
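
For context, a sketch using inline `rules` (a `rules_path` file can be used
instead; the mappings here are illustrative):

[source,console]
----
PUT /stemmer-override-example
{
  "settings": {
    "analysis": {
      "filter": {
        "custom_stems": {
          "type": "stemmer_override",
          "rules": [
            "running, runs => run",
            "stemmer => stemmer"
          ]
        }
      }
    }
  }
}
----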

+ 4 - 1
docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-stemmer-tokenfilter]]
-=== Stemmer Token Filter
+=== Stemmer token filter
+++++
+<titleabbrev>Stemmer</titleabbrev>
+++++
 
 // Adds attribute for the 'minimal_portuguese' stemmer values link.
 // This link contains ~, which is converted to subscript.

+ 4 - 1
docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-stop-tokenfilter]]
-=== Stop Token Filter
+=== Stop token filter
+++++
+<titleabbrev>Stop</titleabbrev>
+++++
 
 A token filter of type `stop` that removes stop words from token
 streams.
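
For context, a minimal sketch using the default `_english_` stop word list
(the sample text is illustrative):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": ["stop"],
  "text": "a quick fox jumps over the lazy dog"
}
----

With the defaults, `a` and `the` are removed and the remaining tokens pass
through unchanged.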

+ 4 - 1
docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-synonym-graph-tokenfilter]]
-=== Synonym Graph Token Filter
+=== Synonym graph token filter
+++++
+<titleabbrev>Synonym graph</titleabbrev>
+++++
 
 The `synonym_graph` token filter makes it easy to handle synonyms,
 including multi-word synonyms, correctly during the analysis process.
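
For context, a sketch with inline synonyms; this filter is intended for
search-time analyzers (the names are illustrative):

[source,console]
----
PUT /synonym-graph-example
{
  "settings": {
    "analysis": {
      "filter": {
        "graph_synonyms": {
          "type": "synonym_graph",
          "synonyms": ["ny, new york"]
        }
      },
      "analyzer": {
        "search_synonyms": {
          "tokenizer": "standard",
          "filter": ["lowercase", "graph_synonyms"]
        }
      }
    }
  }
}
----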

+ 4 - 1
docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-synonym-tokenfilter]]
-=== Synonym Token Filter
+=== Synonym token filter
+++++
+<titleabbrev>Synonym</titleabbrev>
+++++
 
 The `synonym` token filter makes it easy to handle synonyms during the
 analysis process. Synonyms are configured using a configuration file.
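
For context, a sketch pointing at a synonyms file under the config directory
(the path and filter name are illustrative):

[source,console]
----
PUT /synonym-example
{
  "settings": {
    "analysis": {
      "filter": {
        "my_synonyms": {
          "type": "synonym",
          "synonyms_path": "analysis/synonym.txt"
        }
      }
    }
  }
}
----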

+ 4 - 1
docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc

@@ -1,4 +1,7 @@
 [[analysis-trim-tokenfilter]]
-=== Trim Token Filter
+=== Trim token filter
+++++
+<titleabbrev>Trim</titleabbrev>
+++++
 
 The `trim` token filter trims the whitespace surrounding a token.

+ 4 - 1
docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-truncate-tokenfilter]]
-=== Truncate Token Filter
+=== Truncate token filter
+++++
+<titleabbrev>Truncate</titleabbrev>
+++++
 
 The `truncate` token filter can be used to truncate tokens to a
 specific length.
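
For context, a minimal sketch using the `length` parameter, which defaults to
10 (the sample text is illustrative):

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [
    {
      "type": "truncate",
      "length": 5
    }
  ],
  "text": "elasticsearch is scalable"
}
----

This yields `elast`, `is`, and `scala`.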

+ 4 - 1
docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-unique-tokenfilter]]
-=== Unique Token Filter
+=== Unique token filter
+++++
+<titleabbrev>Unique</titleabbrev>
+++++
 
 The `unique` token filter can be used to index only unique tokens during
 analysis. By default, it is applied to the entire token stream. If
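
For context, a minimal sketch (the sample text is illustrative); setting
`only_on_same_position` to `true` restricts deduplication to tokens at the
same position:

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": ["unique"],
  "text": "the quick fox jumps the lazy fox"
}
----

The duplicate `the` and `fox` tokens are dropped, leaving `the`, `quick`,
`fox`, `jumps`, `lazy`.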

+ 4 - 1
docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-uppercase-tokenfilter]]
-=== Uppercase Token Filter
+=== Uppercase token filter
+++++
+<titleabbrev>Uppercase</titleabbrev>
+++++
 
 A token filter of type `uppercase` that normalizes token text to upper
 case.

+ 4 - 1
docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-word-delimiter-graph-tokenfilter]]
-=== Word Delimiter Graph Token Filter
+=== Word delimiter graph token filter
+++++
+<titleabbrev>Word delimiter graph</titleabbrev>
+++++
 
 experimental[This functionality is marked as experimental in Lucene]
 

+ 4 - 1
docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc

@@ -1,5 +1,8 @@
 [[analysis-word-delimiter-tokenfilter]]
-=== Word Delimiter Token Filter
+=== Word delimiter token filter
+++++
+<titleabbrev>Word delimiter</titleabbrev>
+++++
 
 Named `word_delimiter`, it splits words into subwords and performs
 optional transformations on subword groups. Words are split into
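
For context, a minimal sketch; a `keyword` tokenizer is used so the filter
sees the raw string (the sample text is illustrative):

[source,console]
----
GET /_analyze
{
  "tokenizer": "keyword",
  "filter": ["word_delimiter"],
  "text": "Neil's Super-Duper-XL500--42+AutoCoder"
}
----

With the defaults this produces `Neil`, `Super`, `Duper`, `XL`, `500`, `42`,
`Auto`, `Coder`.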