[[analysis-classic-tokenizer]]
=== Classic Tokenizer

The `classic` tokenizer is a grammar-based tokenizer that is good for English
language documents. This tokenizer has heuristics for special treatment of
acronyms, company names, email addresses, and internet host names. However,
these rules don't always work, and the tokenizer doesn't work well for most
languages other than English:

* It splits words at most punctuation characters, removing punctuation. However, a
  dot that's not followed by whitespace is considered part of a token.

* It splits words at hyphens, unless there's a number in the token, in which case
  the whole token is interpreted as a product number and is not split.

* It recognizes email addresses and internet hostnames as one token, as
  illustrated in the sketch after this list.

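For instance, the following `_analyze` request (a sketch with an invented sample
sentence; the exact token types depend on the underlying Lucene grammar) should
keep the email address `john.doe@example.com` and the product number `XL-5000`
each intact as a single token, while the digit-free `Wi-Fi` is split at the
hyphen:

[source,js]
---------------------------
POST _analyze
{
  "tokenizer": "classic",
  "text": "Email john.doe@example.com about the XL-5000 Wi-Fi router"
}
---------------------------
// CONSOLE

The terms should come out along the lines of
`[ Email, john.doe@example.com, about, the, XL-5000, Wi, Fi, router ]`.
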
[float]
=== Example output

[source,js]
---------------------------
POST _analyze
{
  "tokenizer": "classic",
  "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone."
}
---------------------------
// CONSOLE

/////////////////////

[source,js]
----------------------------
{
  "tokens": [
    {
      "token": "The",
      "start_offset": 0,
      "end_offset": 3,
      "type": "<ALPHANUM>",
      "position": 0
    },
    {
      "token": "2",
      "start_offset": 4,
      "end_offset": 5,
      "type": "<ALPHANUM>",
      "position": 1
    },
    {
      "token": "QUICK",
      "start_offset": 6,
      "end_offset": 11,
      "type": "<ALPHANUM>",
      "position": 2
    },
    {
      "token": "Brown",
      "start_offset": 12,
      "end_offset": 17,
      "type": "<ALPHANUM>",
      "position": 3
    },
    {
      "token": "Foxes",
      "start_offset": 18,
      "end_offset": 23,
      "type": "<ALPHANUM>",
      "position": 4
    },
    {
      "token": "jumped",
      "start_offset": 24,
      "end_offset": 30,
      "type": "<ALPHANUM>",
      "position": 5
    },
    {
      "token": "over",
      "start_offset": 31,
      "end_offset": 35,
      "type": "<ALPHANUM>",
      "position": 6
    },
    {
      "token": "the",
      "start_offset": 36,
      "end_offset": 39,
      "type": "<ALPHANUM>",
      "position": 7
    },
    {
      "token": "lazy",
      "start_offset": 40,
      "end_offset": 44,
      "type": "<ALPHANUM>",
      "position": 8
    },
    {
      "token": "dog's",
      "start_offset": 45,
      "end_offset": 50,
      "type": "<APOSTROPHE>",
      "position": 9
    },
    {
      "token": "bone",
      "start_offset": 51,
      "end_offset": 55,
      "type": "<ALPHANUM>",
      "position": 10
    }
  ]
}
----------------------------
// TESTRESPONSE

/////////////////////

The above sentence would produce the following terms:

[source,text]
---------------------------
[ The, 2, QUICK, Brown, Foxes, jumped, over, the, lazy, dog's, bone ]
---------------------------

[float]
=== Configuration

The `classic` tokenizer accepts the following parameters:

[horizontal]
`max_token_length`::

    The maximum token length. If a token is seen that exceeds this length then
    it is split at `max_token_length` intervals. Defaults to `255`.

[float]
=== Example configuration

In this example, we configure the `classic` tokenizer to have a
`max_token_length` of 5 (for demonstration purposes):

[source,js]
----------------------------
PUT my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "my_tokenizer"
        }
      },
      "tokenizer": {
        "my_tokenizer": {
          "type": "classic",
          "max_token_length": 5
        }
      }
    }
  }
}

GET _cluster/health?wait_for_status=yellow

POST my_index/_analyze
{
  "analyzer": "my_analyzer",
  "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone."
}
----------------------------
// CONSOLE

/////////////////////

[source,js]
----------------------------
{
  "tokens": [
    {
      "token": "The",
      "start_offset": 0,
      "end_offset": 3,
      "type": "<ALPHANUM>",
      "position": 0
    },
    {
      "token": "2",
      "start_offset": 4,
      "end_offset": 5,
      "type": "<ALPHANUM>",
      "position": 1
    },
    {
      "token": "QUICK",
      "start_offset": 6,
      "end_offset": 11,
      "type": "<ALPHANUM>",
      "position": 2
    },
    {
      "token": "Brown",
      "start_offset": 12,
      "end_offset": 17,
      "type": "<ALPHANUM>",
      "position": 3
    },
    {
      "token": "Foxes",
      "start_offset": 18,
      "end_offset": 23,
      "type": "<ALPHANUM>",
      "position": 4
    },
    {
      "token": "jumpe",
      "start_offset": 24,
      "end_offset": 29,
      "type": "<ALPHANUM>",
      "position": 5
    },
    {
      "token": "d",
      "start_offset": 29,
      "end_offset": 30,
      "type": "<ALPHANUM>",
      "position": 6
    },
    {
      "token": "over",
      "start_offset": 31,
      "end_offset": 35,
      "type": "<ALPHANUM>",
      "position": 7
    },
    {
      "token": "the",
      "start_offset": 36,
      "end_offset": 39,
      "type": "<ALPHANUM>",
      "position": 8
    },
    {
      "token": "lazy",
      "start_offset": 40,
      "end_offset": 44,
      "type": "<ALPHANUM>",
      "position": 9
    },
    {
      "token": "dog's",
      "start_offset": 45,
      "end_offset": 50,
      "type": "<APOSTROPHE>",
      "position": 10
    },
    {
      "token": "bone",
      "start_offset": 51,
      "end_offset": 55,
      "type": "<ALPHANUM>",
      "position": 11
    }
  ]
}
----------------------------
// TESTRESPONSE

/////////////////////

The above example produces the following terms; note how `jumped`, which is
longer than five characters, is split into `jumpe` and the remainder `d`:

[source,text]
---------------------------
[ The, 2, QUICK, Brown, Foxes, jumpe, d, over, the, lazy, dog's, bone ]
---------------------------