[[analysis-letter-tokenizer]]
=== Letter tokenizer
++++
<titleabbrev>Letter</titleabbrev>
++++

The `letter` tokenizer breaks text into terms whenever it encounters a
character which is not a letter. It does a reasonable job for most European
languages, but does a terrible job for some Asian languages, where words are
not separated by spaces.
[float]
=== Example output

[source,console]
---------------------------
POST _analyze
{
  "tokenizer": "letter",
  "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone."
}
---------------------------
/////////////////////

[source,console-result]
----------------------------
{
  "tokens": [
    {
      "token": "The",
      "start_offset": 0,
      "end_offset": 3,
      "type": "word",
      "position": 0
    },
    {
      "token": "QUICK",
      "start_offset": 6,
      "end_offset": 11,
      "type": "word",
      "position": 1
    },
    {
      "token": "Brown",
      "start_offset": 12,
      "end_offset": 17,
      "type": "word",
      "position": 2
    },
    {
      "token": "Foxes",
      "start_offset": 18,
      "end_offset": 23,
      "type": "word",
      "position": 3
    },
    {
      "token": "jumped",
      "start_offset": 24,
      "end_offset": 30,
      "type": "word",
      "position": 4
    },
    {
      "token": "over",
      "start_offset": 31,
      "end_offset": 35,
      "type": "word",
      "position": 5
    },
    {
      "token": "the",
      "start_offset": 36,
      "end_offset": 39,
      "type": "word",
      "position": 6
    },
    {
      "token": "lazy",
      "start_offset": 40,
      "end_offset": 44,
      "type": "word",
      "position": 7
    },
    {
      "token": "dog",
      "start_offset": 45,
      "end_offset": 48,
      "type": "word",
      "position": 8
    },
    {
      "token": "s",
      "start_offset": 49,
      "end_offset": 50,
      "type": "word",
      "position": 9
    },
    {
      "token": "bone",
      "start_offset": 51,
      "end_offset": 55,
      "type": "word",
      "position": 10
    }
  ]
}
----------------------------

/////////////////////

The above sentence would produce the following terms:

[source,text]
---------------------------
[ The, QUICK, Brown, Foxes, jumped, over, the, lazy, dog, s, bone ]
---------------------------
[float]
=== Configuration

The `letter` tokenizer is not configurable.
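
Although it takes no parameters, you can still reference the `letter`
tokenizer when defining a custom analyzer. The following sketch (the index
name `my-index-000001` and the analyzer name `my_letter_analyzer` are
illustrative) pairs it with the built-in `lowercase` token filter:

[source,console]
---------------------------
PUT my-index-000001
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_letter_analyzer": {
          "type": "custom",
          "tokenizer": "letter",
          "filter": [ "lowercase" ]
        }
      }
    }
  }
}
---------------------------

With this configuration in place, text indexed into `my-index-000001` with
`my_letter_analyzer` is split on non-letter characters and then lowercased.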