  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<bengali-analyzer,`bengali`>>,
  9. <<brazilian-analyzer,`brazilian`>>,
  10. <<bulgarian-analyzer,`bulgarian`>>,
  11. <<catalan-analyzer,`catalan`>>,
  12. <<cjk-analyzer,`cjk`>>,
  13. <<czech-analyzer,`czech`>>,
  14. <<danish-analyzer,`danish`>>,
  15. <<dutch-analyzer,`dutch`>>,
  16. <<english-analyzer,`english`>>,
  17. <<estonian-analyzer,`estonian`>>,
  18. <<finnish-analyzer,`finnish`>>,
  19. <<french-analyzer,`french`>>,
  20. <<galician-analyzer,`galician`>>,
  21. <<german-analyzer,`german`>>,
  22. <<greek-analyzer,`greek`>>,
  23. <<hindi-analyzer,`hindi`>>,
  24. <<hungarian-analyzer,`hungarian`>>,
  25. <<indonesian-analyzer,`indonesian`>>,
  26. <<irish-analyzer,`irish`>>,
  27. <<italian-analyzer,`italian`>>,
  28. <<latvian-analyzer,`latvian`>>,
  29. <<lithuanian-analyzer,`lithuanian`>>,
  30. <<norwegian-analyzer,`norwegian`>>,
  31. <<persian-analyzer,`persian`>>,
  32. <<portuguese-analyzer,`portuguese`>>,
  33. <<romanian-analyzer,`romanian`>>,
  34. <<russian-analyzer,`russian`>>,
  35. <<sorani-analyzer,`sorani`>>,
  36. <<spanish-analyzer,`spanish`>>,
  37. <<swedish-analyzer,`swedish`>>,
  38. <<turkish-analyzer,`turkish`>>,
  39. <<thai-analyzer,`thai`>>.
  40. ==== Configuring language analyzers
  41. ===== Stopwords
  42. All analyzers support setting custom `stopwords` either internally in
  43. the config, or by using an external stopwords file by setting
  44. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  45. more details.
  46. ===== Excluding words from stemming
  47. The `stem_exclusion` parameter allows you to specify an array
  48. of lowercase words that should not be stemmed. Internally, this
  49. functionality is implemented by adding the
  50. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  51. with the `keywords` set to the value of the `stem_exclusion` parameter.
  52. The following analyzers support setting a custom `stem_exclusion` list:
  53. `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`,
  54. `dutch`, `english`, `finnish`, `french`, `galician`,
  55. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  56. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  57. `spanish`, `swedish`, `turkish`.
  58. ==== Reimplementing language analyzers
  59. The built-in language analyzers can be reimplemented as `custom` analyzers
  60. (as described below) in order to customize their behaviour.
  61. NOTE: If you do not intend to exclude words from being stemmed (the
  62. equivalent of the `stem_exclusion` parameter above), then you should remove
  63. the `keyword_marker` token filter from the custom analyzer configuration.
  64. [[arabic-analyzer]]
  65. ===== `arabic` analyzer
  66. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  67. [source,console]
  68. ----------------------------------------------------
  69. PUT /arabic_example
  70. {
  71. "settings": {
  72. "analysis": {
  73. "filter": {
  74. "arabic_stop": {
  75. "type": "stop",
  76. "stopwords": "_arabic_" <1>
  77. },
  78. "arabic_keywords": {
  79. "type": "keyword_marker",
  80. "keywords": ["مثال"] <2>
  81. },
  82. "arabic_stemmer": {
  83. "type": "stemmer",
  84. "language": "arabic"
  85. }
  86. },
  87. "analyzer": {
  88. "rebuilt_arabic": {
  89. "tokenizer": "standard",
  90. "filter": [
  91. "lowercase",
  92. "decimal_digit",
  93. "arabic_stop",
  94. "arabic_normalization",
  95. "arabic_keywords",
  96. "arabic_stemmer"
  97. ]
  98. }
  99. }
  100. }
  101. }
  102. }
  103. ----------------------------------------------------
  104. // TEST[s/"arabic_keywords",//]
  105. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
  106. <1> The default stopwords can be overridden with the `stopwords`
  107. or `stopwords_path` parameters.
  108. <2> This filter should be removed unless there are words which should
  109. be excluded from stemming.
  110. [[armenian-analyzer]]
  111. ===== `armenian` analyzer
  112. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  113. [source,console]
  114. ----------------------------------------------------
  115. PUT /armenian_example
  116. {
  117. "settings": {
  118. "analysis": {
  119. "filter": {
  120. "armenian_stop": {
  121. "type": "stop",
  122. "stopwords": "_armenian_" <1>
  123. },
  124. "armenian_keywords": {
  125. "type": "keyword_marker",
  126. "keywords": ["օրինակ"] <2>
  127. },
  128. "armenian_stemmer": {
  129. "type": "stemmer",
  130. "language": "armenian"
  131. }
  132. },
  133. "analyzer": {
  134. "rebuilt_armenian": {
  135. "tokenizer": "standard",
  136. "filter": [
  137. "lowercase",
  138. "armenian_stop",
  139. "armenian_keywords",
  140. "armenian_stemmer"
  141. ]
  142. }
  143. }
  144. }
  145. }
  146. }
  147. ----------------------------------------------------
  148. // TEST[s/"armenian_keywords",//]
  149. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
  150. <1> The default stopwords can be overridden with the `stopwords`
  151. or `stopwords_path` parameters.
  152. <2> This filter should be removed unless there are words which should
  153. be excluded from stemming.
  154. [[basque-analyzer]]
  155. ===== `basque` analyzer
  156. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  157. [source,console]
  158. ----------------------------------------------------
  159. PUT /basque_example
  160. {
  161. "settings": {
  162. "analysis": {
  163. "filter": {
  164. "basque_stop": {
  165. "type": "stop",
  166. "stopwords": "_basque_" <1>
  167. },
  168. "basque_keywords": {
  169. "type": "keyword_marker",
  170. "keywords": ["Adibidez"] <2>
  171. },
  172. "basque_stemmer": {
  173. "type": "stemmer",
  174. "language": "basque"
  175. }
  176. },
  177. "analyzer": {
  178. "rebuilt_basque": {
  179. "tokenizer": "standard",
  180. "filter": [
  181. "lowercase",
  182. "basque_stop",
  183. "basque_keywords",
  184. "basque_stemmer"
  185. ]
  186. }
  187. }
  188. }
  189. }
  190. }
  191. ----------------------------------------------------
  192. // TEST[s/"basque_keywords",//]
  193. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
  194. <1> The default stopwords can be overridden with the `stopwords`
  195. or `stopwords_path` parameters.
  196. <2> This filter should be removed unless there are words which should
  197. be excluded from stemming.
  198. [[bengali-analyzer]]
  199. ===== `bengali` analyzer
  200. The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
  201. [source,console]
  202. ----------------------------------------------------
  203. PUT /bengali_example
  204. {
  205. "settings": {
  206. "analysis": {
  207. "filter": {
  208. "bengali_stop": {
  209. "type": "stop",
  210. "stopwords": "_bengali_" <1>
  211. },
  212. "bengali_keywords": {
  213. "type": "keyword_marker",
  214. "keywords": ["উদাহরণ"] <2>
  215. },
  216. "bengali_stemmer": {
  217. "type": "stemmer",
  218. "language": "bengali"
  219. }
  220. },
  221. "analyzer": {
  222. "rebuilt_bengali": {
  223. "tokenizer": "standard",
  224. "filter": [
  225. "lowercase",
  226. "decimal_digit",
  227. "bengali_keywords",
  228. "indic_normalization",
  229. "bengali_normalization",
  230. "bengali_stop",
  231. "bengali_stemmer"
  232. ]
  233. }
  234. }
  235. }
  236. }
  237. }
  238. ----------------------------------------------------
  239. // TEST[s/"bengali_keywords",//]
  240. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
  241. <1> The default stopwords can be overridden with the `stopwords`
  242. or `stopwords_path` parameters.
  243. <2> This filter should be removed unless there are words which should
  244. be excluded from stemming.
  245. [[brazilian-analyzer]]
  246. ===== `brazilian` analyzer
  247. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  248. [source,console]
  249. ----------------------------------------------------
  250. PUT /brazilian_example
  251. {
  252. "settings": {
  253. "analysis": {
  254. "filter": {
  255. "brazilian_stop": {
  256. "type": "stop",
  257. "stopwords": "_brazilian_" <1>
  258. },
  259. "brazilian_keywords": {
  260. "type": "keyword_marker",
  261. "keywords": ["exemplo"] <2>
  262. },
  263. "brazilian_stemmer": {
  264. "type": "stemmer",
  265. "language": "brazilian"
  266. }
  267. },
  268. "analyzer": {
  269. "rebuilt_brazilian": {
  270. "tokenizer": "standard",
  271. "filter": [
  272. "lowercase",
  273. "brazilian_stop",
  274. "brazilian_keywords",
  275. "brazilian_stemmer"
  276. ]
  277. }
  278. }
  279. }
  280. }
  281. }
  282. ----------------------------------------------------
  283. // TEST[s/"brazilian_keywords",//]
  284. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
  285. <1> The default stopwords can be overridden with the `stopwords`
  286. or `stopwords_path` parameters.
  287. <2> This filter should be removed unless there are words which should
  288. be excluded from stemming.
  289. [[bulgarian-analyzer]]
  290. ===== `bulgarian` analyzer
  291. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  292. [source,console]
  293. ----------------------------------------------------
  294. PUT /bulgarian_example
  295. {
  296. "settings": {
  297. "analysis": {
  298. "filter": {
  299. "bulgarian_stop": {
  300. "type": "stop",
  301. "stopwords": "_bulgarian_" <1>
  302. },
  303. "bulgarian_keywords": {
  304. "type": "keyword_marker",
  305. "keywords": ["пример"] <2>
  306. },
  307. "bulgarian_stemmer": {
  308. "type": "stemmer",
  309. "language": "bulgarian"
  310. }
  311. },
  312. "analyzer": {
  313. "rebuilt_bulgarian": {
  314. "tokenizer": "standard",
  315. "filter": [
  316. "lowercase",
  317. "bulgarian_stop",
  318. "bulgarian_keywords",
  319. "bulgarian_stemmer"
  320. ]
  321. }
  322. }
  323. }
  324. }
  325. }
  326. ----------------------------------------------------
  327. // TEST[s/"bulgarian_keywords",//]
  328. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
  329. <1> The default stopwords can be overridden with the `stopwords`
  330. or `stopwords_path` parameters.
  331. <2> This filter should be removed unless there are words which should
  332. be excluded from stemming.
  333. [[catalan-analyzer]]
  334. ===== `catalan` analyzer
  335. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  336. [source,console]
  337. ----------------------------------------------------
  338. PUT /catalan_example
  339. {
  340. "settings": {
  341. "analysis": {
  342. "filter": {
  343. "catalan_elision": {
  344. "type": "elision",
  345. "articles": [ "d", "l", "m", "n", "s", "t"],
  346. "articles_case": true
  347. },
  348. "catalan_stop": {
  349. "type": "stop",
  350. "stopwords": "_catalan_" <1>
  351. },
  352. "catalan_keywords": {
  353. "type": "keyword_marker",
  354. "keywords": ["example"] <2>
  355. },
  356. "catalan_stemmer": {
  357. "type": "stemmer",
  358. "language": "catalan"
  359. }
  360. },
  361. "analyzer": {
  362. "rebuilt_catalan": {
  363. "tokenizer": "standard",
  364. "filter": [
  365. "catalan_elision",
  366. "lowercase",
  367. "catalan_stop",
  368. "catalan_keywords",
  369. "catalan_stemmer"
  370. ]
  371. }
  372. }
  373. }
  374. }
  375. }
  376. ----------------------------------------------------
  377. // TEST[s/"catalan_keywords",//]
  378. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
  379. <1> The default stopwords can be overridden with the `stopwords`
  380. or `stopwords_path` parameters.
  381. <2> This filter should be removed unless there are words which should
  382. be excluded from stemming.
  383. [[cjk-analyzer]]
  384. ===== `cjk` analyzer
  385. NOTE: You may find that `icu_analyzer` in the ICU analysis plugin works better
  386. for CJK text than the `cjk` analyzer. Experiment with your text and queries.
  387. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  388. [source,console]
  389. ----------------------------------------------------
  390. PUT /cjk_example
  391. {
  392. "settings": {
  393. "analysis": {
  394. "filter": {
  395. "english_stop": {
  396. "type": "stop",
  397. "stopwords": [ <1>
  398. "a", "and", "are", "as", "at", "be", "but", "by", "for",
  399. "if", "in", "into", "is", "it", "no", "not", "of", "on",
  400. "or", "s", "such", "t", "that", "the", "their", "then",
  401. "there", "these", "they", "this", "to", "was", "will",
  402. "with", "www"
  403. ]
  404. }
  405. },
  406. "analyzer": {
  407. "rebuilt_cjk": {
  408. "tokenizer": "standard",
  409. "filter": [
  410. "cjk_width",
  411. "lowercase",
  412. "cjk_bigram",
  413. "english_stop"
  414. ]
  415. }
  416. }
  417. }
  418. }
  419. }
  420. ----------------------------------------------------
  422. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
  423. <1> The default stopwords can be overridden with the `stopwords`
  424. or `stopwords_path` parameters. The default stop words are
  425. *almost* the same as the `_english_` set, but not exactly
  426. the same.
  427. [[czech-analyzer]]
  428. ===== `czech` analyzer
  429. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  430. [source,console]
  431. ----------------------------------------------------
  432. PUT /czech_example
  433. {
  434. "settings": {
  435. "analysis": {
  436. "filter": {
  437. "czech_stop": {
  438. "type": "stop",
  439. "stopwords": "_czech_" <1>
  440. },
  441. "czech_keywords": {
  442. "type": "keyword_marker",
  443. "keywords": ["příklad"] <2>
  444. },
  445. "czech_stemmer": {
  446. "type": "stemmer",
  447. "language": "czech"
  448. }
  449. },
  450. "analyzer": {
  451. "rebuilt_czech": {
  452. "tokenizer": "standard",
  453. "filter": [
  454. "lowercase",
  455. "czech_stop",
  456. "czech_keywords",
  457. "czech_stemmer"
  458. ]
  459. }
  460. }
  461. }
  462. }
  463. }
  464. ----------------------------------------------------
  465. // TEST[s/"czech_keywords",//]
  466. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
  467. <1> The default stopwords can be overridden with the `stopwords`
  468. or `stopwords_path` parameters.
  469. <2> This filter should be removed unless there are words which should
  470. be excluded from stemming.
  471. [[danish-analyzer]]
  472. ===== `danish` analyzer
  473. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  474. [source,console]
  475. ----------------------------------------------------
  476. PUT /danish_example
  477. {
  478. "settings": {
  479. "analysis": {
  480. "filter": {
  481. "danish_stop": {
  482. "type": "stop",
  483. "stopwords": "_danish_" <1>
  484. },
  485. "danish_keywords": {
  486. "type": "keyword_marker",
  487. "keywords": ["eksempel"] <2>
  488. },
  489. "danish_stemmer": {
  490. "type": "stemmer",
  491. "language": "danish"
  492. }
  493. },
  494. "analyzer": {
  495. "rebuilt_danish": {
  496. "tokenizer": "standard",
  497. "filter": [
  498. "lowercase",
  499. "danish_stop",
  500. "danish_keywords",
  501. "danish_stemmer"
  502. ]
  503. }
  504. }
  505. }
  506. }
  507. }
  508. ----------------------------------------------------
  509. // TEST[s/"danish_keywords",//]
  510. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
  511. <1> The default stopwords can be overridden with the `stopwords`
  512. or `stopwords_path` parameters.
  513. <2> This filter should be removed unless there are words which should
  514. be excluded from stemming.
  515. [[dutch-analyzer]]
  516. ===== `dutch` analyzer
  517. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  518. [source,console]
  519. ----------------------------------------------------
  520. PUT /dutch_example
  521. {
  522. "settings": {
  523. "analysis": {
  524. "filter": {
  525. "dutch_stop": {
  526. "type": "stop",
  527. "stopwords": "_dutch_" <1>
  528. },
  529. "dutch_keywords": {
  530. "type": "keyword_marker",
  531. "keywords": ["voorbeeld"] <2>
  532. },
  533. "dutch_stemmer": {
  534. "type": "stemmer",
  535. "language": "dutch"
  536. },
  537. "dutch_override": {
  538. "type": "stemmer_override",
  539. "rules": [
  540. "fiets=>fiets",
  541. "bromfiets=>bromfiets",
  542. "ei=>eier",
  543. "kind=>kinder"
  544. ]
  545. }
  546. },
  547. "analyzer": {
  548. "rebuilt_dutch": {
  549. "tokenizer": "standard",
  550. "filter": [
  551. "lowercase",
  552. "dutch_stop",
  553. "dutch_keywords",
  554. "dutch_override",
  555. "dutch_stemmer"
  556. ]
  557. }
  558. }
  559. }
  560. }
  561. }
  562. ----------------------------------------------------
  563. // TEST[s/"dutch_keywords",//]
  564. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
  565. <1> The default stopwords can be overridden with the `stopwords`
  566. or `stopwords_path` parameters.
  567. <2> This filter should be removed unless there are words which should
  568. be excluded from stemming.
  569. [[english-analyzer]]
  570. ===== `english` analyzer
  571. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  572. [source,console]
  573. ----------------------------------------------------
  574. PUT /english_example
  575. {
  576. "settings": {
  577. "analysis": {
  578. "filter": {
  579. "english_stop": {
  580. "type": "stop",
  581. "stopwords": "_english_" <1>
  582. },
  583. "english_keywords": {
  584. "type": "keyword_marker",
  585. "keywords": ["example"] <2>
  586. },
  587. "english_stemmer": {
  588. "type": "stemmer",
  589. "language": "english"
  590. },
  591. "english_possessive_stemmer": {
  592. "type": "stemmer",
  593. "language": "possessive_english"
  594. }
  595. },
  596. "analyzer": {
  597. "rebuilt_english": {
  598. "tokenizer": "standard",
  599. "filter": [
  600. "english_possessive_stemmer",
  601. "lowercase",
  602. "english_stop",
  603. "english_keywords",
  604. "english_stemmer"
  605. ]
  606. }
  607. }
  608. }
  609. }
  610. }
  611. ----------------------------------------------------
  612. // TEST[s/"english_keywords",//]
  613. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
  614. <1> The default stopwords can be overridden with the `stopwords`
  615. or `stopwords_path` parameters.
  616. <2> This filter should be removed unless there are words which should
  617. be excluded from stemming.
  618. [[estonian-analyzer]]
  619. ===== `estonian` analyzer
  620. The `estonian` analyzer could be reimplemented as a `custom` analyzer as follows:
  621. [source,console]
  622. ----------------------------------------------------
  623. PUT /estonian_example
  624. {
  625. "settings": {
  626. "analysis": {
  627. "filter": {
  628. "estonian_stop": {
  629. "type": "stop",
  630. "stopwords": "_estonian_" <1>
  631. },
  632. "estonian_keywords": {
  633. "type": "keyword_marker",
  634. "keywords": ["näide"] <2>
  635. },
  636. "estonian_stemmer": {
  637. "type": "stemmer",
  638. "language": "estonian"
  639. }
  640. },
  641. "analyzer": {
  642. "rebuilt_estonian": {
  643. "tokenizer": "standard",
  644. "filter": [
  645. "lowercase",
  646. "estonian_stop",
  647. "estonian_keywords",
  648. "estonian_stemmer"
  649. ]
  650. }
  651. }
  652. }
  653. }
  654. }
  655. ----------------------------------------------------
  656. // TEST[s/"estonian_keywords",//]
  657. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: estonian_example, first: estonian, second: rebuilt_estonian}\nendyaml\n/]
  658. <1> The default stopwords can be overridden with the `stopwords`
  659. or `stopwords_path` parameters.
  660. <2> This filter should be removed unless there are words which should
  661. be excluded from stemming.
  662. [[finnish-analyzer]]
  663. ===== `finnish` analyzer
  664. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  665. [source,console]
  666. ----------------------------------------------------
  667. PUT /finnish_example
  668. {
  669. "settings": {
  670. "analysis": {
  671. "filter": {
  672. "finnish_stop": {
  673. "type": "stop",
  674. "stopwords": "_finnish_" <1>
  675. },
  676. "finnish_keywords": {
  677. "type": "keyword_marker",
  678. "keywords": ["esimerkki"] <2>
  679. },
  680. "finnish_stemmer": {
  681. "type": "stemmer",
  682. "language": "finnish"
  683. }
  684. },
  685. "analyzer": {
  686. "rebuilt_finnish": {
  687. "tokenizer": "standard",
  688. "filter": [
  689. "lowercase",
  690. "finnish_stop",
  691. "finnish_keywords",
  692. "finnish_stemmer"
  693. ]
  694. }
  695. }
  696. }
  697. }
  698. }
  699. ----------------------------------------------------
  700. // TEST[s/"finnish_keywords",//]
  701. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
  702. <1> The default stopwords can be overridden with the `stopwords`
  703. or `stopwords_path` parameters.
  704. <2> This filter should be removed unless there are words which should
  705. be excluded from stemming.
  706. [[french-analyzer]]
  707. ===== `french` analyzer
  708. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  709. [source,console]
  710. ----------------------------------------------------
  711. PUT /french_example
  712. {
  713. "settings": {
  714. "analysis": {
  715. "filter": {
  716. "french_elision": {
  717. "type": "elision",
  718. "articles_case": true,
  719. "articles": [
  720. "l", "m", "t", "qu", "n", "s",
  721. "j", "d", "c", "jusqu", "quoiqu",
  722. "lorsqu", "puisqu"
  723. ]
  724. },
  725. "french_stop": {
  726. "type": "stop",
  727. "stopwords": "_french_" <1>
  728. },
  729. "french_keywords": {
  730. "type": "keyword_marker",
  731. "keywords": ["Example"] <2>
  732. },
  733. "french_stemmer": {
  734. "type": "stemmer",
  735. "language": "light_french"
  736. }
  737. },
  738. "analyzer": {
  739. "rebuilt_french": {
  740. "tokenizer": "standard",
  741. "filter": [
  742. "french_elision",
  743. "lowercase",
  744. "french_stop",
  745. "french_keywords",
  746. "french_stemmer"
  747. ]
  748. }
  749. }
  750. }
  751. }
  752. }
  753. ----------------------------------------------------
  754. // TEST[s/"french_keywords",//]
  755. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
  756. <1> The default stopwords can be overridden with the `stopwords`
  757. or `stopwords_path` parameters.
  758. <2> This filter should be removed unless there are words which should
  759. be excluded from stemming.
  760. [[galician-analyzer]]
  761. ===== `galician` analyzer
  762. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  763. [source,console]
  764. ----------------------------------------------------
  765. PUT /galician_example
  766. {
  767. "settings": {
  768. "analysis": {
  769. "filter": {
  770. "galician_stop": {
  771. "type": "stop",
  772. "stopwords": "_galician_" <1>
  773. },
  774. "galician_keywords": {
  775. "type": "keyword_marker",
  776. "keywords": ["exemplo"] <2>
  777. },
  778. "galician_stemmer": {
  779. "type": "stemmer",
  780. "language": "galician"
  781. }
  782. },
  783. "analyzer": {
  784. "rebuilt_galician": {
  785. "tokenizer": "standard",
  786. "filter": [
  787. "lowercase",
  788. "galician_stop",
  789. "galician_keywords",
  790. "galician_stemmer"
  791. ]
  792. }
  793. }
  794. }
  795. }
  796. }
  797. ----------------------------------------------------
  798. // TEST[s/"galician_keywords",//]
  799. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
  800. <1> The default stopwords can be overridden with the `stopwords`
  801. or `stopwords_path` parameters.
  802. <2> This filter should be removed unless there are words which should
  803. be excluded from stemming.
  804. [[german-analyzer]]
  805. ===== `german` analyzer
  806. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  807. [source,console]
  808. ----------------------------------------------------
  809. PUT /german_example
  810. {
  811. "settings": {
  812. "analysis": {
  813. "filter": {
  814. "german_stop": {
  815. "type": "stop",
  816. "stopwords": "_german_" <1>
  817. },
  818. "german_keywords": {
  819. "type": "keyword_marker",
  820. "keywords": ["Beispiel"] <2>
  821. },
  822. "german_stemmer": {
  823. "type": "stemmer",
  824. "language": "light_german"
  825. }
  826. },
  827. "analyzer": {
  828. "rebuilt_german": {
  829. "tokenizer": "standard",
  830. "filter": [
  831. "lowercase",
  832. "german_stop",
  833. "german_keywords",
  834. "german_normalization",
  835. "german_stemmer"
  836. ]
  837. }
  838. }
  839. }
  840. }
  841. }
  842. ----------------------------------------------------
  843. // TEST[s/"german_keywords",//]
  844. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
  845. <1> The default stopwords can be overridden with the `stopwords`
  846. or `stopwords_path` parameters.
  847. <2> This filter should be removed unless there are words which should
  848. be excluded from stemming.
  849. [[greek-analyzer]]
  850. ===== `greek` analyzer
  851. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  852. [source,console]
  853. ----------------------------------------------------
  854. PUT /greek_example
  855. {
  856. "settings": {
  857. "analysis": {
  858. "filter": {
  859. "greek_stop": {
  860. "type": "stop",
  861. "stopwords": "_greek_" <1>
  862. },
  863. "greek_lowercase": {
  864. "type": "lowercase",
  865. "language": "greek"
  866. },
  867. "greek_keywords": {
  868. "type": "keyword_marker",
  869. "keywords": ["παράδειγμα"] <2>
  870. },
  871. "greek_stemmer": {
  872. "type": "stemmer",
  873. "language": "greek"
  874. }
  875. },
  876. "analyzer": {
  877. "rebuilt_greek": {
  878. "tokenizer": "standard",
  879. "filter": [
  880. "greek_lowercase",
  881. "greek_stop",
  882. "greek_keywords",
  883. "greek_stemmer"
  884. ]
  885. }
  886. }
  887. }
  888. }
  889. }
  890. ----------------------------------------------------
  891. // TEST[s/"greek_keywords",//]
  892. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
  893. <1> The default stopwords can be overridden with the `stopwords`
  894. or `stopwords_path` parameters.
  895. <2> This filter should be removed unless there are words which should
  896. be excluded from stemming.
  897. [[hindi-analyzer]]
  898. ===== `hindi` analyzer
  899. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  900. [source,console]
  901. ----------------------------------------------------
  902. PUT /hindi_example
  903. {
  904. "settings": {
  905. "analysis": {
  906. "filter": {
  907. "hindi_stop": {
  908. "type": "stop",
  909. "stopwords": "_hindi_" <1>
  910. },
  911. "hindi_keywords": {
  912. "type": "keyword_marker",
  913. "keywords": ["उदाहरण"] <2>
  914. },
  915. "hindi_stemmer": {
  916. "type": "stemmer",
  917. "language": "hindi"
  918. }
  919. },
  920. "analyzer": {
  921. "rebuilt_hindi": {
  922. "tokenizer": "standard",
  923. "filter": [
  924. "lowercase",
  925. "decimal_digit",
  926. "hindi_keywords",
  927. "indic_normalization",
  928. "hindi_normalization",
  929. "hindi_stop",
  930. "hindi_stemmer"
  931. ]
  932. }
  933. }
  934. }
  935. }
  936. }
  937. ----------------------------------------------------
  938. // TEST[s/"hindi_keywords",//]
  939. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
  940. <1> The default stopwords can be overridden with the `stopwords`
  941. or `stopwords_path` parameters.
  942. <2> This filter should be removed unless there are words which should
  943. be excluded from stemming.
  944. [[hungarian-analyzer]]
  945. ===== `hungarian` analyzer
  946. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  947. [source,console]
  948. ----------------------------------------------------
  949. PUT /hungarian_example
  950. {
  951. "settings": {
  952. "analysis": {
  953. "filter": {
  954. "hungarian_stop": {
  955. "type": "stop",
  956. "stopwords": "_hungarian_" <1>
  957. },
  958. "hungarian_keywords": {
  959. "type": "keyword_marker",
  960. "keywords": ["példa"] <2>
  961. },
  962. "hungarian_stemmer": {
  963. "type": "stemmer",
  964. "language": "hungarian"
  965. }
  966. },
  967. "analyzer": {
  968. "rebuilt_hungarian": {
  969. "tokenizer": "standard",
  970. "filter": [
  971. "lowercase",
  972. "hungarian_stop",
  973. "hungarian_keywords",
  974. "hungarian_stemmer"
  975. ]
  976. }
  977. }
  978. }
  979. }
  980. }
  981. ----------------------------------------------------
  982. // TEST[s/"hungarian_keywords",//]
  983. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
  984. <1> The default stopwords can be overridden with the `stopwords`
  985. or `stopwords_path` parameters.
  986. <2> This filter should be removed unless there are words which should
  987. be excluded from stemming.
  988. [[indonesian-analyzer]]
  989. ===== `indonesian` analyzer
  990. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  991. [source,console]
  992. ----------------------------------------------------
  993. PUT /indonesian_example
  994. {
  995. "settings": {
  996. "analysis": {
  997. "filter": {
  998. "indonesian_stop": {
  999. "type": "stop",
  1000. "stopwords": "_indonesian_" <1>
  1001. },
  1002. "indonesian_keywords": {
  1003. "type": "keyword_marker",
  1004. "keywords": ["contoh"] <2>
  1005. },
  1006. "indonesian_stemmer": {
  1007. "type": "stemmer",
  1008. "language": "indonesian"
  1009. }
  1010. },
  1011. "analyzer": {
  1012. "rebuilt_indonesian": {
  1013. "tokenizer": "standard",
  1014. "filter": [
  1015. "lowercase",
  1016. "indonesian_stop",
  1017. "indonesian_keywords",
  1018. "indonesian_stemmer"
  1019. ]
  1020. }
  1021. }
  1022. }
  1023. }
  1024. }
  1025. ----------------------------------------------------
  1026. // TEST[s/"indonesian_keywords",//]
  1027. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
  1028. <1> The default stopwords can be overridden with the `stopwords`
  1029. or `stopwords_path` parameters.
  1030. <2> This filter should be removed unless there are words which should
  1031. be excluded from stemming.
  1032. [[irish-analyzer]]
  1033. ===== `irish` analyzer
  1034. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1035. [source,console]
  1036. ----------------------------------------------------
  1037. PUT /irish_example
  1038. {
  1039. "settings": {
  1040. "analysis": {
  1041. "filter": {
  1042. "irish_hyphenation": {
  1043. "type": "stop",
  1044. "stopwords": [ "h", "n", "t" ],
  1045. "ignore_case": true
  1046. },
  1047. "irish_elision": {
  1048. "type": "elision",
  1049. "articles": [ "d", "m", "b" ],
  1050. "articles_case": true
  1051. },
  1052. "irish_stop": {
  1053. "type": "stop",
  1054. "stopwords": "_irish_" <1>
  1055. },
  1056. "irish_lowercase": {
  1057. "type": "lowercase",
  1058. "language": "irish"
  1059. },
  1060. "irish_keywords": {
  1061. "type": "keyword_marker",
  1062. "keywords": ["sampla"] <2>
  1063. },
  1064. "irish_stemmer": {
  1065. "type": "stemmer",
  1066. "language": "irish"
  1067. }
  1068. },
  1069. "analyzer": {
  1070. "rebuilt_irish": {
  1071. "tokenizer": "standard",
  1072. "filter": [
  1073. "irish_hyphenation",
  1074. "irish_elision",
  1075. "irish_lowercase",
  1076. "irish_stop",
  1077. "irish_keywords",
  1078. "irish_stemmer"
  1079. ]
  1080. }
  1081. }
  1082. }
  1083. }
  1084. }
  1085. ----------------------------------------------------
  1086. // TEST[s/"irish_keywords",//]
  1087. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
  1088. <1> The default stopwords can be overridden with the `stopwords`
  1089. or `stopwords_path` parameters.
  1090. <2> This filter should be removed unless there are words which should
  1091. be excluded from stemming.
  1092. [[italian-analyzer]]
  1093. ===== `italian` analyzer
  1094. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1095. [source,console]
  1096. ----------------------------------------------------
  1097. PUT /italian_example
  1098. {
  1099. "settings": {
  1100. "analysis": {
  1101. "filter": {
  1102. "italian_elision": {
  1103. "type": "elision",
  1104. "articles": [
  1105. "c", "l", "all", "dall", "dell",
  1106. "nell", "sull", "coll", "pell",
  1107. "gl", "agl", "dagl", "degl", "negl",
  1108. "sugl", "un", "m", "t", "s", "v", "d"
  1109. ],
  1110. "articles_case": true
  1111. },
  1112. "italian_stop": {
  1113. "type": "stop",
  1114. "stopwords": "_italian_" <1>
  1115. },
  1116. "italian_keywords": {
  1117. "type": "keyword_marker",
  1118. "keywords": ["esempio"] <2>
  1119. },
  1120. "italian_stemmer": {
  1121. "type": "stemmer",
  1122. "language": "light_italian"
  1123. }
  1124. },
  1125. "analyzer": {
  1126. "rebuilt_italian": {
  1127. "tokenizer": "standard",
  1128. "filter": [
  1129. "italian_elision",
  1130. "lowercase",
  1131. "italian_stop",
  1132. "italian_keywords",
  1133. "italian_stemmer"
  1134. ]
  1135. }
  1136. }
  1137. }
  1138. }
  1139. }
  1140. ----------------------------------------------------
  1141. // TEST[s/"italian_keywords",//]
  1142. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
  1143. <1> The default stopwords can be overridden with the `stopwords`
  1144. or `stopwords_path` parameters.
  1145. <2> This filter should be removed unless there are words which should
  1146. be excluded from stemming.
  1147. [[latvian-analyzer]]
  1148. ===== `latvian` analyzer
  1149. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1150. [source,console]
  1151. ----------------------------------------------------
  1152. PUT /latvian_example
  1153. {
  1154. "settings": {
  1155. "analysis": {
  1156. "filter": {
  1157. "latvian_stop": {
  1158. "type": "stop",
  1159. "stopwords": "_latvian_" <1>
  1160. },
  1161. "latvian_keywords": {
  1162. "type": "keyword_marker",
  1163. "keywords": ["piemērs"] <2>
  1164. },
  1165. "latvian_stemmer": {
  1166. "type": "stemmer",
  1167. "language": "latvian"
  1168. }
  1169. },
  1170. "analyzer": {
  1171. "rebuilt_latvian": {
  1172. "tokenizer": "standard",
  1173. "filter": [
  1174. "lowercase",
  1175. "latvian_stop",
  1176. "latvian_keywords",
  1177. "latvian_stemmer"
  1178. ]
  1179. }
  1180. }
  1181. }
  1182. }
  1183. }
  1184. ----------------------------------------------------
  1185. // TEST[s/"latvian_keywords",//]
  1186. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
  1187. <1> The default stopwords can be overridden with the `stopwords`
  1188. or `stopwords_path` parameters.
  1189. <2> This filter should be removed unless there are words which should
  1190. be excluded from stemming.
  1191. [[lithuanian-analyzer]]
  1192. ===== `lithuanian` analyzer
  1193. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1194. [source,console]
  1195. ----------------------------------------------------
  1196. PUT /lithuanian_example
  1197. {
  1198. "settings": {
  1199. "analysis": {
  1200. "filter": {
  1201. "lithuanian_stop": {
  1202. "type": "stop",
  1203. "stopwords": "_lithuanian_" <1>
  1204. },
  1205. "lithuanian_keywords": {
  1206. "type": "keyword_marker",
  1207. "keywords": ["pavyzdys"] <2>
  1208. },
  1209. "lithuanian_stemmer": {
  1210. "type": "stemmer",
  1211. "language": "lithuanian"
  1212. }
  1213. },
  1214. "analyzer": {
  1215. "rebuilt_lithuanian": {
  1216. "tokenizer": "standard",
  1217. "filter": [
  1218. "lowercase",
  1219. "lithuanian_stop",
  1220. "lithuanian_keywords",
  1221. "lithuanian_stemmer"
  1222. ]
  1223. }
  1224. }
  1225. }
  1226. }
  1227. }
  1228. ----------------------------------------------------
  1229. // TEST[s/"lithuanian_keywords",//]
  1230. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
  1231. <1> The default stopwords can be overridden with the `stopwords`
  1232. or `stopwords_path` parameters.
  1233. <2> This filter should be removed unless there are words which should
  1234. be excluded from stemming.
  1235. [[norwegian-analyzer]]
  1236. ===== `norwegian` analyzer
  1237. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1238. [source,console]
  1239. ----------------------------------------------------
  1240. PUT /norwegian_example
  1241. {
  1242. "settings": {
  1243. "analysis": {
  1244. "filter": {
  1245. "norwegian_stop": {
  1246. "type": "stop",
  1247. "stopwords": "_norwegian_" <1>
  1248. },
  1249. "norwegian_keywords": {
  1250. "type": "keyword_marker",
  1251. "keywords": ["eksempel"] <2>
  1252. },
  1253. "norwegian_stemmer": {
  1254. "type": "stemmer",
  1255. "language": "norwegian"
  1256. }
  1257. },
  1258. "analyzer": {
  1259. "rebuilt_norwegian": {
  1260. "tokenizer": "standard",
  1261. "filter": [
  1262. "lowercase",
  1263. "norwegian_stop",
  1264. "norwegian_keywords",
  1265. "norwegian_stemmer"
  1266. ]
  1267. }
  1268. }
  1269. }
  1270. }
  1271. }
  1272. ----------------------------------------------------
  1273. // TEST[s/"norwegian_keywords",//]
  1274. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
  1275. <1> The default stopwords can be overridden with the `stopwords`
  1276. or `stopwords_path` parameters.
  1277. <2> This filter should be removed unless there are words which should
  1278. be excluded from stemming.
  1279. [[persian-analyzer]]
  1280. ===== `persian` analyzer
  1281. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1282. [source,console]
  1283. ----------------------------------------------------
  1284. PUT /persian_example
  1285. {
  1286. "settings": {
  1287. "analysis": {
  1288. "char_filter": {
  1289. "zero_width_spaces": {
  1290. "type": "mapping",
  1291. "mappings": [ "\\u200C=>\\u0020"] <1>
  1292. }
  1293. },
  1294. "filter": {
  1295. "persian_stop": {
  1296. "type": "stop",
  1297. "stopwords": "_persian_" <2>
  1298. }
  1299. },
  1300. "analyzer": {
  1301. "rebuilt_persian": {
  1302. "tokenizer": "standard",
  1303. "char_filter": [ "zero_width_spaces" ],
  1304. "filter": [
  1305. "lowercase",
  1306. "decimal_digit",
  1307. "arabic_normalization",
  1308. "persian_normalization",
  1309. "persian_stop"
  1310. ]
  1311. }
  1312. }
  1313. }
  1314. }
  1315. }
  1316. ----------------------------------------------------
  1317. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
  1318. <1> Replaces zero-width non-joiners with an ASCII space.
  1319. <2> The default stopwords can be overridden with the `stopwords`
  1320. or `stopwords_path` parameters.
  1321. [[portuguese-analyzer]]
  1322. ===== `portuguese` analyzer
  1323. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1324. [source,console]
  1325. ----------------------------------------------------
  1326. PUT /portuguese_example
  1327. {
  1328. "settings": {
  1329. "analysis": {
  1330. "filter": {
  1331. "portuguese_stop": {
  1332. "type": "stop",
  1333. "stopwords": "_portuguese_" <1>
  1334. },
  1335. "portuguese_keywords": {
  1336. "type": "keyword_marker",
  1337. "keywords": ["exemplo"] <2>
  1338. },
  1339. "portuguese_stemmer": {
  1340. "type": "stemmer",
  1341. "language": "light_portuguese"
  1342. }
  1343. },
  1344. "analyzer": {
  1345. "rebuilt_portuguese": {
  1346. "tokenizer": "standard",
  1347. "filter": [
  1348. "lowercase",
  1349. "portuguese_stop",
  1350. "portuguese_keywords",
  1351. "portuguese_stemmer"
  1352. ]
  1353. }
  1354. }
  1355. }
  1356. }
  1357. }
  1358. ----------------------------------------------------
  1359. // TEST[s/"portuguese_keywords",//]
  1360. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
  1361. <1> The default stopwords can be overridden with the `stopwords`
  1362. or `stopwords_path` parameters.
  1363. <2> This filter should be removed unless there are words which should
  1364. be excluded from stemming.
  1365. [[romanian-analyzer]]
  1366. ===== `romanian` analyzer
  1367. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1368. [source,console]
  1369. ----------------------------------------------------
  1370. PUT /romanian_example
  1371. {
  1372. "settings": {
  1373. "analysis": {
  1374. "filter": {
  1375. "romanian_stop": {
  1376. "type": "stop",
  1377. "stopwords": "_romanian_" <1>
  1378. },
  1379. "romanian_keywords": {
  1380. "type": "keyword_marker",
  1381. "keywords": ["exemplu"] <2>
  1382. },
  1383. "romanian_stemmer": {
  1384. "type": "stemmer",
  1385. "language": "romanian"
  1386. }
  1387. },
  1388. "analyzer": {
  1389. "rebuilt_romanian": {
  1390. "tokenizer": "standard",
  1391. "filter": [
  1392. "lowercase",
  1393. "romanian_stop",
  1394. "romanian_keywords",
  1395. "romanian_stemmer"
  1396. ]
  1397. }
  1398. }
  1399. }
  1400. }
  1401. }
  1402. ----------------------------------------------------
  1403. // TEST[s/"romanian_keywords",//]
  1404. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
  1405. <1> The default stopwords can be overridden with the `stopwords`
  1406. or `stopwords_path` parameters.
  1407. <2> This filter should be removed unless there are words which should
  1408. be excluded from stemming.
  1409. [[russian-analyzer]]
  1410. ===== `russian` analyzer
  1411. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1412. [source,console]
  1413. ----------------------------------------------------
  1414. PUT /russian_example
  1415. {
  1416. "settings": {
  1417. "analysis": {
  1418. "filter": {
  1419. "russian_stop": {
  1420. "type": "stop",
  1421. "stopwords": "_russian_" <1>
  1422. },
  1423. "russian_keywords": {
  1424. "type": "keyword_marker",
  1425. "keywords": ["пример"] <2>
  1426. },
  1427. "russian_stemmer": {
  1428. "type": "stemmer",
  1429. "language": "russian"
  1430. }
  1431. },
  1432. "analyzer": {
  1433. "rebuilt_russian": {
  1434. "tokenizer": "standard",
  1435. "filter": [
  1436. "lowercase",
  1437. "russian_stop",
  1438. "russian_keywords",
  1439. "russian_stemmer"
  1440. ]
  1441. }
  1442. }
  1443. }
  1444. }
  1445. }
  1446. ----------------------------------------------------
  1447. // TEST[s/"russian_keywords",//]
  1448. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
  1449. <1> The default stopwords can be overridden with the `stopwords`
  1450. or `stopwords_path` parameters.
  1451. <2> This filter should be removed unless there are words which should
  1452. be excluded from stemming.
  1453. [[sorani-analyzer]]
  1454. ===== `sorani` analyzer
  1455. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1456. [source,console]
  1457. ----------------------------------------------------
  1458. PUT /sorani_example
  1459. {
  1460. "settings": {
  1461. "analysis": {
  1462. "filter": {
  1463. "sorani_stop": {
  1464. "type": "stop",
  1465. "stopwords": "_sorani_" <1>
  1466. },
  1467. "sorani_keywords": {
  1468. "type": "keyword_marker",
  1469. "keywords": ["mînak"] <2>
  1470. },
  1471. "sorani_stemmer": {
  1472. "type": "stemmer",
  1473. "language": "sorani"
  1474. }
  1475. },
  1476. "analyzer": {
  1477. "rebuilt_sorani": {
  1478. "tokenizer": "standard",
  1479. "filter": [
  1480. "sorani_normalization",
  1481. "lowercase",
  1482. "decimal_digit",
  1483. "sorani_stop",
  1484. "sorani_keywords",
  1485. "sorani_stemmer"
  1486. ]
  1487. }
  1488. }
  1489. }
  1490. }
  1491. }
  1492. ----------------------------------------------------
  1493. // TEST[s/"sorani_keywords",//]
  1494. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
  1495. <1> The default stopwords can be overridden with the `stopwords`
  1496. or `stopwords_path` parameters.
  1497. <2> This filter should be removed unless there are words which should
  1498. be excluded from stemming.
  1499. [[spanish-analyzer]]
  1500. ===== `spanish` analyzer
  1501. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1502. [source,console]
  1503. ----------------------------------------------------
  1504. PUT /spanish_example
  1505. {
  1506. "settings": {
  1507. "analysis": {
  1508. "filter": {
  1509. "spanish_stop": {
  1510. "type": "stop",
  1511. "stopwords": "_spanish_" <1>
  1512. },
  1513. "spanish_keywords": {
  1514. "type": "keyword_marker",
  1515. "keywords": ["ejemplo"] <2>
  1516. },
  1517. "spanish_stemmer": {
  1518. "type": "stemmer",
  1519. "language": "light_spanish"
  1520. }
  1521. },
  1522. "analyzer": {
  1523. "rebuilt_spanish": {
  1524. "tokenizer": "standard",
  1525. "filter": [
  1526. "lowercase",
  1527. "spanish_stop",
  1528. "spanish_keywords",
  1529. "spanish_stemmer"
  1530. ]
  1531. }
  1532. }
  1533. }
  1534. }
  1535. }
  1536. ----------------------------------------------------
  1537. // TEST[s/"spanish_keywords",//]
  1538. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
  1539. <1> The default stopwords can be overridden with the `stopwords`
  1540. or `stopwords_path` parameters.
  1541. <2> This filter should be removed unless there are words which should
  1542. be excluded from stemming.
  1543. [[swedish-analyzer]]
  1544. ===== `swedish` analyzer
  1545. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1546. [source,console]
  1547. ----------------------------------------------------
  1548. PUT /swedish_example
  1549. {
  1550. "settings": {
  1551. "analysis": {
  1552. "filter": {
  1553. "swedish_stop": {
  1554. "type": "stop",
  1555. "stopwords": "_swedish_" <1>
  1556. },
  1557. "swedish_keywords": {
  1558. "type": "keyword_marker",
  1559. "keywords": ["exempel"] <2>
  1560. },
  1561. "swedish_stemmer": {
  1562. "type": "stemmer",
  1563. "language": "swedish"
  1564. }
  1565. },
  1566. "analyzer": {
  1567. "rebuilt_swedish": {
  1568. "tokenizer": "standard",
  1569. "filter": [
  1570. "lowercase",
  1571. "swedish_stop",
  1572. "swedish_keywords",
  1573. "swedish_stemmer"
  1574. ]
  1575. }
  1576. }
  1577. }
  1578. }
  1579. }
  1580. ----------------------------------------------------
  1581. // TEST[s/"swedish_keywords",//]
  1582. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
  1583. <1> The default stopwords can be overridden with the `stopwords`
  1584. or `stopwords_path` parameters.
  1585. <2> This filter should be removed unless there are words which should
  1586. be excluded from stemming.
  1587. [[turkish-analyzer]]
  1588. ===== `turkish` analyzer
  1589. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1590. [source,console]
  1591. ----------------------------------------------------
  1592. PUT /turkish_example
  1593. {
  1594. "settings": {
  1595. "analysis": {
  1596. "filter": {
  1597. "turkish_stop": {
  1598. "type": "stop",
  1599. "stopwords": "_turkish_" <1>
  1600. },
  1601. "turkish_lowercase": {
  1602. "type": "lowercase",
  1603. "language": "turkish"
  1604. },
  1605. "turkish_keywords": {
  1606. "type": "keyword_marker",
  1607. "keywords": ["örnek"] <2>
  1608. },
  1609. "turkish_stemmer": {
  1610. "type": "stemmer",
  1611. "language": "turkish"
  1612. }
  1613. },
  1614. "analyzer": {
  1615. "rebuilt_turkish": {
  1616. "tokenizer": "standard",
  1617. "filter": [
  1618. "apostrophe",
  1619. "turkish_lowercase",
  1620. "turkish_stop",
  1621. "turkish_keywords",
  1622. "turkish_stemmer"
  1623. ]
  1624. }
  1625. }
  1626. }
  1627. }
  1628. }
  1629. ----------------------------------------------------
  1630. // TEST[s/"turkish_keywords",//]
  1631. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
  1632. <1> The default stopwords can be overridden with the `stopwords`
  1633. or `stopwords_path` parameters.
  1634. <2> This filter should be removed unless there are words which should
  1635. be excluded from stemming.
  1636. [[thai-analyzer]]
  1637. ===== `thai` analyzer
  1638. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1639. [source,console]
  1640. ----------------------------------------------------
  1641. PUT /thai_example
  1642. {
  1643. "settings": {
  1644. "analysis": {
  1645. "filter": {
  1646. "thai_stop": {
  1647. "type": "stop",
  1648. "stopwords": "_thai_" <1>
  1649. }
  1650. },
  1651. "analyzer": {
  1652. "rebuilt_thai": {
  1653. "tokenizer": "thai",
  1654. "filter": [
  1655. "lowercase",
  1656. "decimal_digit",
  1657. "thai_stop"
  1658. ]
  1659. }
  1660. }
  1661. }
  1662. }
  1663. }
  1664. ----------------------------------------------------
1666. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
  1667. <1> The default stopwords can be overridden with the `stopwords`
  1668. or `stopwords_path` parameters.