// lang-analyzer.asciidoc
  1. [[analysis-lang-analyzer]]
  2. === Language analyzers
  3. ++++
  4. <titleabbrev>Language</titleabbrev>
  5. ++++
  6. A set of analyzers aimed at analyzing specific language text. The
  7. following types are supported:
  8. <<arabic-analyzer,`arabic`>>,
  9. <<armenian-analyzer,`armenian`>>,
  10. <<basque-analyzer,`basque`>>,
  11. <<bengali-analyzer,`bengali`>>,
  12. <<brazilian-analyzer,`brazilian`>>,
  13. <<bulgarian-analyzer,`bulgarian`>>,
  14. <<catalan-analyzer,`catalan`>>,
  15. <<cjk-analyzer,`cjk`>>,
  16. <<czech-analyzer,`czech`>>,
  17. <<danish-analyzer,`danish`>>,
  18. <<dutch-analyzer,`dutch`>>,
  19. <<english-analyzer,`english`>>,
  20. <<estonian-analyzer,`estonian`>>,
  21. <<finnish-analyzer,`finnish`>>,
  22. <<french-analyzer,`french`>>,
  23. <<galician-analyzer,`galician`>>,
  24. <<german-analyzer,`german`>>,
  25. <<greek-analyzer,`greek`>>,
  26. <<hindi-analyzer,`hindi`>>,
  27. <<hungarian-analyzer,`hungarian`>>,
  28. <<indonesian-analyzer,`indonesian`>>,
  29. <<irish-analyzer,`irish`>>,
  30. <<italian-analyzer,`italian`>>,
  31. <<latvian-analyzer,`latvian`>>,
  32. <<lithuanian-analyzer,`lithuanian`>>,
  33. <<norwegian-analyzer,`norwegian`>>,
  34. <<persian-analyzer,`persian`>>,
  35. <<portuguese-analyzer,`portuguese`>>,
  36. <<romanian-analyzer,`romanian`>>,
  37. <<russian-analyzer,`russian`>>,
  38. <<sorani-analyzer,`sorani`>>,
  39. <<spanish-analyzer,`spanish`>>,
  40. <<swedish-analyzer,`swedish`>>,
  41. <<turkish-analyzer,`turkish`>>,
  42. <<thai-analyzer,`thai`>>.
  43. ==== Configuring language analyzers
  44. ===== Stopwords
  45. All analyzers support setting custom `stopwords`, either inline in
  46. the configuration or in an external stopwords file referenced via
  47. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  48. more details.
  49. [[_excluding_words_from_stemming]]
  50. ===== Excluding words from stemming
  51. The `stem_exclusion` parameter allows you to specify an array
  52. of lowercase words that should not be stemmed. Internally, this
  53. functionality is implemented by adding the
  54. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  55. with the `keywords` set to the value of the `stem_exclusion` parameter.
  56. The following analyzers support setting a custom `stem_exclusion` list:
  57. `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`,
  58. `dutch`, `english`, `finnish`, `french`, `galician`,
  59. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  60. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  61. `spanish`, `swedish`, `turkish`.
  62. ==== Reimplementing language analyzers
  63. The built-in language analyzers can be reimplemented as `custom` analyzers
  64. (as described below) in order to customize their behaviour.
  65. NOTE: If you do not intend to exclude words from being stemmed (the
  66. equivalent of the `stem_exclusion` parameter above), then you should remove
  67. the `keyword_marker` token filter from the custom analyzer configuration.
  68. [[arabic-analyzer]]
  69. ===== `arabic` analyzer
  70. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  71. [source,console]
  72. ----------------------------------------------------
  73. PUT /arabic_example
  74. {
  75. "settings": {
  76. "analysis": {
  77. "filter": {
  78. "arabic_stop": {
  79. "type": "stop",
  80. "stopwords": "_arabic_" <1>
  81. },
  82. "arabic_keywords": {
  83. "type": "keyword_marker",
  84. "keywords": ["مثال"] <2>
  85. },
  86. "arabic_stemmer": {
  87. "type": "stemmer",
  88. "language": "arabic"
  89. }
  90. },
  91. "analyzer": {
  92. "rebuilt_arabic": {
  93. "tokenizer": "standard",
  94. "filter": [
  95. "lowercase",
  96. "decimal_digit",
  97. "arabic_stop",
  98. "arabic_normalization",
  99. "arabic_keywords",
  100. "arabic_stemmer"
  101. ]
  102. }
  103. }
  104. }
  105. }
  106. }
  107. ----------------------------------------------------
  108. // TEST[s/"arabic_keywords",//]
  109. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
  110. <1> The default stopwords can be overridden with the `stopwords`
  111. or `stopwords_path` parameters.
  112. <2> This filter should be removed unless there are words which should
  113. be excluded from stemming.
  114. [[armenian-analyzer]]
  115. ===== `armenian` analyzer
  116. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  117. [source,console]
  118. ----------------------------------------------------
  119. PUT /armenian_example
  120. {
  121. "settings": {
  122. "analysis": {
  123. "filter": {
  124. "armenian_stop": {
  125. "type": "stop",
  126. "stopwords": "_armenian_" <1>
  127. },
  128. "armenian_keywords": {
  129. "type": "keyword_marker",
  130. "keywords": ["օրինակ"] <2>
  131. },
  132. "armenian_stemmer": {
  133. "type": "stemmer",
  134. "language": "armenian"
  135. }
  136. },
  137. "analyzer": {
  138. "rebuilt_armenian": {
  139. "tokenizer": "standard",
  140. "filter": [
  141. "lowercase",
  142. "armenian_stop",
  143. "armenian_keywords",
  144. "armenian_stemmer"
  145. ]
  146. }
  147. }
  148. }
  149. }
  150. }
  151. ----------------------------------------------------
  152. // TEST[s/"armenian_keywords",//]
  153. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
  154. <1> The default stopwords can be overridden with the `stopwords`
  155. or `stopwords_path` parameters.
  156. <2> This filter should be removed unless there are words which should
  157. be excluded from stemming.
  158. [[basque-analyzer]]
  159. ===== `basque` analyzer
  160. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  161. [source,console]
  162. ----------------------------------------------------
  163. PUT /basque_example
  164. {
  165. "settings": {
  166. "analysis": {
  167. "filter": {
  168. "basque_stop": {
  169. "type": "stop",
  170. "stopwords": "_basque_" <1>
  171. },
  172. "basque_keywords": {
  173. "type": "keyword_marker",
  174. "keywords": ["Adibidez"] <2>
  175. },
  176. "basque_stemmer": {
  177. "type": "stemmer",
  178. "language": "basque"
  179. }
  180. },
  181. "analyzer": {
  182. "rebuilt_basque": {
  183. "tokenizer": "standard",
  184. "filter": [
  185. "lowercase",
  186. "basque_stop",
  187. "basque_keywords",
  188. "basque_stemmer"
  189. ]
  190. }
  191. }
  192. }
  193. }
  194. }
  195. ----------------------------------------------------
  196. // TEST[s/"basque_keywords",//]
  197. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
  198. <1> The default stopwords can be overridden with the `stopwords`
  199. or `stopwords_path` parameters.
  200. <2> This filter should be removed unless there are words which should
  201. be excluded from stemming.
  202. [[bengali-analyzer]]
  203. ===== `bengali` analyzer
  204. The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
  205. [source,console]
  206. ----------------------------------------------------
  207. PUT /bengali_example
  208. {
  209. "settings": {
  210. "analysis": {
  211. "filter": {
  212. "bengali_stop": {
  213. "type": "stop",
  214. "stopwords": "_bengali_" <1>
  215. },
  216. "bengali_keywords": {
  217. "type": "keyword_marker",
  218. "keywords": ["উদাহরণ"] <2>
  219. },
  220. "bengali_stemmer": {
  221. "type": "stemmer",
  222. "language": "bengali"
  223. }
  224. },
  225. "analyzer": {
  226. "rebuilt_bengali": {
  227. "tokenizer": "standard",
  228. "filter": [
  229. "lowercase",
  230. "decimal_digit",
  231. "bengali_keywords",
  232. "indic_normalization",
  233. "bengali_normalization",
  234. "bengali_stop",
  235. "bengali_stemmer"
  236. ]
  237. }
  238. }
  239. }
  240. }
  241. }
  242. ----------------------------------------------------
  243. // TEST[s/"bengali_keywords",//]
  244. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
  245. <1> The default stopwords can be overridden with the `stopwords`
  246. or `stopwords_path` parameters.
  247. <2> This filter should be removed unless there are words which should
  248. be excluded from stemming.
  249. [[brazilian-analyzer]]
  250. ===== `brazilian` analyzer
  251. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  252. [source,console]
  253. ----------------------------------------------------
  254. PUT /brazilian_example
  255. {
  256. "settings": {
  257. "analysis": {
  258. "filter": {
  259. "brazilian_stop": {
  260. "type": "stop",
  261. "stopwords": "_brazilian_" <1>
  262. },
  263. "brazilian_keywords": {
  264. "type": "keyword_marker",
  265. "keywords": ["exemplo"] <2>
  266. },
  267. "brazilian_stemmer": {
  268. "type": "stemmer",
  269. "language": "brazilian"
  270. }
  271. },
  272. "analyzer": {
  273. "rebuilt_brazilian": {
  274. "tokenizer": "standard",
  275. "filter": [
  276. "lowercase",
  277. "brazilian_stop",
  278. "brazilian_keywords",
  279. "brazilian_stemmer"
  280. ]
  281. }
  282. }
  283. }
  284. }
  285. }
  286. ----------------------------------------------------
  287. // TEST[s/"brazilian_keywords",//]
  288. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
  289. <1> The default stopwords can be overridden with the `stopwords`
  290. or `stopwords_path` parameters.
  291. <2> This filter should be removed unless there are words which should
  292. be excluded from stemming.
  293. [[bulgarian-analyzer]]
  294. ===== `bulgarian` analyzer
  295. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  296. [source,console]
  297. ----------------------------------------------------
  298. PUT /bulgarian_example
  299. {
  300. "settings": {
  301. "analysis": {
  302. "filter": {
  303. "bulgarian_stop": {
  304. "type": "stop",
  305. "stopwords": "_bulgarian_" <1>
  306. },
  307. "bulgarian_keywords": {
  308. "type": "keyword_marker",
  309. "keywords": ["пример"] <2>
  310. },
  311. "bulgarian_stemmer": {
  312. "type": "stemmer",
  313. "language": "bulgarian"
  314. }
  315. },
  316. "analyzer": {
  317. "rebuilt_bulgarian": {
  318. "tokenizer": "standard",
  319. "filter": [
  320. "lowercase",
  321. "bulgarian_stop",
  322. "bulgarian_keywords",
  323. "bulgarian_stemmer"
  324. ]
  325. }
  326. }
  327. }
  328. }
  329. }
  330. ----------------------------------------------------
  331. // TEST[s/"bulgarian_keywords",//]
  332. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
  333. <1> The default stopwords can be overridden with the `stopwords`
  334. or `stopwords_path` parameters.
  335. <2> This filter should be removed unless there are words which should
  336. be excluded from stemming.
  337. [[catalan-analyzer]]
  338. ===== `catalan` analyzer
  339. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  340. [source,console]
  341. ----------------------------------------------------
  342. PUT /catalan_example
  343. {
  344. "settings": {
  345. "analysis": {
  346. "filter": {
  347. "catalan_elision": {
  348. "type": "elision",
  349. "articles": [ "d", "l", "m", "n", "s", "t"],
  350. "articles_case": true
  351. },
  352. "catalan_stop": {
  353. "type": "stop",
  354. "stopwords": "_catalan_" <1>
  355. },
  356. "catalan_keywords": {
  357. "type": "keyword_marker",
  358. "keywords": ["example"] <2>
  359. },
  360. "catalan_stemmer": {
  361. "type": "stemmer",
  362. "language": "catalan"
  363. }
  364. },
  365. "analyzer": {
  366. "rebuilt_catalan": {
  367. "tokenizer": "standard",
  368. "filter": [
  369. "catalan_elision",
  370. "lowercase",
  371. "catalan_stop",
  372. "catalan_keywords",
  373. "catalan_stemmer"
  374. ]
  375. }
  376. }
  377. }
  378. }
  379. }
  380. ----------------------------------------------------
  381. // TEST[s/"catalan_keywords",//]
  382. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
  383. <1> The default stopwords can be overridden with the `stopwords`
  384. or `stopwords_path` parameters.
  385. <2> This filter should be removed unless there are words which should
  386. be excluded from stemming.
  387. [[cjk-analyzer]]
  388. ===== `cjk` analyzer
  389. NOTE: You may find that `icu_analyzer` in the ICU analysis plugin works better
  390. for CJK text than the `cjk` analyzer. Experiment with your text and queries.
  391. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  392. [source,console]
  393. ----------------------------------------------------
  394. PUT /cjk_example
  395. {
  396. "settings": {
  397. "analysis": {
  398. "filter": {
  399. "english_stop": {
  400. "type": "stop",
  401. "stopwords": [ <1>
  402. "a", "and", "are", "as", "at", "be", "but", "by", "for",
  403. "if", "in", "into", "is", "it", "no", "not", "of", "on",
  404. "or", "s", "such", "t", "that", "the", "their", "then",
  405. "there", "these", "they", "this", "to", "was", "will",
  406. "with", "www"
  407. ]
  408. }
  409. },
  410. "analyzer": {
  411. "rebuilt_cjk": {
  412. "tokenizer": "standard",
  413. "filter": [
  414. "cjk_width",
  415. "lowercase",
  416. "cjk_bigram",
  417. "english_stop"
  418. ]
  419. }
  420. }
  421. }
  422. }
  423. }
  424. ----------------------------------------------------
  425. // TEST[s/"cjk_keywords",//]
  426. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
  427. <1> The default stopwords can be overridden with the `stopwords`
  428. or `stopwords_path` parameters. The default stop words are
  429. *almost* the same as the `_english_` set, but not exactly
  430. the same.
  431. [[czech-analyzer]]
  432. ===== `czech` analyzer
  433. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  434. [source,console]
  435. ----------------------------------------------------
  436. PUT /czech_example
  437. {
  438. "settings": {
  439. "analysis": {
  440. "filter": {
  441. "czech_stop": {
  442. "type": "stop",
  443. "stopwords": "_czech_" <1>
  444. },
  445. "czech_keywords": {
  446. "type": "keyword_marker",
  447. "keywords": ["příklad"] <2>
  448. },
  449. "czech_stemmer": {
  450. "type": "stemmer",
  451. "language": "czech"
  452. }
  453. },
  454. "analyzer": {
  455. "rebuilt_czech": {
  456. "tokenizer": "standard",
  457. "filter": [
  458. "lowercase",
  459. "czech_stop",
  460. "czech_keywords",
  461. "czech_stemmer"
  462. ]
  463. }
  464. }
  465. }
  466. }
  467. }
  468. ----------------------------------------------------
  469. // TEST[s/"czech_keywords",//]
  470. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
  471. <1> The default stopwords can be overridden with the `stopwords`
  472. or `stopwords_path` parameters.
  473. <2> This filter should be removed unless there are words which should
  474. be excluded from stemming.
  475. [[danish-analyzer]]
  476. ===== `danish` analyzer
  477. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  478. [source,console]
  479. ----------------------------------------------------
  480. PUT /danish_example
  481. {
  482. "settings": {
  483. "analysis": {
  484. "filter": {
  485. "danish_stop": {
  486. "type": "stop",
  487. "stopwords": "_danish_" <1>
  488. },
  489. "danish_keywords": {
  490. "type": "keyword_marker",
  491. "keywords": ["eksempel"] <2>
  492. },
  493. "danish_stemmer": {
  494. "type": "stemmer",
  495. "language": "danish"
  496. }
  497. },
  498. "analyzer": {
  499. "rebuilt_danish": {
  500. "tokenizer": "standard",
  501. "filter": [
  502. "lowercase",
  503. "danish_stop",
  504. "danish_keywords",
  505. "danish_stemmer"
  506. ]
  507. }
  508. }
  509. }
  510. }
  511. }
  512. ----------------------------------------------------
  513. // TEST[s/"danish_keywords",//]
  514. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
  515. <1> The default stopwords can be overridden with the `stopwords`
  516. or `stopwords_path` parameters.
  517. <2> This filter should be removed unless there are words which should
  518. be excluded from stemming.
  519. [[dutch-analyzer]]
  520. ===== `dutch` analyzer
  521. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  522. [source,console]
  523. ----------------------------------------------------
  524. PUT /dutch_example
  525. {
  526. "settings": {
  527. "analysis": {
  528. "filter": {
  529. "dutch_stop": {
  530. "type": "stop",
  531. "stopwords": "_dutch_" <1>
  532. },
  533. "dutch_keywords": {
  534. "type": "keyword_marker",
  535. "keywords": ["voorbeeld"] <2>
  536. },
  537. "dutch_stemmer": {
  538. "type": "stemmer",
  539. "language": "dutch"
  540. },
  541. "dutch_override": {
  542. "type": "stemmer_override",
  543. "rules": [
  544. "fiets=>fiets",
  545. "bromfiets=>bromfiets",
  546. "ei=>eier",
  547. "kind=>kinder"
  548. ]
  549. }
  550. },
  551. "analyzer": {
  552. "rebuilt_dutch": {
  553. "tokenizer": "standard",
  554. "filter": [
  555. "lowercase",
  556. "dutch_stop",
  557. "dutch_keywords",
  558. "dutch_override",
  559. "dutch_stemmer"
  560. ]
  561. }
  562. }
  563. }
  564. }
  565. }
  566. ----------------------------------------------------
  567. // TEST[s/"dutch_keywords",//]
  568. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
  569. <1> The default stopwords can be overridden with the `stopwords`
  570. or `stopwords_path` parameters.
  571. <2> This filter should be removed unless there are words which should
  572. be excluded from stemming.
  573. [[english-analyzer]]
  574. ===== `english` analyzer
  575. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  576. [source,console]
  577. ----------------------------------------------------
  578. PUT /english_example
  579. {
  580. "settings": {
  581. "analysis": {
  582. "filter": {
  583. "english_stop": {
  584. "type": "stop",
  585. "stopwords": "_english_" <1>
  586. },
  587. "english_keywords": {
  588. "type": "keyword_marker",
  589. "keywords": ["example"] <2>
  590. },
  591. "english_stemmer": {
  592. "type": "stemmer",
  593. "language": "english"
  594. },
  595. "english_possessive_stemmer": {
  596. "type": "stemmer",
  597. "language": "possessive_english"
  598. }
  599. },
  600. "analyzer": {
  601. "rebuilt_english": {
  602. "tokenizer": "standard",
  603. "filter": [
  604. "english_possessive_stemmer",
  605. "lowercase",
  606. "english_stop",
  607. "english_keywords",
  608. "english_stemmer"
  609. ]
  610. }
  611. }
  612. }
  613. }
  614. }
  615. ----------------------------------------------------
  616. // TEST[s/"english_keywords",//]
  617. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
  618. <1> The default stopwords can be overridden with the `stopwords`
  619. or `stopwords_path` parameters.
  620. <2> This filter should be removed unless there are words which should
  621. be excluded from stemming.
  622. [[estonian-analyzer]]
  623. ===== `estonian` analyzer
  624. The `estonian` analyzer could be reimplemented as a `custom` analyzer as follows:
  625. [source,console]
  626. ----------------------------------------------------
  627. PUT /estonian_example
  628. {
  629. "settings": {
  630. "analysis": {
  631. "filter": {
  632. "estonian_stop": {
  633. "type": "stop",
  634. "stopwords": "_estonian_" <1>
  635. },
  636. "estonian_keywords": {
  637. "type": "keyword_marker",
  638. "keywords": ["näide"] <2>
  639. },
  640. "estonian_stemmer": {
  641. "type": "stemmer",
  642. "language": "estonian"
  643. }
  644. },
  645. "analyzer": {
  646. "rebuilt_estonian": {
  647. "tokenizer": "standard",
  648. "filter": [
  649. "lowercase",
  650. "estonian_stop",
  651. "estonian_keywords",
  652. "estonian_stemmer"
  653. ]
  654. }
  655. }
  656. }
  657. }
  658. }
  659. ----------------------------------------------------
  660. // TEST[s/"estonian_keywords",//]
  661. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: estonian_example, first: estonian, second: rebuilt_estonian}\nendyaml\n/]
  662. <1> The default stopwords can be overridden with the `stopwords`
  663. or `stopwords_path` parameters.
  664. <2> This filter should be removed unless there are words which should
  665. be excluded from stemming.
  666. [[finnish-analyzer]]
  667. ===== `finnish` analyzer
  668. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  669. [source,console]
  670. ----------------------------------------------------
  671. PUT /finnish_example
  672. {
  673. "settings": {
  674. "analysis": {
  675. "filter": {
  676. "finnish_stop": {
  677. "type": "stop",
  678. "stopwords": "_finnish_" <1>
  679. },
  680. "finnish_keywords": {
  681. "type": "keyword_marker",
  682. "keywords": ["esimerkki"] <2>
  683. },
  684. "finnish_stemmer": {
  685. "type": "stemmer",
  686. "language": "finnish"
  687. }
  688. },
  689. "analyzer": {
  690. "rebuilt_finnish": {
  691. "tokenizer": "standard",
  692. "filter": [
  693. "lowercase",
  694. "finnish_stop",
  695. "finnish_keywords",
  696. "finnish_stemmer"
  697. ]
  698. }
  699. }
  700. }
  701. }
  702. }
  703. ----------------------------------------------------
  704. // TEST[s/"finnish_keywords",//]
  705. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
  706. <1> The default stopwords can be overridden with the `stopwords`
  707. or `stopwords_path` parameters.
  708. <2> This filter should be removed unless there are words which should
  709. be excluded from stemming.
  710. [[french-analyzer]]
  711. ===== `french` analyzer
  712. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  713. [source,console]
  714. ----------------------------------------------------
  715. PUT /french_example
  716. {
  717. "settings": {
  718. "analysis": {
  719. "filter": {
  720. "french_elision": {
  721. "type": "elision",
  722. "articles_case": true,
  723. "articles": [
  724. "l", "m", "t", "qu", "n", "s",
  725. "j", "d", "c", "jusqu", "quoiqu",
  726. "lorsqu", "puisqu"
  727. ]
  728. },
  729. "french_stop": {
  730. "type": "stop",
  731. "stopwords": "_french_" <1>
  732. },
  733. "french_keywords": {
  734. "type": "keyword_marker",
  735. "keywords": ["Example"] <2>
  736. },
  737. "french_stemmer": {
  738. "type": "stemmer",
  739. "language": "light_french"
  740. }
  741. },
  742. "analyzer": {
  743. "rebuilt_french": {
  744. "tokenizer": "standard",
  745. "filter": [
  746. "french_elision",
  747. "lowercase",
  748. "french_stop",
  749. "french_keywords",
  750. "french_stemmer"
  751. ]
  752. }
  753. }
  754. }
  755. }
  756. }
  757. ----------------------------------------------------
  758. // TEST[s/"french_keywords",//]
  759. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
  760. <1> The default stopwords can be overridden with the `stopwords`
  761. or `stopwords_path` parameters.
  762. <2> This filter should be removed unless there are words which should
  763. be excluded from stemming.
  764. [[galician-analyzer]]
  765. ===== `galician` analyzer
  766. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  767. [source,console]
  768. ----------------------------------------------------
  769. PUT /galician_example
  770. {
  771. "settings": {
  772. "analysis": {
  773. "filter": {
  774. "galician_stop": {
  775. "type": "stop",
  776. "stopwords": "_galician_" <1>
  777. },
  778. "galician_keywords": {
  779. "type": "keyword_marker",
  780. "keywords": ["exemplo"] <2>
  781. },
  782. "galician_stemmer": {
  783. "type": "stemmer",
  784. "language": "galician"
  785. }
  786. },
  787. "analyzer": {
  788. "rebuilt_galician": {
  789. "tokenizer": "standard",
  790. "filter": [
  791. "lowercase",
  792. "galician_stop",
  793. "galician_keywords",
  794. "galician_stemmer"
  795. ]
  796. }
  797. }
  798. }
  799. }
  800. }
  801. ----------------------------------------------------
  802. // TEST[s/"galician_keywords",//]
  803. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
  804. <1> The default stopwords can be overridden with the `stopwords`
  805. or `stopwords_path` parameters.
  806. <2> This filter should be removed unless there are words which should
  807. be excluded from stemming.
  808. [[german-analyzer]]
  809. ===== `german` analyzer
  810. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  811. [source,console]
  812. ----------------------------------------------------
  813. PUT /german_example
  814. {
  815. "settings": {
  816. "analysis": {
  817. "filter": {
  818. "german_stop": {
  819. "type": "stop",
  820. "stopwords": "_german_" <1>
  821. },
  822. "german_keywords": {
  823. "type": "keyword_marker",
  824. "keywords": ["Beispiel"] <2>
  825. },
  826. "german_stemmer": {
  827. "type": "stemmer",
  828. "language": "light_german"
  829. }
  830. },
  831. "analyzer": {
  832. "rebuilt_german": {
  833. "tokenizer": "standard",
  834. "filter": [
  835. "lowercase",
  836. "german_stop",
  837. "german_keywords",
  838. "german_normalization",
  839. "german_stemmer"
  840. ]
  841. }
  842. }
  843. }
  844. }
  845. }
  846. ----------------------------------------------------
  847. // TEST[s/"german_keywords",//]
  848. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
  849. <1> The default stopwords can be overridden with the `stopwords`
  850. or `stopwords_path` parameters.
  851. <2> This filter should be removed unless there are words which should
  852. be excluded from stemming.
  853. [[greek-analyzer]]
  854. ===== `greek` analyzer
  855. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  856. [source,console]
  857. ----------------------------------------------------
  858. PUT /greek_example
  859. {
  860. "settings": {
  861. "analysis": {
  862. "filter": {
  863. "greek_stop": {
  864. "type": "stop",
  865. "stopwords": "_greek_" <1>
  866. },
  867. "greek_lowercase": {
  868. "type": "lowercase",
  869. "language": "greek"
  870. },
  871. "greek_keywords": {
  872. "type": "keyword_marker",
  873. "keywords": ["παράδειγμα"] <2>
  874. },
  875. "greek_stemmer": {
  876. "type": "stemmer",
  877. "language": "greek"
  878. }
  879. },
  880. "analyzer": {
  881. "rebuilt_greek": {
  882. "tokenizer": "standard",
  883. "filter": [
  884. "greek_lowercase",
  885. "greek_stop",
  886. "greek_keywords",
  887. "greek_stemmer"
  888. ]
  889. }
  890. }
  891. }
  892. }
  893. }
  894. ----------------------------------------------------
  895. // TEST[s/"greek_keywords",//]
  896. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
  897. <1> The default stopwords can be overridden with the `stopwords`
  898. or `stopwords_path` parameters.
  899. <2> This filter should be removed unless there are words which should
  900. be excluded from stemming.
  901. [[hindi-analyzer]]
  902. ===== `hindi` analyzer
  903. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  904. [source,console]
  905. ----------------------------------------------------
  906. PUT /hindi_example
  907. {
  908. "settings": {
  909. "analysis": {
  910. "filter": {
  911. "hindi_stop": {
  912. "type": "stop",
  913. "stopwords": "_hindi_" <1>
  914. },
  915. "hindi_keywords": {
  916. "type": "keyword_marker",
  917. "keywords": ["उदाहरण"] <2>
  918. },
  919. "hindi_stemmer": {
  920. "type": "stemmer",
  921. "language": "hindi"
  922. }
  923. },
  924. "analyzer": {
  925. "rebuilt_hindi": {
  926. "tokenizer": "standard",
  927. "filter": [
  928. "lowercase",
  929. "decimal_digit",
  930. "hindi_keywords",
  931. "indic_normalization",
  932. "hindi_normalization",
  933. "hindi_stop",
  934. "hindi_stemmer"
  935. ]
  936. }
  937. }
  938. }
  939. }
  940. }
  941. ----------------------------------------------------
  942. // TEST[s/"hindi_keywords",//]
  943. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
  944. <1> The default stopwords can be overridden with the `stopwords`
  945. or `stopwords_path` parameters.
  946. <2> This filter should be removed unless there are words which should
  947. be excluded from stemming.
  948. [[hungarian-analyzer]]
  949. ===== `hungarian` analyzer
  950. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  951. [source,console]
  952. ----------------------------------------------------
  953. PUT /hungarian_example
  954. {
  955. "settings": {
  956. "analysis": {
  957. "filter": {
  958. "hungarian_stop": {
  959. "type": "stop",
  960. "stopwords": "_hungarian_" <1>
  961. },
  962. "hungarian_keywords": {
  963. "type": "keyword_marker",
  964. "keywords": ["példa"] <2>
  965. },
  966. "hungarian_stemmer": {
  967. "type": "stemmer",
  968. "language": "hungarian"
  969. }
  970. },
  971. "analyzer": {
  972. "rebuilt_hungarian": {
  973. "tokenizer": "standard",
  974. "filter": [
  975. "lowercase",
  976. "hungarian_stop",
  977. "hungarian_keywords",
  978. "hungarian_stemmer"
  979. ]
  980. }
  981. }
  982. }
  983. }
  984. }
  985. ----------------------------------------------------
  986. // TEST[s/"hungarian_keywords",//]
  987. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
  988. <1> The default stopwords can be overridden with the `stopwords`
  989. or `stopwords_path` parameters.
  990. <2> This filter should be removed unless there are words which should
  991. be excluded from stemming.
  992. [[indonesian-analyzer]]
  993. ===== `indonesian` analyzer
  994. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  995. [source,console]
  996. ----------------------------------------------------
  997. PUT /indonesian_example
  998. {
  999. "settings": {
  1000. "analysis": {
  1001. "filter": {
  1002. "indonesian_stop": {
  1003. "type": "stop",
  1004. "stopwords": "_indonesian_" <1>
  1005. },
  1006. "indonesian_keywords": {
  1007. "type": "keyword_marker",
  1008. "keywords": ["contoh"] <2>
  1009. },
  1010. "indonesian_stemmer": {
  1011. "type": "stemmer",
  1012. "language": "indonesian"
  1013. }
  1014. },
  1015. "analyzer": {
  1016. "rebuilt_indonesian": {
  1017. "tokenizer": "standard",
  1018. "filter": [
  1019. "lowercase",
  1020. "indonesian_stop",
  1021. "indonesian_keywords",
  1022. "indonesian_stemmer"
  1023. ]
  1024. }
  1025. }
  1026. }
  1027. }
  1028. }
  1029. ----------------------------------------------------
  1030. // TEST[s/"indonesian_keywords",//]
  1031. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
  1032. <1> The default stopwords can be overridden with the `stopwords`
  1033. or `stopwords_path` parameters.
  1034. <2> This filter should be removed unless there are words which should
  1035. be excluded from stemming.
  1036. [[irish-analyzer]]
  1037. ===== `irish` analyzer
  1038. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1039. [source,console]
  1040. ----------------------------------------------------
  1041. PUT /irish_example
  1042. {
  1043. "settings": {
  1044. "analysis": {
  1045. "filter": {
  1046. "irish_hyphenation": {
  1047. "type": "stop",
  1048. "stopwords": [ "h", "n", "t" ],
  1049. "ignore_case": true
  1050. },
  1051. "irish_elision": {
  1052. "type": "elision",
  1053. "articles": [ "d", "m", "b" ],
  1054. "articles_case": true
  1055. },
  1056. "irish_stop": {
  1057. "type": "stop",
  1058. "stopwords": "_irish_" <1>
  1059. },
  1060. "irish_lowercase": {
  1061. "type": "lowercase",
  1062. "language": "irish"
  1063. },
  1064. "irish_keywords": {
  1065. "type": "keyword_marker",
  1066. "keywords": ["sampla"] <2>
  1067. },
  1068. "irish_stemmer": {
  1069. "type": "stemmer",
  1070. "language": "irish"
  1071. }
  1072. },
  1073. "analyzer": {
  1074. "rebuilt_irish": {
  1075. "tokenizer": "standard",
  1076. "filter": [
  1077. "irish_hyphenation",
  1078. "irish_elision",
  1079. "irish_lowercase",
  1080. "irish_stop",
  1081. "irish_keywords",
  1082. "irish_stemmer"
  1083. ]
  1084. }
  1085. }
  1086. }
  1087. }
  1088. }
  1089. ----------------------------------------------------
  1090. // TEST[s/"irish_keywords",//]
  1091. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
  1092. <1> The default stopwords can be overridden with the `stopwords`
  1093. or `stopwords_path` parameters.
  1094. <2> This filter should be removed unless there are words which should
  1095. be excluded from stemming.
  1096. [[italian-analyzer]]
  1097. ===== `italian` analyzer
  1098. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1099. [source,console]
  1100. ----------------------------------------------------
  1101. PUT /italian_example
  1102. {
  1103. "settings": {
  1104. "analysis": {
  1105. "filter": {
  1106. "italian_elision": {
  1107. "type": "elision",
  1108. "articles": [
  1109. "c", "l", "all", "dall", "dell",
  1110. "nell", "sull", "coll", "pell",
  1111. "gl", "agl", "dagl", "degl", "negl",
  1112. "sugl", "un", "m", "t", "s", "v", "d"
  1113. ],
  1114. "articles_case": true
  1115. },
  1116. "italian_stop": {
  1117. "type": "stop",
  1118. "stopwords": "_italian_" <1>
  1119. },
  1120. "italian_keywords": {
  1121. "type": "keyword_marker",
  1122. "keywords": ["esempio"] <2>
  1123. },
  1124. "italian_stemmer": {
  1125. "type": "stemmer",
  1126. "language": "light_italian"
  1127. }
  1128. },
  1129. "analyzer": {
  1130. "rebuilt_italian": {
  1131. "tokenizer": "standard",
  1132. "filter": [
  1133. "italian_elision",
  1134. "lowercase",
  1135. "italian_stop",
  1136. "italian_keywords",
  1137. "italian_stemmer"
  1138. ]
  1139. }
  1140. }
  1141. }
  1142. }
  1143. }
  1144. ----------------------------------------------------
  1145. // TEST[s/"italian_keywords",//]
  1146. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
  1147. <1> The default stopwords can be overridden with the `stopwords`
  1148. or `stopwords_path` parameters.
  1149. <2> This filter should be removed unless there are words which should
  1150. be excluded from stemming.
  1151. [[latvian-analyzer]]
  1152. ===== `latvian` analyzer
  1153. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1154. [source,console]
  1155. ----------------------------------------------------
  1156. PUT /latvian_example
  1157. {
  1158. "settings": {
  1159. "analysis": {
  1160. "filter": {
  1161. "latvian_stop": {
  1162. "type": "stop",
  1163. "stopwords": "_latvian_" <1>
  1164. },
  1165. "latvian_keywords": {
  1166. "type": "keyword_marker",
  1167. "keywords": ["piemērs"] <2>
  1168. },
  1169. "latvian_stemmer": {
  1170. "type": "stemmer",
  1171. "language": "latvian"
  1172. }
  1173. },
  1174. "analyzer": {
  1175. "rebuilt_latvian": {
  1176. "tokenizer": "standard",
  1177. "filter": [
  1178. "lowercase",
  1179. "latvian_stop",
  1180. "latvian_keywords",
  1181. "latvian_stemmer"
  1182. ]
  1183. }
  1184. }
  1185. }
  1186. }
  1187. }
  1188. ----------------------------------------------------
  1189. // TEST[s/"latvian_keywords",//]
  1190. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
  1191. <1> The default stopwords can be overridden with the `stopwords`
  1192. or `stopwords_path` parameters.
  1193. <2> This filter should be removed unless there are words which should
  1194. be excluded from stemming.
  1195. [[lithuanian-analyzer]]
  1196. ===== `lithuanian` analyzer
  1197. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1198. [source,console]
  1199. ----------------------------------------------------
  1200. PUT /lithuanian_example
  1201. {
  1202. "settings": {
  1203. "analysis": {
  1204. "filter": {
  1205. "lithuanian_stop": {
  1206. "type": "stop",
  1207. "stopwords": "_lithuanian_" <1>
  1208. },
  1209. "lithuanian_keywords": {
  1210. "type": "keyword_marker",
  1211. "keywords": ["pavyzdys"] <2>
  1212. },
  1213. "lithuanian_stemmer": {
  1214. "type": "stemmer",
  1215. "language": "lithuanian"
  1216. }
  1217. },
  1218. "analyzer": {
  1219. "rebuilt_lithuanian": {
  1220. "tokenizer": "standard",
  1221. "filter": [
  1222. "lowercase",
  1223. "lithuanian_stop",
  1224. "lithuanian_keywords",
  1225. "lithuanian_stemmer"
  1226. ]
  1227. }
  1228. }
  1229. }
  1230. }
  1231. }
  1232. ----------------------------------------------------
  1233. // TEST[s/"lithuanian_keywords",//]
  1234. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
  1235. <1> The default stopwords can be overridden with the `stopwords`
  1236. or `stopwords_path` parameters.
  1237. <2> This filter should be removed unless there are words which should
  1238. be excluded from stemming.
  1239. [[norwegian-analyzer]]
  1240. ===== `norwegian` analyzer
  1241. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1242. [source,console]
  1243. ----------------------------------------------------
  1244. PUT /norwegian_example
  1245. {
  1246. "settings": {
  1247. "analysis": {
  1248. "filter": {
  1249. "norwegian_stop": {
  1250. "type": "stop",
  1251. "stopwords": "_norwegian_" <1>
  1252. },
  1253. "norwegian_keywords": {
  1254. "type": "keyword_marker",
  1255. "keywords": ["eksempel"] <2>
  1256. },
  1257. "norwegian_stemmer": {
  1258. "type": "stemmer",
  1259. "language": "norwegian"
  1260. }
  1261. },
  1262. "analyzer": {
  1263. "rebuilt_norwegian": {
  1264. "tokenizer": "standard",
  1265. "filter": [
  1266. "lowercase",
  1267. "norwegian_stop",
  1268. "norwegian_keywords",
  1269. "norwegian_stemmer"
  1270. ]
  1271. }
  1272. }
  1273. }
  1274. }
  1275. }
  1276. ----------------------------------------------------
  1277. // TEST[s/"norwegian_keywords",//]
  1278. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
  1279. <1> The default stopwords can be overridden with the `stopwords`
  1280. or `stopwords_path` parameters.
  1281. <2> This filter should be removed unless there are words which should
  1282. be excluded from stemming.
  1283. [[persian-analyzer]]
  1284. ===== `persian` analyzer
  1285. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1286. [source,console]
  1287. ----------------------------------------------------
  1288. PUT /persian_example
  1289. {
  1290. "settings": {
  1291. "analysis": {
  1292. "char_filter": {
  1293. "zero_width_spaces": {
  1294. "type": "mapping",
  1295. "mappings": [ "\\u200C=>\\u0020"] <1>
  1296. }
  1297. },
  1298. "filter": {
  1299. "persian_stop": {
  1300. "type": "stop",
  1301. "stopwords": "_persian_" <2>
  1302. }
  1303. },
  1304. "analyzer": {
  1305. "rebuilt_persian": {
  1306. "tokenizer": "standard",
  1307. "char_filter": [ "zero_width_spaces" ],
  1308. "filter": [
  1309. "lowercase",
  1310. "decimal_digit",
  1311. "arabic_normalization",
  1312. "persian_normalization",
  1313. "persian_stop"
  1314. ]
  1315. }
  1316. }
  1317. }
  1318. }
  1319. }
  1320. ----------------------------------------------------
  1321. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
  1322. <1> Replaces zero-width non-joiners with an ASCII space.
  1323. <2> The default stopwords can be overridden with the `stopwords`
  1324. or `stopwords_path` parameters.
  1325. [[portuguese-analyzer]]
  1326. ===== `portuguese` analyzer
  1327. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1328. [source,console]
  1329. ----------------------------------------------------
  1330. PUT /portuguese_example
  1331. {
  1332. "settings": {
  1333. "analysis": {
  1334. "filter": {
  1335. "portuguese_stop": {
  1336. "type": "stop",
  1337. "stopwords": "_portuguese_" <1>
  1338. },
  1339. "portuguese_keywords": {
  1340. "type": "keyword_marker",
  1341. "keywords": ["exemplo"] <2>
  1342. },
  1343. "portuguese_stemmer": {
  1344. "type": "stemmer",
  1345. "language": "light_portuguese"
  1346. }
  1347. },
  1348. "analyzer": {
  1349. "rebuilt_portuguese": {
  1350. "tokenizer": "standard",
  1351. "filter": [
  1352. "lowercase",
  1353. "portuguese_stop",
  1354. "portuguese_keywords",
  1355. "portuguese_stemmer"
  1356. ]
  1357. }
  1358. }
  1359. }
  1360. }
  1361. }
  1362. ----------------------------------------------------
  1363. // TEST[s/"portuguese_keywords",//]
  1364. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
  1365. <1> The default stopwords can be overridden with the `stopwords`
  1366. or `stopwords_path` parameters.
  1367. <2> This filter should be removed unless there are words which should
  1368. be excluded from stemming.
  1369. [[romanian-analyzer]]
  1370. ===== `romanian` analyzer
  1371. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1372. [source,console]
  1373. ----------------------------------------------------
  1374. PUT /romanian_example
  1375. {
  1376. "settings": {
  1377. "analysis": {
  1378. "filter": {
  1379. "romanian_stop": {
  1380. "type": "stop",
  1381. "stopwords": "_romanian_" <1>
  1382. },
  1383. "romanian_keywords": {
  1384. "type": "keyword_marker",
  1385. "keywords": ["exemplu"] <2>
  1386. },
  1387. "romanian_stemmer": {
  1388. "type": "stemmer",
  1389. "language": "romanian"
  1390. }
  1391. },
  1392. "analyzer": {
  1393. "rebuilt_romanian": {
  1394. "tokenizer": "standard",
  1395. "filter": [
  1396. "lowercase",
  1397. "romanian_stop",
  1398. "romanian_keywords",
  1399. "romanian_stemmer"
  1400. ]
  1401. }
  1402. }
  1403. }
  1404. }
  1405. }
  1406. ----------------------------------------------------
  1407. // TEST[s/"romanian_keywords",//]
  1408. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
  1409. <1> The default stopwords can be overridden with the `stopwords`
  1410. or `stopwords_path` parameters.
  1411. <2> This filter should be removed unless there are words which should
  1412. be excluded from stemming.
  1413. [[russian-analyzer]]
  1414. ===== `russian` analyzer
  1415. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1416. [source,console]
  1417. ----------------------------------------------------
  1418. PUT /russian_example
  1419. {
  1420. "settings": {
  1421. "analysis": {
  1422. "filter": {
  1423. "russian_stop": {
  1424. "type": "stop",
  1425. "stopwords": "_russian_" <1>
  1426. },
  1427. "russian_keywords": {
  1428. "type": "keyword_marker",
  1429. "keywords": ["пример"] <2>
  1430. },
  1431. "russian_stemmer": {
  1432. "type": "stemmer",
  1433. "language": "russian"
  1434. }
  1435. },
  1436. "analyzer": {
  1437. "rebuilt_russian": {
  1438. "tokenizer": "standard",
  1439. "filter": [
  1440. "lowercase",
  1441. "russian_stop",
  1442. "russian_keywords",
  1443. "russian_stemmer"
  1444. ]
  1445. }
  1446. }
  1447. }
  1448. }
  1449. }
  1450. ----------------------------------------------------
  1451. // TEST[s/"russian_keywords",//]
  1452. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
  1453. <1> The default stopwords can be overridden with the `stopwords`
  1454. or `stopwords_path` parameters.
  1455. <2> This filter should be removed unless there are words which should
  1456. be excluded from stemming.
  1457. [[sorani-analyzer]]
  1458. ===== `sorani` analyzer
  1459. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1460. [source,console]
  1461. ----------------------------------------------------
  1462. PUT /sorani_example
  1463. {
  1464. "settings": {
  1465. "analysis": {
  1466. "filter": {
  1467. "sorani_stop": {
  1468. "type": "stop",
  1469. "stopwords": "_sorani_" <1>
  1470. },
  1471. "sorani_keywords": {
  1472. "type": "keyword_marker",
  1473. "keywords": ["mînak"] <2>
  1474. },
  1475. "sorani_stemmer": {
  1476. "type": "stemmer",
  1477. "language": "sorani"
  1478. }
  1479. },
  1480. "analyzer": {
  1481. "rebuilt_sorani": {
  1482. "tokenizer": "standard",
  1483. "filter": [
  1484. "sorani_normalization",
  1485. "lowercase",
  1486. "decimal_digit",
  1487. "sorani_stop",
  1488. "sorani_keywords",
  1489. "sorani_stemmer"
  1490. ]
  1491. }
  1492. }
  1493. }
  1494. }
  1495. }
  1496. ----------------------------------------------------
  1497. // TEST[s/"sorani_keywords",//]
  1498. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
  1499. <1> The default stopwords can be overridden with the `stopwords`
  1500. or `stopwords_path` parameters.
  1501. <2> This filter should be removed unless there are words which should
  1502. be excluded from stemming.
  1503. [[spanish-analyzer]]
  1504. ===== `spanish` analyzer
  1505. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1506. [source,console]
  1507. ----------------------------------------------------
  1508. PUT /spanish_example
  1509. {
  1510. "settings": {
  1511. "analysis": {
  1512. "filter": {
  1513. "spanish_stop": {
  1514. "type": "stop",
  1515. "stopwords": "_spanish_" <1>
  1516. },
  1517. "spanish_keywords": {
  1518. "type": "keyword_marker",
  1519. "keywords": ["ejemplo"] <2>
  1520. },
  1521. "spanish_stemmer": {
  1522. "type": "stemmer",
  1523. "language": "light_spanish"
  1524. }
  1525. },
  1526. "analyzer": {
  1527. "rebuilt_spanish": {
  1528. "tokenizer": "standard",
  1529. "filter": [
  1530. "lowercase",
  1531. "spanish_stop",
  1532. "spanish_keywords",
  1533. "spanish_stemmer"
  1534. ]
  1535. }
  1536. }
  1537. }
  1538. }
  1539. }
  1540. ----------------------------------------------------
  1541. // TEST[s/"spanish_keywords",//]
  1542. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
  1543. <1> The default stopwords can be overridden with the `stopwords`
  1544. or `stopwords_path` parameters.
  1545. <2> This filter should be removed unless there are words which should
  1546. be excluded from stemming.
  1547. [[swedish-analyzer]]
  1548. ===== `swedish` analyzer
  1549. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1550. [source,console]
  1551. ----------------------------------------------------
  1552. PUT /swedish_example
  1553. {
  1554. "settings": {
  1555. "analysis": {
  1556. "filter": {
  1557. "swedish_stop": {
  1558. "type": "stop",
  1559. "stopwords": "_swedish_" <1>
  1560. },
  1561. "swedish_keywords": {
  1562. "type": "keyword_marker",
  1563. "keywords": ["exempel"] <2>
  1564. },
  1565. "swedish_stemmer": {
  1566. "type": "stemmer",
  1567. "language": "swedish"
  1568. }
  1569. },
  1570. "analyzer": {
  1571. "rebuilt_swedish": {
  1572. "tokenizer": "standard",
  1573. "filter": [
  1574. "lowercase",
  1575. "swedish_stop",
  1576. "swedish_keywords",
  1577. "swedish_stemmer"
  1578. ]
  1579. }
  1580. }
  1581. }
  1582. }
  1583. }
  1584. ----------------------------------------------------
  1585. // TEST[s/"swedish_keywords",//]
  1586. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
  1587. <1> The default stopwords can be overridden with the `stopwords`
  1588. or `stopwords_path` parameters.
  1589. <2> This filter should be removed unless there are words which should
  1590. be excluded from stemming.
  1591. [[turkish-analyzer]]
  1592. ===== `turkish` analyzer
  1593. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1594. [source,console]
  1595. ----------------------------------------------------
  1596. PUT /turkish_example
  1597. {
  1598. "settings": {
  1599. "analysis": {
  1600. "filter": {
  1601. "turkish_stop": {
  1602. "type": "stop",
  1603. "stopwords": "_turkish_" <1>
  1604. },
  1605. "turkish_lowercase": {
  1606. "type": "lowercase",
  1607. "language": "turkish"
  1608. },
  1609. "turkish_keywords": {
  1610. "type": "keyword_marker",
  1611. "keywords": ["örnek"] <2>
  1612. },
  1613. "turkish_stemmer": {
  1614. "type": "stemmer",
  1615. "language": "turkish"
  1616. }
  1617. },
  1618. "analyzer": {
  1619. "rebuilt_turkish": {
  1620. "tokenizer": "standard",
  1621. "filter": [
  1622. "apostrophe",
  1623. "turkish_lowercase",
  1624. "turkish_stop",
  1625. "turkish_keywords",
  1626. "turkish_stemmer"
  1627. ]
  1628. }
  1629. }
  1630. }
  1631. }
  1632. }
  1633. ----------------------------------------------------
  1634. // TEST[s/"turkish_keywords",//]
  1635. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
  1636. <1> The default stopwords can be overridden with the `stopwords`
  1637. or `stopwords_path` parameters.
  1638. <2> This filter should be removed unless there are words which should
  1639. be excluded from stemming.
  1640. [[thai-analyzer]]
  1641. ===== `thai` analyzer
  1642. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1643. [source,console]
  1644. ----------------------------------------------------
  1645. PUT /thai_example
  1646. {
  1647. "settings": {
  1648. "analysis": {
  1649. "filter": {
  1650. "thai_stop": {
  1651. "type": "stop",
  1652. "stopwords": "_thai_" <1>
  1653. }
  1654. },
  1655. "analyzer": {
  1656. "rebuilt_thai": {
  1657. "tokenizer": "thai",
  1658. "filter": [
  1659. "lowercase",
  1660. "decimal_digit",
  1661. "thai_stop"
  1662. ]
  1663. }
  1664. }
  1665. }
  1666. }
  1667. }
  1668. ----------------------------------------------------
  1670. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
  1671. <1> The default stopwords can be overridden with the `stopwords`
  1672. or `stopwords_path` parameters.