// lang-analyzer.asciidoc
  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<bengali-analyzer,`bengali`>>,
  9. <<brazilian-analyzer,`brazilian`>>,
  10. <<bulgarian-analyzer,`bulgarian`>>,
  11. <<catalan-analyzer,`catalan`>>,
  12. <<cjk-analyzer,`cjk`>>,
  13. <<czech-analyzer,`czech`>>,
  14. <<danish-analyzer,`danish`>>,
  15. <<dutch-analyzer,`dutch`>>,
  16. <<english-analyzer,`english`>>,
  17. <<estonian-analyzer,`estonian`>>,
  18. <<finnish-analyzer,`finnish`>>,
  19. <<french-analyzer,`french`>>,
  20. <<galician-analyzer,`galician`>>,
  21. <<german-analyzer,`german`>>,
  22. <<greek-analyzer,`greek`>>,
  23. <<hindi-analyzer,`hindi`>>,
  24. <<hungarian-analyzer,`hungarian`>>,
  25. <<indonesian-analyzer,`indonesian`>>,
  26. <<irish-analyzer,`irish`>>,
  27. <<italian-analyzer,`italian`>>,
  28. <<latvian-analyzer,`latvian`>>,
  29. <<lithuanian-analyzer,`lithuanian`>>,
  30. <<norwegian-analyzer,`norwegian`>>,
  31. <<persian-analyzer,`persian`>>,
  32. <<portuguese-analyzer,`portuguese`>>,
  33. <<romanian-analyzer,`romanian`>>,
  34. <<russian-analyzer,`russian`>>,
  35. <<sorani-analyzer,`sorani`>>,
  36. <<spanish-analyzer,`spanish`>>,
  37. <<swedish-analyzer,`swedish`>>,
  38. <<turkish-analyzer,`turkish`>>,
  39. <<thai-analyzer,`thai`>>.
  40. ==== Configuring language analyzers
  41. ===== Stopwords
  42. All analyzers support setting custom `stopwords` either internally in
  43. the config, or by using an external stopwords file by setting
  44. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  45. more details.
  46. [[_excluding_words_from_stemming]]
  47. ===== Excluding words from stemming
  48. The `stem_exclusion` parameter allows you to specify an array
  49. of lowercase words that should not be stemmed. Internally, this
  50. functionality is implemented by adding the
  51. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  52. with the `keywords` set to the value of the `stem_exclusion` parameter.
  53. The following analyzers support setting a custom `stem_exclusion` list:
  54. `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`,
  55. `dutch`, `english`, `finnish`, `french`, `galician`,
  56. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  57. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  58. `spanish`, `swedish`, `turkish`.
  59. ==== Reimplementing language analyzers
  60. The built-in language analyzers can be reimplemented as `custom` analyzers
  61. (as described below) in order to customize their behaviour.
  62. NOTE: If you do not intend to exclude words from being stemmed (the
  63. equivalent of the `stem_exclusion` parameter above), then you should remove
  64. the `keyword_marker` token filter from the custom analyzer configuration.
  65. [[arabic-analyzer]]
  66. ===== `arabic` analyzer
  67. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  68. [source,console]
  69. ----------------------------------------------------
  70. PUT /arabic_example
  71. {
  72. "settings": {
  73. "analysis": {
  74. "filter": {
  75. "arabic_stop": {
  76. "type": "stop",
  77. "stopwords": "_arabic_" <1>
  78. },
  79. "arabic_keywords": {
  80. "type": "keyword_marker",
  81. "keywords": ["مثال"] <2>
  82. },
  83. "arabic_stemmer": {
  84. "type": "stemmer",
  85. "language": "arabic"
  86. }
  87. },
  88. "analyzer": {
  89. "rebuilt_arabic": {
  90. "tokenizer": "standard",
  91. "filter": [
  92. "lowercase",
  93. "decimal_digit",
  94. "arabic_stop",
  95. "arabic_normalization",
  96. "arabic_keywords",
  97. "arabic_stemmer"
  98. ]
  99. }
  100. }
  101. }
  102. }
  103. }
  104. ----------------------------------------------------
  105. // TEST[s/"arabic_keywords",//]
  106. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
  107. <1> The default stopwords can be overridden with the `stopwords`
  108. or `stopwords_path` parameters.
  109. <2> This filter should be removed unless there are words which should
  110. be excluded from stemming.
  111. [[armenian-analyzer]]
  112. ===== `armenian` analyzer
  113. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  114. [source,console]
  115. ----------------------------------------------------
  116. PUT /armenian_example
  117. {
  118. "settings": {
  119. "analysis": {
  120. "filter": {
  121. "armenian_stop": {
  122. "type": "stop",
  123. "stopwords": "_armenian_" <1>
  124. },
  125. "armenian_keywords": {
  126. "type": "keyword_marker",
  127. "keywords": ["օրինակ"] <2>
  128. },
  129. "armenian_stemmer": {
  130. "type": "stemmer",
  131. "language": "armenian"
  132. }
  133. },
  134. "analyzer": {
  135. "rebuilt_armenian": {
  136. "tokenizer": "standard",
  137. "filter": [
  138. "lowercase",
  139. "armenian_stop",
  140. "armenian_keywords",
  141. "armenian_stemmer"
  142. ]
  143. }
  144. }
  145. }
  146. }
  147. }
  148. ----------------------------------------------------
  149. // TEST[s/"armenian_keywords",//]
  150. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
  151. <1> The default stopwords can be overridden with the `stopwords`
  152. or `stopwords_path` parameters.
  153. <2> This filter should be removed unless there are words which should
  154. be excluded from stemming.
  155. [[basque-analyzer]]
  156. ===== `basque` analyzer
  157. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  158. [source,console]
  159. ----------------------------------------------------
  160. PUT /basque_example
  161. {
  162. "settings": {
  163. "analysis": {
  164. "filter": {
  165. "basque_stop": {
  166. "type": "stop",
  167. "stopwords": "_basque_" <1>
  168. },
  169. "basque_keywords": {
  170. "type": "keyword_marker",
  171. "keywords": ["Adibidez"] <2>
  172. },
  173. "basque_stemmer": {
  174. "type": "stemmer",
  175. "language": "basque"
  176. }
  177. },
  178. "analyzer": {
  179. "rebuilt_basque": {
  180. "tokenizer": "standard",
  181. "filter": [
  182. "lowercase",
  183. "basque_stop",
  184. "basque_keywords",
  185. "basque_stemmer"
  186. ]
  187. }
  188. }
  189. }
  190. }
  191. }
  192. ----------------------------------------------------
  193. // TEST[s/"basque_keywords",//]
  194. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
  195. <1> The default stopwords can be overridden with the `stopwords`
  196. or `stopwords_path` parameters.
  197. <2> This filter should be removed unless there are words which should
  198. be excluded from stemming.
  199. [[bengali-analyzer]]
  200. ===== `bengali` analyzer
  201. The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
  202. [source,console]
  203. ----------------------------------------------------
  204. PUT /bengali_example
  205. {
  206. "settings": {
  207. "analysis": {
  208. "filter": {
  209. "bengali_stop": {
  210. "type": "stop",
  211. "stopwords": "_bengali_" <1>
  212. },
  213. "bengali_keywords": {
  214. "type": "keyword_marker",
  215. "keywords": ["উদাহরণ"] <2>
  216. },
  217. "bengali_stemmer": {
  218. "type": "stemmer",
  219. "language": "bengali"
  220. }
  221. },
  222. "analyzer": {
  223. "rebuilt_bengali": {
  224. "tokenizer": "standard",
  225. "filter": [
  226. "lowercase",
  227. "decimal_digit",
  228. "bengali_keywords",
  229. "indic_normalization",
  230. "bengali_normalization",
  231. "bengali_stop",
  232. "bengali_stemmer"
  233. ]
  234. }
  235. }
  236. }
  237. }
  238. }
  239. ----------------------------------------------------
  240. // TEST[s/"bengali_keywords",//]
  241. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
  242. <1> The default stopwords can be overridden with the `stopwords`
  243. or `stopwords_path` parameters.
  244. <2> This filter should be removed unless there are words which should
  245. be excluded from stemming.
  246. [[brazilian-analyzer]]
  247. ===== `brazilian` analyzer
  248. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  249. [source,console]
  250. ----------------------------------------------------
  251. PUT /brazilian_example
  252. {
  253. "settings": {
  254. "analysis": {
  255. "filter": {
  256. "brazilian_stop": {
  257. "type": "stop",
  258. "stopwords": "_brazilian_" <1>
  259. },
  260. "brazilian_keywords": {
  261. "type": "keyword_marker",
  262. "keywords": ["exemplo"] <2>
  263. },
  264. "brazilian_stemmer": {
  265. "type": "stemmer",
  266. "language": "brazilian"
  267. }
  268. },
  269. "analyzer": {
  270. "rebuilt_brazilian": {
  271. "tokenizer": "standard",
  272. "filter": [
  273. "lowercase",
  274. "brazilian_stop",
  275. "brazilian_keywords",
  276. "brazilian_stemmer"
  277. ]
  278. }
  279. }
  280. }
  281. }
  282. }
  283. ----------------------------------------------------
  284. // TEST[s/"brazilian_keywords",//]
  285. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
  286. <1> The default stopwords can be overridden with the `stopwords`
  287. or `stopwords_path` parameters.
  288. <2> This filter should be removed unless there are words which should
  289. be excluded from stemming.
  290. [[bulgarian-analyzer]]
  291. ===== `bulgarian` analyzer
  292. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  293. [source,console]
  294. ----------------------------------------------------
  295. PUT /bulgarian_example
  296. {
  297. "settings": {
  298. "analysis": {
  299. "filter": {
  300. "bulgarian_stop": {
  301. "type": "stop",
  302. "stopwords": "_bulgarian_" <1>
  303. },
  304. "bulgarian_keywords": {
  305. "type": "keyword_marker",
  306. "keywords": ["пример"] <2>
  307. },
  308. "bulgarian_stemmer": {
  309. "type": "stemmer",
  310. "language": "bulgarian"
  311. }
  312. },
  313. "analyzer": {
  314. "rebuilt_bulgarian": {
  315. "tokenizer": "standard",
  316. "filter": [
  317. "lowercase",
  318. "bulgarian_stop",
  319. "bulgarian_keywords",
  320. "bulgarian_stemmer"
  321. ]
  322. }
  323. }
  324. }
  325. }
  326. }
  327. ----------------------------------------------------
  328. // TEST[s/"bulgarian_keywords",//]
  329. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
  330. <1> The default stopwords can be overridden with the `stopwords`
  331. or `stopwords_path` parameters.
  332. <2> This filter should be removed unless there are words which should
  333. be excluded from stemming.
  334. [[catalan-analyzer]]
  335. ===== `catalan` analyzer
  336. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  337. [source,console]
  338. ----------------------------------------------------
  339. PUT /catalan_example
  340. {
  341. "settings": {
  342. "analysis": {
  343. "filter": {
  344. "catalan_elision": {
  345. "type": "elision",
  346. "articles": [ "d", "l", "m", "n", "s", "t"],
  347. "articles_case": true
  348. },
  349. "catalan_stop": {
  350. "type": "stop",
  351. "stopwords": "_catalan_" <1>
  352. },
  353. "catalan_keywords": {
  354. "type": "keyword_marker",
  355. "keywords": ["example"] <2>
  356. },
  357. "catalan_stemmer": {
  358. "type": "stemmer",
  359. "language": "catalan"
  360. }
  361. },
  362. "analyzer": {
  363. "rebuilt_catalan": {
  364. "tokenizer": "standard",
  365. "filter": [
  366. "catalan_elision",
  367. "lowercase",
  368. "catalan_stop",
  369. "catalan_keywords",
  370. "catalan_stemmer"
  371. ]
  372. }
  373. }
  374. }
  375. }
  376. }
  377. ----------------------------------------------------
  378. // TEST[s/"catalan_keywords",//]
  379. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
  380. <1> The default stopwords can be overridden with the `stopwords`
  381. or `stopwords_path` parameters.
  382. <2> This filter should be removed unless there are words which should
  383. be excluded from stemming.
  384. [[cjk-analyzer]]
  385. ===== `cjk` analyzer
  386. NOTE: You may find that `icu_analyzer` in the ICU analysis plugin works better
  387. for CJK text than the `cjk` analyzer. Experiment with your text and queries.
  388. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  389. [source,console]
  390. ----------------------------------------------------
  391. PUT /cjk_example
  392. {
  393. "settings": {
  394. "analysis": {
  395. "filter": {
  396. "english_stop": {
  397. "type": "stop",
  398. "stopwords": [ <1>
  399. "a", "and", "are", "as", "at", "be", "but", "by", "for",
  400. "if", "in", "into", "is", "it", "no", "not", "of", "on",
  401. "or", "s", "such", "t", "that", "the", "their", "then",
  402. "there", "these", "they", "this", "to", "was", "will",
  403. "with", "www"
  404. ]
  405. }
  406. },
  407. "analyzer": {
  408. "rebuilt_cjk": {
  409. "tokenizer": "standard",
  410. "filter": [
  411. "cjk_width",
  412. "lowercase",
  413. "cjk_bigram",
  414. "english_stop"
  415. ]
  416. }
  417. }
  418. }
  419. }
  420. }
  421. ----------------------------------------------------
  422. // TEST[s/"cjk_keywords",//]
  423. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
  424. <1> The default stopwords can be overridden with the `stopwords`
  425. or `stopwords_path` parameters. The default stop words are
  426. *almost* the same as the `_english_` set, but not exactly
  427. the same.
  428. [[czech-analyzer]]
  429. ===== `czech` analyzer
  430. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  431. [source,console]
  432. ----------------------------------------------------
  433. PUT /czech_example
  434. {
  435. "settings": {
  436. "analysis": {
  437. "filter": {
  438. "czech_stop": {
  439. "type": "stop",
  440. "stopwords": "_czech_" <1>
  441. },
  442. "czech_keywords": {
  443. "type": "keyword_marker",
  444. "keywords": ["příklad"] <2>
  445. },
  446. "czech_stemmer": {
  447. "type": "stemmer",
  448. "language": "czech"
  449. }
  450. },
  451. "analyzer": {
  452. "rebuilt_czech": {
  453. "tokenizer": "standard",
  454. "filter": [
  455. "lowercase",
  456. "czech_stop",
  457. "czech_keywords",
  458. "czech_stemmer"
  459. ]
  460. }
  461. }
  462. }
  463. }
  464. }
  465. ----------------------------------------------------
  466. // TEST[s/"czech_keywords",//]
  467. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
  468. <1> The default stopwords can be overridden with the `stopwords`
  469. or `stopwords_path` parameters.
  470. <2> This filter should be removed unless there are words which should
  471. be excluded from stemming.
  472. [[danish-analyzer]]
  473. ===== `danish` analyzer
  474. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  475. [source,console]
  476. ----------------------------------------------------
  477. PUT /danish_example
  478. {
  479. "settings": {
  480. "analysis": {
  481. "filter": {
  482. "danish_stop": {
  483. "type": "stop",
  484. "stopwords": "_danish_" <1>
  485. },
  486. "danish_keywords": {
  487. "type": "keyword_marker",
  488. "keywords": ["eksempel"] <2>
  489. },
  490. "danish_stemmer": {
  491. "type": "stemmer",
  492. "language": "danish"
  493. }
  494. },
  495. "analyzer": {
  496. "rebuilt_danish": {
  497. "tokenizer": "standard",
  498. "filter": [
  499. "lowercase",
  500. "danish_stop",
  501. "danish_keywords",
  502. "danish_stemmer"
  503. ]
  504. }
  505. }
  506. }
  507. }
  508. }
  509. ----------------------------------------------------
  510. // TEST[s/"danish_keywords",//]
  511. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
  512. <1> The default stopwords can be overridden with the `stopwords`
  513. or `stopwords_path` parameters.
  514. <2> This filter should be removed unless there are words which should
  515. be excluded from stemming.
  516. [[dutch-analyzer]]
  517. ===== `dutch` analyzer
  518. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  519. [source,console]
  520. ----------------------------------------------------
  521. PUT /dutch_example
  522. {
  523. "settings": {
  524. "analysis": {
  525. "filter": {
  526. "dutch_stop": {
  527. "type": "stop",
  528. "stopwords": "_dutch_" <1>
  529. },
  530. "dutch_keywords": {
  531. "type": "keyword_marker",
  532. "keywords": ["voorbeeld"] <2>
  533. },
  534. "dutch_stemmer": {
  535. "type": "stemmer",
  536. "language": "dutch"
  537. },
  538. "dutch_override": {
  539. "type": "stemmer_override",
  540. "rules": [
  541. "fiets=>fiets",
  542. "bromfiets=>bromfiets",
  543. "ei=>eier",
  544. "kind=>kinder"
  545. ]
  546. }
  547. },
  548. "analyzer": {
  549. "rebuilt_dutch": {
  550. "tokenizer": "standard",
  551. "filter": [
  552. "lowercase",
  553. "dutch_stop",
  554. "dutch_keywords",
  555. "dutch_override",
  556. "dutch_stemmer"
  557. ]
  558. }
  559. }
  560. }
  561. }
  562. }
  563. ----------------------------------------------------
  564. // TEST[s/"dutch_keywords",//]
  565. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
  566. <1> The default stopwords can be overridden with the `stopwords`
  567. or `stopwords_path` parameters.
  568. <2> This filter should be removed unless there are words which should
  569. be excluded from stemming.
  570. [[english-analyzer]]
  571. ===== `english` analyzer
  572. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  573. [source,console]
  574. ----------------------------------------------------
  575. PUT /english_example
  576. {
  577. "settings": {
  578. "analysis": {
  579. "filter": {
  580. "english_stop": {
  581. "type": "stop",
  582. "stopwords": "_english_" <1>
  583. },
  584. "english_keywords": {
  585. "type": "keyword_marker",
  586. "keywords": ["example"] <2>
  587. },
  588. "english_stemmer": {
  589. "type": "stemmer",
  590. "language": "english"
  591. },
  592. "english_possessive_stemmer": {
  593. "type": "stemmer",
  594. "language": "possessive_english"
  595. }
  596. },
  597. "analyzer": {
  598. "rebuilt_english": {
  599. "tokenizer": "standard",
  600. "filter": [
  601. "english_possessive_stemmer",
  602. "lowercase",
  603. "english_stop",
  604. "english_keywords",
  605. "english_stemmer"
  606. ]
  607. }
  608. }
  609. }
  610. }
  611. }
  612. ----------------------------------------------------
  613. // TEST[s/"english_keywords",//]
  614. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
  615. <1> The default stopwords can be overridden with the `stopwords`
  616. or `stopwords_path` parameters.
  617. <2> This filter should be removed unless there are words which should
  618. be excluded from stemming.
  619. [[estonian-analyzer]]
  620. ===== `estonian` analyzer
  621. The `estonian` analyzer could be reimplemented as a `custom` analyzer as follows:
  622. [source,console]
  623. ----------------------------------------------------
  624. PUT /estonian_example
  625. {
  626. "settings": {
  627. "analysis": {
  628. "filter": {
  629. "estonian_stop": {
  630. "type": "stop",
  631. "stopwords": "_estonian_" <1>
  632. },
  633. "estonian_keywords": {
  634. "type": "keyword_marker",
  635. "keywords": ["näide"] <2>
  636. },
  637. "estonian_stemmer": {
  638. "type": "stemmer",
  639. "language": "estonian"
  640. }
  641. },
  642. "analyzer": {
  643. "rebuilt_estonian": {
  644. "tokenizer": "standard",
  645. "filter": [
  646. "lowercase",
  647. "estonian_stop",
  648. "estonian_keywords",
  649. "estonian_stemmer"
  650. ]
  651. }
  652. }
  653. }
  654. }
  655. }
  656. ----------------------------------------------------
  657. // TEST[s/"estonian_keywords",//]
  658. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: estonian_example, first: estonian, second: rebuilt_estonian}\nendyaml\n/]
  659. <1> The default stopwords can be overridden with the `stopwords`
  660. or `stopwords_path` parameters.
  661. <2> This filter should be removed unless there are words which should
  662. be excluded from stemming.
  663. [[finnish-analyzer]]
  664. ===== `finnish` analyzer
  665. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  666. [source,console]
  667. ----------------------------------------------------
  668. PUT /finnish_example
  669. {
  670. "settings": {
  671. "analysis": {
  672. "filter": {
  673. "finnish_stop": {
  674. "type": "stop",
  675. "stopwords": "_finnish_" <1>
  676. },
  677. "finnish_keywords": {
  678. "type": "keyword_marker",
  679. "keywords": ["esimerkki"] <2>
  680. },
  681. "finnish_stemmer": {
  682. "type": "stemmer",
  683. "language": "finnish"
  684. }
  685. },
  686. "analyzer": {
  687. "rebuilt_finnish": {
  688. "tokenizer": "standard",
  689. "filter": [
  690. "lowercase",
  691. "finnish_stop",
  692. "finnish_keywords",
  693. "finnish_stemmer"
  694. ]
  695. }
  696. }
  697. }
  698. }
  699. }
  700. ----------------------------------------------------
  701. // TEST[s/"finnish_keywords",//]
  702. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
  703. <1> The default stopwords can be overridden with the `stopwords`
  704. or `stopwords_path` parameters.
  705. <2> This filter should be removed unless there are words which should
  706. be excluded from stemming.
  707. [[french-analyzer]]
  708. ===== `french` analyzer
  709. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  710. [source,console]
  711. ----------------------------------------------------
  712. PUT /french_example
  713. {
  714. "settings": {
  715. "analysis": {
  716. "filter": {
  717. "french_elision": {
  718. "type": "elision",
  719. "articles_case": true,
  720. "articles": [
  721. "l", "m", "t", "qu", "n", "s",
  722. "j", "d", "c", "jusqu", "quoiqu",
  723. "lorsqu", "puisqu"
  724. ]
  725. },
  726. "french_stop": {
  727. "type": "stop",
  728. "stopwords": "_french_" <1>
  729. },
  730. "french_keywords": {
  731. "type": "keyword_marker",
  732. "keywords": ["Example"] <2>
  733. },
  734. "french_stemmer": {
  735. "type": "stemmer",
  736. "language": "light_french"
  737. }
  738. },
  739. "analyzer": {
  740. "rebuilt_french": {
  741. "tokenizer": "standard",
  742. "filter": [
  743. "french_elision",
  744. "lowercase",
  745. "french_stop",
  746. "french_keywords",
  747. "french_stemmer"
  748. ]
  749. }
  750. }
  751. }
  752. }
  753. }
  754. ----------------------------------------------------
  755. // TEST[s/"french_keywords",//]
  756. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
  757. <1> The default stopwords can be overridden with the `stopwords`
  758. or `stopwords_path` parameters.
  759. <2> This filter should be removed unless there are words which should
  760. be excluded from stemming.
  761. [[galician-analyzer]]
  762. ===== `galician` analyzer
  763. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  764. [source,console]
  765. ----------------------------------------------------
  766. PUT /galician_example
  767. {
  768. "settings": {
  769. "analysis": {
  770. "filter": {
  771. "galician_stop": {
  772. "type": "stop",
  773. "stopwords": "_galician_" <1>
  774. },
  775. "galician_keywords": {
  776. "type": "keyword_marker",
  777. "keywords": ["exemplo"] <2>
  778. },
  779. "galician_stemmer": {
  780. "type": "stemmer",
  781. "language": "galician"
  782. }
  783. },
  784. "analyzer": {
  785. "rebuilt_galician": {
  786. "tokenizer": "standard",
  787. "filter": [
  788. "lowercase",
  789. "galician_stop",
  790. "galician_keywords",
  791. "galician_stemmer"
  792. ]
  793. }
  794. }
  795. }
  796. }
  797. }
  798. ----------------------------------------------------
  799. // TEST[s/"galician_keywords",//]
  800. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
  801. <1> The default stopwords can be overridden with the `stopwords`
  802. or `stopwords_path` parameters.
  803. <2> This filter should be removed unless there are words which should
  804. be excluded from stemming.
  805. [[german-analyzer]]
  806. ===== `german` analyzer
  807. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  808. [source,console]
  809. ----------------------------------------------------
  810. PUT /german_example
  811. {
  812. "settings": {
  813. "analysis": {
  814. "filter": {
  815. "german_stop": {
  816. "type": "stop",
  817. "stopwords": "_german_" <1>
  818. },
  819. "german_keywords": {
  820. "type": "keyword_marker",
  821. "keywords": ["Beispiel"] <2>
  822. },
  823. "german_stemmer": {
  824. "type": "stemmer",
  825. "language": "light_german"
  826. }
  827. },
  828. "analyzer": {
  829. "rebuilt_german": {
  830. "tokenizer": "standard",
  831. "filter": [
  832. "lowercase",
  833. "german_stop",
  834. "german_keywords",
  835. "german_normalization",
  836. "german_stemmer"
  837. ]
  838. }
  839. }
  840. }
  841. }
  842. }
  843. ----------------------------------------------------
  844. // TEST[s/"german_keywords",//]
  845. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
  846. <1> The default stopwords can be overridden with the `stopwords`
  847. or `stopwords_path` parameters.
  848. <2> This filter should be removed unless there are words which should
  849. be excluded from stemming.
  850. [[greek-analyzer]]
  851. ===== `greek` analyzer
  852. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  853. [source,console]
  854. ----------------------------------------------------
  855. PUT /greek_example
  856. {
  857. "settings": {
  858. "analysis": {
  859. "filter": {
  860. "greek_stop": {
  861. "type": "stop",
  862. "stopwords": "_greek_" <1>
  863. },
  864. "greek_lowercase": {
  865. "type": "lowercase",
  866. "language": "greek"
  867. },
  868. "greek_keywords": {
  869. "type": "keyword_marker",
  870. "keywords": ["παράδειγμα"] <2>
  871. },
  872. "greek_stemmer": {
  873. "type": "stemmer",
  874. "language": "greek"
  875. }
  876. },
  877. "analyzer": {
  878. "rebuilt_greek": {
  879. "tokenizer": "standard",
  880. "filter": [
  881. "greek_lowercase",
  882. "greek_stop",
  883. "greek_keywords",
  884. "greek_stemmer"
  885. ]
  886. }
  887. }
  888. }
  889. }
  890. }
  891. ----------------------------------------------------
  892. // TEST[s/"greek_keywords",//]
  893. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
  894. <1> The default stopwords can be overridden with the `stopwords`
  895. or `stopwords_path` parameters.
  896. <2> This filter should be removed unless there are words which should
  897. be excluded from stemming.
  898. [[hindi-analyzer]]
  899. ===== `hindi` analyzer
  900. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  901. [source,console]
  902. ----------------------------------------------------
  903. PUT /hindi_example
  904. {
  905. "settings": {
  906. "analysis": {
  907. "filter": {
  908. "hindi_stop": {
  909. "type": "stop",
  910. "stopwords": "_hindi_" <1>
  911. },
  912. "hindi_keywords": {
  913. "type": "keyword_marker",
  914. "keywords": ["उदाहरण"] <2>
  915. },
  916. "hindi_stemmer": {
  917. "type": "stemmer",
  918. "language": "hindi"
  919. }
  920. },
  921. "analyzer": {
  922. "rebuilt_hindi": {
  923. "tokenizer": "standard",
  924. "filter": [
  925. "lowercase",
  926. "decimal_digit",
  927. "hindi_keywords",
  928. "indic_normalization",
  929. "hindi_normalization",
  930. "hindi_stop",
  931. "hindi_stemmer"
  932. ]
  933. }
  934. }
  935. }
  936. }
  937. }
  938. ----------------------------------------------------
  939. // TEST[s/"hindi_keywords",//]
  940. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
  941. <1> The default stopwords can be overridden with the `stopwords`
  942. or `stopwords_path` parameters.
  943. <2> This filter should be removed unless there are words which should
  944. be excluded from stemming.
  945. [[hungarian-analyzer]]
  946. ===== `hungarian` analyzer
  947. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  948. [source,console]
  949. ----------------------------------------------------
  950. PUT /hungarian_example
  951. {
  952. "settings": {
  953. "analysis": {
  954. "filter": {
  955. "hungarian_stop": {
  956. "type": "stop",
  957. "stopwords": "_hungarian_" <1>
  958. },
  959. "hungarian_keywords": {
  960. "type": "keyword_marker",
  961. "keywords": ["példa"] <2>
  962. },
  963. "hungarian_stemmer": {
  964. "type": "stemmer",
  965. "language": "hungarian"
  966. }
  967. },
  968. "analyzer": {
  969. "rebuilt_hungarian": {
  970. "tokenizer": "standard",
  971. "filter": [
  972. "lowercase",
  973. "hungarian_stop",
  974. "hungarian_keywords",
  975. "hungarian_stemmer"
  976. ]
  977. }
  978. }
  979. }
  980. }
  981. }
  982. ----------------------------------------------------
  983. // TEST[s/"hungarian_keywords",//]
  984. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
  985. <1> The default stopwords can be overridden with the `stopwords`
  986. or `stopwords_path` parameters.
  987. <2> This filter should be removed unless there are words which should
  988. be excluded from stemming.
  989. [[indonesian-analyzer]]
  990. ===== `indonesian` analyzer
  991. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  992. [source,console]
  993. ----------------------------------------------------
  994. PUT /indonesian_example
  995. {
  996. "settings": {
  997. "analysis": {
  998. "filter": {
  999. "indonesian_stop": {
  1000. "type": "stop",
  1001. "stopwords": "_indonesian_" <1>
  1002. },
  1003. "indonesian_keywords": {
  1004. "type": "keyword_marker",
  1005. "keywords": ["contoh"] <2>
  1006. },
  1007. "indonesian_stemmer": {
  1008. "type": "stemmer",
  1009. "language": "indonesian"
  1010. }
  1011. },
  1012. "analyzer": {
  1013. "rebuilt_indonesian": {
  1014. "tokenizer": "standard",
  1015. "filter": [
  1016. "lowercase",
  1017. "indonesian_stop",
  1018. "indonesian_keywords",
  1019. "indonesian_stemmer"
  1020. ]
  1021. }
  1022. }
  1023. }
  1024. }
  1025. }
  1026. ----------------------------------------------------
  1027. // TEST[s/"indonesian_keywords",//]
  1028. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
  1029. <1> The default stopwords can be overridden with the `stopwords`
  1030. or `stopwords_path` parameters.
  1031. <2> This filter should be removed unless there are words which should
  1032. be excluded from stemming.
  1033. [[irish-analyzer]]
  1034. ===== `irish` analyzer
  1035. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1036. [source,console]
  1037. ----------------------------------------------------
  1038. PUT /irish_example
  1039. {
  1040. "settings": {
  1041. "analysis": {
  1042. "filter": {
  1043. "irish_hyphenation": {
  1044. "type": "stop",
  1045. "stopwords": [ "h", "n", "t" ],
  1046. "ignore_case": true
  1047. },
  1048. "irish_elision": {
  1049. "type": "elision",
  1050. "articles": [ "d", "m", "b" ],
  1051. "articles_case": true
  1052. },
  1053. "irish_stop": {
  1054. "type": "stop",
  1055. "stopwords": "_irish_" <1>
  1056. },
  1057. "irish_lowercase": {
  1058. "type": "lowercase",
  1059. "language": "irish"
  1060. },
  1061. "irish_keywords": {
  1062. "type": "keyword_marker",
  1063. "keywords": ["sampla"] <2>
  1064. },
  1065. "irish_stemmer": {
  1066. "type": "stemmer",
  1067. "language": "irish"
  1068. }
  1069. },
  1070. "analyzer": {
  1071. "rebuilt_irish": {
  1072. "tokenizer": "standard",
  1073. "filter": [
  1074. "irish_hyphenation",
  1075. "irish_elision",
  1076. "irish_lowercase",
  1077. "irish_stop",
  1078. "irish_keywords",
  1079. "irish_stemmer"
  1080. ]
  1081. }
  1082. }
  1083. }
  1084. }
  1085. }
  1086. ----------------------------------------------------
  1087. // TEST[s/"irish_keywords",//]
  1088. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
  1089. <1> The default stopwords can be overridden with the `stopwords`
  1090. or `stopwords_path` parameters.
  1091. <2> This filter should be removed unless there are words which should
  1092. be excluded from stemming.
  1093. [[italian-analyzer]]
  1094. ===== `italian` analyzer
  1095. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1096. [source,console]
  1097. ----------------------------------------------------
  1098. PUT /italian_example
  1099. {
  1100. "settings": {
  1101. "analysis": {
  1102. "filter": {
  1103. "italian_elision": {
  1104. "type": "elision",
  1105. "articles": [
  1106. "c", "l", "all", "dall", "dell",
  1107. "nell", "sull", "coll", "pell",
  1108. "gl", "agl", "dagl", "degl", "negl",
  1109. "sugl", "un", "m", "t", "s", "v", "d"
  1110. ],
  1111. "articles_case": true
  1112. },
  1113. "italian_stop": {
  1114. "type": "stop",
  1115. "stopwords": "_italian_" <1>
  1116. },
  1117. "italian_keywords": {
  1118. "type": "keyword_marker",
  1119. "keywords": ["esempio"] <2>
  1120. },
  1121. "italian_stemmer": {
  1122. "type": "stemmer",
  1123. "language": "light_italian"
  1124. }
  1125. },
  1126. "analyzer": {
  1127. "rebuilt_italian": {
  1128. "tokenizer": "standard",
  1129. "filter": [
  1130. "italian_elision",
  1131. "lowercase",
  1132. "italian_stop",
  1133. "italian_keywords",
  1134. "italian_stemmer"
  1135. ]
  1136. }
  1137. }
  1138. }
  1139. }
  1140. }
  1141. ----------------------------------------------------
  1142. // TEST[s/"italian_keywords",//]
  1143. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
  1144. <1> The default stopwords can be overridden with the `stopwords`
  1145. or `stopwords_path` parameters.
  1146. <2> This filter should be removed unless there are words which should
  1147. be excluded from stemming.
  1148. [[latvian-analyzer]]
  1149. ===== `latvian` analyzer
  1150. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1151. [source,console]
  1152. ----------------------------------------------------
  1153. PUT /latvian_example
  1154. {
  1155. "settings": {
  1156. "analysis": {
  1157. "filter": {
  1158. "latvian_stop": {
  1159. "type": "stop",
  1160. "stopwords": "_latvian_" <1>
  1161. },
  1162. "latvian_keywords": {
  1163. "type": "keyword_marker",
  1164. "keywords": ["piemērs"] <2>
  1165. },
  1166. "latvian_stemmer": {
  1167. "type": "stemmer",
  1168. "language": "latvian"
  1169. }
  1170. },
  1171. "analyzer": {
  1172. "rebuilt_latvian": {
  1173. "tokenizer": "standard",
  1174. "filter": [
  1175. "lowercase",
  1176. "latvian_stop",
  1177. "latvian_keywords",
  1178. "latvian_stemmer"
  1179. ]
  1180. }
  1181. }
  1182. }
  1183. }
  1184. }
  1185. ----------------------------------------------------
  1186. // TEST[s/"latvian_keywords",//]
  1187. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
  1188. <1> The default stopwords can be overridden with the `stopwords`
  1189. or `stopwords_path` parameters.
  1190. <2> This filter should be removed unless there are words which should
  1191. be excluded from stemming.
  1192. [[lithuanian-analyzer]]
  1193. ===== `lithuanian` analyzer
  1194. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1195. [source,console]
  1196. ----------------------------------------------------
  1197. PUT /lithuanian_example
  1198. {
  1199. "settings": {
  1200. "analysis": {
  1201. "filter": {
  1202. "lithuanian_stop": {
  1203. "type": "stop",
  1204. "stopwords": "_lithuanian_" <1>
  1205. },
  1206. "lithuanian_keywords": {
  1207. "type": "keyword_marker",
  1208. "keywords": ["pavyzdys"] <2>
  1209. },
  1210. "lithuanian_stemmer": {
  1211. "type": "stemmer",
  1212. "language": "lithuanian"
  1213. }
  1214. },
  1215. "analyzer": {
  1216. "rebuilt_lithuanian": {
  1217. "tokenizer": "standard",
  1218. "filter": [
  1219. "lowercase",
  1220. "lithuanian_stop",
  1221. "lithuanian_keywords",
  1222. "lithuanian_stemmer"
  1223. ]
  1224. }
  1225. }
  1226. }
  1227. }
  1228. }
  1229. ----------------------------------------------------
  1230. // TEST[s/"lithuanian_keywords",//]
  1231. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
  1232. <1> The default stopwords can be overridden with the `stopwords`
  1233. or `stopwords_path` parameters.
  1234. <2> This filter should be removed unless there are words which should
  1235. be excluded from stemming.
  1236. [[norwegian-analyzer]]
  1237. ===== `norwegian` analyzer
  1238. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1239. [source,console]
  1240. ----------------------------------------------------
  1241. PUT /norwegian_example
  1242. {
  1243. "settings": {
  1244. "analysis": {
  1245. "filter": {
  1246. "norwegian_stop": {
  1247. "type": "stop",
  1248. "stopwords": "_norwegian_" <1>
  1249. },
  1250. "norwegian_keywords": {
  1251. "type": "keyword_marker",
  1252. "keywords": ["eksempel"] <2>
  1253. },
  1254. "norwegian_stemmer": {
  1255. "type": "stemmer",
  1256. "language": "norwegian"
  1257. }
  1258. },
  1259. "analyzer": {
  1260. "rebuilt_norwegian": {
  1261. "tokenizer": "standard",
  1262. "filter": [
  1263. "lowercase",
  1264. "norwegian_stop",
  1265. "norwegian_keywords",
  1266. "norwegian_stemmer"
  1267. ]
  1268. }
  1269. }
  1270. }
  1271. }
  1272. }
  1273. ----------------------------------------------------
  1274. // TEST[s/"norwegian_keywords",//]
  1275. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
  1276. <1> The default stopwords can be overridden with the `stopwords`
  1277. or `stopwords_path` parameters.
  1278. <2> This filter should be removed unless there are words which should
  1279. be excluded from stemming.
  1280. [[persian-analyzer]]
  1281. ===== `persian` analyzer
  1282. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1283. [source,console]
  1284. ----------------------------------------------------
  1285. PUT /persian_example
  1286. {
  1287. "settings": {
  1288. "analysis": {
  1289. "char_filter": {
  1290. "zero_width_spaces": {
  1291. "type": "mapping",
  1292. "mappings": [ "\\u200C=>\\u0020"] <1>
  1293. }
  1294. },
  1295. "filter": {
  1296. "persian_stop": {
  1297. "type": "stop",
  1298. "stopwords": "_persian_" <2>
  1299. }
  1300. },
  1301. "analyzer": {
  1302. "rebuilt_persian": {
  1303. "tokenizer": "standard",
  1304. "char_filter": [ "zero_width_spaces" ],
  1305. "filter": [
  1306. "lowercase",
  1307. "decimal_digit",
  1308. "arabic_normalization",
  1309. "persian_normalization",
  1310. "persian_stop"
  1311. ]
  1312. }
  1313. }
  1314. }
  1315. }
  1316. }
  1317. ----------------------------------------------------
  1318. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
  1319. <1> Replaces zero-width non-joiners with an ASCII space.
  1320. <2> The default stopwords can be overridden with the `stopwords`
  1321. or `stopwords_path` parameters.
  1322. [[portuguese-analyzer]]
  1323. ===== `portuguese` analyzer
  1324. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1325. [source,console]
  1326. ----------------------------------------------------
  1327. PUT /portuguese_example
  1328. {
  1329. "settings": {
  1330. "analysis": {
  1331. "filter": {
  1332. "portuguese_stop": {
  1333. "type": "stop",
  1334. "stopwords": "_portuguese_" <1>
  1335. },
  1336. "portuguese_keywords": {
  1337. "type": "keyword_marker",
  1338. "keywords": ["exemplo"] <2>
  1339. },
  1340. "portuguese_stemmer": {
  1341. "type": "stemmer",
  1342. "language": "light_portuguese"
  1343. }
  1344. },
  1345. "analyzer": {
  1346. "rebuilt_portuguese": {
  1347. "tokenizer": "standard",
  1348. "filter": [
  1349. "lowercase",
  1350. "portuguese_stop",
  1351. "portuguese_keywords",
  1352. "portuguese_stemmer"
  1353. ]
  1354. }
  1355. }
  1356. }
  1357. }
  1358. }
  1359. ----------------------------------------------------
  1360. // TEST[s/"portuguese_keywords",//]
  1361. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
  1362. <1> The default stopwords can be overridden with the `stopwords`
  1363. or `stopwords_path` parameters.
  1364. <2> This filter should be removed unless there are words which should
  1365. be excluded from stemming.
  1366. [[romanian-analyzer]]
  1367. ===== `romanian` analyzer
  1368. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1369. [source,console]
  1370. ----------------------------------------------------
  1371. PUT /romanian_example
  1372. {
  1373. "settings": {
  1374. "analysis": {
  1375. "filter": {
  1376. "romanian_stop": {
  1377. "type": "stop",
  1378. "stopwords": "_romanian_" <1>
  1379. },
  1380. "romanian_keywords": {
  1381. "type": "keyword_marker",
  1382. "keywords": ["exemplu"] <2>
  1383. },
  1384. "romanian_stemmer": {
  1385. "type": "stemmer",
  1386. "language": "romanian"
  1387. }
  1388. },
  1389. "analyzer": {
  1390. "rebuilt_romanian": {
  1391. "tokenizer": "standard",
  1392. "filter": [
  1393. "lowercase",
  1394. "romanian_stop",
  1395. "romanian_keywords",
  1396. "romanian_stemmer"
  1397. ]
  1398. }
  1399. }
  1400. }
  1401. }
  1402. }
  1403. ----------------------------------------------------
  1404. // TEST[s/"romanian_keywords",//]
  1405. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
  1406. <1> The default stopwords can be overridden with the `stopwords`
  1407. or `stopwords_path` parameters.
  1408. <2> This filter should be removed unless there are words which should
  1409. be excluded from stemming.
  1410. [[russian-analyzer]]
  1411. ===== `russian` analyzer
  1412. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1413. [source,console]
  1414. ----------------------------------------------------
  1415. PUT /russian_example
  1416. {
  1417. "settings": {
  1418. "analysis": {
  1419. "filter": {
  1420. "russian_stop": {
  1421. "type": "stop",
  1422. "stopwords": "_russian_" <1>
  1423. },
  1424. "russian_keywords": {
  1425. "type": "keyword_marker",
  1426. "keywords": ["пример"] <2>
  1427. },
  1428. "russian_stemmer": {
  1429. "type": "stemmer",
  1430. "language": "russian"
  1431. }
  1432. },
  1433. "analyzer": {
  1434. "rebuilt_russian": {
  1435. "tokenizer": "standard",
  1436. "filter": [
  1437. "lowercase",
  1438. "russian_stop",
  1439. "russian_keywords",
  1440. "russian_stemmer"
  1441. ]
  1442. }
  1443. }
  1444. }
  1445. }
  1446. }
  1447. ----------------------------------------------------
  1448. // TEST[s/"russian_keywords",//]
  1449. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
  1450. <1> The default stopwords can be overridden with the `stopwords`
  1451. or `stopwords_path` parameters.
  1452. <2> This filter should be removed unless there are words which should
  1453. be excluded from stemming.
  1454. [[sorani-analyzer]]
  1455. ===== `sorani` analyzer
  1456. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1457. [source,console]
  1458. ----------------------------------------------------
  1459. PUT /sorani_example
  1460. {
  1461. "settings": {
  1462. "analysis": {
  1463. "filter": {
  1464. "sorani_stop": {
  1465. "type": "stop",
  1466. "stopwords": "_sorani_" <1>
  1467. },
  1468. "sorani_keywords": {
  1469. "type": "keyword_marker",
  1470. "keywords": ["mînak"] <2>
  1471. },
  1472. "sorani_stemmer": {
  1473. "type": "stemmer",
  1474. "language": "sorani"
  1475. }
  1476. },
  1477. "analyzer": {
  1478. "rebuilt_sorani": {
  1479. "tokenizer": "standard",
  1480. "filter": [
  1481. "sorani_normalization",
  1482. "lowercase",
  1483. "decimal_digit",
  1484. "sorani_stop",
  1485. "sorani_keywords",
  1486. "sorani_stemmer"
  1487. ]
  1488. }
  1489. }
  1490. }
  1491. }
  1492. }
  1493. ----------------------------------------------------
  1494. // TEST[s/"sorani_keywords",//]
  1495. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
  1496. <1> The default stopwords can be overridden with the `stopwords`
  1497. or `stopwords_path` parameters.
  1498. <2> This filter should be removed unless there are words which should
  1499. be excluded from stemming.
  1500. [[spanish-analyzer]]
  1501. ===== `spanish` analyzer
  1502. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1503. [source,console]
  1504. ----------------------------------------------------
  1505. PUT /spanish_example
  1506. {
  1507. "settings": {
  1508. "analysis": {
  1509. "filter": {
  1510. "spanish_stop": {
  1511. "type": "stop",
  1512. "stopwords": "_spanish_" <1>
  1513. },
  1514. "spanish_keywords": {
  1515. "type": "keyword_marker",
  1516. "keywords": ["ejemplo"] <2>
  1517. },
  1518. "spanish_stemmer": {
  1519. "type": "stemmer",
  1520. "language": "light_spanish"
  1521. }
  1522. },
  1523. "analyzer": {
  1524. "rebuilt_spanish": {
  1525. "tokenizer": "standard",
  1526. "filter": [
  1527. "lowercase",
  1528. "spanish_stop",
  1529. "spanish_keywords",
  1530. "spanish_stemmer"
  1531. ]
  1532. }
  1533. }
  1534. }
  1535. }
  1536. }
  1537. ----------------------------------------------------
  1538. // TEST[s/"spanish_keywords",//]
  1539. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
  1540. <1> The default stopwords can be overridden with the `stopwords`
  1541. or `stopwords_path` parameters.
  1542. <2> This filter should be removed unless there are words which should
  1543. be excluded from stemming.
  1544. [[swedish-analyzer]]
  1545. ===== `swedish` analyzer
  1546. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1547. [source,console]
  1548. ----------------------------------------------------
  1549. PUT /swedish_example
  1550. {
  1551. "settings": {
  1552. "analysis": {
  1553. "filter": {
  1554. "swedish_stop": {
  1555. "type": "stop",
  1556. "stopwords": "_swedish_" <1>
  1557. },
  1558. "swedish_keywords": {
  1559. "type": "keyword_marker",
  1560. "keywords": ["exempel"] <2>
  1561. },
  1562. "swedish_stemmer": {
  1563. "type": "stemmer",
  1564. "language": "swedish"
  1565. }
  1566. },
  1567. "analyzer": {
  1568. "rebuilt_swedish": {
  1569. "tokenizer": "standard",
  1570. "filter": [
  1571. "lowercase",
  1572. "swedish_stop",
  1573. "swedish_keywords",
  1574. "swedish_stemmer"
  1575. ]
  1576. }
  1577. }
  1578. }
  1579. }
  1580. }
  1581. ----------------------------------------------------
  1582. // TEST[s/"swedish_keywords",//]
  1583. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
  1584. <1> The default stopwords can be overridden with the `stopwords`
  1585. or `stopwords_path` parameters.
  1586. <2> This filter should be removed unless there are words which should
  1587. be excluded from stemming.
  1588. [[turkish-analyzer]]
  1589. ===== `turkish` analyzer
  1590. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1591. [source,console]
  1592. ----------------------------------------------------
  1593. PUT /turkish_example
  1594. {
  1595. "settings": {
  1596. "analysis": {
  1597. "filter": {
  1598. "turkish_stop": {
  1599. "type": "stop",
  1600. "stopwords": "_turkish_" <1>
  1601. },
  1602. "turkish_lowercase": {
  1603. "type": "lowercase",
  1604. "language": "turkish"
  1605. },
  1606. "turkish_keywords": {
  1607. "type": "keyword_marker",
  1608. "keywords": ["örnek"] <2>
  1609. },
  1610. "turkish_stemmer": {
  1611. "type": "stemmer",
  1612. "language": "turkish"
  1613. }
  1614. },
  1615. "analyzer": {
  1616. "rebuilt_turkish": {
  1617. "tokenizer": "standard",
  1618. "filter": [
  1619. "apostrophe",
  1620. "turkish_lowercase",
  1621. "turkish_stop",
  1622. "turkish_keywords",
  1623. "turkish_stemmer"
  1624. ]
  1625. }
  1626. }
  1627. }
  1628. }
  1629. }
  1630. ----------------------------------------------------
  1631. // TEST[s/"turkish_keywords",//]
  1632. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
  1633. <1> The default stopwords can be overridden with the `stopwords`
  1634. or `stopwords_path` parameters.
  1635. <2> This filter should be removed unless there are words which should
  1636. be excluded from stemming.
  1637. [[thai-analyzer]]
  1638. ===== `thai` analyzer
  1639. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1640. [source,console]
  1641. ----------------------------------------------------
  1642. PUT /thai_example
  1643. {
  1644. "settings": {
  1645. "analysis": {
  1646. "filter": {
  1647. "thai_stop": {
  1648. "type": "stop",
  1649. "stopwords": "_thai_" <1>
  1650. }
  1651. },
  1652. "analyzer": {
  1653. "rebuilt_thai": {
  1654. "tokenizer": "thai",
  1655. "filter": [
  1656. "lowercase",
  1657. "decimal_digit",
  1658. "thai_stop"
  1659. ]
  1660. }
  1661. }
  1662. }
  1663. }
  1664. }
  1665. ----------------------------------------------------
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
  1668. <1> The default stopwords can be overridden with the `stopwords`
  1669. or `stopwords_path` parameters.