  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<bengali-analyzer,`bengali`>>,
  9. <<brazilian-analyzer,`brazilian`>>,
  10. <<bulgarian-analyzer,`bulgarian`>>,
  11. <<catalan-analyzer,`catalan`>>,
  12. <<cjk-analyzer,`cjk`>>,
  13. <<czech-analyzer,`czech`>>,
  14. <<danish-analyzer,`danish`>>,
  15. <<dutch-analyzer,`dutch`>>,
  16. <<english-analyzer,`english`>>,
  17. <<finnish-analyzer,`finnish`>>,
  18. <<french-analyzer,`french`>>,
  19. <<galician-analyzer,`galician`>>,
  20. <<german-analyzer,`german`>>,
  21. <<greek-analyzer,`greek`>>,
  22. <<hindi-analyzer,`hindi`>>,
  23. <<hungarian-analyzer,`hungarian`>>,
  24. <<indonesian-analyzer,`indonesian`>>,
  25. <<irish-analyzer,`irish`>>,
  26. <<italian-analyzer,`italian`>>,
  27. <<latvian-analyzer,`latvian`>>,
  28. <<lithuanian-analyzer,`lithuanian`>>,
  29. <<norwegian-analyzer,`norwegian`>>,
  30. <<persian-analyzer,`persian`>>,
  31. <<portuguese-analyzer,`portuguese`>>,
  32. <<romanian-analyzer,`romanian`>>,
  33. <<russian-analyzer,`russian`>>,
  34. <<sorani-analyzer,`sorani`>>,
  35. <<spanish-analyzer,`spanish`>>,
  36. <<swedish-analyzer,`swedish`>>,
  37. <<turkish-analyzer,`turkish`>>,
  38. <<thai-analyzer,`thai`>>.
  39. ==== Configuring language analyzers
  40. ===== Stopwords
  41. All analyzers support setting custom `stopwords` either internally in
  42. the config, or by using an external stopwords file by setting
  43. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  44. more details.
  45. ===== Excluding words from stemming
  46. The `stem_exclusion` parameter allows you to specify an array
  47. of lowercase words that should not be stemmed. Internally, this
  48. functionality is implemented by adding the
  49. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  50. with the `keywords` set to the value of the `stem_exclusion` parameter.
  51. The following analyzers support setting a custom `stem_exclusion` list:
  52. `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`,
  53. `dutch`, `english`, `finnish`, `french`, `galician`,
  54. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  55. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  56. `spanish`, `swedish`, `turkish`.
  57. ==== Reimplementing language analyzers
  58. The built-in language analyzers can be reimplemented as `custom` analyzers
  59. (as described below) in order to customize their behaviour.
  60. NOTE: If you do not intend to exclude words from being stemmed (the
  61. equivalent of the `stem_exclusion` parameter above), then you should remove
  62. the `keyword_marker` token filter from the custom analyzer configuration.
  63. [[arabic-analyzer]]
  64. ===== `arabic` analyzer
  65. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  66. [source,js]
  67. ----------------------------------------------------
  68. PUT /arabic_example
  69. {
  70. "settings": {
  71. "analysis": {
  72. "filter": {
  73. "arabic_stop": {
  74. "type": "stop",
  75. "stopwords": "_arabic_" <1>
  76. },
  77. "arabic_keywords": {
  78. "type": "keyword_marker",
  79. "keywords": ["مثال"] <2>
  80. },
  81. "arabic_stemmer": {
  82. "type": "stemmer",
  83. "language": "arabic"
  84. }
  85. },
  86. "analyzer": {
  87. "rebuilt_arabic": {
  88. "tokenizer": "standard",
  89. "filter": [
  90. "lowercase",
  91. "decimal_digit",
  92. "arabic_stop",
  93. "arabic_normalization",
  94. "arabic_keywords",
  95. "arabic_stemmer"
  96. ]
  97. }
  98. }
  99. }
  100. }
  101. }
  102. ----------------------------------------------------
  103. // CONSOLE
  104. // TEST[s/"arabic_keywords",//]
  105. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
  106. <1> The default stopwords can be overridden with the `stopwords`
  107. or `stopwords_path` parameters.
  108. <2> This filter should be removed unless there are words which should
  109. be excluded from stemming.
  110. [[armenian-analyzer]]
  111. ===== `armenian` analyzer
  112. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  113. [source,js]
  114. ----------------------------------------------------
  115. PUT /armenian_example
  116. {
  117. "settings": {
  118. "analysis": {
  119. "filter": {
  120. "armenian_stop": {
  121. "type": "stop",
  122. "stopwords": "_armenian_" <1>
  123. },
  124. "armenian_keywords": {
  125. "type": "keyword_marker",
  126. "keywords": ["օրինակ"] <2>
  127. },
  128. "armenian_stemmer": {
  129. "type": "stemmer",
  130. "language": "armenian"
  131. }
  132. },
  133. "analyzer": {
  134. "rebuilt_armenian": {
  135. "tokenizer": "standard",
  136. "filter": [
  137. "lowercase",
  138. "armenian_stop",
  139. "armenian_keywords",
  140. "armenian_stemmer"
  141. ]
  142. }
  143. }
  144. }
  145. }
  146. }
  147. ----------------------------------------------------
  148. // CONSOLE
  149. // TEST[s/"armenian_keywords",//]
  150. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
  151. <1> The default stopwords can be overridden with the `stopwords`
  152. or `stopwords_path` parameters.
  153. <2> This filter should be removed unless there are words which should
  154. be excluded from stemming.
  155. [[basque-analyzer]]
  156. ===== `basque` analyzer
  157. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  158. [source,js]
  159. ----------------------------------------------------
  160. PUT /basque_example
  161. {
  162. "settings": {
  163. "analysis": {
  164. "filter": {
  165. "basque_stop": {
  166. "type": "stop",
  167. "stopwords": "_basque_" <1>
  168. },
  169. "basque_keywords": {
  170. "type": "keyword_marker",
  171. "keywords": ["Adibidez"] <2>
  172. },
  173. "basque_stemmer": {
  174. "type": "stemmer",
  175. "language": "basque"
  176. }
  177. },
  178. "analyzer": {
  179. "rebuilt_basque": {
  180. "tokenizer": "standard",
  181. "filter": [
  182. "lowercase",
  183. "basque_stop",
  184. "basque_keywords",
  185. "basque_stemmer"
  186. ]
  187. }
  188. }
  189. }
  190. }
  191. }
  192. ----------------------------------------------------
  193. // CONSOLE
  194. // TEST[s/"basque_keywords",//]
  195. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
  196. <1> The default stopwords can be overridden with the `stopwords`
  197. or `stopwords_path` parameters.
  198. <2> This filter should be removed unless there are words which should
  199. be excluded from stemming.
  200. [[bengali-analyzer]]
  201. ===== `bengali` analyzer
  202. The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
  203. [source,js]
  204. ----------------------------------------------------
  205. PUT /bengali_example
  206. {
  207. "settings": {
  208. "analysis": {
  209. "filter": {
  210. "bengali_stop": {
  211. "type": "stop",
  212. "stopwords": "_bengali_" <1>
  213. },
  214. "bengali_keywords": {
  215. "type": "keyword_marker",
  216. "keywords": ["উদাহরণ"] <2>
  217. },
  218. "bengali_stemmer": {
  219. "type": "stemmer",
  220. "language": "bengali"
  221. }
  222. },
  223. "analyzer": {
  224. "rebuilt_bengali": {
  225. "tokenizer": "standard",
  226. "filter": [
  227. "lowercase",
  228. "decimal_digit",
  229. "bengali_keywords",
  230. "indic_normalization",
  231. "bengali_normalization",
  232. "bengali_stop",
  233. "bengali_stemmer"
  234. ]
  235. }
  236. }
  237. }
  238. }
  239. }
  240. ----------------------------------------------------
  241. // CONSOLE
  242. // TEST[s/"bengali_keywords",//]
  243. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
  244. <1> The default stopwords can be overridden with the `stopwords`
  245. or `stopwords_path` parameters.
  246. <2> This filter should be removed unless there are words which should
  247. be excluded from stemming.
  248. [[brazilian-analyzer]]
  249. ===== `brazilian` analyzer
  250. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  251. [source,js]
  252. ----------------------------------------------------
  253. PUT /brazilian_example
  254. {
  255. "settings": {
  256. "analysis": {
  257. "filter": {
  258. "brazilian_stop": {
  259. "type": "stop",
  260. "stopwords": "_brazilian_" <1>
  261. },
  262. "brazilian_keywords": {
  263. "type": "keyword_marker",
  264. "keywords": ["exemplo"] <2>
  265. },
  266. "brazilian_stemmer": {
  267. "type": "stemmer",
  268. "language": "brazilian"
  269. }
  270. },
  271. "analyzer": {
  272. "rebuilt_brazilian": {
  273. "tokenizer": "standard",
  274. "filter": [
  275. "lowercase",
  276. "brazilian_stop",
  277. "brazilian_keywords",
  278. "brazilian_stemmer"
  279. ]
  280. }
  281. }
  282. }
  283. }
  284. }
  285. ----------------------------------------------------
  286. // CONSOLE
  287. // TEST[s/"brazilian_keywords",//]
  288. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
  289. <1> The default stopwords can be overridden with the `stopwords`
  290. or `stopwords_path` parameters.
  291. <2> This filter should be removed unless there are words which should
  292. be excluded from stemming.
  293. [[bulgarian-analyzer]]
  294. ===== `bulgarian` analyzer
  295. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  296. [source,js]
  297. ----------------------------------------------------
  298. PUT /bulgarian_example
  299. {
  300. "settings": {
  301. "analysis": {
  302. "filter": {
  303. "bulgarian_stop": {
  304. "type": "stop",
  305. "stopwords": "_bulgarian_" <1>
  306. },
  307. "bulgarian_keywords": {
  308. "type": "keyword_marker",
  309. "keywords": ["пример"] <2>
  310. },
  311. "bulgarian_stemmer": {
  312. "type": "stemmer",
  313. "language": "bulgarian"
  314. }
  315. },
  316. "analyzer": {
  317. "rebuilt_bulgarian": {
  318. "tokenizer": "standard",
  319. "filter": [
  320. "lowercase",
  321. "bulgarian_stop",
  322. "bulgarian_keywords",
  323. "bulgarian_stemmer"
  324. ]
  325. }
  326. }
  327. }
  328. }
  329. }
  330. ----------------------------------------------------
  331. // CONSOLE
  332. // TEST[s/"bulgarian_keywords",//]
  333. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
  334. <1> The default stopwords can be overridden with the `stopwords`
  335. or `stopwords_path` parameters.
  336. <2> This filter should be removed unless there are words which should
  337. be excluded from stemming.
  338. [[catalan-analyzer]]
  339. ===== `catalan` analyzer
  340. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  341. [source,js]
  342. ----------------------------------------------------
  343. PUT /catalan_example
  344. {
  345. "settings": {
  346. "analysis": {
  347. "filter": {
  348. "catalan_elision": {
  349. "type": "elision",
  350. "articles": [ "d", "l", "m", "n", "s", "t"],
  351. "articles_case": true
  352. },
  353. "catalan_stop": {
  354. "type": "stop",
  355. "stopwords": "_catalan_" <1>
  356. },
  357. "catalan_keywords": {
  358. "type": "keyword_marker",
  359. "keywords": ["exemple"] <2>
  360. },
  361. "catalan_stemmer": {
  362. "type": "stemmer",
  363. "language": "catalan"
  364. }
  365. },
  366. "analyzer": {
  367. "rebuilt_catalan": {
  368. "tokenizer": "standard",
  369. "filter": [
  370. "catalan_elision",
  371. "lowercase",
  372. "catalan_stop",
  373. "catalan_keywords",
  374. "catalan_stemmer"
  375. ]
  376. }
  377. }
  378. }
  379. }
  380. }
  381. ----------------------------------------------------
  382. // CONSOLE
  383. // TEST[s/"catalan_keywords",//]
  384. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
  385. <1> The default stopwords can be overridden with the `stopwords`
  386. or `stopwords_path` parameters.
  387. <2> This filter should be removed unless there are words which should
  388. be excluded from stemming.
  389. [[cjk-analyzer]]
  390. ===== `cjk` analyzer
  391. NOTE: You may find that `icu_analyzer` in the ICU analysis plugin works better
  392. for CJK text than the `cjk` analyzer. Experiment with your text and queries.
  393. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  394. [source,js]
  395. ----------------------------------------------------
  396. PUT /cjk_example
  397. {
  398. "settings": {
  399. "analysis": {
  400. "filter": {
  401. "english_stop": {
  402. "type": "stop",
  403. "stopwords": [ <1>
  404. "a", "and", "are", "as", "at", "be", "but", "by", "for",
  405. "if", "in", "into", "is", "it", "no", "not", "of", "on",
  406. "or", "s", "such", "t", "that", "the", "their", "then",
  407. "there", "these", "they", "this", "to", "was", "will",
  408. "with", "www"
  409. ]
  410. }
  411. },
  412. "analyzer": {
  413. "rebuilt_cjk": {
  414. "tokenizer": "standard",
  415. "filter": [
  416. "cjk_width",
  417. "lowercase",
  418. "cjk_bigram",
  419. "english_stop"
  420. ]
  421. }
  422. }
  423. }
  424. }
  425. }
  426. ----------------------------------------------------
  427. // CONSOLE
  428. // TEST[s/"cjk_keywords",//]
  429. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
  430. <1> The default stopwords can be overridden with the `stopwords`
  431. or `stopwords_path` parameters. The default stop words are
  432. *almost* the same as the `_english_` set, but not exactly
  433. the same.
  434. [[czech-analyzer]]
  435. ===== `czech` analyzer
  436. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  437. [source,js]
  438. ----------------------------------------------------
  439. PUT /czech_example
  440. {
  441. "settings": {
  442. "analysis": {
  443. "filter": {
  444. "czech_stop": {
  445. "type": "stop",
  446. "stopwords": "_czech_" <1>
  447. },
  448. "czech_keywords": {
  449. "type": "keyword_marker",
  450. "keywords": ["příklad"] <2>
  451. },
  452. "czech_stemmer": {
  453. "type": "stemmer",
  454. "language": "czech"
  455. }
  456. },
  457. "analyzer": {
  458. "rebuilt_czech": {
  459. "tokenizer": "standard",
  460. "filter": [
  461. "lowercase",
  462. "czech_stop",
  463. "czech_keywords",
  464. "czech_stemmer"
  465. ]
  466. }
  467. }
  468. }
  469. }
  470. }
  471. ----------------------------------------------------
  472. // CONSOLE
  473. // TEST[s/"czech_keywords",//]
  474. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
  475. <1> The default stopwords can be overridden with the `stopwords`
  476. or `stopwords_path` parameters.
  477. <2> This filter should be removed unless there are words which should
  478. be excluded from stemming.
  479. [[danish-analyzer]]
  480. ===== `danish` analyzer
  481. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  482. [source,js]
  483. ----------------------------------------------------
  484. PUT /danish_example
  485. {
  486. "settings": {
  487. "analysis": {
  488. "filter": {
  489. "danish_stop": {
  490. "type": "stop",
  491. "stopwords": "_danish_" <1>
  492. },
  493. "danish_keywords": {
  494. "type": "keyword_marker",
  495. "keywords": ["eksempel"] <2>
  496. },
  497. "danish_stemmer": {
  498. "type": "stemmer",
  499. "language": "danish"
  500. }
  501. },
  502. "analyzer": {
  503. "rebuilt_danish": {
  504. "tokenizer": "standard",
  505. "filter": [
  506. "lowercase",
  507. "danish_stop",
  508. "danish_keywords",
  509. "danish_stemmer"
  510. ]
  511. }
  512. }
  513. }
  514. }
  515. }
  516. ----------------------------------------------------
  517. // CONSOLE
  518. // TEST[s/"danish_keywords",//]
  519. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
  520. <1> The default stopwords can be overridden with the `stopwords`
  521. or `stopwords_path` parameters.
  522. <2> This filter should be removed unless there are words which should
  523. be excluded from stemming.
  524. [[dutch-analyzer]]
  525. ===== `dutch` analyzer
  526. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  527. [source,js]
  528. ----------------------------------------------------
  529. PUT /dutch_example
  530. {
  531. "settings": {
  532. "analysis": {
  533. "filter": {
  534. "dutch_stop": {
  535. "type": "stop",
  536. "stopwords": "_dutch_" <1>
  537. },
  538. "dutch_keywords": {
  539. "type": "keyword_marker",
  540. "keywords": ["voorbeeld"] <2>
  541. },
  542. "dutch_stemmer": {
  543. "type": "stemmer",
  544. "language": "dutch"
  545. },
  546. "dutch_override": {
  547. "type": "stemmer_override",
  548. "rules": [
  549. "fiets=>fiets",
  550. "bromfiets=>bromfiets",
  551. "ei=>eier",
  552. "kind=>kinder"
  553. ]
  554. }
  555. },
  556. "analyzer": {
  557. "rebuilt_dutch": {
  558. "tokenizer": "standard",
  559. "filter": [
  560. "lowercase",
  561. "dutch_stop",
  562. "dutch_keywords",
  563. "dutch_override",
  564. "dutch_stemmer"
  565. ]
  566. }
  567. }
  568. }
  569. }
  570. }
  571. ----------------------------------------------------
  572. // CONSOLE
  573. // TEST[s/"dutch_keywords",//]
  574. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
  575. <1> The default stopwords can be overridden with the `stopwords`
  576. or `stopwords_path` parameters.
  577. <2> This filter should be removed unless there are words which should
  578. be excluded from stemming.
  579. [[english-analyzer]]
  580. ===== `english` analyzer
  581. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  582. [source,js]
  583. ----------------------------------------------------
  584. PUT /english_example
  585. {
  586. "settings": {
  587. "analysis": {
  588. "filter": {
  589. "english_stop": {
  590. "type": "stop",
  591. "stopwords": "_english_" <1>
  592. },
  593. "english_keywords": {
  594. "type": "keyword_marker",
  595. "keywords": ["example"] <2>
  596. },
  597. "english_stemmer": {
  598. "type": "stemmer",
  599. "language": "english"
  600. },
  601. "english_possessive_stemmer": {
  602. "type": "stemmer",
  603. "language": "possessive_english"
  604. }
  605. },
  606. "analyzer": {
  607. "rebuilt_english": {
  608. "tokenizer": "standard",
  609. "filter": [
  610. "english_possessive_stemmer",
  611. "lowercase",
  612. "english_stop",
  613. "english_keywords",
  614. "english_stemmer"
  615. ]
  616. }
  617. }
  618. }
  619. }
  620. }
  621. ----------------------------------------------------
  622. // CONSOLE
  623. // TEST[s/"english_keywords",//]
  624. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
  625. <1> The default stopwords can be overridden with the `stopwords`
  626. or `stopwords_path` parameters.
  627. <2> This filter should be removed unless there are words which should
  628. be excluded from stemming.
  629. [[finnish-analyzer]]
  630. ===== `finnish` analyzer
  631. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  632. [source,js]
  633. ----------------------------------------------------
  634. PUT /finnish_example
  635. {
  636. "settings": {
  637. "analysis": {
  638. "filter": {
  639. "finnish_stop": {
  640. "type": "stop",
  641. "stopwords": "_finnish_" <1>
  642. },
  643. "finnish_keywords": {
  644. "type": "keyword_marker",
  645. "keywords": ["esimerkki"] <2>
  646. },
  647. "finnish_stemmer": {
  648. "type": "stemmer",
  649. "language": "finnish"
  650. }
  651. },
  652. "analyzer": {
  653. "rebuilt_finnish": {
  654. "tokenizer": "standard",
  655. "filter": [
  656. "lowercase",
  657. "finnish_stop",
  658. "finnish_keywords",
  659. "finnish_stemmer"
  660. ]
  661. }
  662. }
  663. }
  664. }
  665. }
  666. ----------------------------------------------------
  667. // CONSOLE
  668. // TEST[s/"finnish_keywords",//]
  669. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
  670. <1> The default stopwords can be overridden with the `stopwords`
  671. or `stopwords_path` parameters.
  672. <2> This filter should be removed unless there are words which should
  673. be excluded from stemming.
  674. [[french-analyzer]]
  675. ===== `french` analyzer
  676. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  677. [source,js]
  678. ----------------------------------------------------
  679. PUT /french_example
  680. {
  681. "settings": {
  682. "analysis": {
  683. "filter": {
  684. "french_elision": {
  685. "type": "elision",
  686. "articles_case": true,
  687. "articles": [
  688. "l", "m", "t", "qu", "n", "s",
  689. "j", "d", "c", "jusqu", "quoiqu",
  690. "lorsqu", "puisqu"
  691. ]
  692. },
  693. "french_stop": {
  694. "type": "stop",
  695. "stopwords": "_french_" <1>
  696. },
  697. "french_keywords": {
  698. "type": "keyword_marker",
  699. "keywords": ["Exemple"] <2>
  700. },
  701. "french_stemmer": {
  702. "type": "stemmer",
  703. "language": "light_french"
  704. }
  705. },
  706. "analyzer": {
  707. "rebuilt_french": {
  708. "tokenizer": "standard",
  709. "filter": [
  710. "french_elision",
  711. "lowercase",
  712. "french_stop",
  713. "french_keywords",
  714. "french_stemmer"
  715. ]
  716. }
  717. }
  718. }
  719. }
  720. }
  721. ----------------------------------------------------
  722. // CONSOLE
  723. // TEST[s/"french_keywords",//]
  724. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
  725. <1> The default stopwords can be overridden with the `stopwords`
  726. or `stopwords_path` parameters.
  727. <2> This filter should be removed unless there are words which should
  728. be excluded from stemming.
  729. [[galician-analyzer]]
  730. ===== `galician` analyzer
  731. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  732. [source,js]
  733. ----------------------------------------------------
  734. PUT /galician_example
  735. {
  736. "settings": {
  737. "analysis": {
  738. "filter": {
  739. "galician_stop": {
  740. "type": "stop",
  741. "stopwords": "_galician_" <1>
  742. },
  743. "galician_keywords": {
  744. "type": "keyword_marker",
  745. "keywords": ["exemplo"] <2>
  746. },
  747. "galician_stemmer": {
  748. "type": "stemmer",
  749. "language": "galician"
  750. }
  751. },
  752. "analyzer": {
  753. "rebuilt_galician": {
  754. "tokenizer": "standard",
  755. "filter": [
  756. "lowercase",
  757. "galician_stop",
  758. "galician_keywords",
  759. "galician_stemmer"
  760. ]
  761. }
  762. }
  763. }
  764. }
  765. }
  766. ----------------------------------------------------
  767. // CONSOLE
  768. // TEST[s/"galician_keywords",//]
  769. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
  770. <1> The default stopwords can be overridden with the `stopwords`
  771. or `stopwords_path` parameters.
  772. <2> This filter should be removed unless there are words which should
  773. be excluded from stemming.
  774. [[german-analyzer]]
  775. ===== `german` analyzer
  776. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  777. [source,js]
  778. ----------------------------------------------------
  779. PUT /german_example
  780. {
  781. "settings": {
  782. "analysis": {
  783. "filter": {
  784. "german_stop": {
  785. "type": "stop",
  786. "stopwords": "_german_" <1>
  787. },
  788. "german_keywords": {
  789. "type": "keyword_marker",
  790. "keywords": ["Beispiel"] <2>
  791. },
  792. "german_stemmer": {
  793. "type": "stemmer",
  794. "language": "light_german"
  795. }
  796. },
  797. "analyzer": {
  798. "rebuilt_german": {
  799. "tokenizer": "standard",
  800. "filter": [
  801. "lowercase",
  802. "german_stop",
  803. "german_keywords",
  804. "german_normalization",
  805. "german_stemmer"
  806. ]
  807. }
  808. }
  809. }
  810. }
  811. }
  812. ----------------------------------------------------
  813. // CONSOLE
  814. // TEST[s/"german_keywords",//]
  815. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
  816. <1> The default stopwords can be overridden with the `stopwords`
  817. or `stopwords_path` parameters.
  818. <2> This filter should be removed unless there are words which should
  819. be excluded from stemming.
  820. [[greek-analyzer]]
  821. ===== `greek` analyzer
  822. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  823. [source,js]
  824. ----------------------------------------------------
  825. PUT /greek_example
  826. {
  827. "settings": {
  828. "analysis": {
  829. "filter": {
  830. "greek_stop": {
  831. "type": "stop",
  832. "stopwords": "_greek_" <1>
  833. },
  834. "greek_lowercase": {
  835. "type": "lowercase",
  836. "language": "greek"
  837. },
  838. "greek_keywords": {
  839. "type": "keyword_marker",
  840. "keywords": ["παράδειγμα"] <2>
  841. },
  842. "greek_stemmer": {
  843. "type": "stemmer",
  844. "language": "greek"
  845. }
  846. },
  847. "analyzer": {
  848. "rebuilt_greek": {
  849. "tokenizer": "standard",
  850. "filter": [
  851. "greek_lowercase",
  852. "greek_stop",
  853. "greek_keywords",
  854. "greek_stemmer"
  855. ]
  856. }
  857. }
  858. }
  859. }
  860. }
  861. ----------------------------------------------------
  862. // CONSOLE
  863. // TEST[s/"greek_keywords",//]
  864. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
  865. <1> The default stopwords can be overridden with the `stopwords`
  866. or `stopwords_path` parameters.
  867. <2> This filter should be removed unless there are words which should
  868. be excluded from stemming.
  869. [[hindi-analyzer]]
  870. ===== `hindi` analyzer
  871. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  872. [source,js]
  873. ----------------------------------------------------
  874. PUT /hindi_example
  875. {
  876. "settings": {
  877. "analysis": {
  878. "filter": {
  879. "hindi_stop": {
  880. "type": "stop",
  881. "stopwords": "_hindi_" <1>
  882. },
  883. "hindi_keywords": {
  884. "type": "keyword_marker",
  885. "keywords": ["उदाहरण"] <2>
  886. },
  887. "hindi_stemmer": {
  888. "type": "stemmer",
  889. "language": "hindi"
  890. }
  891. },
  892. "analyzer": {
  893. "rebuilt_hindi": {
  894. "tokenizer": "standard",
  895. "filter": [
  896. "lowercase",
  897. "decimal_digit",
  898. "hindi_keywords",
  899. "indic_normalization",
  900. "hindi_normalization",
  901. "hindi_stop",
  902. "hindi_stemmer"
  903. ]
  904. }
  905. }
  906. }
  907. }
  908. }
  909. ----------------------------------------------------
  910. // CONSOLE
  911. // TEST[s/"hindi_keywords",//]
  912. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
  913. <1> The default stopwords can be overridden with the `stopwords`
  914. or `stopwords_path` parameters.
  915. <2> This filter should be removed unless there are words which should
  916. be excluded from stemming.
  917. [[hungarian-analyzer]]
  918. ===== `hungarian` analyzer
  919. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  920. [source,js]
  921. ----------------------------------------------------
  922. PUT /hungarian_example
  923. {
  924. "settings": {
  925. "analysis": {
  926. "filter": {
  927. "hungarian_stop": {
  928. "type": "stop",
  929. "stopwords": "_hungarian_" <1>
  930. },
  931. "hungarian_keywords": {
  932. "type": "keyword_marker",
  933. "keywords": ["példa"] <2>
  934. },
  935. "hungarian_stemmer": {
  936. "type": "stemmer",
  937. "language": "hungarian"
  938. }
  939. },
  940. "analyzer": {
  941. "rebuilt_hungarian": {
  942. "tokenizer": "standard",
  943. "filter": [
  944. "lowercase",
  945. "hungarian_stop",
  946. "hungarian_keywords",
  947. "hungarian_stemmer"
  948. ]
  949. }
  950. }
  951. }
  952. }
  953. }
  954. ----------------------------------------------------
  955. // CONSOLE
  956. // TEST[s/"hungarian_keywords",//]
  957. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
  958. <1> The default stopwords can be overridden with the `stopwords`
  959. or `stopwords_path` parameters.
  960. <2> This filter should be removed unless there are words which should
  961. be excluded from stemming.
  962. [[indonesian-analyzer]]
  963. ===== `indonesian` analyzer
  964. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  965. [source,js]
  966. ----------------------------------------------------
  967. PUT /indonesian_example
  968. {
  969. "settings": {
  970. "analysis": {
  971. "filter": {
  972. "indonesian_stop": {
  973. "type": "stop",
  974. "stopwords": "_indonesian_" <1>
  975. },
  976. "indonesian_keywords": {
  977. "type": "keyword_marker",
  978. "keywords": ["contoh"] <2>
  979. },
  980. "indonesian_stemmer": {
  981. "type": "stemmer",
  982. "language": "indonesian"
  983. }
  984. },
  985. "analyzer": {
  986. "rebuilt_indonesian": {
  987. "tokenizer": "standard",
  988. "filter": [
  989. "lowercase",
  990. "indonesian_stop",
  991. "indonesian_keywords",
  992. "indonesian_stemmer"
  993. ]
  994. }
  995. }
  996. }
  997. }
  998. }
  999. ----------------------------------------------------
  1000. // CONSOLE
  1001. // TEST[s/"indonesian_keywords",//]
  1002. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
  1003. <1> The default stopwords can be overridden with the `stopwords`
  1004. or `stopwords_path` parameters.
  1005. <2> This filter should be removed unless there are words which should
  1006. be excluded from stemming.
  1007. [[irish-analyzer]]
  1008. ===== `irish` analyzer
  1009. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1010. [source,js]
  1011. ----------------------------------------------------
  1012. PUT /irish_example
  1013. {
  1014. "settings": {
  1015. "analysis": {
  1016. "filter": {
  1017. "irish_hyphenation": {
  1018. "type": "stop",
  1019. "stopwords": [ "h", "n", "t" ],
  1020. "ignore_case": true
  1021. },
  1022. "irish_elision": {
  1023. "type": "elision",
  1024. "articles": [ "d", "m", "b" ],
  1025. "articles_case": true
  1026. },
  1027. "irish_stop": {
  1028. "type": "stop",
  1029. "stopwords": "_irish_" <1>
  1030. },
  1031. "irish_lowercase": {
  1032. "type": "lowercase",
  1033. "language": "irish"
  1034. },
  1035. "irish_keywords": {
  1036. "type": "keyword_marker",
  1037. "keywords": ["sampla"] <2>
  1038. },
  1039. "irish_stemmer": {
  1040. "type": "stemmer",
  1041. "language": "irish"
  1042. }
  1043. },
  1044. "analyzer": {
  1045. "rebuilt_irish": {
  1046. "tokenizer": "standard",
  1047. "filter": [
  1048. "irish_hyphenation",
  1049. "irish_elision",
  1050. "irish_lowercase",
  1051. "irish_stop",
  1052. "irish_keywords",
  1053. "irish_stemmer"
  1054. ]
  1055. }
  1056. }
  1057. }
  1058. }
  1059. }
  1060. ----------------------------------------------------
  1061. // CONSOLE
  1062. // TEST[s/"irish_keywords",//]
  1063. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
  1064. <1> The default stopwords can be overridden with the `stopwords`
  1065. or `stopwords_path` parameters.
  1066. <2> This filter should be removed unless there are words which should
  1067. be excluded from stemming.
  1068. [[italian-analyzer]]
  1069. ===== `italian` analyzer
  1070. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1071. [source,js]
  1072. ----------------------------------------------------
  1073. PUT /italian_example
  1074. {
  1075. "settings": {
  1076. "analysis": {
  1077. "filter": {
  1078. "italian_elision": {
  1079. "type": "elision",
  1080. "articles": [
  1081. "c", "l", "all", "dall", "dell",
  1082. "nell", "sull", "coll", "pell",
  1083. "gl", "agl", "dagl", "degl", "negl",
  1084. "sugl", "un", "m", "t", "s", "v", "d"
  1085. ],
  1086. "articles_case": true
  1087. },
  1088. "italian_stop": {
  1089. "type": "stop",
  1090. "stopwords": "_italian_" <1>
  1091. },
  1092. "italian_keywords": {
  1093. "type": "keyword_marker",
  1094. "keywords": ["esempio"] <2>
  1095. },
  1096. "italian_stemmer": {
  1097. "type": "stemmer",
  1098. "language": "light_italian"
  1099. }
  1100. },
  1101. "analyzer": {
  1102. "rebuilt_italian": {
  1103. "tokenizer": "standard",
  1104. "filter": [
  1105. "italian_elision",
  1106. "lowercase",
  1107. "italian_stop",
  1108. "italian_keywords",
  1109. "italian_stemmer"
  1110. ]
  1111. }
  1112. }
  1113. }
  1114. }
  1115. }
  1116. ----------------------------------------------------
  1117. // CONSOLE
  1118. // TEST[s/"italian_keywords",//]
  1119. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
  1120. <1> The default stopwords can be overridden with the `stopwords`
  1121. or `stopwords_path` parameters.
  1122. <2> This filter should be removed unless there are words which should
  1123. be excluded from stemming.
  1124. [[latvian-analyzer]]
  1125. ===== `latvian` analyzer
  1126. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1127. [source,js]
  1128. ----------------------------------------------------
  1129. PUT /latvian_example
  1130. {
  1131. "settings": {
  1132. "analysis": {
  1133. "filter": {
  1134. "latvian_stop": {
  1135. "type": "stop",
  1136. "stopwords": "_latvian_" <1>
  1137. },
  1138. "latvian_keywords": {
  1139. "type": "keyword_marker",
  1140. "keywords": ["piemērs"] <2>
  1141. },
  1142. "latvian_stemmer": {
  1143. "type": "stemmer",
  1144. "language": "latvian"
  1145. }
  1146. },
  1147. "analyzer": {
  1148. "rebuilt_latvian": {
  1149. "tokenizer": "standard",
  1150. "filter": [
  1151. "lowercase",
  1152. "latvian_stop",
  1153. "latvian_keywords",
  1154. "latvian_stemmer"
  1155. ]
  1156. }
  1157. }
  1158. }
  1159. }
  1160. }
  1161. ----------------------------------------------------
  1162. // CONSOLE
  1163. // TEST[s/"latvian_keywords",//]
  1164. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
  1165. <1> The default stopwords can be overridden with the `stopwords`
  1166. or `stopwords_path` parameters.
  1167. <2> This filter should be removed unless there are words which should
  1168. be excluded from stemming.
  1169. [[lithuanian-analyzer]]
  1170. ===== `lithuanian` analyzer
  1171. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1172. [source,js]
  1173. ----------------------------------------------------
  1174. PUT /lithuanian_example
  1175. {
  1176. "settings": {
  1177. "analysis": {
  1178. "filter": {
  1179. "lithuanian_stop": {
  1180. "type": "stop",
  1181. "stopwords": "_lithuanian_" <1>
  1182. },
  1183. "lithuanian_keywords": {
  1184. "type": "keyword_marker",
  1185. "keywords": ["pavyzdys"] <2>
  1186. },
  1187. "lithuanian_stemmer": {
  1188. "type": "stemmer",
  1189. "language": "lithuanian"
  1190. }
  1191. },
  1192. "analyzer": {
  1193. "rebuilt_lithuanian": {
  1194. "tokenizer": "standard",
  1195. "filter": [
  1196. "lowercase",
  1197. "lithuanian_stop",
  1198. "lithuanian_keywords",
  1199. "lithuanian_stemmer"
  1200. ]
  1201. }
  1202. }
  1203. }
  1204. }
  1205. }
  1206. ----------------------------------------------------
  1207. // CONSOLE
  1208. // TEST[s/"lithuanian_keywords",//]
  1209. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
  1210. <1> The default stopwords can be overridden with the `stopwords`
  1211. or `stopwords_path` parameters.
  1212. <2> This filter should be removed unless there are words which should
  1213. be excluded from stemming.
  1214. [[norwegian-analyzer]]
  1215. ===== `norwegian` analyzer
  1216. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1217. [source,js]
  1218. ----------------------------------------------------
  1219. PUT /norwegian_example
  1220. {
  1221. "settings": {
  1222. "analysis": {
  1223. "filter": {
  1224. "norwegian_stop": {
  1225. "type": "stop",
  1226. "stopwords": "_norwegian_" <1>
  1227. },
  1228. "norwegian_keywords": {
  1229. "type": "keyword_marker",
  1230. "keywords": ["eksempel"] <2>
  1231. },
  1232. "norwegian_stemmer": {
  1233. "type": "stemmer",
  1234. "language": "norwegian"
  1235. }
  1236. },
  1237. "analyzer": {
  1238. "rebuilt_norwegian": {
  1239. "tokenizer": "standard",
  1240. "filter": [
  1241. "lowercase",
  1242. "norwegian_stop",
  1243. "norwegian_keywords",
  1244. "norwegian_stemmer"
  1245. ]
  1246. }
  1247. }
  1248. }
  1249. }
  1250. }
  1251. ----------------------------------------------------
  1252. // CONSOLE
  1253. // TEST[s/"norwegian_keywords",//]
  1254. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
  1255. <1> The default stopwords can be overridden with the `stopwords`
  1256. or `stopwords_path` parameters.
  1257. <2> This filter should be removed unless there are words which should
  1258. be excluded from stemming.
  1259. [[persian-analyzer]]
  1260. ===== `persian` analyzer
  1261. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1262. [source,js]
  1263. ----------------------------------------------------
  1264. PUT /persian_example
  1265. {
  1266. "settings": {
  1267. "analysis": {
  1268. "char_filter": {
  1269. "zero_width_spaces": {
  1270. "type": "mapping",
  1271. "mappings": [ "\\u200C=>\\u0020"] <1>
  1272. }
  1273. },
  1274. "filter": {
  1275. "persian_stop": {
  1276. "type": "stop",
  1277. "stopwords": "_persian_" <2>
  1278. }
  1279. },
  1280. "analyzer": {
  1281. "rebuilt_persian": {
  1282. "tokenizer": "standard",
  1283. "char_filter": [ "zero_width_spaces" ],
  1284. "filter": [
  1285. "lowercase",
  1286. "decimal_digit",
  1287. "arabic_normalization",
  1288. "persian_normalization",
  1289. "persian_stop"
  1290. ]
  1291. }
  1292. }
  1293. }
  1294. }
  1295. }
  1296. ----------------------------------------------------
  1297. // CONSOLE
  1298. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
  1299. <1> Replaces zero-width non-joiners with an ASCII space.
  1300. <2> The default stopwords can be overridden with the `stopwords`
  1301. or `stopwords_path` parameters.
  1302. [[portuguese-analyzer]]
  1303. ===== `portuguese` analyzer
  1304. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1305. [source,js]
  1306. ----------------------------------------------------
  1307. PUT /portuguese_example
  1308. {
  1309. "settings": {
  1310. "analysis": {
  1311. "filter": {
  1312. "portuguese_stop": {
  1313. "type": "stop",
  1314. "stopwords": "_portuguese_" <1>
  1315. },
  1316. "portuguese_keywords": {
  1317. "type": "keyword_marker",
  1318. "keywords": ["exemplo"] <2>
  1319. },
  1320. "portuguese_stemmer": {
  1321. "type": "stemmer",
  1322. "language": "light_portuguese"
  1323. }
  1324. },
  1325. "analyzer": {
  1326. "rebuilt_portuguese": {
  1327. "tokenizer": "standard",
  1328. "filter": [
  1329. "lowercase",
  1330. "portuguese_stop",
  1331. "portuguese_keywords",
  1332. "portuguese_stemmer"
  1333. ]
  1334. }
  1335. }
  1336. }
  1337. }
  1338. }
  1339. ----------------------------------------------------
  1340. // CONSOLE
  1341. // TEST[s/"portuguese_keywords",//]
  1342. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
  1343. <1> The default stopwords can be overridden with the `stopwords`
  1344. or `stopwords_path` parameters.
  1345. <2> This filter should be removed unless there are words which should
  1346. be excluded from stemming.
  1347. [[romanian-analyzer]]
  1348. ===== `romanian` analyzer
  1349. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1350. [source,js]
  1351. ----------------------------------------------------
  1352. PUT /romanian_example
  1353. {
  1354. "settings": {
  1355. "analysis": {
  1356. "filter": {
  1357. "romanian_stop": {
  1358. "type": "stop",
  1359. "stopwords": "_romanian_" <1>
  1360. },
  1361. "romanian_keywords": {
  1362. "type": "keyword_marker",
  1363. "keywords": ["exemplu"] <2>
  1364. },
  1365. "romanian_stemmer": {
  1366. "type": "stemmer",
  1367. "language": "romanian"
  1368. }
  1369. },
  1370. "analyzer": {
  1371. "rebuilt_romanian": {
  1372. "tokenizer": "standard",
  1373. "filter": [
  1374. "lowercase",
  1375. "romanian_stop",
  1376. "romanian_keywords",
  1377. "romanian_stemmer"
  1378. ]
  1379. }
  1380. }
  1381. }
  1382. }
  1383. }
  1384. ----------------------------------------------------
  1385. // CONSOLE
  1386. // TEST[s/"romanian_keywords",//]
  1387. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
  1388. <1> The default stopwords can be overridden with the `stopwords`
  1389. or `stopwords_path` parameters.
  1390. <2> This filter should be removed unless there are words which should
  1391. be excluded from stemming.
  1392. [[russian-analyzer]]
  1393. ===== `russian` analyzer
  1394. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1395. [source,js]
  1396. ----------------------------------------------------
  1397. PUT /russian_example
  1398. {
  1399. "settings": {
  1400. "analysis": {
  1401. "filter": {
  1402. "russian_stop": {
  1403. "type": "stop",
  1404. "stopwords": "_russian_" <1>
  1405. },
  1406. "russian_keywords": {
  1407. "type": "keyword_marker",
  1408. "keywords": ["пример"] <2>
  1409. },
  1410. "russian_stemmer": {
  1411. "type": "stemmer",
  1412. "language": "russian"
  1413. }
  1414. },
  1415. "analyzer": {
  1416. "rebuilt_russian": {
  1417. "tokenizer": "standard",
  1418. "filter": [
  1419. "lowercase",
  1420. "russian_stop",
  1421. "russian_keywords",
  1422. "russian_stemmer"
  1423. ]
  1424. }
  1425. }
  1426. }
  1427. }
  1428. }
  1429. ----------------------------------------------------
  1430. // CONSOLE
  1431. // TEST[s/"russian_keywords",//]
  1432. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
  1433. <1> The default stopwords can be overridden with the `stopwords`
  1434. or `stopwords_path` parameters.
  1435. <2> This filter should be removed unless there are words which should
  1436. be excluded from stemming.
  1437. [[sorani-analyzer]]
  1438. ===== `sorani` analyzer
  1439. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1440. [source,js]
  1441. ----------------------------------------------------
  1442. PUT /sorani_example
  1443. {
  1444. "settings": {
  1445. "analysis": {
  1446. "filter": {
  1447. "sorani_stop": {
  1448. "type": "stop",
  1449. "stopwords": "_sorani_" <1>
  1450. },
  1451. "sorani_keywords": {
  1452. "type": "keyword_marker",
  1453. "keywords": ["mînak"] <2>
  1454. },
  1455. "sorani_stemmer": {
  1456. "type": "stemmer",
  1457. "language": "sorani"
  1458. }
  1459. },
  1460. "analyzer": {
  1461. "rebuilt_sorani": {
  1462. "tokenizer": "standard",
  1463. "filter": [
  1464. "sorani_normalization",
  1465. "lowercase",
  1466. "decimal_digit",
  1467. "sorani_stop",
  1468. "sorani_keywords",
  1469. "sorani_stemmer"
  1470. ]
  1471. }
  1472. }
  1473. }
  1474. }
  1475. }
  1476. ----------------------------------------------------
  1477. // CONSOLE
  1478. // TEST[s/"sorani_keywords",//]
  1479. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
  1480. <1> The default stopwords can be overridden with the `stopwords`
  1481. or `stopwords_path` parameters.
  1482. <2> This filter should be removed unless there are words which should
  1483. be excluded from stemming.
  1484. [[spanish-analyzer]]
  1485. ===== `spanish` analyzer
  1486. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1487. [source,js]
  1488. ----------------------------------------------------
  1489. PUT /spanish_example
  1490. {
  1491. "settings": {
  1492. "analysis": {
  1493. "filter": {
  1494. "spanish_stop": {
  1495. "type": "stop",
  1496. "stopwords": "_spanish_" <1>
  1497. },
  1498. "spanish_keywords": {
  1499. "type": "keyword_marker",
  1500. "keywords": ["ejemplo"] <2>
  1501. },
  1502. "spanish_stemmer": {
  1503. "type": "stemmer",
  1504. "language": "light_spanish"
  1505. }
  1506. },
  1507. "analyzer": {
  1508. "rebuilt_spanish": {
  1509. "tokenizer": "standard",
  1510. "filter": [
  1511. "lowercase",
  1512. "spanish_stop",
  1513. "spanish_keywords",
  1514. "spanish_stemmer"
  1515. ]
  1516. }
  1517. }
  1518. }
  1519. }
  1520. }
  1521. ----------------------------------------------------
  1522. // CONSOLE
  1523. // TEST[s/"spanish_keywords",//]
  1524. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
  1525. <1> The default stopwords can be overridden with the `stopwords`
  1526. or `stopwords_path` parameters.
  1527. <2> This filter should be removed unless there are words which should
  1528. be excluded from stemming.
  1529. [[swedish-analyzer]]
  1530. ===== `swedish` analyzer
  1531. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1532. [source,js]
  1533. ----------------------------------------------------
  1534. PUT /swedish_example
  1535. {
  1536. "settings": {
  1537. "analysis": {
  1538. "filter": {
  1539. "swedish_stop": {
  1540. "type": "stop",
  1541. "stopwords": "_swedish_" <1>
  1542. },
  1543. "swedish_keywords": {
  1544. "type": "keyword_marker",
  1545. "keywords": ["exempel"] <2>
  1546. },
  1547. "swedish_stemmer": {
  1548. "type": "stemmer",
  1549. "language": "swedish"
  1550. }
  1551. },
  1552. "analyzer": {
  1553. "rebuilt_swedish": {
  1554. "tokenizer": "standard",
  1555. "filter": [
  1556. "lowercase",
  1557. "swedish_stop",
  1558. "swedish_keywords",
  1559. "swedish_stemmer"
  1560. ]
  1561. }
  1562. }
  1563. }
  1564. }
  1565. }
  1566. ----------------------------------------------------
  1567. // CONSOLE
  1568. // TEST[s/"swedish_keywords",//]
  1569. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
  1570. <1> The default stopwords can be overridden with the `stopwords`
  1571. or `stopwords_path` parameters.
  1572. <2> This filter should be removed unless there are words which should
  1573. be excluded from stemming.
  1574. [[turkish-analyzer]]
  1575. ===== `turkish` analyzer
  1576. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1577. [source,js]
  1578. ----------------------------------------------------
  1579. PUT /turkish_example
  1580. {
  1581. "settings": {
  1582. "analysis": {
  1583. "filter": {
  1584. "turkish_stop": {
  1585. "type": "stop",
  1586. "stopwords": "_turkish_" <1>
  1587. },
  1588. "turkish_lowercase": {
  1589. "type": "lowercase",
  1590. "language": "turkish"
  1591. },
  1592. "turkish_keywords": {
  1593. "type": "keyword_marker",
  1594. "keywords": ["örnek"] <2>
  1595. },
  1596. "turkish_stemmer": {
  1597. "type": "stemmer",
  1598. "language": "turkish"
  1599. }
  1600. },
  1601. "analyzer": {
  1602. "rebuilt_turkish": {
  1603. "tokenizer": "standard",
  1604. "filter": [
  1605. "apostrophe",
  1606. "turkish_lowercase",
  1607. "turkish_stop",
  1608. "turkish_keywords",
  1609. "turkish_stemmer"
  1610. ]
  1611. }
  1612. }
  1613. }
  1614. }
  1615. }
  1616. ----------------------------------------------------
  1617. // CONSOLE
  1618. // TEST[s/"turkish_keywords",//]
  1619. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
  1620. <1> The default stopwords can be overridden with the `stopwords`
  1621. or `stopwords_path` parameters.
  1622. <2> This filter should be removed unless there are words which should
  1623. be excluded from stemming.
  1624. [[thai-analyzer]]
  1625. ===== `thai` analyzer
  1626. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1627. [source,js]
  1628. ----------------------------------------------------
  1629. PUT /thai_example
  1630. {
  1631. "settings": {
  1632. "analysis": {
  1633. "filter": {
  1634. "thai_stop": {
  1635. "type": "stop",
  1636. "stopwords": "_thai_" <1>
  1637. }
  1638. },
  1639. "analyzer": {
  1640. "rebuilt_thai": {
  1641. "tokenizer": "thai",
  1642. "filter": [
  1643. "lowercase",
  1644. "decimal_digit",
  1645. "thai_stop"
  1646. ]
  1647. }
  1648. }
  1649. }
  1650. }
  1651. }
  1652. ----------------------------------------------------
  1653. // CONSOLE
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
  1656. <1> The default stopwords can be overridden with the `stopwords`
  1657. or `stopwords_path` parameters.