// lang-analyzer.asciidoc
  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<bengali-analyzer,`bengali`>>,
  9. <<brazilian-analyzer,`brazilian`>>,
  10. <<bulgarian-analyzer,`bulgarian`>>,
  11. <<catalan-analyzer,`catalan`>>,
  12. <<cjk-analyzer,`cjk`>>,
  13. <<czech-analyzer,`czech`>>,
  14. <<danish-analyzer,`danish`>>,
  15. <<dutch-analyzer,`dutch`>>,
  16. <<english-analyzer,`english`>>,
  17. <<finnish-analyzer,`finnish`>>,
  18. <<french-analyzer,`french`>>,
  19. <<galician-analyzer,`galician`>>,
  20. <<german-analyzer,`german`>>,
  21. <<greek-analyzer,`greek`>>,
  22. <<hindi-analyzer,`hindi`>>,
  23. <<hungarian-analyzer,`hungarian`>>,
  24. <<indonesian-analyzer,`indonesian`>>,
  25. <<irish-analyzer,`irish`>>,
  26. <<italian-analyzer,`italian`>>,
  27. <<latvian-analyzer,`latvian`>>,
  28. <<lithuanian-analyzer,`lithuanian`>>,
  29. <<norwegian-analyzer,`norwegian`>>,
  30. <<persian-analyzer,`persian`>>,
  31. <<portuguese-analyzer,`portuguese`>>,
  32. <<romanian-analyzer,`romanian`>>,
  33. <<russian-analyzer,`russian`>>,
  34. <<sorani-analyzer,`sorani`>>,
  35. <<spanish-analyzer,`spanish`>>,
  36. <<swedish-analyzer,`swedish`>>,
  37. <<turkish-analyzer,`turkish`>>,
  38. <<thai-analyzer,`thai`>>.
  39. ==== Configuring language analyzers
  40. ===== Stopwords
  41. All analyzers support setting custom `stopwords` either internally in
  42. the config, or by using an external stopwords file by setting
  43. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  44. more details.
  45. ===== Excluding words from stemming
  46. The `stem_exclusion` parameter allows you to specify an array
  47. of lowercase words that should not be stemmed. Internally, this
  48. functionality is implemented by adding the
  49. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  50. with the `keywords` set to the value of the `stem_exclusion` parameter.
  51. The following analyzers support setting a custom `stem_exclusion` list:
  52. `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`,
  53. `dutch`, `english`, `finnish`, `french`, `galician`,
  54. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  55. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  56. `spanish`, `swedish`, `turkish`.
  57. ==== Reimplementing language analyzers
  58. The built-in language analyzers can be reimplemented as `custom` analyzers
  59. (as described below) in order to customize their behaviour.
  60. NOTE: If you do not intend to exclude words from being stemmed (the
  61. equivalent of the `stem_exclusion` parameter above), then you should remove
  62. the `keyword_marker` token filter from the custom analyzer configuration.
  63. [[arabic-analyzer]]
  64. ===== `arabic` analyzer
  65. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  66. [source,js]
  67. ----------------------------------------------------
  68. PUT /arabic_example
  69. {
  70. "settings": {
  71. "analysis": {
  72. "filter": {
  73. "arabic_stop": {
  74. "type": "stop",
  75. "stopwords": "_arabic_" <1>
  76. },
  77. "arabic_keywords": {
  78. "type": "keyword_marker",
  79. "keywords": ["مثال"] <2>
  80. },
  81. "arabic_stemmer": {
  82. "type": "stemmer",
  83. "language": "arabic"
  84. }
  85. },
  86. "analyzer": {
  87. "rebuilt_arabic": {
  88. "tokenizer": "standard",
  89. "filter": [
  90. "lowercase",
  91. "decimal_digit",
  92. "arabic_stop",
  93. "arabic_normalization",
  94. "arabic_keywords",
  95. "arabic_stemmer"
  96. ]
  97. }
  98. }
  99. }
  100. }
  101. }
  102. ----------------------------------------------------
  103. // CONSOLE
  104. // TEST[s/"arabic_keywords",//]
  105. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
  106. <1> The default stopwords can be overridden with the `stopwords`
  107. or `stopwords_path` parameters.
  108. <2> This filter should be removed unless there are words which should
  109. be excluded from stemming.
  110. [[armenian-analyzer]]
  111. ===== `armenian` analyzer
  112. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  113. [source,js]
  114. ----------------------------------------------------
  115. PUT /armenian_example
  116. {
  117. "settings": {
  118. "analysis": {
  119. "filter": {
  120. "armenian_stop": {
  121. "type": "stop",
  122. "stopwords": "_armenian_" <1>
  123. },
  124. "armenian_keywords": {
  125. "type": "keyword_marker",
  126. "keywords": ["օրինակ"] <2>
  127. },
  128. "armenian_stemmer": {
  129. "type": "stemmer",
  130. "language": "armenian"
  131. }
  132. },
  133. "analyzer": {
  134. "rebuilt_armenian": {
  135. "tokenizer": "standard",
  136. "filter": [
  137. "lowercase",
  138. "armenian_stop",
  139. "armenian_keywords",
  140. "armenian_stemmer"
  141. ]
  142. }
  143. }
  144. }
  145. }
  146. }
  147. ----------------------------------------------------
  148. // CONSOLE
  149. // TEST[s/"armenian_keywords",//]
  150. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
  151. <1> The default stopwords can be overridden with the `stopwords`
  152. or `stopwords_path` parameters.
  153. <2> This filter should be removed unless there are words which should
  154. be excluded from stemming.
  155. [[basque-analyzer]]
  156. ===== `basque` analyzer
  157. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  158. [source,js]
  159. ----------------------------------------------------
  160. PUT /basque_example
  161. {
  162. "settings": {
  163. "analysis": {
  164. "filter": {
  165. "basque_stop": {
  166. "type": "stop",
  167. "stopwords": "_basque_" <1>
  168. },
  169. "basque_keywords": {
  170. "type": "keyword_marker",
  171. "keywords": ["Adibidez"] <2>
  172. },
  173. "basque_stemmer": {
  174. "type": "stemmer",
  175. "language": "basque"
  176. }
  177. },
  178. "analyzer": {
  179. "rebuilt_basque": {
  180. "tokenizer": "standard",
  181. "filter": [
  182. "lowercase",
  183. "basque_stop",
  184. "basque_keywords",
  185. "basque_stemmer"
  186. ]
  187. }
  188. }
  189. }
  190. }
  191. }
  192. ----------------------------------------------------
  193. // CONSOLE
  194. // TEST[s/"basque_keywords",//]
  195. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
  196. <1> The default stopwords can be overridden with the `stopwords`
  197. or `stopwords_path` parameters.
  198. <2> This filter should be removed unless there are words which should
  199. be excluded from stemming.
  200. [[bengali-analyzer]]
  201. ===== `bengali` analyzer
  202. The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
  203. [source,js]
  204. ----------------------------------------------------
  205. PUT /bengali_example
  206. {
  207. "settings": {
  208. "analysis": {
  209. "filter": {
  210. "bengali_stop": {
  211. "type": "stop",
  212. "stopwords": "_bengali_" <1>
  213. },
  214. "bengali_keywords": {
  215. "type": "keyword_marker",
  216. "keywords": ["উদাহরণ"] <2>
  217. },
  218. "bengali_stemmer": {
  219. "type": "stemmer",
  220. "language": "bengali"
  221. }
  222. },
  223. "analyzer": {
  224. "rebuilt_bengali": {
  225. "tokenizer": "standard",
  226. "filter": [
  227. "lowercase",
  228. "decimal_digit",
  229. "bengali_keywords",
  230. "indic_normalization",
  231. "bengali_normalization",
  232. "bengali_stop",
  233. "bengali_stemmer"
  234. ]
  235. }
  236. }
  237. }
  238. }
  239. }
  240. ----------------------------------------------------
  241. // CONSOLE
  242. // TEST[s/"bengali_keywords",//]
  243. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
  244. <1> The default stopwords can be overridden with the `stopwords`
  245. or `stopwords_path` parameters.
  246. <2> This filter should be removed unless there are words which should
  247. be excluded from stemming.
  248. [[brazilian-analyzer]]
  249. ===== `brazilian` analyzer
  250. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  251. [source,js]
  252. ----------------------------------------------------
  253. PUT /brazilian_example
  254. {
  255. "settings": {
  256. "analysis": {
  257. "filter": {
  258. "brazilian_stop": {
  259. "type": "stop",
  260. "stopwords": "_brazilian_" <1>
  261. },
  262. "brazilian_keywords": {
  263. "type": "keyword_marker",
  264. "keywords": ["exemplo"] <2>
  265. },
  266. "brazilian_stemmer": {
  267. "type": "stemmer",
  268. "language": "brazilian"
  269. }
  270. },
  271. "analyzer": {
  272. "rebuilt_brazilian": {
  273. "tokenizer": "standard",
  274. "filter": [
  275. "lowercase",
  276. "brazilian_stop",
  277. "brazilian_keywords",
  278. "brazilian_stemmer"
  279. ]
  280. }
  281. }
  282. }
  283. }
  284. }
  285. ----------------------------------------------------
  286. // CONSOLE
  287. // TEST[s/"brazilian_keywords",//]
  288. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
  289. <1> The default stopwords can be overridden with the `stopwords`
  290. or `stopwords_path` parameters.
  291. <2> This filter should be removed unless there are words which should
  292. be excluded from stemming.
  293. [[bulgarian-analyzer]]
  294. ===== `bulgarian` analyzer
  295. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  296. [source,js]
  297. ----------------------------------------------------
  298. PUT /bulgarian_example
  299. {
  300. "settings": {
  301. "analysis": {
  302. "filter": {
  303. "bulgarian_stop": {
  304. "type": "stop",
  305. "stopwords": "_bulgarian_" <1>
  306. },
  307. "bulgarian_keywords": {
  308. "type": "keyword_marker",
  309. "keywords": ["пример"] <2>
  310. },
  311. "bulgarian_stemmer": {
  312. "type": "stemmer",
  313. "language": "bulgarian"
  314. }
  315. },
  316. "analyzer": {
  317. "rebuilt_bulgarian": {
  318. "tokenizer": "standard",
  319. "filter": [
  320. "lowercase",
  321. "bulgarian_stop",
  322. "bulgarian_keywords",
  323. "bulgarian_stemmer"
  324. ]
  325. }
  326. }
  327. }
  328. }
  329. }
  330. ----------------------------------------------------
  331. // CONSOLE
  332. // TEST[s/"bulgarian_keywords",//]
  333. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
  334. <1> The default stopwords can be overridden with the `stopwords`
  335. or `stopwords_path` parameters.
  336. <2> This filter should be removed unless there are words which should
  337. be excluded from stemming.
  338. [[catalan-analyzer]]
  339. ===== `catalan` analyzer
  340. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  341. [source,js]
  342. ----------------------------------------------------
  343. PUT /catalan_example
  344. {
  345. "settings": {
  346. "analysis": {
  347. "filter": {
  348. "catalan_elision": {
  349. "type": "elision",
  350. "articles": [ "d", "l", "m", "n", "s", "t"],
  351. "articles_case": true
  352. },
  353. "catalan_stop": {
  354. "type": "stop",
  355. "stopwords": "_catalan_" <1>
  356. },
  357. "catalan_keywords": {
  358. "type": "keyword_marker",
  359. "keywords": ["exemple"] <2>
  360. },
  361. "catalan_stemmer": {
  362. "type": "stemmer",
  363. "language": "catalan"
  364. }
  365. },
  366. "analyzer": {
  367. "rebuilt_catalan": {
  368. "tokenizer": "standard",
  369. "filter": [
  370. "catalan_elision",
  371. "lowercase",
  372. "catalan_stop",
  373. "catalan_keywords",
  374. "catalan_stemmer"
  375. ]
  376. }
  377. }
  378. }
  379. }
  380. }
  381. ----------------------------------------------------
  382. // CONSOLE
  383. // TEST[s/"catalan_keywords",//]
  384. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
  385. <1> The default stopwords can be overridden with the `stopwords`
  386. or `stopwords_path` parameters.
  387. <2> This filter should be removed unless there are words which should
  388. be excluded from stemming.
  389. [[cjk-analyzer]]
  390. ===== `cjk` analyzer
  391. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  392. [source,js]
  393. ----------------------------------------------------
  394. PUT /cjk_example
  395. {
  396. "settings": {
  397. "analysis": {
  398. "filter": {
  399. "english_stop": {
  400. "type": "stop",
  401. "stopwords": [ <1>
  402. "a", "and", "are", "as", "at", "be", "but", "by", "for",
  403. "if", "in", "into", "is", "it", "no", "not", "of", "on",
  404. "or", "s", "such", "t", "that", "the", "their", "then",
  405. "there", "these", "they", "this", "to", "was", "will",
  406. "with", "www"
  407. ]
  408. }
  409. },
  410. "analyzer": {
  411. "rebuilt_cjk": {
  412. "tokenizer": "standard",
  413. "filter": [
  414. "cjk_width",
  415. "lowercase",
  416. "cjk_bigram",
  417. "english_stop"
  418. ]
  419. }
  420. }
  421. }
  422. }
  423. }
  424. ----------------------------------------------------
  425. // CONSOLE
  426. // TEST[s/"cjk_keywords",//]
  427. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
  428. <1> The default stopwords can be overridden with the `stopwords`
  429. or `stopwords_path` parameters. The default stop words are
  430. *almost* the same as the `_english_` set, but not exactly
  431. the same.
  432. [[czech-analyzer]]
  433. ===== `czech` analyzer
  434. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  435. [source,js]
  436. ----------------------------------------------------
  437. PUT /czech_example
  438. {
  439. "settings": {
  440. "analysis": {
  441. "filter": {
  442. "czech_stop": {
  443. "type": "stop",
  444. "stopwords": "_czech_" <1>
  445. },
  446. "czech_keywords": {
  447. "type": "keyword_marker",
  448. "keywords": ["příklad"] <2>
  449. },
  450. "czech_stemmer": {
  451. "type": "stemmer",
  452. "language": "czech"
  453. }
  454. },
  455. "analyzer": {
  456. "rebuilt_czech": {
  457. "tokenizer": "standard",
  458. "filter": [
  459. "lowercase",
  460. "czech_stop",
  461. "czech_keywords",
  462. "czech_stemmer"
  463. ]
  464. }
  465. }
  466. }
  467. }
  468. }
  469. ----------------------------------------------------
  470. // CONSOLE
  471. // TEST[s/"czech_keywords",//]
  472. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
  473. <1> The default stopwords can be overridden with the `stopwords`
  474. or `stopwords_path` parameters.
  475. <2> This filter should be removed unless there are words which should
  476. be excluded from stemming.
  477. [[danish-analyzer]]
  478. ===== `danish` analyzer
  479. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  480. [source,js]
  481. ----------------------------------------------------
  482. PUT /danish_example
  483. {
  484. "settings": {
  485. "analysis": {
  486. "filter": {
  487. "danish_stop": {
  488. "type": "stop",
  489. "stopwords": "_danish_" <1>
  490. },
  491. "danish_keywords": {
  492. "type": "keyword_marker",
  493. "keywords": ["eksempel"] <2>
  494. },
  495. "danish_stemmer": {
  496. "type": "stemmer",
  497. "language": "danish"
  498. }
  499. },
  500. "analyzer": {
  501. "rebuilt_danish": {
  502. "tokenizer": "standard",
  503. "filter": [
  504. "lowercase",
  505. "danish_stop",
  506. "danish_keywords",
  507. "danish_stemmer"
  508. ]
  509. }
  510. }
  511. }
  512. }
  513. }
  514. ----------------------------------------------------
  515. // CONSOLE
  516. // TEST[s/"danish_keywords",//]
  517. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
  518. <1> The default stopwords can be overridden with the `stopwords`
  519. or `stopwords_path` parameters.
  520. <2> This filter should be removed unless there are words which should
  521. be excluded from stemming.
  522. [[dutch-analyzer]]
  523. ===== `dutch` analyzer
  524. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  525. [source,js]
  526. ----------------------------------------------------
  527. PUT /dutch_example
  528. {
  529. "settings": {
  530. "analysis": {
  531. "filter": {
  532. "dutch_stop": {
  533. "type": "stop",
  534. "stopwords": "_dutch_" <1>
  535. },
  536. "dutch_keywords": {
  537. "type": "keyword_marker",
  538. "keywords": ["voorbeeld"] <2>
  539. },
  540. "dutch_stemmer": {
  541. "type": "stemmer",
  542. "language": "dutch"
  543. },
  544. "dutch_override": {
  545. "type": "stemmer_override",
  546. "rules": [
  547. "fiets=>fiets",
  548. "bromfiets=>bromfiets",
  549. "ei=>eier",
  550. "kind=>kinder"
  551. ]
  552. }
  553. },
  554. "analyzer": {
  555. "rebuilt_dutch": {
  556. "tokenizer": "standard",
  557. "filter": [
  558. "lowercase",
  559. "dutch_stop",
  560. "dutch_keywords",
  561. "dutch_override",
  562. "dutch_stemmer"
  563. ]
  564. }
  565. }
  566. }
  567. }
  568. }
  569. ----------------------------------------------------
  570. // CONSOLE
  571. // TEST[s/"dutch_keywords",//]
  572. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
  573. <1> The default stopwords can be overridden with the `stopwords`
  574. or `stopwords_path` parameters.
  575. <2> This filter should be removed unless there are words which should
  576. be excluded from stemming.
  577. [[english-analyzer]]
  578. ===== `english` analyzer
  579. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  580. [source,js]
  581. ----------------------------------------------------
  582. PUT /english_example
  583. {
  584. "settings": {
  585. "analysis": {
  586. "filter": {
  587. "english_stop": {
  588. "type": "stop",
  589. "stopwords": "_english_" <1>
  590. },
  591. "english_keywords": {
  592. "type": "keyword_marker",
  593. "keywords": ["example"] <2>
  594. },
  595. "english_stemmer": {
  596. "type": "stemmer",
  597. "language": "english"
  598. },
  599. "english_possessive_stemmer": {
  600. "type": "stemmer",
  601. "language": "possessive_english"
  602. }
  603. },
  604. "analyzer": {
  605. "rebuilt_english": {
  606. "tokenizer": "standard",
  607. "filter": [
  608. "english_possessive_stemmer",
  609. "lowercase",
  610. "english_stop",
  611. "english_keywords",
  612. "english_stemmer"
  613. ]
  614. }
  615. }
  616. }
  617. }
  618. }
  619. ----------------------------------------------------
  620. // CONSOLE
  621. // TEST[s/"english_keywords",//]
  622. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
  623. <1> The default stopwords can be overridden with the `stopwords`
  624. or `stopwords_path` parameters.
  625. <2> This filter should be removed unless there are words which should
  626. be excluded from stemming.
  627. [[finnish-analyzer]]
  628. ===== `finnish` analyzer
  629. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  630. [source,js]
  631. ----------------------------------------------------
  632. PUT /finnish_example
  633. {
  634. "settings": {
  635. "analysis": {
  636. "filter": {
  637. "finnish_stop": {
  638. "type": "stop",
  639. "stopwords": "_finnish_" <1>
  640. },
  641. "finnish_keywords": {
  642. "type": "keyword_marker",
  643. "keywords": ["esimerkki"] <2>
  644. },
  645. "finnish_stemmer": {
  646. "type": "stemmer",
  647. "language": "finnish"
  648. }
  649. },
  650. "analyzer": {
  651. "rebuilt_finnish": {
  652. "tokenizer": "standard",
  653. "filter": [
  654. "lowercase",
  655. "finnish_stop",
  656. "finnish_keywords",
  657. "finnish_stemmer"
  658. ]
  659. }
  660. }
  661. }
  662. }
  663. }
  664. ----------------------------------------------------
  665. // CONSOLE
  666. // TEST[s/"finnish_keywords",//]
  667. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
  668. <1> The default stopwords can be overridden with the `stopwords`
  669. or `stopwords_path` parameters.
  670. <2> This filter should be removed unless there are words which should
  671. be excluded from stemming.
  672. [[french-analyzer]]
  673. ===== `french` analyzer
  674. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  675. [source,js]
  676. ----------------------------------------------------
  677. PUT /french_example
  678. {
  679. "settings": {
  680. "analysis": {
  681. "filter": {
  682. "french_elision": {
  683. "type": "elision",
  684. "articles_case": true,
  685. "articles": [
  686. "l", "m", "t", "qu", "n", "s",
  687. "j", "d", "c", "jusqu", "quoiqu",
  688. "lorsqu", "puisqu"
  689. ]
  690. },
  691. "french_stop": {
  692. "type": "stop",
  693. "stopwords": "_french_" <1>
  694. },
  695. "french_keywords": {
  696. "type": "keyword_marker",
  697. "keywords": ["Exemple"] <2>
  698. },
  699. "french_stemmer": {
  700. "type": "stemmer",
  701. "language": "light_french"
  702. }
  703. },
  704. "analyzer": {
  705. "rebuilt_french": {
  706. "tokenizer": "standard",
  707. "filter": [
  708. "french_elision",
  709. "lowercase",
  710. "french_stop",
  711. "french_keywords",
  712. "french_stemmer"
  713. ]
  714. }
  715. }
  716. }
  717. }
  718. }
  719. ----------------------------------------------------
  720. // CONSOLE
  721. // TEST[s/"french_keywords",//]
  722. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
  723. <1> The default stopwords can be overridden with the `stopwords`
  724. or `stopwords_path` parameters.
  725. <2> This filter should be removed unless there are words which should
  726. be excluded from stemming.
  727. [[galician-analyzer]]
  728. ===== `galician` analyzer
  729. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  730. [source,js]
  731. ----------------------------------------------------
  732. PUT /galician_example
  733. {
  734. "settings": {
  735. "analysis": {
  736. "filter": {
  737. "galician_stop": {
  738. "type": "stop",
  739. "stopwords": "_galician_" <1>
  740. },
  741. "galician_keywords": {
  742. "type": "keyword_marker",
  743. "keywords": ["exemplo"] <2>
  744. },
  745. "galician_stemmer": {
  746. "type": "stemmer",
  747. "language": "galician"
  748. }
  749. },
  750. "analyzer": {
  751. "rebuilt_galician": {
  752. "tokenizer": "standard",
  753. "filter": [
  754. "lowercase",
  755. "galician_stop",
  756. "galician_keywords",
  757. "galician_stemmer"
  758. ]
  759. }
  760. }
  761. }
  762. }
  763. }
  764. ----------------------------------------------------
  765. // CONSOLE
  766. // TEST[s/"galician_keywords",//]
  767. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
  768. <1> The default stopwords can be overridden with the `stopwords`
  769. or `stopwords_path` parameters.
  770. <2> This filter should be removed unless there are words which should
  771. be excluded from stemming.
  772. [[german-analyzer]]
  773. ===== `german` analyzer
  774. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  775. [source,js]
  776. ----------------------------------------------------
  777. PUT /german_example
  778. {
  779. "settings": {
  780. "analysis": {
  781. "filter": {
  782. "german_stop": {
  783. "type": "stop",
  784. "stopwords": "_german_" <1>
  785. },
  786. "german_keywords": {
  787. "type": "keyword_marker",
  788. "keywords": ["Beispiel"] <2>
  789. },
  790. "german_stemmer": {
  791. "type": "stemmer",
  792. "language": "light_german"
  793. }
  794. },
  795. "analyzer": {
  796. "rebuilt_german": {
  797. "tokenizer": "standard",
  798. "filter": [
  799. "lowercase",
  800. "german_stop",
  801. "german_keywords",
  802. "german_normalization",
  803. "german_stemmer"
  804. ]
  805. }
  806. }
  807. }
  808. }
  809. }
  810. ----------------------------------------------------
  811. // CONSOLE
  812. // TEST[s/"german_keywords",//]
  813. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
  814. <1> The default stopwords can be overridden with the `stopwords`
  815. or `stopwords_path` parameters.
  816. <2> This filter should be removed unless there are words which should
  817. be excluded from stemming.
  818. [[greek-analyzer]]
  819. ===== `greek` analyzer
  820. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  821. [source,js]
  822. ----------------------------------------------------
  823. PUT /greek_example
  824. {
  825. "settings": {
  826. "analysis": {
  827. "filter": {
  828. "greek_stop": {
  829. "type": "stop",
  830. "stopwords": "_greek_" <1>
  831. },
  832. "greek_lowercase": {
  833. "type": "lowercase",
  834. "language": "greek"
  835. },
  836. "greek_keywords": {
  837. "type": "keyword_marker",
  838. "keywords": ["παράδειγμα"] <2>
  839. },
  840. "greek_stemmer": {
  841. "type": "stemmer",
  842. "language": "greek"
  843. }
  844. },
  845. "analyzer": {
  846. "rebuilt_greek": {
  847. "tokenizer": "standard",
  848. "filter": [
  849. "greek_lowercase",
  850. "greek_stop",
  851. "greek_keywords",
  852. "greek_stemmer"
  853. ]
  854. }
  855. }
  856. }
  857. }
  858. }
  859. ----------------------------------------------------
  860. // CONSOLE
  861. // TEST[s/"greek_keywords",//]
  862. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
  863. <1> The default stopwords can be overridden with the `stopwords`
  864. or `stopwords_path` parameters.
  865. <2> This filter should be removed unless there are words which should
  866. be excluded from stemming.
  867. [[hindi-analyzer]]
  868. ===== `hindi` analyzer
  869. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  870. [source,js]
  871. ----------------------------------------------------
  872. PUT /hindi_example
  873. {
  874. "settings": {
  875. "analysis": {
  876. "filter": {
  877. "hindi_stop": {
  878. "type": "stop",
  879. "stopwords": "_hindi_" <1>
  880. },
  881. "hindi_keywords": {
  882. "type": "keyword_marker",
  883. "keywords": ["उदाहरण"] <2>
  884. },
  885. "hindi_stemmer": {
  886. "type": "stemmer",
  887. "language": "hindi"
  888. }
  889. },
  890. "analyzer": {
  891. "rebuilt_hindi": {
  892. "tokenizer": "standard",
  893. "filter": [
  894. "lowercase",
  895. "decimal_digit",
  896. "hindi_keywords",
  897. "indic_normalization",
  898. "hindi_normalization",
  899. "hindi_stop",
  900. "hindi_stemmer"
  901. ]
  902. }
  903. }
  904. }
  905. }
  906. }
  907. ----------------------------------------------------
  908. // CONSOLE
  909. // TEST[s/"hindi_keywords",//]
  910. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
  911. <1> The default stopwords can be overridden with the `stopwords`
  912. or `stopwords_path` parameters.
  913. <2> This filter should be removed unless there are words which should
  914. be excluded from stemming.
  915. [[hungarian-analyzer]]
  916. ===== `hungarian` analyzer
  917. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  918. [source,js]
  919. ----------------------------------------------------
  920. PUT /hungarian_example
  921. {
  922. "settings": {
  923. "analysis": {
  924. "filter": {
  925. "hungarian_stop": {
  926. "type": "stop",
  927. "stopwords": "_hungarian_" <1>
  928. },
  929. "hungarian_keywords": {
  930. "type": "keyword_marker",
  931. "keywords": ["példa"] <2>
  932. },
  933. "hungarian_stemmer": {
  934. "type": "stemmer",
  935. "language": "hungarian"
  936. }
  937. },
  938. "analyzer": {
  939. "rebuilt_hungarian": {
  940. "tokenizer": "standard",
  941. "filter": [
  942. "lowercase",
  943. "hungarian_stop",
  944. "hungarian_keywords",
  945. "hungarian_stemmer"
  946. ]
  947. }
  948. }
  949. }
  950. }
  951. }
  952. ----------------------------------------------------
  953. // CONSOLE
  954. // TEST[s/"hungarian_keywords",//]
  955. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
  956. <1> The default stopwords can be overridden with the `stopwords`
  957. or `stopwords_path` parameters.
  958. <2> This filter should be removed unless there are words which should
  959. be excluded from stemming.
  960. [[indonesian-analyzer]]
  961. ===== `indonesian` analyzer
  962. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  963. [source,js]
  964. ----------------------------------------------------
  965. PUT /indonesian_example
  966. {
  967. "settings": {
  968. "analysis": {
  969. "filter": {
  970. "indonesian_stop": {
  971. "type": "stop",
  972. "stopwords": "_indonesian_" <1>
  973. },
  974. "indonesian_keywords": {
  975. "type": "keyword_marker",
  976. "keywords": ["contoh"] <2>
  977. },
  978. "indonesian_stemmer": {
  979. "type": "stemmer",
  980. "language": "indonesian"
  981. }
  982. },
  983. "analyzer": {
  984. "rebuilt_indonesian": {
  985. "tokenizer": "standard",
  986. "filter": [
  987. "lowercase",
  988. "indonesian_stop",
  989. "indonesian_keywords",
  990. "indonesian_stemmer"
  991. ]
  992. }
  993. }
  994. }
  995. }
  996. }
  997. ----------------------------------------------------
  998. // CONSOLE
  999. // TEST[s/"indonesian_keywords",//]
  1000. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
  1001. <1> The default stopwords can be overridden with the `stopwords`
  1002. or `stopwords_path` parameters.
  1003. <2> This filter should be removed unless there are words which should
  1004. be excluded from stemming.
  1005. [[irish-analyzer]]
  1006. ===== `irish` analyzer
  1007. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1008. [source,js]
  1009. ----------------------------------------------------
  1010. PUT /irish_example
  1011. {
  1012. "settings": {
  1013. "analysis": {
  1014. "filter": {
  1015. "irish_hyphenation": {
  1016. "type": "stop",
  1017. "stopwords": [ "h", "n", "t" ],
  1018. "ignore_case": true
  1019. },
  1020. "irish_elision": {
  1021. "type": "elision",
  1022. "articles": [ "d", "m", "b" ],
  1023. "articles_case": true
  1024. },
  1025. "irish_stop": {
  1026. "type": "stop",
  1027. "stopwords": "_irish_" <1>
  1028. },
  1029. "irish_lowercase": {
  1030. "type": "lowercase",
  1031. "language": "irish"
  1032. },
  1033. "irish_keywords": {
  1034. "type": "keyword_marker",
  1035. "keywords": ["sampla"] <2>
  1036. },
  1037. "irish_stemmer": {
  1038. "type": "stemmer",
  1039. "language": "irish"
  1040. }
  1041. },
  1042. "analyzer": {
  1043. "rebuilt_irish": {
  1044. "tokenizer": "standard",
  1045. "filter": [
  1046. "irish_hyphenation",
  1047. "irish_elision",
  1048. "irish_lowercase",
  1049. "irish_stop",
  1050. "irish_keywords",
  1051. "irish_stemmer"
  1052. ]
  1053. }
  1054. }
  1055. }
  1056. }
  1057. }
  1058. ----------------------------------------------------
  1059. // CONSOLE
  1060. // TEST[s/"irish_keywords",//]
  1061. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
  1062. <1> The default stopwords can be overridden with the `stopwords`
  1063. or `stopwords_path` parameters.
  1064. <2> This filter should be removed unless there are words which should
  1065. be excluded from stemming.
  1066. [[italian-analyzer]]
  1067. ===== `italian` analyzer
  1068. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1069. [source,js]
  1070. ----------------------------------------------------
  1071. PUT /italian_example
  1072. {
  1073. "settings": {
  1074. "analysis": {
  1075. "filter": {
  1076. "italian_elision": {
  1077. "type": "elision",
  1078. "articles": [
  1079. "c", "l", "all", "dall", "dell",
  1080. "nell", "sull", "coll", "pell",
  1081. "gl", "agl", "dagl", "degl", "negl",
  1082. "sugl", "un", "m", "t", "s", "v", "d"
  1083. ],
  1084. "articles_case": true
  1085. },
  1086. "italian_stop": {
  1087. "type": "stop",
  1088. "stopwords": "_italian_" <1>
  1089. },
  1090. "italian_keywords": {
  1091. "type": "keyword_marker",
  1092. "keywords": ["esempio"] <2>
  1093. },
  1094. "italian_stemmer": {
  1095. "type": "stemmer",
  1096. "language": "light_italian"
  1097. }
  1098. },
  1099. "analyzer": {
  1100. "rebuilt_italian": {
  1101. "tokenizer": "standard",
  1102. "filter": [
  1103. "italian_elision",
  1104. "lowercase",
  1105. "italian_stop",
  1106. "italian_keywords",
  1107. "italian_stemmer"
  1108. ]
  1109. }
  1110. }
  1111. }
  1112. }
  1113. }
  1114. ----------------------------------------------------
  1115. // CONSOLE
  1116. // TEST[s/"italian_keywords",//]
  1117. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
  1118. <1> The default stopwords can be overridden with the `stopwords`
  1119. or `stopwords_path` parameters.
  1120. <2> This filter should be removed unless there are words which should
  1121. be excluded from stemming.
  1122. [[latvian-analyzer]]
  1123. ===== `latvian` analyzer
  1124. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1125. [source,js]
  1126. ----------------------------------------------------
  1127. PUT /latvian_example
  1128. {
  1129. "settings": {
  1130. "analysis": {
  1131. "filter": {
  1132. "latvian_stop": {
  1133. "type": "stop",
  1134. "stopwords": "_latvian_" <1>
  1135. },
  1136. "latvian_keywords": {
  1137. "type": "keyword_marker",
  1138. "keywords": ["piemērs"] <2>
  1139. },
  1140. "latvian_stemmer": {
  1141. "type": "stemmer",
  1142. "language": "latvian"
  1143. }
  1144. },
  1145. "analyzer": {
  1146. "rebuilt_latvian": {
  1147. "tokenizer": "standard",
  1148. "filter": [
  1149. "lowercase",
  1150. "latvian_stop",
  1151. "latvian_keywords",
  1152. "latvian_stemmer"
  1153. ]
  1154. }
  1155. }
  1156. }
  1157. }
  1158. }
  1159. ----------------------------------------------------
  1160. // CONSOLE
  1161. // TEST[s/"latvian_keywords",//]
  1162. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
  1163. <1> The default stopwords can be overridden with the `stopwords`
  1164. or `stopwords_path` parameters.
  1165. <2> This filter should be removed unless there are words which should
  1166. be excluded from stemming.
  1167. [[lithuanian-analyzer]]
  1168. ===== `lithuanian` analyzer
  1169. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1170. [source,js]
  1171. ----------------------------------------------------
  1172. PUT /lithuanian_example
  1173. {
  1174. "settings": {
  1175. "analysis": {
  1176. "filter": {
  1177. "lithuanian_stop": {
  1178. "type": "stop",
  1179. "stopwords": "_lithuanian_" <1>
  1180. },
  1181. "lithuanian_keywords": {
  1182. "type": "keyword_marker",
  1183. "keywords": ["pavyzdys"] <2>
  1184. },
  1185. "lithuanian_stemmer": {
  1186. "type": "stemmer",
  1187. "language": "lithuanian"
  1188. }
  1189. },
  1190. "analyzer": {
  1191. "rebuilt_lithuanian": {
  1192. "tokenizer": "standard",
  1193. "filter": [
  1194. "lowercase",
  1195. "lithuanian_stop",
  1196. "lithuanian_keywords",
  1197. "lithuanian_stemmer"
  1198. ]
  1199. }
  1200. }
  1201. }
  1202. }
  1203. }
  1204. ----------------------------------------------------
  1205. // CONSOLE
  1206. // TEST[s/"lithuanian_keywords",//]
  1207. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
  1208. <1> The default stopwords can be overridden with the `stopwords`
  1209. or `stopwords_path` parameters.
  1210. <2> This filter should be removed unless there are words which should
  1211. be excluded from stemming.
  1212. [[norwegian-analyzer]]
  1213. ===== `norwegian` analyzer
  1214. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1215. [source,js]
  1216. ----------------------------------------------------
  1217. PUT /norwegian_example
  1218. {
  1219. "settings": {
  1220. "analysis": {
  1221. "filter": {
  1222. "norwegian_stop": {
  1223. "type": "stop",
  1224. "stopwords": "_norwegian_" <1>
  1225. },
  1226. "norwegian_keywords": {
  1227. "type": "keyword_marker",
  1228. "keywords": ["eksempel"] <2>
  1229. },
  1230. "norwegian_stemmer": {
  1231. "type": "stemmer",
  1232. "language": "norwegian"
  1233. }
  1234. },
  1235. "analyzer": {
  1236. "rebuilt_norwegian": {
  1237. "tokenizer": "standard",
  1238. "filter": [
  1239. "lowercase",
  1240. "norwegian_stop",
  1241. "norwegian_keywords",
  1242. "norwegian_stemmer"
  1243. ]
  1244. }
  1245. }
  1246. }
  1247. }
  1248. }
  1249. ----------------------------------------------------
  1250. // CONSOLE
  1251. // TEST[s/"norwegian_keywords",//]
  1252. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
  1253. <1> The default stopwords can be overridden with the `stopwords`
  1254. or `stopwords_path` parameters.
  1255. <2> This filter should be removed unless there are words which should
  1256. be excluded from stemming.
  1257. [[persian-analyzer]]
  1258. ===== `persian` analyzer
  1259. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1260. [source,js]
  1261. ----------------------------------------------------
  1262. PUT /persian_example
  1263. {
  1264. "settings": {
  1265. "analysis": {
  1266. "char_filter": {
  1267. "zero_width_spaces": {
  1268. "type": "mapping",
  1269. "mappings": [ "\\u200C=> "] <1>
  1270. }
  1271. },
  1272. "filter": {
  1273. "persian_stop": {
  1274. "type": "stop",
  1275. "stopwords": "_persian_" <2>
  1276. }
  1277. },
  1278. "analyzer": {
  1279. "rebuilt_persian": {
  1280. "tokenizer": "standard",
  1281. "char_filter": [ "zero_width_spaces" ],
  1282. "filter": [
  1283. "lowercase",
  1284. "decimal_digit",
  1285. "arabic_normalization",
  1286. "persian_normalization",
  1287. "persian_stop"
  1288. ]
  1289. }
  1290. }
  1291. }
  1292. }
  1293. }
  1294. ----------------------------------------------------
  1295. // CONSOLE
  1296. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
  1297. <1> Replaces zero-width non-joiners with an ASCII space.
  1298. <2> The default stopwords can be overridden with the `stopwords`
  1299. or `stopwords_path` parameters.
  1300. [[portuguese-analyzer]]
  1301. ===== `portuguese` analyzer
  1302. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1303. [source,js]
  1304. ----------------------------------------------------
  1305. PUT /portuguese_example
  1306. {
  1307. "settings": {
  1308. "analysis": {
  1309. "filter": {
  1310. "portuguese_stop": {
  1311. "type": "stop",
  1312. "stopwords": "_portuguese_" <1>
  1313. },
  1314. "portuguese_keywords": {
  1315. "type": "keyword_marker",
  1316. "keywords": ["exemplo"] <2>
  1317. },
  1318. "portuguese_stemmer": {
  1319. "type": "stemmer",
  1320. "language": "light_portuguese"
  1321. }
  1322. },
  1323. "analyzer": {
  1324. "rebuilt_portuguese": {
  1325. "tokenizer": "standard",
  1326. "filter": [
  1327. "lowercase",
  1328. "portuguese_stop",
  1329. "portuguese_keywords",
  1330. "portuguese_stemmer"
  1331. ]
  1332. }
  1333. }
  1334. }
  1335. }
  1336. }
  1337. ----------------------------------------------------
  1338. // CONSOLE
  1339. // TEST[s/"portuguese_keywords",//]
  1340. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
  1341. <1> The default stopwords can be overridden with the `stopwords`
  1342. or `stopwords_path` parameters.
  1343. <2> This filter should be removed unless there are words which should
  1344. be excluded from stemming.
  1345. [[romanian-analyzer]]
  1346. ===== `romanian` analyzer
  1347. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1348. [source,js]
  1349. ----------------------------------------------------
  1350. PUT /romanian_example
  1351. {
  1352. "settings": {
  1353. "analysis": {
  1354. "filter": {
  1355. "romanian_stop": {
  1356. "type": "stop",
  1357. "stopwords": "_romanian_" <1>
  1358. },
  1359. "romanian_keywords": {
  1360. "type": "keyword_marker",
  1361. "keywords": ["exemplu"] <2>
  1362. },
  1363. "romanian_stemmer": {
  1364. "type": "stemmer",
  1365. "language": "romanian"
  1366. }
  1367. },
  1368. "analyzer": {
  1369. "rebuilt_romanian": {
  1370. "tokenizer": "standard",
  1371. "filter": [
  1372. "lowercase",
  1373. "romanian_stop",
  1374. "romanian_keywords",
  1375. "romanian_stemmer"
  1376. ]
  1377. }
  1378. }
  1379. }
  1380. }
  1381. }
  1382. ----------------------------------------------------
  1383. // CONSOLE
  1384. // TEST[s/"romanian_keywords",//]
  1385. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
  1386. <1> The default stopwords can be overridden with the `stopwords`
  1387. or `stopwords_path` parameters.
  1388. <2> This filter should be removed unless there are words which should
  1389. be excluded from stemming.
  1390. [[russian-analyzer]]
  1391. ===== `russian` analyzer
  1392. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1393. [source,js]
  1394. ----------------------------------------------------
  1395. PUT /russian_example
  1396. {
  1397. "settings": {
  1398. "analysis": {
  1399. "filter": {
  1400. "russian_stop": {
  1401. "type": "stop",
  1402. "stopwords": "_russian_" <1>
  1403. },
  1404. "russian_keywords": {
  1405. "type": "keyword_marker",
  1406. "keywords": ["пример"] <2>
  1407. },
  1408. "russian_stemmer": {
  1409. "type": "stemmer",
  1410. "language": "russian"
  1411. }
  1412. },
  1413. "analyzer": {
  1414. "rebuilt_russian": {
  1415. "tokenizer": "standard",
  1416. "filter": [
  1417. "lowercase",
  1418. "russian_stop",
  1419. "russian_keywords",
  1420. "russian_stemmer"
  1421. ]
  1422. }
  1423. }
  1424. }
  1425. }
  1426. }
  1427. ----------------------------------------------------
  1428. // CONSOLE
  1429. // TEST[s/"russian_keywords",//]
  1430. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
  1431. <1> The default stopwords can be overridden with the `stopwords`
  1432. or `stopwords_path` parameters.
  1433. <2> This filter should be removed unless there are words which should
  1434. be excluded from stemming.
  1435. [[sorani-analyzer]]
  1436. ===== `sorani` analyzer
  1437. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1438. [source,js]
  1439. ----------------------------------------------------
  1440. PUT /sorani_example
  1441. {
  1442. "settings": {
  1443. "analysis": {
  1444. "filter": {
  1445. "sorani_stop": {
  1446. "type": "stop",
  1447. "stopwords": "_sorani_" <1>
  1448. },
  1449. "sorani_keywords": {
  1450. "type": "keyword_marker",
  1451. "keywords": ["mînak"] <2>
  1452. },
  1453. "sorani_stemmer": {
  1454. "type": "stemmer",
  1455. "language": "sorani"
  1456. }
  1457. },
  1458. "analyzer": {
  1459. "rebuilt_sorani": {
  1460. "tokenizer": "standard",
  1461. "filter": [
  1462. "sorani_normalization",
  1463. "lowercase",
  1464. "decimal_digit",
  1465. "sorani_stop",
  1466. "sorani_keywords",
  1467. "sorani_stemmer"
  1468. ]
  1469. }
  1470. }
  1471. }
  1472. }
  1473. }
  1474. ----------------------------------------------------
  1475. // CONSOLE
  1476. // TEST[s/"sorani_keywords",//]
  1477. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
  1478. <1> The default stopwords can be overridden with the `stopwords`
  1479. or `stopwords_path` parameters.
  1480. <2> This filter should be removed unless there are words which should
  1481. be excluded from stemming.
  1482. [[spanish-analyzer]]
  1483. ===== `spanish` analyzer
  1484. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1485. [source,js]
  1486. ----------------------------------------------------
  1487. PUT /spanish_example
  1488. {
  1489. "settings": {
  1490. "analysis": {
  1491. "filter": {
  1492. "spanish_stop": {
  1493. "type": "stop",
  1494. "stopwords": "_spanish_" <1>
  1495. },
  1496. "spanish_keywords": {
  1497. "type": "keyword_marker",
  1498. "keywords": ["ejemplo"] <2>
  1499. },
  1500. "spanish_stemmer": {
  1501. "type": "stemmer",
  1502. "language": "light_spanish"
  1503. }
  1504. },
  1505. "analyzer": {
  1506. "rebuilt_spanish": {
  1507. "tokenizer": "standard",
  1508. "filter": [
  1509. "lowercase",
  1510. "spanish_stop",
  1511. "spanish_keywords",
  1512. "spanish_stemmer"
  1513. ]
  1514. }
  1515. }
  1516. }
  1517. }
  1518. }
  1519. ----------------------------------------------------
  1520. // CONSOLE
  1521. // TEST[s/"spanish_keywords",//]
  1522. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
  1523. <1> The default stopwords can be overridden with the `stopwords`
  1524. or `stopwords_path` parameters.
  1525. <2> This filter should be removed unless there are words which should
  1526. be excluded from stemming.
  1527. [[swedish-analyzer]]
  1528. ===== `swedish` analyzer
  1529. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1530. [source,js]
  1531. ----------------------------------------------------
  1532. PUT /swedish_example
  1533. {
  1534. "settings": {
  1535. "analysis": {
  1536. "filter": {
  1537. "swedish_stop": {
  1538. "type": "stop",
  1539. "stopwords": "_swedish_" <1>
  1540. },
  1541. "swedish_keywords": {
  1542. "type": "keyword_marker",
  1543. "keywords": ["exempel"] <2>
  1544. },
  1545. "swedish_stemmer": {
  1546. "type": "stemmer",
  1547. "language": "swedish"
  1548. }
  1549. },
  1550. "analyzer": {
  1551. "rebuilt_swedish": {
  1552. "tokenizer": "standard",
  1553. "filter": [
  1554. "lowercase",
  1555. "swedish_stop",
  1556. "swedish_keywords",
  1557. "swedish_stemmer"
  1558. ]
  1559. }
  1560. }
  1561. }
  1562. }
  1563. }
  1564. ----------------------------------------------------
  1565. // CONSOLE
  1566. // TEST[s/"swedish_keywords",//]
  1567. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
  1568. <1> The default stopwords can be overridden with the `stopwords`
  1569. or `stopwords_path` parameters.
  1570. <2> This filter should be removed unless there are words which should
  1571. be excluded from stemming.
  1572. [[turkish-analyzer]]
  1573. ===== `turkish` analyzer
  1574. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1575. [source,js]
  1576. ----------------------------------------------------
  1577. PUT /turkish_example
  1578. {
  1579. "settings": {
  1580. "analysis": {
  1581. "filter": {
  1582. "turkish_stop": {
  1583. "type": "stop",
  1584. "stopwords": "_turkish_" <1>
  1585. },
  1586. "turkish_lowercase": {
  1587. "type": "lowercase",
  1588. "language": "turkish"
  1589. },
  1590. "turkish_keywords": {
  1591. "type": "keyword_marker",
  1592. "keywords": ["örnek"] <2>
  1593. },
  1594. "turkish_stemmer": {
  1595. "type": "stemmer",
  1596. "language": "turkish"
  1597. }
  1598. },
  1599. "analyzer": {
  1600. "rebuilt_turkish": {
  1601. "tokenizer": "standard",
  1602. "filter": [
  1603. "apostrophe",
  1604. "turkish_lowercase",
  1605. "turkish_stop",
  1606. "turkish_keywords",
  1607. "turkish_stemmer"
  1608. ]
  1609. }
  1610. }
  1611. }
  1612. }
  1613. }
  1614. ----------------------------------------------------
  1615. // CONSOLE
  1616. // TEST[s/"turkish_keywords",//]
  1617. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
  1618. <1> The default stopwords can be overridden with the `stopwords`
  1619. or `stopwords_path` parameters.
  1620. <2> This filter should be removed unless there are words which should
  1621. be excluded from stemming.
  1622. [[thai-analyzer]]
  1623. ===== `thai` analyzer
  1624. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1625. [source,js]
  1626. ----------------------------------------------------
  1627. PUT /thai_example
  1628. {
  1629. "settings": {
  1630. "analysis": {
  1631. "filter": {
  1632. "thai_stop": {
  1633. "type": "stop",
  1634. "stopwords": "_thai_" <1>
  1635. }
  1636. },
  1637. "analyzer": {
  1638. "rebuilt_thai": {
  1639. "tokenizer": "thai",
  1640. "filter": [
  1641. "lowercase",
  1642. "decimal_digit",
  1643. "thai_stop"
  1644. ]
  1645. }
  1646. }
  1647. }
  1648. }
  1649. }
  1650. ----------------------------------------------------
  1651. // CONSOLE
  1653. // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
  1654. <1> The default stopwords can be overridden with the `stopwords`
  1655. or `stopwords_path` parameters.