  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<brazilian-analyzer,`brazilian`>>,
  9. <<bulgarian-analyzer,`bulgarian`>>,
  10. <<catalan-analyzer,`catalan`>>,
  11. <<cjk-analyzer,`cjk`>>,
  12. <<czech-analyzer,`czech`>>,
  13. <<danish-analyzer,`danish`>>,
  14. <<dutch-analyzer,`dutch`>>,
  15. <<english-analyzer,`english`>>,
  16. <<finnish-analyzer,`finnish`>>,
  17. <<french-analyzer,`french`>>,
  18. <<galician-analyzer,`galician`>>,
  19. <<german-analyzer,`german`>>,
  20. <<greek-analyzer,`greek`>>,
  21. <<hindi-analyzer,`hindi`>>,
  22. <<hungarian-analyzer,`hungarian`>>,
  23. <<indonesian-analyzer,`indonesian`>>,
  24. <<irish-analyzer,`irish`>>,
  25. <<italian-analyzer,`italian`>>,
  26. <<latvian-analyzer,`latvian`>>,
  27. <<lithuanian-analyzer,`lithuanian`>>,
  28. <<norwegian-analyzer,`norwegian`>>,
  29. <<persian-analyzer,`persian`>>,
  30. <<portuguese-analyzer,`portuguese`>>,
  31. <<romanian-analyzer,`romanian`>>,
  32. <<russian-analyzer,`russian`>>,
  33. <<sorani-analyzer,`sorani`>>,
  34. <<spanish-analyzer,`spanish`>>,
  35. <<swedish-analyzer,`swedish`>>,
  36. <<turkish-analyzer,`turkish`>>,
  37. <<thai-analyzer,`thai`>>.
  38. ==== Configuring language analyzers
  39. ===== Stopwords
  40. All analyzers support setting custom `stopwords` either internally in
  41. the config, or by using an external stopwords file by setting
  42. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  43. more details.
  44. ===== Excluding words from stemming
  45. The `stem_exclusion` parameter allows you to specify an array
  46. of lowercase words that should not be stemmed. Internally, this
  47. functionality is implemented by adding the
  48. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  49. with the `keywords` set to the value of the `stem_exclusion` parameter.
  50. The following analyzers support setting custom `stem_exclusion` list:
  51. `arabic`, `armenian`, `basque`, `bulgarian`, `catalan`,
  52. `czech`, `dutch`, `english`, `finnish`, `french`, `galician`,
  53. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`,
  54. `lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`,
  55. `spanish`, `swedish`, `turkish`.
  56. ==== Reimplementing language analyzers
  57. The built-in language analyzers can be reimplemented as `custom` analyzers
  58. (as described below) in order to customize their behaviour.
  59. NOTE: If you do not intend to exclude words from being stemmed (the
  60. equivalent of the `stem_exclusion` parameter above), then you should remove
  61. the `keyword_marker` token filter from the custom analyzer configuration.
  62. [[arabic-analyzer]]
  63. ===== `arabic` analyzer
  64. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  65. [source,js]
  66. ----------------------------------------------------
  67. {
  68. "settings": {
  69. "analysis": {
  70. "filter": {
  71. "arabic_stop": {
  72. "type": "stop",
  73. "stopwords": "_arabic_" <1>
  74. },
  75. "arabic_keywords": {
  76. "type": "keyword_marker",
  77. "keywords": [] <2>
  78. },
  79. "arabic_stemmer": {
  80. "type": "stemmer",
  81. "language": "arabic"
  82. }
  83. },
  84. "analyzer": {
  85. "arabic": {
  86. "tokenizer": "standard",
  87. "filter": [
  88. "lowercase",
  89. "arabic_stop",
  90. "arabic_normalization",
  91. "arabic_keywords",
  92. "arabic_stemmer"
  93. ]
  94. }
  95. }
  96. }
  97. }
  98. }
  99. ----------------------------------------------------
  100. <1> The default stopwords can be overridden with the `stopwords`
  101. or `stopwords_path` parameters.
  102. <2> This filter should be removed unless there are words which should
  103. be excluded from stemming.
  104. [[armenian-analyzer]]
  105. ===== `armenian` analyzer
  106. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  107. [source,js]
  108. ----------------------------------------------------
  109. {
  110. "settings": {
  111. "analysis": {
  112. "filter": {
  113. "armenian_stop": {
  114. "type": "stop",
  115. "stopwords": "_armenian_" <1>
  116. },
  117. "armenian_keywords": {
  118. "type": "keyword_marker",
  119. "keywords": [] <2>
  120. },
  121. "armenian_stemmer": {
  122. "type": "stemmer",
  123. "language": "armenian"
  124. }
  125. },
  126. "analyzer": {
  127. "armenian": {
  128. "tokenizer": "standard",
  129. "filter": [
  130. "lowercase",
  131. "armenian_stop",
  132. "armenian_keywords",
  133. "armenian_stemmer"
  134. ]
  135. }
  136. }
  137. }
  138. }
  139. }
  140. ----------------------------------------------------
  141. <1> The default stopwords can be overridden with the `stopwords`
  142. or `stopwords_path` parameters.
  143. <2> This filter should be removed unless there are words which should
  144. be excluded from stemming.
  145. [[basque-analyzer]]
  146. ===== `basque` analyzer
  147. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  148. [source,js]
  149. ----------------------------------------------------
  150. {
  151. "settings": {
  152. "analysis": {
  153. "filter": {
  154. "basque_stop": {
  155. "type": "stop",
  156. "stopwords": "_basque_" <1>
  157. },
  158. "basque_keywords": {
  159. "type": "keyword_marker",
  160. "keywords": [] <2>
  161. },
  162. "basque_stemmer": {
  163. "type": "stemmer",
  164. "language": "basque"
  165. }
  166. },
  167. "analyzer": {
  168. "basque": {
  169. "tokenizer": "standard",
  170. "filter": [
  171. "lowercase",
  172. "basque_stop",
  173. "basque_keywords",
  174. "basque_stemmer"
  175. ]
  176. }
  177. }
  178. }
  179. }
  180. }
  181. ----------------------------------------------------
  182. <1> The default stopwords can be overridden with the `stopwords`
  183. or `stopwords_path` parameters.
  184. <2> This filter should be removed unless there are words which should
  185. be excluded from stemming.
  186. [[brazilian-analyzer]]
  187. ===== `brazilian` analyzer
  188. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  189. [source,js]
  190. ----------------------------------------------------
  191. {
  192. "settings": {
  193. "analysis": {
  194. "filter": {
  195. "brazilian_stop": {
  196. "type": "stop",
  197. "stopwords": "_brazilian_" <1>
  198. },
  199. "brazilian_keywords": {
  200. "type": "keyword_marker",
  201. "keywords": [] <2>
  202. },
  203. "brazilian_stemmer": {
  204. "type": "stemmer",
  205. "language": "brazilian"
  206. }
  207. },
  208. "analyzer": {
  209. "brazilian": {
  210. "tokenizer": "standard",
  211. "filter": [
  212. "lowercase",
  213. "brazilian_stop",
  214. "brazilian_keywords",
  215. "brazilian_stemmer"
  216. ]
  217. }
  218. }
  219. }
  220. }
  221. }
  222. ----------------------------------------------------
  223. <1> The default stopwords can be overridden with the `stopwords`
  224. or `stopwords_path` parameters.
  225. <2> This filter should be removed unless there are words which should
  226. be excluded from stemming.
  227. [[bulgarian-analyzer]]
  228. ===== `bulgarian` analyzer
  229. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  230. [source,js]
  231. ----------------------------------------------------
  232. {
  233. "settings": {
  234. "analysis": {
  235. "filter": {
  236. "bulgarian_stop": {
  237. "type": "stop",
  238. "stopwords": "_bulgarian_" <1>
  239. },
  240. "bulgarian_keywords": {
  241. "type": "keyword_marker",
  242. "keywords": [] <2>
  243. },
  244. "bulgarian_stemmer": {
  245. "type": "stemmer",
  246. "language": "bulgarian"
  247. }
  248. },
  249. "analyzer": {
  250. "bulgarian": {
  251. "tokenizer": "standard",
  252. "filter": [
  253. "lowercase",
  254. "bulgarian_stop",
  255. "bulgarian_keywords",
  256. "bulgarian_stemmer"
  257. ]
  258. }
  259. }
  260. }
  261. }
  262. }
  263. ----------------------------------------------------
  264. <1> The default stopwords can be overridden with the `stopwords`
  265. or `stopwords_path` parameters.
  266. <2> This filter should be removed unless there are words which should
  267. be excluded from stemming.
  268. [[catalan-analyzer]]
  269. ===== `catalan` analyzer
  270. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  271. [source,js]
  272. ----------------------------------------------------
  273. {
  274. "settings": {
  275. "analysis": {
  276. "filter": {
  277. "catalan_elision": {
  278. "type": "elision",
  279. "articles": [ "d", "l", "m", "n", "s", "t"]
  280. },
  281. "catalan_stop": {
  282. "type": "stop",
  283. "stopwords": "_catalan_" <1>
  284. },
  285. "catalan_keywords": {
  286. "type": "keyword_marker",
  287. "keywords": [] <2>
  288. },
  289. "catalan_stemmer": {
  290. "type": "stemmer",
  291. "language": "catalan"
  292. }
  293. },
  294. "analyzer": {
  295. "catalan": {
  296. "tokenizer": "standard",
  297. "filter": [
  298. "catalan_elision",
  299. "lowercase",
  300. "catalan_stop",
  301. "catalan_keywords",
  302. "catalan_stemmer"
  303. ]
  304. }
  305. }
  306. }
  307. }
  308. }
  309. ----------------------------------------------------
  310. <1> The default stopwords can be overridden with the `stopwords`
  311. or `stopwords_path` parameters.
  312. <2> This filter should be removed unless there are words which should
  313. be excluded from stemming.
  314. [[cjk-analyzer]]
  315. ===== `cjk` analyzer
  316. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  317. [source,js]
  318. ----------------------------------------------------
  319. {
  320. "settings": {
  321. "analysis": {
  322. "filter": {
  323. "english_stop": {
  324. "type": "stop",
  325. "stopwords": "_english_" <1>
  326. }
  327. },
  328. "analyzer": {
  329. "cjk": {
  330. "tokenizer": "standard",
  331. "filter": [
  332. "cjk_width",
  333. "lowercase",
  334. "cjk_bigram",
  335. "english_stop"
  336. ]
  337. }
  338. }
  339. }
  340. }
  341. }
  342. ----------------------------------------------------
  343. <1> The default stopwords can be overridden with the `stopwords`
  344. or `stopwords_path` parameters.
  345. [[czech-analyzer]]
  346. ===== `czech` analyzer
  347. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  348. [source,js]
  349. ----------------------------------------------------
  350. {
  351. "settings": {
  352. "analysis": {
  353. "filter": {
  354. "czech_stop": {
  355. "type": "stop",
  356. "stopwords": "_czech_" <1>
  357. },
  358. "czech_keywords": {
  359. "type": "keyword_marker",
  360. "keywords": [] <2>
  361. },
  362. "czech_stemmer": {
  363. "type": "stemmer",
  364. "language": "czech"
  365. }
  366. },
  367. "analyzer": {
  368. "czech": {
  369. "tokenizer": "standard",
  370. "filter": [
  371. "lowercase",
  372. "czech_stop",
  373. "czech_keywords",
  374. "czech_stemmer"
  375. ]
  376. }
  377. }
  378. }
  379. }
  380. }
  381. ----------------------------------------------------
  382. <1> The default stopwords can be overridden with the `stopwords`
  383. or `stopwords_path` parameters.
  384. <2> This filter should be removed unless there are words which should
  385. be excluded from stemming.
  386. [[danish-analyzer]]
  387. ===== `danish` analyzer
  388. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  389. [source,js]
  390. ----------------------------------------------------
  391. {
  392. "settings": {
  393. "analysis": {
  394. "filter": {
  395. "danish_stop": {
  396. "type": "stop",
  397. "stopwords": "_danish_" <1>
  398. },
  399. "danish_keywords": {
  400. "type": "keyword_marker",
  401. "keywords": [] <2>
  402. },
  403. "danish_stemmer": {
  404. "type": "stemmer",
  405. "language": "danish"
  406. }
  407. },
  408. "analyzer": {
  409. "danish": {
  410. "tokenizer": "standard",
  411. "filter": [
  412. "lowercase",
  413. "danish_stop",
  414. "danish_keywords",
  415. "danish_stemmer"
  416. ]
  417. }
  418. }
  419. }
  420. }
  421. }
  422. ----------------------------------------------------
  423. <1> The default stopwords can be overridden with the `stopwords`
  424. or `stopwords_path` parameters.
  425. <2> This filter should be removed unless there are words which should
  426. be excluded from stemming.
  427. [[dutch-analyzer]]
  428. ===== `dutch` analyzer
  429. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  430. [source,js]
  431. ----------------------------------------------------
  432. {
  433. "settings": {
  434. "analysis": {
  435. "filter": {
  436. "dutch_stop": {
  437. "type": "stop",
  438. "stopwords": "_dutch_" <1>
  439. },
  440. "dutch_keywords": {
  441. "type": "keyword_marker",
  442. "keywords": [] <2>
  443. },
  444. "dutch_stemmer": {
  445. "type": "stemmer",
  446. "language": "dutch"
  447. },
  448. "dutch_override": {
  449. "type": "stemmer_override",
  450. "rules": [
  451. "fiets=>fiets",
  452. "bromfiets=>bromfiets",
  453. "ei=>eier",
  454. "kind=>kinder"
  455. ]
  456. }
  457. },
  458. "analyzer": {
  459. "dutch": {
  460. "tokenizer": "standard",
  461. "filter": [
  462. "lowercase",
  463. "dutch_stop",
  464. "dutch_keywords",
  465. "dutch_override",
  466. "dutch_stemmer"
  467. ]
  468. }
  469. }
  470. }
  471. }
  472. }
  473. ----------------------------------------------------
  474. <1> The default stopwords can be overridden with the `stopwords`
  475. or `stopwords_path` parameters.
  476. <2> This filter should be removed unless there are words which should
  477. be excluded from stemming.
  478. [[english-analyzer]]
  479. ===== `english` analyzer
  480. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  481. [source,js]
  482. ----------------------------------------------------
  483. {
  484. "settings": {
  485. "analysis": {
  486. "filter": {
  487. "english_stop": {
  488. "type": "stop",
  489. "stopwords": "_english_" <1>
  490. },
  491. "english_keywords": {
  492. "type": "keyword_marker",
  493. "keywords": [] <2>
  494. },
  495. "english_stemmer": {
  496. "type": "stemmer",
  497. "language": "english"
  498. },
  499. "english_possessive_stemmer": {
  500. "type": "stemmer",
  501. "language": "possessive_english"
  502. }
  503. },
  504. "analyzer": {
  505. "english": {
  506. "tokenizer": "standard",
  507. "filter": [
  508. "english_possessive_stemmer",
  509. "lowercase",
  510. "english_stop",
  511. "english_keywords",
  512. "english_stemmer"
  513. ]
  514. }
  515. }
  516. }
  517. }
  518. }
  519. ----------------------------------------------------
  520. <1> The default stopwords can be overridden with the `stopwords`
  521. or `stopwords_path` parameters.
  522. <2> This filter should be removed unless there are words which should
  523. be excluded from stemming.
  524. [[finnish-analyzer]]
  525. ===== `finnish` analyzer
  526. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  527. [source,js]
  528. ----------------------------------------------------
  529. {
  530. "settings": {
  531. "analysis": {
  532. "filter": {
  533. "finnish_stop": {
  534. "type": "stop",
  535. "stopwords": "_finnish_" <1>
  536. },
  537. "finnish_keywords": {
  538. "type": "keyword_marker",
  539. "keywords": [] <2>
  540. },
  541. "finnish_stemmer": {
  542. "type": "stemmer",
  543. "language": "finnish"
  544. }
  545. },
  546. "analyzer": {
  547. "finnish": {
  548. "tokenizer": "standard",
  549. "filter": [
  550. "lowercase",
  551. "finnish_stop",
  552. "finnish_keywords",
  553. "finnish_stemmer"
  554. ]
  555. }
  556. }
  557. }
  558. }
  559. }
  560. ----------------------------------------------------
  561. <1> The default stopwords can be overridden with the `stopwords`
  562. or `stopwords_path` parameters.
  563. <2> This filter should be removed unless there are words which should
  564. be excluded from stemming.
  565. [[french-analyzer]]
  566. ===== `french` analyzer
  567. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  568. [source,js]
  569. ----------------------------------------------------
  570. {
  571. "settings": {
  572. "analysis": {
  573. "filter": {
  574. "french_elision": {
  575. "type": "elision",
  576. "articles": [ "l", "m", "t", "qu", "n", "s",
  577. "j", "d", "c", "jusqu", "quoiqu",
  578. "lorsqu", "puisqu"
  579. ]
  580. },
  581. "french_stop": {
  582. "type": "stop",
  583. "stopwords": "_french_" <1>
  584. },
  585. "french_keywords": {
  586. "type": "keyword_marker",
  587. "keywords": [] <2>
  588. },
  589. "french_stemmer": {
  590. "type": "stemmer",
  591. "language": "light_french"
  592. }
  593. },
  594. "analyzer": {
  595. "french": {
  596. "tokenizer": "standard",
  597. "filter": [
  598. "french_elision",
  599. "lowercase",
  600. "french_stop",
  601. "french_keywords",
  602. "french_stemmer"
  603. ]
  604. }
  605. }
  606. }
  607. }
  608. }
  609. ----------------------------------------------------
  610. <1> The default stopwords can be overridden with the `stopwords`
  611. or `stopwords_path` parameters.
  612. <2> This filter should be removed unless there are words which should
  613. be excluded from stemming.
  614. [[galician-analyzer]]
  615. ===== `galician` analyzer
  616. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  617. [source,js]
  618. ----------------------------------------------------
  619. {
  620. "settings": {
  621. "analysis": {
  622. "filter": {
  623. "galician_stop": {
  624. "type": "stop",
  625. "stopwords": "_galician_" <1>
  626. },
  627. "galician_keywords": {
  628. "type": "keyword_marker",
  629. "keywords": [] <2>
  630. },
  631. "galician_stemmer": {
  632. "type": "stemmer",
  633. "language": "galician"
  634. }
  635. },
  636. "analyzer": {
  637. "galician": {
  638. "tokenizer": "standard",
  639. "filter": [
  640. "lowercase",
  641. "galician_stop",
  642. "galician_keywords",
  643. "galician_stemmer"
  644. ]
  645. }
  646. }
  647. }
  648. }
  649. }
  650. ----------------------------------------------------
  651. <1> The default stopwords can be overridden with the `stopwords`
  652. or `stopwords_path` parameters.
  653. <2> This filter should be removed unless there are words which should
  654. be excluded from stemming.
  655. [[german-analyzer]]
  656. ===== `german` analyzer
  657. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  658. [source,js]
  659. ----------------------------------------------------
  660. {
  661. "settings": {
  662. "analysis": {
  663. "filter": {
  664. "german_stop": {
  665. "type": "stop",
  666. "stopwords": "_german_" <1>
  667. },
  668. "german_keywords": {
  669. "type": "keyword_marker",
  670. "keywords": [] <2>
  671. },
  672. "german_stemmer": {
  673. "type": "stemmer",
  674. "language": "light_german"
  675. }
  676. },
  677. "analyzer": {
  678. "german": {
  679. "tokenizer": "standard",
  680. "filter": [
  681. "lowercase",
  682. "german_stop",
  683. "german_keywords",
  684. "german_normalization",
  685. "german_stemmer"
  686. ]
  687. }
  688. }
  689. }
  690. }
  691. }
  692. ----------------------------------------------------
  693. <1> The default stopwords can be overridden with the `stopwords`
  694. or `stopwords_path` parameters.
  695. <2> This filter should be removed unless there are words which should
  696. be excluded from stemming.
  697. [[greek-analyzer]]
  698. ===== `greek` analyzer
  699. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  700. [source,js]
  701. ----------------------------------------------------
  702. {
  703. "settings": {
  704. "analysis": {
  705. "filter": {
  706. "greek_stop": {
  707. "type": "stop",
  708. "stopwords": "_greek_" <1>
  709. },
  710. "greek_lowercase": {
  711. "type": "lowercase",
  712. "language": "greek"
  713. },
  714. "greek_keywords": {
  715. "type": "keyword_marker",
  716. "keywords": [] <2>
  717. },
  718. "greek_stemmer": {
  719. "type": "stemmer",
  720. "language": "greek"
  721. }
  722. },
  723. "analyzer": {
  724. "greek": {
  725. "tokenizer": "standard",
  726. "filter": [
  727. "greek_lowercase",
  728. "greek_stop",
  729. "greek_keywords",
  730. "greek_stemmer"
  731. ]
  732. }
  733. }
  734. }
  735. }
  736. }
  737. ----------------------------------------------------
  738. <1> The default stopwords can be overridden with the `stopwords`
  739. or `stopwords_path` parameters.
  740. <2> This filter should be removed unless there are words which should
  741. be excluded from stemming.
  742. [[hindi-analyzer]]
  743. ===== `hindi` analyzer
  744. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  745. [source,js]
  746. ----------------------------------------------------
  747. {
  748. "settings": {
  749. "analysis": {
  750. "filter": {
  751. "hindi_stop": {
  752. "type": "stop",
  753. "stopwords": "_hindi_" <1>
  754. },
  755. "hindi_keywords": {
  756. "type": "keyword_marker",
  757. "keywords": [] <2>
  758. },
  759. "hindi_stemmer": {
  760. "type": "stemmer",
  761. "language": "hindi"
  762. }
  763. },
  764. "analyzer": {
  765. "hindi": {
  766. "tokenizer": "standard",
  767. "filter": [
  768. "lowercase",
  769. "indic_normalization",
  770. "hindi_normalization",
  771. "hindi_stop",
  772. "hindi_keywords",
  773. "hindi_stemmer"
  774. ]
  775. }
  776. }
  777. }
  778. }
  779. }
  780. ----------------------------------------------------
  781. <1> The default stopwords can be overridden with the `stopwords`
  782. or `stopwords_path` parameters.
  783. <2> This filter should be removed unless there are words which should
  784. be excluded from stemming.
  785. [[hungarian-analyzer]]
  786. ===== `hungarian` analyzer
  787. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  788. [source,js]
  789. ----------------------------------------------------
  790. {
  791. "settings": {
  792. "analysis": {
  793. "filter": {
  794. "hungarian_stop": {
  795. "type": "stop",
  796. "stopwords": "_hungarian_" <1>
  797. },
  798. "hungarian_keywords": {
  799. "type": "keyword_marker",
  800. "keywords": [] <2>
  801. },
  802. "hungarian_stemmer": {
  803. "type": "stemmer",
  804. "language": "hungarian"
  805. }
  806. },
  807. "analyzer": {
  808. "hungarian": {
  809. "tokenizer": "standard",
  810. "filter": [
  811. "lowercase",
  812. "hungarian_stop",
  813. "hungarian_keywords",
  814. "hungarian_stemmer"
  815. ]
  816. }
  817. }
  818. }
  819. }
  820. }
  821. ----------------------------------------------------
  822. <1> The default stopwords can be overridden with the `stopwords`
  823. or `stopwords_path` parameters.
  824. <2> This filter should be removed unless there are words which should
  825. be excluded from stemming.
  826. [[indonesian-analyzer]]
  827. ===== `indonesian` analyzer
  828. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  829. [source,js]
  830. ----------------------------------------------------
  831. {
  832. "settings": {
  833. "analysis": {
  834. "filter": {
  835. "indonesian_stop": {
  836. "type": "stop",
  837. "stopwords": "_indonesian_" <1>
  838. },
  839. "indonesian_keywords": {
  840. "type": "keyword_marker",
  841. "keywords": [] <2>
  842. },
  843. "indonesian_stemmer": {
  844. "type": "stemmer",
  845. "language": "indonesian"
  846. }
  847. },
  848. "analyzer": {
  849. "indonesian": {
  850. "tokenizer": "standard",
  851. "filter": [
  852. "lowercase",
  853. "indonesian_stop",
  854. "indonesian_keywords",
  855. "indonesian_stemmer"
  856. ]
  857. }
  858. }
  859. }
  860. }
  861. }
  862. ----------------------------------------------------
  863. <1> The default stopwords can be overridden with the `stopwords`
  864. or `stopwords_path` parameters.
  865. <2> This filter should be removed unless there are words which should
  866. be excluded from stemming.
  867. [[irish-analyzer]]
  868. ===== `irish` analyzer
  869. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  870. [source,js]
  871. ----------------------------------------------------
  872. {
  873. "settings": {
  874. "analysis": {
  875. "filter": {
  876. "irish_elision": {
  877. "type": "elision",
  878. "articles": [ "h", "n", "t" ]
  879. },
  880. "irish_stop": {
  881. "type": "stop",
  882. "stopwords": "_irish_" <1>
  883. },
  884. "irish_lowercase": {
  885. "type": "lowercase",
  886. "language": "irish"
  887. },
  888. "irish_keywords": {
  889. "type": "keyword_marker",
  890. "keywords": [] <2>
  891. },
  892. "irish_stemmer": {
  893. "type": "stemmer",
  894. "language": "irish"
  895. }
  896. },
  897. "analyzer": {
  898. "irish": {
  899. "tokenizer": "standard",
  900. "filter": [
  901. "irish_stop",
  902. "irish_elision",
  903. "irish_lowercase",
  904. "irish_keywords",
  905. "irish_stemmer"
  906. ]
  907. }
  908. }
  909. }
  910. }
  911. }
  912. ----------------------------------------------------
  913. <1> The default stopwords can be overridden with the `stopwords`
  914. or `stopwords_path` parameters.
  915. <2> This filter should be removed unless there are words which should
  916. be excluded from stemming.
  917. [[italian-analyzer]]
  918. ===== `italian` analyzer
  919. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  920. [source,js]
  921. ----------------------------------------------------
  922. {
  923. "settings": {
  924. "analysis": {
  925. "filter": {
  926. "italian_elision": {
  927. "type": "elision",
  928. "articles": [
  929. "c", "l", "all", "dall", "dell",
  930. "nell", "sull", "coll", "pell",
  931. "gl", "agl", "dagl", "degl", "negl",
  932. "sugl", "un", "m", "t", "s", "v", "d"
  933. ]
  934. },
  935. "italian_stop": {
  936. "type": "stop",
  937. "stopwords": "_italian_" <1>
  938. },
  939. "italian_keywords": {
  940. "type": "keyword_marker",
  941. "keywords": [] <2>
  942. },
  943. "italian_stemmer": {
  944. "type": "stemmer",
  945. "language": "light_italian"
  946. }
  947. },
  948. "analyzer": {
  949. "italian": {
  950. "tokenizer": "standard",
  951. "filter": [
  952. "italian_elision",
  953. "lowercase",
  954. "italian_stop",
  955. "italian_keywords",
  956. "italian_stemmer"
  957. ]
  958. }
  959. }
  960. }
  961. }
  962. }
  963. ----------------------------------------------------
  964. <1> The default stopwords can be overridden with the `stopwords`
  965. or `stopwords_path` parameters.
  966. <2> This filter should be removed unless there are words which should
  967. be excluded from stemming.
  968. [[latvian-analyzer]]
  969. ===== `latvian` analyzer
  970. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  971. [source,js]
  972. ----------------------------------------------------
  973. {
  974. "settings": {
  975. "analysis": {
  976. "filter": {
  977. "latvian_stop": {
  978. "type": "stop",
  979. "stopwords": "_latvian_" <1>
  980. },
  981. "latvian_keywords": {
  982. "type": "keyword_marker",
  983. "keywords": [] <2>
  984. },
  985. "latvian_stemmer": {
  986. "type": "stemmer",
  987. "language": "latvian"
  988. }
  989. },
  990. "analyzer": {
  991. "latvian": {
  992. "tokenizer": "standard",
  993. "filter": [
  994. "lowercase",
  995. "latvian_stop",
  996. "latvian_keywords",
  997. "latvian_stemmer"
  998. ]
  999. }
  1000. }
  1001. }
  1002. }
  1003. }
  1004. ----------------------------------------------------
  1005. <1> The default stopwords can be overridden with the `stopwords`
  1006. or `stopwords_path` parameters.
  1007. <2> This filter should be removed unless there are words which should
  1008. be excluded from stemming.
  1009. [[lithuanian-analyzer]]
  1010. ===== `lithuanian` analyzer
  1011. The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1012. [source,js]
  1013. ----------------------------------------------------
  1014. {
  1015. "settings": {
  1016. "analysis": {
  1017. "filter": {
  1018. "lithuanian_stop": {
  1019. "type": "stop",
  1020. "stopwords": "_lithuanian_" <1>
  1021. },
  1022. "lithuanian_keywords": {
  1023. "type": "keyword_marker",
  1024. "keywords": [] <2>
  1025. },
  1026. "lithuanian_stemmer": {
  1027. "type": "stemmer",
  1028. "language": "lithuanian"
  1029. }
  1030. },
  1031. "analyzer": {
  1032. "lithuanian": {
  1033. "tokenizer": "standard",
  1034. "filter": [
  1035. "lowercase",
  1036. "lithuanian_stop",
  1037. "lithuanian_keywords",
  1038. "lithuanian_stemmer"
  1039. ]
  1040. }
  1041. }
  1042. }
  1043. }
  1044. }
  1045. ----------------------------------------------------
  1046. <1> The default stopwords can be overridden with the `stopwords`
  1047. or `stopwords_path` parameters.
  1048. <2> This filter should be removed unless there are words which should
  1049. be excluded from stemming.
  1050. [[norwegian-analyzer]]
  1051. ===== `norwegian` analyzer
  1052. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1053. [source,js]
  1054. ----------------------------------------------------
  1055. {
  1056. "settings": {
  1057. "analysis": {
  1058. "filter": {
  1059. "norwegian_stop": {
  1060. "type": "stop",
  1061. "stopwords": "_norwegian_" <1>
  1062. },
  1063. "norwegian_keywords": {
  1064. "type": "keyword_marker",
  1065. "keywords": [] <2>
  1066. },
  1067. "norwegian_stemmer": {
  1068. "type": "stemmer",
  1069. "language": "norwegian"
  1070. }
  1071. },
  1072. "analyzer": {
  1073. "norwegian": {
  1074. "tokenizer": "standard",
  1075. "filter": [
  1076. "lowercase",
  1077. "norwegian_stop",
  1078. "norwegian_keywords",
  1079. "norwegian_stemmer"
  1080. ]
  1081. }
  1082. }
  1083. }
  1084. }
  1085. }
  1086. ----------------------------------------------------
  1087. <1> The default stopwords can be overridden with the `stopwords`
  1088. or `stopwords_path` parameters.
  1089. <2> This filter should be removed unless there are words which should
  1090. be excluded from stemming.
  1091. [[persian-analyzer]]
  1092. ===== `persian` analyzer
  1093. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1094. [source,js]
  1095. ----------------------------------------------------
  1096. {
  1097. "settings": {
  1098. "analysis": {
  1099. "char_filter": {
  1100. "zero_width_spaces": {
  1101. "type": "mapping",
  1102. "mappings": [ "\\u200C=> "] <1>
  1103. }
  1104. },
  1105. "filter": {
  1106. "persian_stop": {
  1107. "type": "stop",
  1108. "stopwords": "_persian_" <2>
  1109. }
  1110. },
  1111. "analyzer": {
  1112. "persian": {
  1113. "tokenizer": "standard",
  1114. "char_filter": [ "zero_width_spaces" ],
  1115. "filter": [
  1116. "lowercase",
  1117. "arabic_normalization",
  1118. "persian_normalization",
  1119. "persian_stop"
  1120. ]
  1121. }
  1122. }
  1123. }
  1124. }
  1125. }
  1126. ----------------------------------------------------
  1127. <1> Replaces zero-width non-joiners with an ASCII space.
  1128. <2> The default stopwords can be overridden with the `stopwords`
  1129. or `stopwords_path` parameters.
  1130. [[portuguese-analyzer]]
  1131. ===== `portuguese` analyzer
  1132. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1133. [source,js]
  1134. ----------------------------------------------------
  1135. {
  1136. "settings": {
  1137. "analysis": {
  1138. "filter": {
  1139. "portuguese_stop": {
  1140. "type": "stop",
  1141. "stopwords": "_portuguese_" <1>
  1142. },
  1143. "portuguese_keywords": {
  1144. "type": "keyword_marker",
  1145. "keywords": [] <2>
  1146. },
  1147. "portuguese_stemmer": {
  1148. "type": "stemmer",
  1149. "language": "light_portuguese"
  1150. }
  1151. },
  1152. "analyzer": {
  1153. "portuguese": {
  1154. "tokenizer": "standard",
  1155. "filter": [
  1156. "lowercase",
  1157. "portuguese_stop",
  1158. "portuguese_keywords",
  1159. "portuguese_stemmer"
  1160. ]
  1161. }
  1162. }
  1163. }
  1164. }
  1165. }
  1166. ----------------------------------------------------
  1167. <1> The default stopwords can be overridden with the `stopwords`
  1168. or `stopwords_path` parameters.
  1169. <2> This filter should be removed unless there are words which should
  1170. be excluded from stemming.
  1171. [[romanian-analyzer]]
  1172. ===== `romanian` analyzer
  1173. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1174. [source,js]
  1175. ----------------------------------------------------
  1176. {
  1177. "settings": {
  1178. "analysis": {
  1179. "filter": {
  1180. "romanian_stop": {
  1181. "type": "stop",
  1182. "stopwords": "_romanian_" <1>
  1183. },
  1184. "romanian_keywords": {
  1185. "type": "keyword_marker",
  1186. "keywords": [] <2>
  1187. },
  1188. "romanian_stemmer": {
  1189. "type": "stemmer",
  1190. "language": "romanian"
  1191. }
  1192. },
  1193. "analyzer": {
  1194. "romanian": {
  1195. "tokenizer": "standard",
  1196. "filter": [
  1197. "lowercase",
  1198. "romanian_stop",
  1199. "romanian_keywords",
  1200. "romanian_stemmer"
  1201. ]
  1202. }
  1203. }
  1204. }
  1205. }
  1206. }
  1207. ----------------------------------------------------
  1208. <1> The default stopwords can be overridden with the `stopwords`
  1209. or `stopwords_path` parameters.
  1210. <2> This filter should be removed unless there are words which should
  1211. be excluded from stemming.
  1212. [[russian-analyzer]]
  1213. ===== `russian` analyzer
  1214. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1215. [source,js]
  1216. ----------------------------------------------------
  1217. {
  1218. "settings": {
  1219. "analysis": {
  1220. "filter": {
  1221. "russian_stop": {
  1222. "type": "stop",
  1223. "stopwords": "_russian_" <1>
  1224. },
  1225. "russian_keywords": {
  1226. "type": "keyword_marker",
  1227. "keywords": [] <2>
  1228. },
  1229. "russian_stemmer": {
  1230. "type": "stemmer",
  1231. "language": "russian"
  1232. }
  1233. },
  1234. "analyzer": {
  1235. "russian": {
  1236. "tokenizer": "standard",
  1237. "filter": [
  1238. "lowercase",
  1239. "russian_stop",
  1240. "russian_keywords",
  1241. "russian_stemmer"
  1242. ]
  1243. }
  1244. }
  1245. }
  1246. }
  1247. }
  1248. ----------------------------------------------------
  1249. <1> The default stopwords can be overridden with the `stopwords`
  1250. or `stopwords_path` parameters.
  1251. <2> This filter should be removed unless there are words which should
  1252. be excluded from stemming.
  1253. [[sorani-analyzer]]
  1254. ===== `sorani` analyzer
  1255. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1256. [source,js]
  1257. ----------------------------------------------------
  1258. {
  1259. "settings": {
  1260. "analysis": {
  1261. "filter": {
  1262. "sorani_stop": {
  1263. "type": "stop",
  1264. "stopwords": "_sorani_" <1>
  1265. },
  1266. "sorani_keywords": {
  1267. "type": "keyword_marker",
  1268. "keywords": [] <2>
  1269. },
  1270. "sorani_stemmer": {
  1271. "type": "stemmer",
  1272. "language": "sorani"
  1273. }
  1274. },
  1275. "analyzer": {
  1276. "sorani": {
  1277. "tokenizer": "standard",
  1278. "filter": [
  1279. "sorani_normalization",
  1280. "lowercase",
  1281. "sorani_stop",
  1282. "sorani_keywords",
  1283. "sorani_stemmer"
  1284. ]
  1285. }
  1286. }
  1287. }
  1288. }
  1289. }
  1290. ----------------------------------------------------
  1291. <1> The default stopwords can be overridden with the `stopwords`
  1292. or `stopwords_path` parameters.
  1293. <2> This filter should be removed unless there are words which should
  1294. be excluded from stemming.
  1295. [[spanish-analyzer]]
  1296. ===== `spanish` analyzer
  1297. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1298. [source,js]
  1299. ----------------------------------------------------
  1300. {
  1301. "settings": {
  1302. "analysis": {
  1303. "filter": {
  1304. "spanish_stop": {
  1305. "type": "stop",
  1306. "stopwords": "_spanish_" <1>
  1307. },
  1308. "spanish_keywords": {
  1309. "type": "keyword_marker",
  1310. "keywords": [] <2>
  1311. },
  1312. "spanish_stemmer": {
  1313. "type": "stemmer",
  1314. "language": "light_spanish"
  1315. }
  1316. },
  1317. "analyzer": {
  1318. "spanish": {
  1319. "tokenizer": "standard",
  1320. "filter": [
  1321. "lowercase",
  1322. "spanish_stop",
  1323. "spanish_keywords",
  1324. "spanish_stemmer"
  1325. ]
  1326. }
  1327. }
  1328. }
  1329. }
  1330. }
  1331. ----------------------------------------------------
  1332. <1> The default stopwords can be overridden with the `stopwords`
  1333. or `stopwords_path` parameters.
  1334. <2> This filter should be removed unless there are words which should
  1335. be excluded from stemming.
  1336. [[swedish-analyzer]]
  1337. ===== `swedish` analyzer
  1338. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1339. [source,js]
  1340. ----------------------------------------------------
  1341. {
  1342. "settings": {
  1343. "analysis": {
  1344. "filter": {
  1345. "swedish_stop": {
  1346. "type": "stop",
  1347. "stopwords": "_swedish_" <1>
  1348. },
  1349. "swedish_keywords": {
  1350. "type": "keyword_marker",
  1351. "keywords": [] <2>
  1352. },
  1353. "swedish_stemmer": {
  1354. "type": "stemmer",
  1355. "language": "swedish"
  1356. }
  1357. },
  1358. "analyzer": {
  1359. "swedish": {
  1360. "tokenizer": "standard",
  1361. "filter": [
  1362. "lowercase",
  1363. "swedish_stop",
  1364. "swedish_keywords",
  1365. "swedish_stemmer"
  1366. ]
  1367. }
  1368. }
  1369. }
  1370. }
  1371. }
  1372. ----------------------------------------------------
  1373. <1> The default stopwords can be overridden with the `stopwords`
  1374. or `stopwords_path` parameters.
  1375. <2> This filter should be removed unless there are words which should
  1376. be excluded from stemming.
  1377. [[turkish-analyzer]]
  1378. ===== `turkish` analyzer
  1379. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1380. [source,js]
  1381. ----------------------------------------------------
  1382. {
  1383. "settings": {
  1384. "analysis": {
  1385. "filter": {
  1386. "turkish_stop": {
  1387. "type": "stop",
  1388. "stopwords": "_turkish_" <1>
  1389. },
  1390. "turkish_lowercase": {
  1391. "type": "lowercase",
  1392. "language": "turkish"
  1393. },
  1394. "turkish_keywords": {
  1395. "type": "keyword_marker",
  1396. "keywords": [] <2>
  1397. },
  1398. "turkish_stemmer": {
  1399. "type": "stemmer",
  1400. "language": "turkish"
  1401. }
  1402. },
  1403. "analyzer": {
  1404. "turkish": {
  1405. "tokenizer": "standard",
  1406. "filter": [
  1407. "apostrophe",
  1408. "turkish_lowercase",
  1409. "turkish_stop",
  1410. "turkish_keywords",
  1411. "turkish_stemmer"
  1412. ]
  1413. }
  1414. }
  1415. }
  1416. }
  1417. }
  1418. ----------------------------------------------------
  1419. <1> The default stopwords can be overridden with the `stopwords`
  1420. or `stopwords_path` parameters.
  1421. <2> This filter should be removed unless there are words which should
  1422. be excluded from stemming.
  1423. [[thai-analyzer]]
  1424. ===== `thai` analyzer
  1425. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1426. [source,js]
  1427. ----------------------------------------------------
  1428. {
  1429. "settings": {
  1430. "analysis": {
  1431. "filter": {
  1432. "thai_stop": {
  1433. "type": "stop",
  1434. "stopwords": "_thai_" <1>
  1435. }
  1436. },
  1437. "analyzer": {
  1438. "thai": {
  1439. "tokenizer": "thai",
  1440. "filter": [
  1441. "lowercase",
  1442. "thai_stop"
  1443. ]
  1444. }
  1445. }
  1446. }
  1447. }
  1448. }
  1449. ----------------------------------------------------
  1450. <1> The default stopwords can be overridden with the `stopwords`
  1451. or `stopwords_path` parameters.