// lang-analyzer.asciidoc
// NOTE(review): removed extraction residue here (file-size header and runs of
// concatenated line numbers left over from PDF/HTML scraping — not document content).
  1. [[analysis-lang-analyzer]]
  2. === Language Analyzers
  3. A set of analyzers aimed at analyzing specific language text. The
  4. following types are supported:
  5. <<arabic-analyzer,`arabic`>>,
  6. <<armenian-analyzer,`armenian`>>,
  7. <<basque-analyzer,`basque`>>,
  8. <<brazilian-analyzer,`brazilian`>>,
  9. <<bulgarian-analyzer,`bulgarian`>>,
  10. <<catalan-analyzer,`catalan`>>,
  11. <<chinese-analyzer,`chinese`>>,
  12. <<cjk-analyzer,`cjk`>>,
  13. <<czech-analyzer,`czech`>>,
  14. <<danish-analyzer,`danish`>>,
  15. <<dutch-analyzer,`dutch`>>,
  16. <<english-analyzer,`english`>>,
  17. <<finnish-analyzer,`finnish`>>,
  18. <<french-analyzer,`french`>>,
  19. <<galician-analyzer,`galician`>>,
  20. <<german-analyzer,`german`>>,
  21. <<greek-analyzer,`greek`>>,
  22. <<hindi-analyzer,`hindi`>>,
  23. <<hungarian-analyzer,`hungarian`>>,
  24. <<indonesian-analyzer,`indonesian`>>,
  25. <<irish-analyzer,`irish`>>,
  26. <<italian-analyzer,`italian`>>,
  27. <<latvian-analyzer,`latvian`>>,
  28. <<norwegian-analyzer,`norwegian`>>,
  29. <<persian-analyzer,`persian`>>,
  30. <<portuguese-analyzer,`portuguese`>>,
  31. <<romanian-analyzer,`romanian`>>,
  32. <<russian-analyzer,`russian`>>,
  33. <<sorani-analyzer,`sorani`>>,
  34. <<spanish-analyzer,`spanish`>>,
  35. <<swedish-analyzer,`swedish`>>,
  36. <<turkish-analyzer,`turkish`>>,
  37. <<thai-analyzer,`thai`>>.
  38. ==== Configuring language analyzers
  39. ===== Stopwords
  40. All analyzers support setting custom `stopwords` either internally in
  41. the config, or by using an external stopwords file by setting
  42. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
  43. more details.
  44. ===== Excluding words from stemming
  45. The `stem_exclusion` parameter allows you to specify an array
  46. of lowercase words that should not be stemmed. Internally, this
  47. functionality is implemented by adding the
  48. <<analysis-keyword-marker-tokenfilter,`keyword_marker` token filter>>
  49. with the `keywords` set to the value of the `stem_exclusion` parameter.
  50. The following analyzers support setting custom `stem_exclusion` list:
  51. `arabic`, `armenian`, `basque`, `bulgarian`, `catalan`,
  52. `czech`, `dutch`, `english`, `finnish`, `french`, `galician`,
  53. `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`, `norwegian`,
  54. `portuguese`, `romanian`, `russian`, `sorani`, `spanish`, `swedish`, `turkish`.
  55. ==== Reimplementing language analyzers
  56. The built-in language analyzers can be reimplemented as `custom` analyzers
  57. (as described below) in order to customize their behaviour.
  58. NOTE: If you do not intend to exclude words from being stemmed (the
  59. equivalent of the `stem_exclusion` parameter above), then you should remove
  60. the `keyword_marker` token filter from the custom analyzer configuration.
  61. [[arabic-analyzer]]
  62. ===== `arabic` analyzer
  63. The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
  64. [source,js]
  65. ----------------------------------------------------
  66. {
  67. "settings": {
  68. "analysis": {
  69. "filter": {
  70. "arabic_stop": {
  71. "type": "stop",
  72. "stopwords": "_arabic_" <1>
  73. },
  74. "arabic_keywords": {
  75. "type": "keyword_marker",
  76. "keywords": [] <2>
  77. },
  78. "arabic_stemmer": {
  79. "type": "stemmer",
  80. "language": "arabic"
  81. }
  82. },
  83. "analyzer": {
  84. "arabic": {
  85. "tokenizer": "standard",
  86. "filter": [
  87. "lowercase",
  88. "arabic_stop",
  89. "arabic_normalization",
  90. "arabic_keywords",
  91. "arabic_stemmer"
  92. ]
  93. }
  94. }
  95. }
  96. }
  97. }
  98. ----------------------------------------------------
  99. <1> The default stopwords can be overridden with the `stopwords`
  100. or `stopwords_path` parameters.
  101. <2> This filter should be removed unless there are words which should
  102. be excluded from stemming.
  103. [[armenian-analyzer]]
  104. ===== `armenian` analyzer
  105. The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows:
  106. [source,js]
  107. ----------------------------------------------------
  108. {
  109. "settings": {
  110. "analysis": {
  111. "filter": {
  112. "armenian_stop": {
  113. "type": "stop",
  114. "stopwords": "_armenian_" <1>
  115. },
  116. "armenian_keywords": {
  117. "type": "keyword_marker",
  118. "keywords": [] <2>
  119. },
  120. "armenian_stemmer": {
  121. "type": "stemmer",
  122. "language": "armenian"
  123. }
  124. },
  125. "analyzer": {
  126. "armenian": {
  127. "tokenizer": "standard",
  128. "filter": [
  129. "lowercase",
  130. "armenian_stop",
  131. "armenian_keywords",
  132. "armenian_stemmer"
  133. ]
  134. }
  135. }
  136. }
  137. }
  138. }
  139. ----------------------------------------------------
  140. <1> The default stopwords can be overridden with the `stopwords`
  141. or `stopwords_path` parameters.
  142. <2> This filter should be removed unless there are words which should
  143. be excluded from stemming.
  144. [[basque-analyzer]]
  145. ===== `basque` analyzer
  146. The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
  147. [source,js]
  148. ----------------------------------------------------
  149. {
  150. "settings": {
  151. "analysis": {
  152. "filter": {
  153. "basque_stop": {
  154. "type": "stop",
  155. "stopwords": "_basque_" <1>
  156. },
  157. "basque_keywords": {
  158. "type": "keyword_marker",
  159. "keywords": [] <2>
  160. },
  161. "basque_stemmer": {
  162. "type": "stemmer",
  163. "language": "basque"
  164. }
  165. },
  166. "analyzer": {
  167. "basque": {
  168. "tokenizer": "standard",
  169. "filter": [
  170. "lowercase",
  171. "basque_stop",
  172. "basque_keywords",
  173. "basque_stemmer"
  174. ]
  175. }
  176. }
  177. }
  178. }
  179. }
  180. ----------------------------------------------------
  181. <1> The default stopwords can be overridden with the `stopwords`
  182. or `stopwords_path` parameters.
  183. <2> This filter should be removed unless there are words which should
  184. be excluded from stemming.
  185. [[brazilian-analyzer]]
  186. ===== `brazilian` analyzer
  187. The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows:
  188. [source,js]
  189. ----------------------------------------------------
  190. {
  191. "settings": {
  192. "analysis": {
  193. "filter": {
  194. "brazilian_stop": {
  195. "type": "stop",
  196. "stopwords": "_brazilian_" <1>
  197. },
  198. "brazilian_keywords": {
  199. "type": "keyword_marker",
  200. "keywords": [] <2>
  201. },
  202. "brazilian_stemmer": {
  203. "type": "stemmer",
  204. "language": "brazilian"
  205. }
  206. },
  207. "analyzer": {
  208. "brazilian": {
  209. "tokenizer": "standard",
  210. "filter": [
  211. "lowercase",
  212. "brazilian_stop",
  213. "brazilian_keywords",
  214. "brazilian_stemmer"
  215. ]
  216. }
  217. }
  218. }
  219. }
  220. }
  221. ----------------------------------------------------
  222. <1> The default stopwords can be overridden with the `stopwords`
  223. or `stopwords_path` parameters.
  224. <2> This filter should be removed unless there are words which should
  225. be excluded from stemming.
  226. [[bulgarian-analyzer]]
  227. ===== `bulgarian` analyzer
  228. The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  229. [source,js]
  230. ----------------------------------------------------
  231. {
  232. "settings": {
  233. "analysis": {
  234. "filter": {
  235. "bulgarian_stop": {
  236. "type": "stop",
  237. "stopwords": "_bulgarian_" <1>
  238. },
  239. "bulgarian_keywords": {
  240. "type": "keyword_marker",
  241. "keywords": [] <2>
  242. },
  243. "bulgarian_stemmer": {
  244. "type": "stemmer",
  245. "language": "bulgarian"
  246. }
  247. },
  248. "analyzer": {
  249. "bulgarian": {
  250. "tokenizer": "standard",
  251. "filter": [
  252. "lowercase",
  253. "bulgarian_stop",
  254. "bulgarian_keywords",
  255. "bulgarian_stemmer"
  256. ]
  257. }
  258. }
  259. }
  260. }
  261. }
  262. ----------------------------------------------------
  263. <1> The default stopwords can be overridden with the `stopwords`
  264. or `stopwords_path` parameters.
  265. <2> This filter should be removed unless there are words which should
  266. be excluded from stemming.
  267. [[catalan-analyzer]]
  268. ===== `catalan` analyzer
  269. The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
  270. [source,js]
  271. ----------------------------------------------------
  272. {
  273. "settings": {
  274. "analysis": {
  275. "filter": {
  276. "catalan_elision": {
  277. "type": "elision",
  278. "articles": [ "d", "l", "m", "n", "s", "t"]
  279. },
  280. "catalan_stop": {
  281. "type": "stop",
  282. "stopwords": "_catalan_" <1>
  283. },
  284. "catalan_keywords": {
  285. "type": "keyword_marker",
  286. "keywords": [] <2>
  287. },
  288. "catalan_stemmer": {
  289. "type": "stemmer",
  290. "language": "catalan"
  291. }
  292. },
  293. "analyzer": {
  294. "catalan": {
  295. "tokenizer": "standard",
  296. "filter": [
  297. "catalan_elision",
  298. "lowercase",
  299. "catalan_stop",
  300. "catalan_keywords",
  301. "catalan_stemmer"
  302. ]
  303. }
  304. }
  305. }
  306. }
  307. }
  308. ----------------------------------------------------
  309. <1> The default stopwords can be overridden with the `stopwords`
  310. or `stopwords_path` parameters.
  311. <2> This filter should be removed unless there are words which should
  312. be excluded from stemming.
  313. [[chinese-analyzer]]
  314. ===== `chinese` analyzer
  315. The `chinese` analyzer cannot be reimplemented as a `custom` analyzer
  316. because it depends on the ChineseTokenizer and ChineseFilter classes,
  317. which are not exposed in Elasticsearch. These classes are
  318. deprecated in Lucene 4 and the `chinese` analyzer will be replaced
  319. with the <<analysis-standard-analyzer,`standard` analyzer>> in Lucene 5.
  320. [[cjk-analyzer]]
  321. ===== `cjk` analyzer
  322. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
  323. [source,js]
  324. ----------------------------------------------------
  325. {
  326. "settings": {
  327. "analysis": {
  328. "filter": {
  329. "english_stop": {
  330. "type": "stop",
  331. "stopwords": "_english_" <1>
  332. }
  333. },
  334. "analyzer": {
  335. "cjk": {
  336. "tokenizer": "standard",
  337. "filter": [
  338. "cjk_width",
  339. "lowercase",
  340. "cjk_bigram",
  341. "english_stop"
  342. ]
  343. }
  344. }
  345. }
  346. }
  347. }
  348. ----------------------------------------------------
  349. <1> The default stopwords can be overridden with the `stopwords`
  350. or `stopwords_path` parameters.
  351. [[czech-analyzer]]
  352. ===== `czech` analyzer
  353. The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
  354. [source,js]
  355. ----------------------------------------------------
  356. {
  357. "settings": {
  358. "analysis": {
  359. "filter": {
  360. "czech_stop": {
  361. "type": "stop",
  362. "stopwords": "_czech_" <1>
  363. },
  364. "czech_keywords": {
  365. "type": "keyword_marker",
  366. "keywords": [] <2>
  367. },
  368. "czech_stemmer": {
  369. "type": "stemmer",
  370. "language": "czech"
  371. }
  372. },
  373. "analyzer": {
  374. "czech": {
  375. "tokenizer": "standard",
  376. "filter": [
  377. "lowercase",
  378. "czech_stop",
  379. "czech_keywords",
  380. "czech_stemmer"
  381. ]
  382. }
  383. }
  384. }
  385. }
  386. }
  387. ----------------------------------------------------
  388. <1> The default stopwords can be overridden with the `stopwords`
  389. or `stopwords_path` parameters.
  390. <2> This filter should be removed unless there are words which should
  391. be excluded from stemming.
  392. [[danish-analyzer]]
  393. ===== `danish` analyzer
  394. The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
  395. [source,js]
  396. ----------------------------------------------------
  397. {
  398. "settings": {
  399. "analysis": {
  400. "filter": {
  401. "danish_stop": {
  402. "type": "stop",
  403. "stopwords": "_danish_" <1>
  404. },
  405. "danish_keywords": {
  406. "type": "keyword_marker",
  407. "keywords": [] <2>
  408. },
  409. "danish_stemmer": {
  410. "type": "stemmer",
  411. "language": "danish"
  412. }
  413. },
  414. "analyzer": {
  415. "danish": {
  416. "tokenizer": "standard",
  417. "filter": [
  418. "lowercase",
  419. "danish_stop",
  420. "danish_keywords",
  421. "danish_stemmer"
  422. ]
  423. }
  424. }
  425. }
  426. }
  427. }
  428. ----------------------------------------------------
  429. <1> The default stopwords can be overridden with the `stopwords`
  430. or `stopwords_path` parameters.
  431. <2> This filter should be removed unless there are words which should
  432. be excluded from stemming.
  433. [[dutch-analyzer]]
  434. ===== `dutch` analyzer
  435. The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
  436. [source,js]
  437. ----------------------------------------------------
  438. {
  439. "settings": {
  440. "analysis": {
  441. "filter": {
  442. "dutch_stop": {
  443. "type": "stop",
  444. "stopwords": "_dutch_" <1>
  445. },
  446. "dutch_keywords": {
  447. "type": "keyword_marker",
  448. "keywords": [] <2>
  449. },
  450. "dutch_stemmer": {
  451. "type": "stemmer",
  452. "language": "dutch"
  453. },
  454. "dutch_override": {
  455. "type": "stemmer_override",
  456. "rules": [
  457. "fiets=>fiets",
  458. "bromfiets=>bromfiets",
  459. "ei=>eier",
  460. "kind=>kinder"
  461. ]
  462. }
  463. },
  464. "analyzer": {
  465. "dutch": {
  466. "tokenizer": "standard",
  467. "filter": [
  468. "lowercase",
  469. "dutch_stop",
  470. "dutch_keywords",
  471. "dutch_override",
  472. "dutch_stemmer"
  473. ]
  474. }
  475. }
  476. }
  477. }
  478. }
  479. ----------------------------------------------------
  480. <1> The default stopwords can be overridden with the `stopwords`
  481. or `stopwords_path` parameters.
  482. <2> This filter should be removed unless there are words which should
  483. be excluded from stemming.
  484. [[english-analyzer]]
  485. ===== `english` analyzer
  486. The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
  487. [source,js]
  488. ----------------------------------------------------
  489. {
  490. "settings": {
  491. "analysis": {
  492. "filter": {
  493. "english_stop": {
  494. "type": "stop",
  495. "stopwords": "_english_" <1>
  496. },
  497. "english_keywords": {
  498. "type": "keyword_marker",
  499. "keywords": [] <2>
  500. },
  501. "english_stemmer": {
  502. "type": "stemmer",
  503. "language": "english"
  504. },
  505. "english_possessive_stemmer": {
  506. "type": "stemmer",
  507. "language": "possessive_english"
  508. }
  509. },
  510. "analyzer": {
  511. "english": {
  512. "tokenizer": "standard",
  513. "filter": [
  514. "english_possessive_stemmer",
  515. "lowercase",
  516. "english_stop",
  517. "english_keywords",
  518. "english_stemmer"
  519. ]
  520. }
  521. }
  522. }
  523. }
  524. }
  525. ----------------------------------------------------
  526. <1> The default stopwords can be overridden with the `stopwords`
  527. or `stopwords_path` parameters.
  528. <2> This filter should be removed unless there are words which should
  529. be excluded from stemming.
  530. [[finnish-analyzer]]
  531. ===== `finnish` analyzer
  532. The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
  533. [source,js]
  534. ----------------------------------------------------
  535. {
  536. "settings": {
  537. "analysis": {
  538. "filter": {
  539. "finnish_stop": {
  540. "type": "stop",
  541. "stopwords": "_finnish_" <1>
  542. },
  543. "finnish_keywords": {
  544. "type": "keyword_marker",
  545. "keywords": [] <2>
  546. },
  547. "finnish_stemmer": {
  548. "type": "stemmer",
  549. "language": "finnish"
  550. }
  551. },
  552. "analyzer": {
  553. "finnish": {
  554. "tokenizer": "standard",
  555. "filter": [
  556. "lowercase",
  557. "finnish_stop",
  558. "finnish_keywords",
  559. "finnish_stemmer"
  560. ]
  561. }
  562. }
  563. }
  564. }
  565. }
  566. ----------------------------------------------------
  567. <1> The default stopwords can be overridden with the `stopwords`
  568. or `stopwords_path` parameters.
  569. <2> This filter should be removed unless there are words which should
  570. be excluded from stemming.
  571. [[french-analyzer]]
  572. ===== `french` analyzer
  573. The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
  574. [source,js]
  575. ----------------------------------------------------
  576. {
  577. "settings": {
  578. "analysis": {
  579. "filter": {
  580. "french_elision": {
  581. "type": "elision",
  582. "articles": [ "l", "m", "t", "qu", "n", "s",
  583. "j", "d", "c", "jusqu", "quoiqu",
  584. "lorsqu", "puisqu"
  585. ]
  586. },
  587. "french_stop": {
  588. "type": "stop",
  589. "stopwords": "_french_" <1>
  590. },
  591. "french_keywords": {
  592. "type": "keyword_marker",
  593. "keywords": [] <2>
  594. },
  595. "french_stemmer": {
  596. "type": "stemmer",
  597. "language": "light_french"
  598. }
  599. },
  600. "analyzer": {
  601. "french": {
  602. "tokenizer": "standard",
  603. "filter": [
  604. "french_elision",
  605. "lowercase",
  606. "french_stop",
  607. "french_keywords",
  608. "french_stemmer"
  609. ]
  610. }
  611. }
  612. }
  613. }
  614. }
  615. ----------------------------------------------------
  616. <1> The default stopwords can be overridden with the `stopwords`
  617. or `stopwords_path` parameters.
  618. <2> This filter should be removed unless there are words which should
  619. be excluded from stemming.
  620. [[galician-analyzer]]
  621. ===== `galician` analyzer
  622. The `galician` analyzer could be reimplemented as a `custom` analyzer as follows:
  623. [source,js]
  624. ----------------------------------------------------
  625. {
  626. "settings": {
  627. "analysis": {
  628. "filter": {
  629. "galician_stop": {
  630. "type": "stop",
  631. "stopwords": "_galician_" <1>
  632. },
  633. "galician_keywords": {
  634. "type": "keyword_marker",
  635. "keywords": [] <2>
  636. },
  637. "galician_stemmer": {
  638. "type": "stemmer",
  639. "language": "galician"
  640. }
  641. },
  642. "analyzer": {
  643. "galician": {
  644. "tokenizer": "standard",
  645. "filter": [
  646. "lowercase",
  647. "galician_stop",
  648. "galician_keywords",
  649. "galician_stemmer"
  650. ]
  651. }
  652. }
  653. }
  654. }
  655. }
  656. ----------------------------------------------------
  657. <1> The default stopwords can be overridden with the `stopwords`
  658. or `stopwords_path` parameters.
  659. <2> This filter should be removed unless there are words which should
  660. be excluded from stemming.
  661. [[german-analyzer]]
  662. ===== `german` analyzer
  663. The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
  664. [source,js]
  665. ----------------------------------------------------
  666. {
  667. "settings": {
  668. "analysis": {
  669. "filter": {
  670. "german_stop": {
  671. "type": "stop",
  672. "stopwords": "_german_" <1>
  673. },
  674. "german_keywords": {
  675. "type": "keyword_marker",
  676. "keywords": [] <2>
  677. },
  678. "german_stemmer": {
  679. "type": "stemmer",
  680. "language": "light_german"
  681. }
  682. },
  683. "analyzer": {
  684. "german": {
  685. "tokenizer": "standard",
  686. "filter": [
  687. "lowercase",
  688. "german_stop",
  689. "german_keywords",
  690. "german_normalization",
  691. "german_stemmer"
  692. ]
  693. }
  694. }
  695. }
  696. }
  697. }
  698. ----------------------------------------------------
  699. <1> The default stopwords can be overridden with the `stopwords`
  700. or `stopwords_path` parameters.
  701. <2> This filter should be removed unless there are words which should
  702. be excluded from stemming.
  703. [[greek-analyzer]]
  704. ===== `greek` analyzer
  705. The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
  706. [source,js]
  707. ----------------------------------------------------
  708. {
  709. "settings": {
  710. "analysis": {
  711. "filter": {
  712. "greek_stop": {
  713. "type": "stop",
  714. "stopwords": "_greek_" <1>
  715. },
  716. "greek_lowercase": {
  717. "type": "lowercase",
  718. "language": "greek"
  719. },
  720. "greek_keywords": {
  721. "type": "keyword_marker",
  722. "keywords": [] <2>
  723. },
  724. "greek_stemmer": {
  725. "type": "stemmer",
  726. "language": "greek"
  727. }
  728. },
  729. "analyzer": {
  730. "greek": {
  731. "tokenizer": "standard",
  732. "filter": [
  733. "greek_lowercase",
  734. "greek_stop",
  735. "greek_keywords",
  736. "greek_stemmer"
  737. ]
  738. }
  739. }
  740. }
  741. }
  742. }
  743. ----------------------------------------------------
  744. <1> The default stopwords can be overridden with the `stopwords`
  745. or `stopwords_path` parameters.
  746. <2> This filter should be removed unless there are words which should
  747. be excluded from stemming.
  748. [[hindi-analyzer]]
  749. ===== `hindi` analyzer
  750. The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
  751. [source,js]
  752. ----------------------------------------------------
  753. {
  754. "settings": {
  755. "analysis": {
  756. "filter": {
  757. "hindi_stop": {
  758. "type": "stop",
  759. "stopwords": "_hindi_" <1>
  760. },
  761. "hindi_keywords": {
  762. "type": "keyword_marker",
  763. "keywords": [] <2>
  764. },
  765. "hindi_stemmer": {
  766. "type": "stemmer",
  767. "language": "hindi"
  768. }
  769. },
  770. "analyzer": {
  771. "hindi": {
  772. "tokenizer": "standard",
  773. "filter": [
  774. "lowercase",
  775. "indic_normalization",
  776. "hindi_normalization",
  777. "hindi_stop",
  778. "hindi_keywords",
  779. "hindi_stemmer"
  780. ]
  781. }
  782. }
  783. }
  784. }
  785. }
  786. ----------------------------------------------------
  787. <1> The default stopwords can be overridden with the `stopwords`
  788. or `stopwords_path` parameters.
  789. <2> This filter should be removed unless there are words which should
  790. be excluded from stemming.
  791. [[hungarian-analyzer]]
  792. ===== `hungarian` analyzer
  793. The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows:
  794. [source,js]
  795. ----------------------------------------------------
  796. {
  797. "settings": {
  798. "analysis": {
  799. "filter": {
  800. "hungarian_stop": {
  801. "type": "stop",
  802. "stopwords": "_hungarian_" <1>
  803. },
  804. "hungarian_keywords": {
  805. "type": "keyword_marker",
  806. "keywords": [] <2>
  807. },
  808. "hungarian_stemmer": {
  809. "type": "stemmer",
  810. "language": "hungarian"
  811. }
  812. },
  813. "analyzer": {
  814. "hungarian": {
  815. "tokenizer": "standard",
  816. "filter": [
  817. "lowercase",
  818. "hungarian_stop",
  819. "hungarian_keywords",
  820. "hungarian_stemmer"
  821. ]
  822. }
  823. }
  824. }
  825. }
  826. }
  827. ----------------------------------------------------
  828. <1> The default stopwords can be overridden with the `stopwords`
  829. or `stopwords_path` parameters.
  830. <2> This filter should be removed unless there are words which should
  831. be excluded from stemming.
  832. [[indonesian-analyzer]]
  833. ===== `indonesian` analyzer
  834. The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows:
  835. [source,js]
  836. ----------------------------------------------------
  837. {
  838. "settings": {
  839. "analysis": {
  840. "filter": {
  841. "indonesian_stop": {
  842. "type": "stop",
  843. "stopwords": "_indonesian_" <1>
  844. },
  845. "indonesian_keywords": {
  846. "type": "keyword_marker",
  847. "keywords": [] <2>
  848. },
  849. "indonesian_stemmer": {
  850. "type": "stemmer",
  851. "language": "indonesian"
  852. }
  853. },
  854. "analyzer": {
  855. "indonesian": {
  856. "tokenizer": "standard",
  857. "filter": [
  858. "lowercase",
  859. "indonesian_stop",
  860. "indonesian_keywords",
  861. "indonesian_stemmer"
  862. ]
  863. }
  864. }
  865. }
  866. }
  867. }
  868. ----------------------------------------------------
  869. <1> The default stopwords can be overridden with the `stopwords`
  870. or `stopwords_path` parameters.
  871. <2> This filter should be removed unless there are words which should
  872. be excluded from stemming.
  873. [[irish-analyzer]]
  874. ===== `irish` analyzer
  875. The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
  876. [source,js]
  877. ----------------------------------------------------
  878. {
  879. "settings": {
  880. "analysis": {
  881. "filter": {
  882. "irish_elision": {
  883. "type": "elision",
  884. "articles": [ "h", "n", "t" ]
  885. },
  886. "irish_stop": {
  887. "type": "stop",
  888. "stopwords": "_irish_" <1>
  889. },
  890. "irish_lowercase": {
  891. "type": "lowercase",
  892. "language": "irish"
  893. },
  894. "irish_keywords": {
  895. "type": "keyword_marker",
  896. "keywords": [] <2>
  897. },
  898. "irish_stemmer": {
  899. "type": "stemmer",
  900. "language": "irish"
  901. }
  902. },
  903. "analyzer": {
  904. "irish": {
  905. "tokenizer": "standard",
  906. "filter": [
  907. "irish_stop",
  908. "irish_elision",
  909. "irish_lowercase",
  910. "irish_keywords",
  911. "irish_stemmer"
  912. ]
  913. }
  914. }
  915. }
  916. }
  917. }
  918. ----------------------------------------------------
  919. <1> The default stopwords can be overridden with the `stopwords`
  920. or `stopwords_path` parameters.
  921. <2> This filter should be removed unless there are words which should
  922. be excluded from stemming.
  923. [[italian-analyzer]]
  924. ===== `italian` analyzer
  925. The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
  926. [source,js]
  927. ----------------------------------------------------
  928. {
  929. "settings": {
  930. "analysis": {
  931. "filter": {
  932. "italian_elision": {
  933. "type": "elision",
  934. "articles": [
  935. "c", "l", "all", "dall", "dell",
  936. "nell", "sull", "coll", "pell",
  937. "gl", "agl", "dagl", "degl", "negl",
  938. "sugl", "un", "m", "t", "s", "v", "d"
  939. ]
  940. },
  941. "italian_stop": {
  942. "type": "stop",
  943. "stopwords": "_italian_" <1>
  944. },
  945. "italian_keywords": {
  946. "type": "keyword_marker",
  947. "keywords": [] <2>
  948. },
  949. "italian_stemmer": {
  950. "type": "stemmer",
  951. "language": "light_italian"
  952. }
  953. },
  954. "analyzer": {
  955. "italian": {
  956. "tokenizer": "standard",
  957. "filter": [
  958. "italian_elision",
  959. "lowercase",
  960. "italian_stop",
  961. "italian_keywords",
  962. "italian_stemmer"
  963. ]
  964. }
  965. }
  966. }
  967. }
  968. }
  969. ----------------------------------------------------
  970. <1> The default stopwords can be overridden with the `stopwords`
  971. or `stopwords_path` parameters.
  972. <2> This filter should be removed unless there are words which should
  973. be excluded from stemming.
  974. [[latvian-analyzer]]
  975. ===== `latvian` analyzer
  976. The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
  977. [source,js]
  978. ----------------------------------------------------
  979. {
  980. "settings": {
  981. "analysis": {
  982. "filter": {
  983. "latvian_stop": {
  984. "type": "stop",
  985. "stopwords": "_latvian_" <1>
  986. },
  987. "latvian_keywords": {
  988. "type": "keyword_marker",
  989. "keywords": [] <2>
  990. },
  991. "latvian_stemmer": {
  992. "type": "stemmer",
  993. "language": "latvian"
  994. }
  995. },
  996. "analyzer": {
  997. "latvian": {
  998. "tokenizer": "standard",
  999. "filter": [
  1000. "lowercase",
  1001. "latvian_stop",
  1002. "latvian_keywords",
  1003. "latvian_stemmer"
  1004. ]
  1005. }
  1006. }
  1007. }
  1008. }
  1009. }
  1010. ----------------------------------------------------
  1011. <1> The default stopwords can be overridden with the `stopwords`
  1012. or `stopwords_path` parameters.
  1013. <2> This filter should be removed unless there are words which should
  1014. be excluded from stemming.
  1015. [[norwegian-analyzer]]
  1016. ===== `norwegian` analyzer
  1017. The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1018. [source,js]
  1019. ----------------------------------------------------
  1020. {
  1021. "settings": {
  1022. "analysis": {
  1023. "filter": {
  1024. "norwegian_stop": {
  1025. "type": "stop",
  1026. "stopwords": "_norwegian_" <1>
  1027. },
  1028. "norwegian_keywords": {
  1029. "type": "keyword_marker",
  1030. "keywords": [] <2>
  1031. },
  1032. "norwegian_stemmer": {
  1033. "type": "stemmer",
  1034. "language": "norwegian"
  1035. }
  1036. },
  1037. "analyzer": {
  1038. "norwegian": {
  1039. "tokenizer": "standard",
  1040. "filter": [
  1041. "lowercase",
  1042. "norwegian_stop",
  1043. "norwegian_keywords",
  1044. "norwegian_stemmer"
  1045. ]
  1046. }
  1047. }
  1048. }
  1049. }
  1050. }
  1051. ----------------------------------------------------
  1052. <1> The default stopwords can be overridden with the `stopwords`
  1053. or `stopwords_path` parameters.
  1054. <2> This filter should be removed unless there are words which should
  1055. be excluded from stemming.
  1056. [[persian-analyzer]]
  1057. ===== `persian` analyzer
  1058. The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1059. [source,js]
  1060. ----------------------------------------------------
  1061. {
  1062. "settings": {
  1063. "analysis": {
  1064. "char_filter": {
  1065. "zero_width_spaces": {
  1066. "type": "mapping",
  1067. "mappings": [ "\\u200C=> "] <1>
  1068. }
  1069. },
  1070. "filter": {
  1071. "persian_stop": {
  1072. "type": "stop",
  1073. "stopwords": "_persian_" <2>
  1074. }
  1075. },
  1076. "analyzer": {
  1077. "persian": {
  1078. "tokenizer": "standard",
  1079. "char_filter": [ "zero_width_spaces" ],
  1080. "filter": [
  1081. "lowercase",
  1082. "arabic_normalization",
  1083. "persian_normalization",
  1084. "persian_stop"
  1085. ]
  1086. }
  1087. }
  1088. }
  1089. }
  1090. }
  1091. ----------------------------------------------------
  1092. <1> Replaces zero-width non-joiners with an ASCII space.
  1093. <2> The default stopwords can be overridden with the `stopwords`
  1094. or `stopwords_path` parameters.
  1095. [[portuguese-analyzer]]
  1096. ===== `portuguese` analyzer
  1097. The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows:
  1098. [source,js]
  1099. ----------------------------------------------------
  1100. {
  1101. "settings": {
  1102. "analysis": {
  1103. "filter": {
  1104. "portuguese_stop": {
  1105. "type": "stop",
  1106. "stopwords": "_portuguese_" <1>
  1107. },
  1108. "portuguese_keywords": {
  1109. "type": "keyword_marker",
  1110. "keywords": [] <2>
  1111. },
  1112. "portuguese_stemmer": {
  1113. "type": "stemmer",
  1114. "language": "light_portuguese"
  1115. }
  1116. },
  1117. "analyzer": {
  1118. "portuguese": {
  1119. "tokenizer": "standard",
  1120. "filter": [
  1121. "lowercase",
  1122. "portuguese_stop",
  1123. "portuguese_keywords",
  1124. "portuguese_stemmer"
  1125. ]
  1126. }
  1127. }
  1128. }
  1129. }
  1130. }
  1131. ----------------------------------------------------
  1132. <1> The default stopwords can be overridden with the `stopwords`
  1133. or `stopwords_path` parameters.
  1134. <2> This filter should be removed unless there are words which should
  1135. be excluded from stemming.
  1136. [[romanian-analyzer]]
  1137. ===== `romanian` analyzer
  1138. The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1139. [source,js]
  1140. ----------------------------------------------------
  1141. {
  1142. "settings": {
  1143. "analysis": {
  1144. "filter": {
  1145. "romanian_stop": {
  1146. "type": "stop",
  1147. "stopwords": "_romanian_" <1>
  1148. },
  1149. "romanian_keywords": {
  1150. "type": "keyword_marker",
  1151. "keywords": [] <2>
  1152. },
  1153. "romanian_stemmer": {
  1154. "type": "stemmer",
  1155. "language": "romanian"
  1156. }
  1157. },
  1158. "analyzer": {
  1159. "romanian": {
  1160. "tokenizer": "standard",
  1161. "filter": [
  1162. "lowercase",
  1163. "romanian_stop",
  1164. "romanian_keywords",
  1165. "romanian_stemmer"
  1166. ]
  1167. }
  1168. }
  1169. }
  1170. }
  1171. }
  1172. ----------------------------------------------------
  1173. <1> The default stopwords can be overridden with the `stopwords`
  1174. or `stopwords_path` parameters.
  1175. <2> This filter should be removed unless there are words which should
  1176. be excluded from stemming.
  1177. [[russian-analyzer]]
  1178. ===== `russian` analyzer
  1179. The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
  1180. [source,js]
  1181. ----------------------------------------------------
  1182. {
  1183. "settings": {
  1184. "analysis": {
  1185. "filter": {
  1186. "russian_stop": {
  1187. "type": "stop",
  1188. "stopwords": "_russian_" <1>
  1189. },
  1190. "russian_keywords": {
  1191. "type": "keyword_marker",
  1192. "keywords": [] <2>
  1193. },
  1194. "russian_stemmer": {
  1195. "type": "stemmer",
  1196. "language": "russian"
  1197. }
  1198. },
  1199. "analyzer": {
  1200. "russian": {
  1201. "tokenizer": "standard",
  1202. "filter": [
  1203. "lowercase",
  1204. "russian_stop",
  1205. "russian_keywords",
  1206. "russian_stemmer"
  1207. ]
  1208. }
  1209. }
  1210. }
  1211. }
  1212. }
  1213. ----------------------------------------------------
  1214. <1> The default stopwords can be overridden with the `stopwords`
  1215. or `stopwords_path` parameters.
  1216. <2> This filter should be removed unless there are words which should
  1217. be excluded from stemming.
  1218. [[sorani-analyzer]]
  1219. ===== `sorani` analyzer
  1220. The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
  1221. [source,js]
  1222. ----------------------------------------------------
  1223. {
  1224. "settings": {
  1225. "analysis": {
  1226. "filter": {
  1227. "sorani_stop": {
  1228. "type": "stop",
  1229. "stopwords": "_sorani_" <1>
  1230. },
  1231. "sorani_keywords": {
  1232. "type": "keyword_marker",
  1233. "keywords": [] <2>
  1234. },
  1235. "sorani_stemmer": {
  1236. "type": "stemmer",
  1237. "language": "sorani"
  1238. }
  1239. },
  1240. "analyzer": {
  1241. "sorani": {
  1242. "tokenizer": "standard",
  1243. "filter": [
  1244. "sorani_normalization",
  1245. "lowercase",
  1246. "sorani_stop",
  1247. "sorani_keywords",
  1248. "sorani_stemmer"
  1249. ]
  1250. }
  1251. }
  1252. }
  1253. }
  1254. }
  1255. ----------------------------------------------------
  1256. <1> The default stopwords can be overridden with the `stopwords`
  1257. or `stopwords_path` parameters.
  1258. <2> This filter should be removed unless there are words which should
  1259. be excluded from stemming.
  1260. [[spanish-analyzer]]
  1261. ===== `spanish` analyzer
  1262. The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1263. [source,js]
  1264. ----------------------------------------------------
  1265. {
  1266. "settings": {
  1267. "analysis": {
  1268. "filter": {
  1269. "spanish_stop": {
  1270. "type": "stop",
  1271. "stopwords": "_spanish_" <1>
  1272. },
  1273. "spanish_keywords": {
  1274. "type": "keyword_marker",
  1275. "keywords": [] <2>
  1276. },
  1277. "spanish_stemmer": {
  1278. "type": "stemmer",
  1279. "language": "light_spanish"
  1280. }
  1281. },
  1282. "analyzer": {
  1283. "spanish": {
  1284. "tokenizer": "standard",
  1285. "filter": [
  1286. "lowercase",
  1287. "spanish_stop",
  1288. "spanish_keywords",
  1289. "spanish_stemmer"
  1290. ]
  1291. }
  1292. }
  1293. }
  1294. }
  1295. }
  1296. ----------------------------------------------------
  1297. <1> The default stopwords can be overridden with the `stopwords`
  1298. or `stopwords_path` parameters.
  1299. <2> This filter should be removed unless there are words which should
  1300. be excluded from stemming.
  1301. [[swedish-analyzer]]
  1302. ===== `swedish` analyzer
  1303. The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1304. [source,js]
  1305. ----------------------------------------------------
  1306. {
  1307. "settings": {
  1308. "analysis": {
  1309. "filter": {
  1310. "swedish_stop": {
  1311. "type": "stop",
  1312. "stopwords": "_swedish_" <1>
  1313. },
  1314. "swedish_keywords": {
  1315. "type": "keyword_marker",
  1316. "keywords": [] <2>
  1317. },
  1318. "swedish_stemmer": {
  1319. "type": "stemmer",
  1320. "language": "swedish"
  1321. }
  1322. },
  1323. "analyzer": {
  1324. "swedish": {
  1325. "tokenizer": "standard",
  1326. "filter": [
  1327. "lowercase",
  1328. "swedish_stop",
  1329. "swedish_keywords",
  1330. "swedish_stemmer"
  1331. ]
  1332. }
  1333. }
  1334. }
  1335. }
  1336. }
  1337. ----------------------------------------------------
  1338. <1> The default stopwords can be overridden with the `stopwords`
  1339. or `stopwords_path` parameters.
  1340. <2> This filter should be removed unless there are words which should
  1341. be excluded from stemming.
  1342. [[turkish-analyzer]]
  1343. ===== `turkish` analyzer
  1344. The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
  1345. [source,js]
  1346. ----------------------------------------------------
  1347. {
  1348. "settings": {
  1349. "analysis": {
  1350. "filter": {
  1351. "turkish_stop": {
  1352. "type": "stop",
  1353. "stopwords": "_turkish_" <1>
  1354. },
  1355. "turkish_lowercase": {
  1356. "type": "lowercase",
  1357. "language": "turkish"
  1358. },
  1359. "turkish_keywords": {
  1360. "type": "keyword_marker",
  1361. "keywords": [] <2>
  1362. },
  1363. "turkish_stemmer": {
  1364. "type": "stemmer",
  1365. "language": "turkish"
  1366. }
  1367. },
  1368. "analyzer": {
  1369. "turkish": {
  1370. "tokenizer": "standard",
  1371. "filter": [
  1372. "apostrophe",
  1373. "turkish_lowercase",
  1374. "turkish_stop",
  1375. "turkish_keywords",
  1376. "turkish_stemmer"
  1377. ]
  1378. }
  1379. }
  1380. }
  1381. }
  1382. }
  1383. ----------------------------------------------------
  1384. <1> The default stopwords can be overridden with the `stopwords`
  1385. or `stopwords_path` parameters.
  1386. <2> This filter should be removed unless there are words which should
  1387. be excluded from stemming.
  1388. [[thai-analyzer]]
  1389. ===== `thai` analyzer
  1390. The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
  1391. [source,js]
  1392. ----------------------------------------------------
  1393. {
  1394. "settings": {
  1395. "analysis": {
  1396. "filter": {
  1397. "thai_stop": {
  1398. "type": "stop",
  1399. "stopwords": "_thai_" <1>
  1400. }
  1401. },
  1402. "analyzer": {
  1403. "thai": {
  1404. "tokenizer": "thai",
  1405. "filter": [
  1406. "lowercase",
  1407. "thai_stop"
  1408. ]
  1409. }
  1410. }
  1411. }
  1412. }
  1413. }
  1414. ----------------------------------------------------
  1415. <1> The default stopwords can be overridden with the `stopwords`
  1416. or `stopwords_path` parameters.