/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

apply plugin: 'elasticsearch.docs-test'

/* List of files that have snippets that probably should be converted to
 * `// CONSOLE` and `// TESTRESPONSE` but have yet to be converted. Try and
 * only remove entries from this list. When it is empty we'll remove it
 * entirely and have a party! There will be cake and everything.... */
buildRestTests.expectedUnconvertedCandidates = [
  'reference/aggregations/bucket/nested-aggregation.asciidoc',
  'reference/aggregations/bucket/range-aggregation.asciidoc',
  'reference/aggregations/bucket/reverse-nested-aggregation.asciidoc',
  'reference/aggregations/bucket/significantterms-aggregation.asciidoc',
  'reference/aggregations/bucket/terms-aggregation.asciidoc',
  'reference/aggregations/matrix/stats-aggregation.asciidoc',
  'reference/aggregations/metrics/tophits-aggregation.asciidoc',
  'reference/cluster/allocation-explain.asciidoc',
]
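
/* A "converted" snippet is an asciidoc example whose request is tagged
 * `// CONSOLE` (so the docs build replays it against the test cluster) and
 * whose response is tagged `// TESTRESPONSE` (so it is checked against the
 * real response), roughly:
 *
 *   [source,js]
 *   ----
 *   GET /_cluster/health
 *   ----
 *   // CONSOLE
 */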

integTestCluster {
  setting 'script.max_compilations_per_minute', '1000'
  /* Enable regexes in painless so our tests don't complain about example
   * snippets that use them. */
  setting 'script.painless.regex.enabled', 'true'
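  /* Helper that deploys a file from src/test/cluster/config into the test
   * cluster's config directory under the same relative path. */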
  Closure configFile = {
    extraConfigFile it, "src/test/cluster/config/$it"
  }
  configFile 'analysis/example_word_list.txt'
  configFile 'analysis/hyphenation_patterns.xml'
  configFile 'analysis/synonym.txt'
  configFile 'analysis/stemmer_override.txt'
  configFile 'userdict_ja.txt'
  configFile 'KeywordTokenizer.rbbi'
  extraConfigFile 'hunspell/en_US/en_US.aff', '../core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff'
  extraConfigFile 'hunspell/en_US/en_US.dic', '../core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic'
  // Whitelist reindexing from the local node so we can test it.
  setting 'reindex.remote.whitelist', '127.0.0.1:*'
}

// Build the cluster with all plugins
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
  /* Skip repositories. We just aren't going to be able to test them so it
   * doesn't make sense to waste time installing them. */
  if (subproj.path.startsWith(':plugins:repository-')) {
    return
  }
  subproj.afterEvaluate { // need to wait until the project has been configured
    integTestCluster {
      plugin subproj.path
    }
  }
}

buildRestTests.docs = fileTree(projectDir) {
  // No snippets in here!
  exclude 'build.gradle'
  // That is where the snippets go, not where they come from!
  exclude 'build'
}
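
/* Registers a test setup named `name` that creates the `twitter` index and
 * bulk-loads `count` tweets into it. The first tweet is always
 *   {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0}
 * and the rest are numbered filler messages from the user `test`. */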
Closure setupTwitter = { String name, int count ->
  buildRestTests.setups[name] = '''
  - do:
      indices.create:
        index: twitter
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            tweet:
              properties:
                user:
                  type: keyword
                  doc_values: true
                date:
                  type: date
                likes:
                  type: long
  - do:
      bulk:
        index: twitter
        type: tweet
        refresh: true
        body: |'''
  for (int i = 0; i < count; i++) {
    String user, text
    if (i == 0) {
      user = 'kimchy'
      text = 'trying out Elasticsearch'
    } else {
      user = 'test'
      text = "some message with the number $i"
    }
    buildRestTests.setups[name] += """
          {"index":{"_id": "$i"}}
          {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
  }
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
setupTwitter('huge_twitter', 1200)

buildRestTests.setups['host'] = '''
  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
'''
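// The final `set` stashes the master's publish address under the key `host`,
// so later requests in the same yaml test can interpolate it as $host.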

buildRestTests.setups['node'] = '''
  # Fetch the node name. We use the name of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - is_true: master_node
  - set: { master_node: node_name }
'''

// Used by scripted metric docs
buildRestTests.setups['ledger'] = '''
  - do:
      indices.create:
        index: ledger
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
                amount:
                  type: double
  - do:
      bulk:
        index: ledger
        type: sale
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 200, "type": "sale", "description": "something"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 10, "type": "expense", "description": "another thing"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 150, "type": "sale", "description": "blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "cost of blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "advertisement"}'''

// Used by aggregation docs
buildRestTests.setups['sales'] = '''
  - do:
      indices.create:
        index: sales
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
  - do:
      bulk:
        index: sales
        type: sale
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "hat"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 150, "promoted": true, "rating": 5, "type": "bag"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 50, "promoted": false, "rating": 1, "type": "hat"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 10, "promoted": true, "rating": 4, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "hat"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 175, "promoted": false, "rating": 2, "type": "t-shirt"}'''

// Dummy bank account data used by getting-started.asciidoc
buildRestTests.setups['bank'] = '''
  - do:
      bulk:
        index: bank
        type: account
        refresh: true
        body: |
#bank_data#
'''

/* Load the actual accounts only if we're going to use them. This complicates
 * dependency checking but that is a small price to pay for not building a
 * 400kb string every time we start the build. */
File accountsFile = new File("$projectDir/src/test/resources/accounts.json")
buildRestTests.inputs.file(accountsFile)
buildRestTests.doFirst {
  String accounts = accountsFile.getText('UTF-8')
  // Indent like a yaml test needs
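  // ((?m) makes ^ match at the start of every line, so each accounts.json
  // line picks up the ten spaces it needs to sit under `body: |` above.)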
  accounts = accounts.replaceAll('(?m)^', '          ')
  buildRestTests.setups['bank'] =
    buildRestTests.setups['bank'].replace('#bank_data#', accounts)
}

buildRestTests.setups['range_index'] = '''
  - do:
      indices.create:
        index: range_index
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            my_type:
              properties:
                expected_attendees:
                  type: integer_range
                time_frame:
                  type: date_range
                  format: yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis
  - do:
      bulk:
        index: range_index
        type: my_type
        refresh: true
        body: |
          {"index":{"_id": 1}}
          {"expected_attendees": {"gte": 10, "lte": 20}, "time_frame": {"gte": "2015-10-31 12:00:00", "lte": "2015-11-01"}}'''

// Used by index boost doc
buildRestTests.setups['index_boost'] = '''
  - do:
      indices.create:
        index: index1
  - do:
      indices.create:
        index: index2
  - do:
      indices.put_alias:
        index: index1
        name: alias1
'''

// Used by sampler and diversified-sampler aggregation docs
buildRestTests.setups['stackoverflow'] = '''
  - do:
      indices.create:
        index: stackoverflow
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            question:
              properties:
                author:
                  type: keyword
                tags:
                  type: keyword
  - do:
      bulk:
        index: stackoverflow
        type: question
        refresh: true
        body: |'''

// Make Kibana strongly connected to elasticsearch and logstash
// Make Kibana rarer (and therefore higher-ranking) than Javascript
// Make Javascript strongly connected to jquery and angular
// Make Cabana strongly connected to elasticsearch but only as a result of a single author
for (int i = 0; i < 150; i++) {
  buildRestTests.setups['stackoverflow'] += """
          {"index":{}}
          {"author": "very_relevant_$i", "tags": ["elasticsearch", "kibana"]}"""
}
for (int i = 0; i < 50; i++) {
  buildRestTests.setups['stackoverflow'] += """
          {"index":{}}
          {"author": "very_relevant_$i", "tags": ["logstash", "kibana"]}"""
}
for (int i = 0; i < 200; i++) {
  buildRestTests.setups['stackoverflow'] += """
          {"index":{}}
          {"author": "partially_relevant_$i", "tags": ["javascript", "jquery"]}"""
}
for (int i = 0; i < 200; i++) {
  buildRestTests.setups['stackoverflow'] += """
          {"index":{}}
          {"author": "partially_relevant_$i", "tags": ["javascript", "angular"]}"""
}
for (int i = 0; i < 50; i++) {
  buildRestTests.setups['stackoverflow'] += """
          {"index":{}}
          {"author": "noisy author", "tags": ["elasticsearch", "cabana"]}"""
}
buildRestTests.setups['stackoverflow'] += """
"""
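
// Net result: 150 elasticsearch+kibana questions, 50 logstash+kibana, 200
// javascript+jquery, 200 javascript+angular, and 50 elasticsearch+cabana
// questions that all come from the same noisy author.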

// Used by significant_text aggregation docs
buildRestTests.setups['news'] = '''
  - do:
      indices.create:
        index: news
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            article:
              properties:
                source:
                  type: keyword
                content:
                  type: text
  - do:
      bulk:
        index: news
        type: article
        refresh: true
        body: |'''

// Make h5n1 strongly connected to bird flu
for (int i = 0; i < 100; i++) {
  buildRestTests.setups['news'] += """
          {"index":{}}
          {"source": "very_relevant_$i", "content": "bird flu h5n1"}"""
}
for (int i = 0; i < 100; i++) {
  buildRestTests.setups['news'] += """
          {"index":{}}
          {"source": "filler_$i", "content": "bird dupFiller "}"""
}
for (int i = 0; i < 100; i++) {
  buildRestTests.setups['news'] += """
          {"index":{}}
          {"source": "filler_$i", "content": "flu dupFiller "}"""
}
for (int i = 0; i < 20; i++) {
  buildRestTests.setups['news'] += """
          {"index":{}}
          {"source": "partially_relevant_$i", "content": "elasticsearch dupFiller dupFiller dupFiller dupFiller pozmantier"}"""
}
for (int i = 0; i < 10; i++) {
  buildRestTests.setups['news'] += """
          {"index":{}}
          {"source": "partially_relevant_$i", "content": "elasticsearch logstash kibana"}"""
}
buildRestTests.setups['news'] += """
"""
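
// Net result: "h5n1" only ever appears alongside "bird flu", while "bird" and
// "flu" each also occur in 100 filler articles, so the significant_text docs
// can show real signal rising above the background frequency.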

// Used by some aggregations
buildRestTests.setups['exams'] = '''
  - do:
      indices.create:
        index: exams
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            exam:
              properties:
                grade:
                  type: byte
  - do:
      bulk:
        index: exams
        type: exam
        refresh: true
        body: |
          {"index":{}}
          {"grade": 100}
          {"index":{}}
          {"grade": 50}'''

buildRestTests.setups['stored_example_script'] = '''
  # Simple script to load a field. Not really a good example, but a simple one.
  - do:
      put_script:
        id: "my_script"
        body: { "script": { "lang": "painless", "source": "doc[params.field].value" } }
  - match: { acknowledged: true }
'''
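// Docs can then run the stored script by id, e.g. (hypothetical parameters):
//   "script": { "id": "my_script", "params": { "field": "likes" } }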

buildRestTests.setups['stored_scripted_metric_script'] = '''
  - do:
      put_script:
        id: "my_init_script"
        body: { "script": { "lang": "painless", "source": "params._agg.transactions = []" } }
  - match: { acknowledged: true }
  - do:
      put_script:
        id: "my_map_script"
        body: { "script": { "lang": "painless", "source": "params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } }
  - match: { acknowledged: true }
  - do:
      put_script:
        id: "my_combine_script"
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in params._agg.transactions) { profit += t; } return profit" } }
  - match: { acknowledged: true }
  - do:
      put_script:
        id: "my_reduce_script"
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in params._aggs) { profit += a; } return profit" } }
  - match: { acknowledged: true }
'''
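
/* The four scripts mirror the init/map/combine/reduce phases of the
 * scripted_metric aggregation: init seeds params._agg.transactions, map adds
 * one signed amount per document, combine totals a single shard's
 * transactions, and reduce totals the per-shard results. */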

// Used by analyze api
buildRestTests.setups['analyze_sample'] = '''
  - do:
      indices.create:
        index: analyze_sample
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
            analysis:
              normalizer:
                my_normalizer:
                  type: custom
                  filter: [lowercase]
          mappings:
            tweet:
              properties:
                obj1.field1:
                  type: text'''

// Used by percentile/percentile-rank aggregations
buildRestTests.setups['latency'] = '''
  - do:
      indices.create:
        index: latency
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            data:
              properties:
                load_time:
                  type: long
  - do:
      bulk:
        index: latency
        type: data
        refresh: true
        body: |'''
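
// Only every tenth value stays small; the rest are scaled up 10x, giving the
// percentile docs a usefully skewed latency distribution.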
for (int i = 0; i < 100; i++) {
  def value = i
  if (i % 10) {
    value = i * 10
  }
  buildRestTests.setups['latency'] += """
          {"index":{}}
          {"load_time": "$value"}"""
}

// Used by iprange agg
buildRestTests.setups['iprange'] = '''
  - do:
      indices.create:
        index: ip_addresses
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            data:
              properties:
                ip:
                  type: ip
  - do:
      bulk:
        index: ip_addresses
        type: data
        refresh: true
        body: |'''
for (int i = 0; i < 255; i++) {
  buildRestTests.setups['iprange'] += """
          {"index":{}}
          {"ip": "10.0.0.$i"}"""
}
for (int i = 0; i < 5; i++) {
  buildRestTests.setups['iprange'] += """
          {"index":{}}
          {"ip": "9.0.0.$i"}"""
  buildRestTests.setups['iprange'] += """
          {"index":{}}
          {"ip": "11.0.0.$i"}"""
  buildRestTests.setups['iprange'] += """
          {"index":{}}
          {"ip": "12.0.0.$i"}"""
}
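
// Net result: 255 addresses in 10.0.0.0/24 plus five each under 9.0.0.x,
// 11.0.0.x, and 12.0.0.x for the ip_range aggregation examples to bucket.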