Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+Categorizes text messages.
+Performs a query string query. Returns true if the provided query string matches the row.
@@ -0,0 +1,13 @@
+*Example*
+[source.merge.styled,esql]
+----
+include::{esql-specs}/qstr-function.csv-spec[tag=qstr-with-field]
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/qstr-function.csv-spec[tag=qstr-with-field-result]
@@ -56,5 +56,7 @@
"examples" : [
"ROW number = -1.0 \n| EVAL abs_number = ABS(number)",
"FROM employees\n| KEEP first_name, last_name, height\n| EVAL abs_height = ABS(0.0 - height)"
- ]
+ ],
+ "preview" : false,
+ "snapshot_only" : false
}
@@ -55,5 +55,7 @@
],
"ROW a=.9\n| EVAL acos=ACOS(a)"
@@ -292,5 +292,7 @@
"variadic" : false,
"returnType" : "unsigned_long"
"ROW a=.9\n| EVAL asin=ASIN(a)"
"ROW a=12.9\n| EVAL atan=ATAN(a)"
@@ -295,5 +295,7 @@
"ROW y=12.9, x=.6\n| EVAL atan2=ATAN2(y, x)"
@@ -44,5 +44,7 @@
"FROM employees\n| STATS AVG(height)",
"FROM employees\n| STATS avg_salary_change = ROUND(AVG(MV_AVG(salary_change)), 10)"
@@ -1293,5 +1293,7 @@
"FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())",
"FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket",
"FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2"
@@ -597,5 +597,7 @@
"FROM employees\n| EVAL type = CASE(\n languages <= 1, \"monolingual\",\n languages <= 2, \"bilingual\",\n \"polyglot\")\n| KEEP emp_no, languages, type",
"FROM sample_data\n| EVAL successful = CASE(\n STARTS_WITH(message, \"Connected to\"), 1,\n message == \"Connection error\", 0\n )\n| STATS success_rate = AVG(successful)",
"FROM sample_data\n| EVAL error = CASE(message LIKE \"*error*\", 1, 0)\n| EVAL hour = DATE_TRUNC(1 hour, @timestamp)\n| STATS error_rate = AVG(error) by hour\n| SORT hour"
@@ -0,0 +1,34 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.",
+ "type" : "eval",
+ "name" : "categorize",
+ "description" : "Categorizes text messages.",
+ "signatures" : [
+ {
+ "params" : [
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Expression to categorize"
+ }
+ "variadic" : false,
+ "returnType" : "integer"
+ },
+ "type" : "text",
+ "snapshot_only" : true
+}
"ROW d = 1000.0\n| EVAL c = cbrt(d)"
"ROW a=1.8\n| EVAL a=CEIL(a)"
@@ -43,5 +43,7 @@
"FROM hosts \n| WHERE CIDR_MATCH(ip1, \"127.0.0.2/32\", \"127.0.0.3/32\") \n| KEEP card, host, ip0, ip1"
@@ -283,5 +283,7 @@
"ROW a=null, b=\"b\"\n| EVAL COALESCE(a, b)"
@@ -79,5 +79,7 @@
"FROM employees\n| KEEP first_name, last_name\n| EVAL fullname = CONCAT(first_name, \" \", last_name)"
"ROW a=1.8 \n| EVAL cos=COS(a)"
"ROW a=1.8 \n| EVAL cosh=COSH(a)"
@@ -155,5 +155,7 @@
"ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS word_count = COUNT(SPLIT(words, \";\"))",
"ROW n=1\n| WHERE n < 0\n| STATS COUNT(n)",
"ROW n=1\n| STATS COUNT(n > 0 OR NULL), COUNT(n < 0 OR NULL)"
@@ -603,5 +603,7 @@
"FROM hosts\n| STATS COUNT_DISTINCT(ip0), COUNT_DISTINCT(ip1)",
"FROM hosts\n| STATS COUNT_DISTINCT(ip0, 80000), COUNT_DISTINCT(ip1, 5)",
"ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS distinct_word_count = COUNT_DISTINCT(SPLIT(words, \";\"))"
"ROW date1 = TO_DATETIME(\"2023-12-02T11:00:00.000Z\"), date2 = TO_DATETIME(\"2023-12-02T11:00:00.001Z\")\n| EVAL dd_ms = DATE_DIFF(\"microseconds\", date1, date2)",
"ROW end_23=\"2023-12-31T23:59:59.999Z\"::DATETIME,\n start_24=\"2024-01-01T00:00:00.000Z\"::DATETIME,\n end_24=\"2024-12-31T23:59:59.999\"::DATETIME\n| EVAL end23_to_start24=DATE_DIFF(\"year\", end_23, start_24)\n| EVAL end23_to_end24=DATE_DIFF(\"year\", end_23, end_24)\n| EVAL start_to_end_24=DATE_DIFF(\"year\", start_24, end_24)"
"ROW date = DATE_PARSE(\"yyyy-MM-dd\", \"2022-05-06\")\n| EVAL year = DATE_EXTRACT(\"year\", date)",
"FROM sample_data\n| WHERE DATE_EXTRACT(\"hour_of_day\", @timestamp) < 9 OR DATE_EXTRACT(\"hour_of_day\", @timestamp) >= 17"
"FROM employees\n| KEEP first_name, last_name, hire_date\n| EVAL hired = DATE_FORMAT(\"yyyy-MM-dd\", hire_date)"
"ROW date_string = \"2022-05-06\"\n| EVAL date = DATE_PARSE(\"yyyy-MM-dd\", date_string)"
@@ -45,5 +45,7 @@
"FROM employees\n| KEEP first_name, last_name, hire_date\n| EVAL year_hired = DATE_TRUNC(1 year, hire_date)",
"FROM employees\n| EVAL year = DATE_TRUNC(1 year, hire_date)\n| STATS hires = COUNT(emp_no) BY year\n| SORT year",
@@ -185,5 +185,7 @@
@@ -11,5 +11,7 @@
"ROW E()"
"FROM employees\n| KEEP last_name\n| EVAL ln_E = ENDS_WITH(last_name, \"d\")"
@@ -401,5 +401,7 @@
"returnType" : "boolean"
"ROW d = 5.0\n| EVAL s = EXP(d)"
"ROW a=1.8\n| EVAL a=FLOOR(a)"
@@ -31,5 +31,7 @@
"row a = \"ZWxhc3RpYw==\" \n| eval d = from_base64(a)"
@@ -311,5 +311,7 @@
@@ -212,5 +212,7 @@
"ROW a = 10, b = 20\n| EVAL g = GREATEST(a, b)"
@@ -259,5 +259,7 @@
"ROW a = 1, b = 4, c = 3\n| WHERE c-a IN (3, b / 2, a)"
"row ip4 = to_ip(\"1.2.3.4\"), ip6 = to_ip(\"fe80::cae2:65ff:fece:feb9\")\n| eval ip4_prefix = ip_prefix(ip4, 24, 0), ip6_prefix = ip_prefix(ip6, 0, 112);"
@@ -211,5 +211,7 @@
"ROW a = 10, b = 20\n| EVAL l = LEAST(a, b)"
"FROM employees\n| KEEP last_name\n| EVAL left = LEFT(last_name, 3)\n| SORT last_name ASC\n| LIMIT 5"
"FROM employees\n| KEEP first_name, last_name\n| EVAL fn_length = LENGTH(first_name)"
"FROM employees\n| WHERE first_name LIKE \"?b*\"\n| KEEP first_name, last_name"
@@ -175,5 +175,7 @@
"row a = \"hello\"\n| eval a_ll = locate(a, \"ll\")"
@@ -344,5 +344,7 @@
"ROW base = 2.0, value = 8.0\n| EVAL s = LOG(base, value)",
"row value = 100\n| EVAL s = LOG(value);"
"ROW d = 1000.0 \n| EVAL s = LOG10(d)"
"ROW message = \" some text \", color = \" red \"\n| EVAL message = LTRIM(message)\n| EVAL color = LTRIM(color)\n| EVAL message = CONCAT(\"'\", message, \"'\")\n| EVAL color = CONCAT(\"'\", color, \"'\")"
@@ -116,5 +116,7 @@
"FROM employees\n| STATS MAX(languages)",
"FROM employees\n| STATS max_avg_salary_change = MAX(MV_AVG(salary_change))"
"FROM employees\n| STATS MEDIAN(salary), PERCENTILE(salary, 50)",
"FROM employees\n| STATS median_max_salary_change = MEDIAN(MV_MAX(salary_change))"
"FROM employees\n| STATS MEDIAN(salary), MEDIAN_ABSOLUTE_DEVIATION(salary)",
"FROM employees\n| STATS m_a_d_max_salary_change = MEDIAN_ABSOLUTE_DEVIATION(MV_MAX(salary_change))"
"FROM employees\n| STATS MIN(languages)",
"FROM employees\n| STATS min_avg_salary_change = MIN(MV_AVG(salary_change))"
@@ -184,5 +184,7 @@
@@ -238,5 +238,7 @@
"returnType" : "version"
"ROW a=[3, 5, 1, 6]\n| EVAL avg_a = MV_AVG(a)"
@@ -80,5 +80,7 @@
"ROW a=[\"foo\", \"zoo\", \"bar\"]\n| EVAL j = MV_CONCAT(a, \", \")",
"ROW a=[10, 9, 8]\n| EVAL j = MV_CONCAT(TO_STRING(a), \", \")"
"ROW a=[\"foo\", \"zoo\", \"bar\"]\n| EVAL count_a = MV_COUNT(a)"
@@ -164,5 +164,7 @@
"ROW a=[\"foo\", \"foo\", \"bar\", \"foo\"]\n| EVAL dedupe_a = MV_DEDUPE(a)"
"ROW a=\"foo;bar;baz\"\n| EVAL first_a = MV_FIRST(SPLIT(a, \";\"))"
"ROW a=\"foo;bar;baz\"\n| EVAL last_a = MV_LAST(SPLIT(a, \";\"))"
@@ -128,5 +128,7 @@
"ROW a=[3, 5, 1]\n| EVAL max_a = MV_MAX(a)",
"ROW a=[\"foo\", \"zoo\", \"bar\"]\n| EVAL max_a = MV_MAX(a)"
"ROW a=[3, 5, 1]\n| EVAL median_a = MV_MEDIAN(a)",
"ROW a=[3, 7, 1, 6]\n| EVAL median_a = MV_MEDIAN(a)"
"ROW values = [0, 2, 5, 6]\n| EVAL median_absolute_deviation = MV_MEDIAN_ABSOLUTE_DEVIATION(values), median = MV_MEDIAN(values)"
"ROW a=[2, 1]\n| EVAL min_a = MV_MIN(a)",
"ROW a=[\"foo\", \"bar\"]\n| EVAL min_a = MV_MIN(a)"
@@ -169,5 +169,7 @@
"ROW values = [5, 5, 10, 12, 5000]\n| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values)"
@@ -25,5 +25,7 @@
"ROW a = [70.0, 45.0, 21.0, 21.0, 21.0]\n| EVAL sum = MV_PSERIES_WEIGHTED_SUM(a, 1.5)\n| KEEP sum"
@@ -320,5 +320,7 @@
"row a = [1, 2, 2, 3]\n| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3)",
"row a = [1, 2, 2, 3]\n| eval a1 = mv_slice(a, -2), a2 = mv_slice(a, -3, -1)"
"ROW a = [4, 2, -3, 2]\n| EVAL sa = mv_sort(a), sd = mv_sort(a, \"DESC\")"
"ROW a=[3, 5, 6]\n| EVAL sum_a = MV_SUM(a)"
@@ -271,5 +271,7 @@
"ROW a = [\"x\", \"y\", \"z\"], b = [\"1\", \"2\"]\n| EVAL c = mv_zip(a, b, \"-\")\n| KEEP a, b, c"
@@ -64,5 +64,7 @@
"returnType" : "time_duration"
@@ -12,5 +12,7 @@
"ROW current_date = NOW()",
"FROM sample_data\n| WHERE @timestamp > NOW() - 1 hour"
@@ -170,5 +170,7 @@
"FROM employees\n| STATS p0 = PERCENTILE(salary, 0)\n , p50 = PERCENTILE(salary, 50)\n , p99 = PERCENTILE(salary, 99)",
"FROM employees\n| STATS p80_max_salary_change = PERCENTILE(MV_MAX(salary_change), 80)"
"ROW PI()"
@@ -297,5 +297,7 @@
"ROW base = 2.0, exponent = 2\n| EVAL result = POW(base, exponent)",
"ROW base = 4, exponent = 0.5\n| EVAL s = POW(base, exponent)"
@@ -0,0 +1,37 @@
+ "name" : "qstr",
+ "description" : "Performs a query string query. Returns true if the provided query string matches the row.",
+ "name" : "query",
+ "description" : "Query string in Lucene query string format."
+ "returnType" : "boolean"
+ "examples" : [
+ "from books \n| where qstr(\"author: Faulkner\")\n| keep book_no, author \n| sort book_no \n| limit 5;"
+ "preview" : true,
"ROW a = \"Hello!\"\n| EVAL triple_a = REPEAT(a, 3);"
@@ -199,5 +199,7 @@
"ROW str = \"Hello World\"\n| EVAL str = REPLACE(str, \"World\", \"Universe\")\n| KEEP str"
"FROM employees\n| KEEP last_name\n| EVAL right = RIGHT(last_name, 3)\n| SORT last_name ASC\n| LIMIT 5"
"FROM employees\n| WHERE first_name RLIKE \".leja.*\"\n| KEEP first_name, last_name"
@@ -109,5 +109,7 @@
"FROM employees\n| KEEP first_name, last_name, height\n| EVAL height_ft = ROUND(height * 3.281, 1)"
"ROW message = \" some text \", color = \" red \"\n| EVAL message = RTRIM(message)\n| EVAL color = RTRIM(color)\n| EVAL message = CONCAT(\"'\", message, \"'\")\n| EVAL color = CONCAT(\"'\", color, \"'\")"
"ROW d = 100.0\n| EVAL s = SIGNUM(d)"
"ROW a=1.8 \n| EVAL sin=SIN(a)"
"ROW a=1.8 \n| EVAL sinh=SINH(a)"
@@ -19,5 +19,7 @@
"ROW message = CONCAT(\"Hello\", SPACE(1), \"World!\");"
"ROW words=\"foo;bar;baz;qux;quux;corge\"\n| EVAL word = SPLIT(words, \";\")"
"ROW d = 100.0\n| EVAL s = SQRT(d)"
"FROM airports\n| STATS centroid=ST_CENTROID_AGG(location)"
@@ -151,5 +151,7 @@
"FROM airport_city_boundaries\n| WHERE ST_CONTAINS(city_boundary, TO_GEOSHAPE(\"POLYGON((109.35 18.3, 109.45 18.3, 109.45 18.4, 109.35 18.4, 109.35 18.3))\"))\n| KEEP abbrev, airport, region, city, city_location"
"FROM airport_city_boundaries\n| WHERE ST_DISJOINT(city_boundary, TO_GEOSHAPE(\"POLYGON((-10 -60, 120 -60, 120 60, -10 60, -10 -60))\"))\n| KEEP abbrev, airport, region, city, city_location"
"FROM airports\n| WHERE abbrev == \"CPH\"\n| EVAL distance = ST_DISTANCE(location, city_location)\n| KEEP abbrev, name, location, city_location, distance"
"FROM airports\n| WHERE ST_INTERSECTS(location, TO_GEOSHAPE(\"POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))\"))"
"FROM airport_city_boundaries\n| WHERE ST_WITHIN(city_boundary, TO_GEOSHAPE(\"POLYGON((109.1 18.15, 109.6 18.15, 109.6 18.65, 109.1 18.65, 109.1 18.15))\"))\n| KEEP abbrev, airport, region, city, city_location"
"ROW point = TO_GEOPOINT(\"POINT(42.97109629958868 14.7552534006536)\")\n| EVAL x = ST_X(point), y = ST_Y(point)"
"FROM employees\n| KEEP last_name\n| EVAL ln_S = STARTS_WITH(last_name, \"B\")"