فهرست منبع

Make it possible to use Stack logging in Docker (#65778)

Closes #62758.

Include the Stack log4j config in the Docker image, in order to
make it possible to write logs in a container environment in the
same way as for an archive or package deployment. This is useful
in situations where the user is bind-mounting the logs directory
and has their own arrangements for log shipping.

To use stack logging, set the environment variable `ES_LOG_STYLE`
to `file`. It can also be set to `console`, which is the same as
not specifying it at all.

The Docker logging config is now auto-generated at image build time,
by running the default config through a transformer program when
preparing the distribution in an image builder step.

Also, in the docker distribution `build.gradle`, I changed a helper
closure into a class with a static method in order to fix an
issue where the Docker image was always being rebuilt, even when
there were no changes.
Rory Hunter 4 سال پیش
والد
کامیت
68b5465575

+ 45 - 0
buildSrc/src/main/java/org/elasticsearch/gradle/docker/ShellRetry.java

@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.docker;
+
+/**
+ * The methods in this class take a shell command and wrap it in retry logic, so that our
+ * Docker builds can be more robust in the face of transient errors e.g. network issues.
+ */
+public class ShellRetry {
+    static String loop(String name, String command) {
+        return loop(name, command, 4, "exit");
+    }
+
+    static String loop(String name, String command, int indentSize, String exitKeyword) {
+        String indent = " ".repeat(indentSize);
+
+        StringBuilder commandWithRetry = new StringBuilder("for iter in {1..10}; do \n");
+        commandWithRetry.append(indent).append("  ").append(command).append(" && \n");
+        commandWithRetry.append(indent).append("  exit_code=0 && break || \n");
+        commandWithRetry.append(indent);
+        commandWithRetry.append("    exit_code=$? && echo \"").append(name).append(" error: retry $iter in 10s\" && sleep 10; \n");
+        commandWithRetry.append(indent).append("done; \n");
+        commandWithRetry.append(indent).append(exitKeyword).append(" $exit_code");
+
+        // We need to escape all newlines so that the build process doesn't run all lines onto a single line
+        return commandWithRetry.toString().replaceAll(" *\n", " \\\\\n");
+    }
+}

+ 9 - 54
distribution/docker/build.gradle

@@ -4,6 +4,7 @@ import org.elasticsearch.gradle.ElasticsearchDistribution.Flavor
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.docker.DockerBuildTask
 import org.elasticsearch.gradle.docker.DockerBuildTask
+import org.elasticsearch.gradle.docker.ShellRetry
 import org.elasticsearch.gradle.info.BuildParams
 import org.elasticsearch.gradle.info.BuildParams
 import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin
 import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin
 
 
@@ -21,6 +22,7 @@ configurations {
   dockerSource
   dockerSource
   aarch64OssDockerSource
   aarch64OssDockerSource
   ossDockerSource
   ossDockerSource
+  transformLog4jJar
 }
 }
 
 
 dependencies {
 dependencies {
@@ -28,6 +30,7 @@ dependencies {
   dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default")
   dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default")
   aarch64OssDockerSource project(path: ":distribution:archives:oss-linux-aarch64-tar", configuration:"default")
   aarch64OssDockerSource project(path: ":distribution:archives:oss-linux-aarch64-tar", configuration:"default")
   ossDockerSource project(path: ":distribution:archives:oss-linux-tar", configuration:"default")
   ossDockerSource project(path: ":distribution:archives:oss-linux-tar", configuration:"default")
+  transformLog4jJar project(path: ":distribution:docker:transform-log4j-config", configuration: "default")
 }
 }
 
 
 ext.expansions = { Architecture architecture, boolean oss, DockerBase base, boolean local ->
 ext.expansions = { Architecture architecture, boolean oss, DockerBase base, boolean local ->
@@ -67,7 +70,7 @@ ARG BASE_TAG=8.2
     sourceElasticsearch = "COPY $elasticsearch /opt/elasticsearch.tar.gz"
     sourceElasticsearch = "COPY $elasticsearch /opt/elasticsearch.tar.gz"
   } else {
   } else {
     sourceElasticsearch = """
     sourceElasticsearch = """
-RUN curl --retry 8 -S -L \\
+RUN curl --retry 10 -S -L \\
       --output /opt/elasticsearch.tar.gz \\
       --output /opt/elasticsearch.tar.gz \\
       https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/$elasticsearch
       https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/$elasticsearch
 """.trim()
 """.trim()
@@ -75,18 +78,6 @@ RUN curl --retry 8 -S -L \\
 
 
   def (major,minor) = VersionProperties.elasticsearch.split("\\.")
   def (major,minor) = VersionProperties.elasticsearch.split("\\.")
 
 
-  def retry_loop = { name, command, indentSize = 4, exitKeyword = 'exit'  ->
-    String indent = ' ' * indentSize
-    String commandWithRetry = """for iter in {1..10}; do
-${indent}  ${command} &&
-${indent}  exit_code=0 && break ||
-${indent}    exit_code=\$? && echo "${name} error: retry \$iter in 10s" && sleep 10;
-${indent}done;
-${indent}${exitKeyword} \$exit_code"""
-
-    return commandWithRetry.replaceAll(" *\n", " \\\\\n")
-  }
-
   return [
   return [
     'base_image'          : base.getImage(),
     'base_image'          : base.getImage(),
     'bin_dir'             : base == DockerBase.IRON_BANK ? 'scripts' : 'bin',
     'bin_dir'             : base == DockerBase.IRON_BANK ? 'scripts' : 'bin',
@@ -100,7 +91,7 @@ ${indent}${exitKeyword} \$exit_code"""
     'docker_base'         : base.name().toLowerCase(),
     'docker_base'         : base.name().toLowerCase(),
     'version'             : VersionProperties.elasticsearch,
     'version'             : VersionProperties.elasticsearch,
     'major_minor_version' : "${major}.${minor}",
     'major_minor_version' : "${major}.${minor}",
-    'retry_loop'          : retry_loop
+    'retry'               : ShellRetry
   ]
   ]
 }
 }
 
 
@@ -185,6 +176,10 @@ void addCopyDockerContextTask(Architecture architecture, boolean oss, DockerBase
 
 
     with dockerBuildContext(architecture, oss, base, true)
     with dockerBuildContext(architecture, oss, base, true)
 
 
+    into(base == DockerBase.IRON_BANK ? 'scripts' : 'bin') {
+      from configurations.transformLog4jJar
+    }
+
     if (architecture == Architecture.AARCH64) {
     if (architecture == Architecture.AARCH64) {
       if (oss) {
       if (oss) {
         from configurations.aarch64OssDockerSource
         from configurations.aarch64OssDockerSource
@@ -219,46 +214,6 @@ tasks.register("copyKeystore", Sync) {
   }
   }
 }
 }
 
 
-tasks.register("checkSecurityAuditLayoutPatternIdentical") {
-  // the two log4j2.properties files containing security audit configuration for archive and docker builds respectively
-  def originalLog4j = project(":x-pack:plugin:core").file('src/main/config/log4j2.properties')
-  def dockerLog4j = project.file("src/docker/config/log4j2.properties")
-  inputs.files(originalLog4j, dockerLog4j)
-  def patternPropertyKey = "appender.audit_rolling.layout.pattern"
-  doLast {
-    def coreLog4jProperties = new Properties()
-    originalLog4j.withInputStream { input ->
-      coreLog4jProperties.load(input)
-    }
-
-    if (false == coreLog4jProperties.containsKey(patternPropertyKey)) {
-      throw new GradleException("The [${originalLog4j.getPath()}] file changed such that the layout pattern is not " +
-              "referred to by the property named [${patternPropertyKey}]. Please update the task [${name}] " +
-              "definition from project [${path}] to reflect the new name for the layout pattern property.")
-    }
-
-    def dockerLog4jProperties = new Properties()
-    dockerLog4j.withInputStream { input ->
-      dockerLog4jProperties.load(input)
-    }
-
-    if (false == dockerLog4jProperties.containsKey(patternPropertyKey)) {
-      throw new GradleException("The [${dockerLog4j.getPath()}] file changed such that the layout pattern is not " +
-              "referred to by the property named [${patternPropertyKey}]. Please update the task [${name}] " +
-              "definition from project [${path}] to reflect the new name for the layout pattern property.")
-    }
-
-    if (false == coreLog4jProperties.getProperty(patternPropertyKey).equals(dockerLog4jProperties.getProperty(patternPropertyKey))) {
-      throw new GradleException("The property value for the layout pattern [${patternPropertyKey}] is NOT identical " +
-              "between the [${originalLog4j.getPath()}] and the [${dockerLog4j.getPath()}] files.")
-    }
-  }
-}
-
-tasks.named("precommit").configure {
-  dependsOn 'checkSecurityAuditLayoutPatternIdentical'
-}
-
 elasticsearch_distributions {
 elasticsearch_distributions {
   Architecture.values().each { eachArchitecture ->
   Architecture.values().each { eachArchitecture ->
     Flavor.values().each { distroFlavor ->
     Flavor.values().each { distroFlavor ->

+ 16 - 11
distribution/docker/src/docker/Dockerfile

@@ -27,7 +27,7 @@
 FROM ${base_image} AS builder
 FROM ${base_image} AS builder
 
 
 # Install required packages to extract the Elasticsearch distribution
 # Install required packages to extract the Elasticsearch distribution
-RUN <%= retry_loop(package_manager, "${package_manager} install -y findutils tar gzip") %>
+RUN <%= retry.loop(package_manager, "${package_manager} install -y findutils tar gzip") %>
 
 
 # `tini` is a tiny but valid init for containers. This is used to cleanly
 # `tini` is a tiny but valid init for containers. This is used to cleanly
 # control how ES and any child processes are shut down.
 # control how ES and any child processes are shut down.
@@ -74,7 +74,7 @@ ENV TARBALL_URL https://curl.haxx.se/download/curl-\${VERSION}.tar.xz
 ENV TARBALL_PATH curl-\${VERSION}.tar.xz
 ENV TARBALL_PATH curl-\${VERSION}.tar.xz
 
 
 # Install dependencies
 # Install dependencies
-RUN <%= retry_loop('apk', 'apk add gnupg gcc make musl-dev openssl-dev openssl-libs-static file') %>
+RUN <%= retry.loop('apk', 'apk add gnupg gcc make musl-dev openssl-dev openssl-libs-static file') %>
 
 
 RUN mkdir /work
 RUN mkdir /work
 WORKDIR /work
 WORKDIR /work
@@ -83,7 +83,7 @@ WORKDIR /work
 RUN function retry_wget() { \\
 RUN function retry_wget() { \\
       local URL="\$1" ; \\
       local URL="\$1" ; \\
       local DEST="\$2" ; \\
       local DEST="\$2" ; \\
-      <%= retry_loop('wget', 'wget "\$URL\" -O "\$DEST"', 6, 'return') %> ; \\
+      <%= retry.loop('wget', 'wget "\$URL\" -O "\$DEST"', 6, 'return') %> ; \\
     } ; \\
     } ; \\
     retry_wget "https://daniel.haxx.se/mykey.asc" "curl-gpg.pub" && \\
     retry_wget "https://daniel.haxx.se/mykey.asc" "curl-gpg.pub" && \\
     retry_wget "\${TARBALL_URL}.asc" "\${TARBALL_PATH}.asc" && \\
     retry_wget "\${TARBALL_URL}.asc" "\${TARBALL_PATH}.asc" && \\
@@ -223,21 +223,26 @@ ${source_elasticsearch}
 RUN tar -zxf /opt/elasticsearch.tar.gz --strip-components=1
 RUN tar -zxf /opt/elasticsearch.tar.gz --strip-components=1
 
 
 # The distribution includes a `config` directory, no need to create it
 # The distribution includes a `config` directory, no need to create it
-COPY ${config_dir}/elasticsearch.yml ${config_dir}/log4j2.properties config/
+COPY ${config_dir}/elasticsearch.yml config/
+COPY ${bin_dir}/transform-log4j-config-${version}.jar /tmp/
 
 
 # 1. Configure the distribution for Docker
 # 1. Configure the distribution for Docker
 # 2. Ensure directories are created. Most already are, but make sure
 # 2. Ensure directories are created. Most already are, but make sure
 # 3. Apply correct permissions
 # 3. Apply correct permissions
-# 4. Apply more correct permissions
-# 5. The JDK's directories' permissions don't allow `java` to be executed under a different
+# 4. Move the distribution's default logging config aside
+# 5. Generate a docker logging config, to be used by default
+# 6. Apply more correct permissions
+# 7. The JDK's directories' permissions don't allow `java` to be executed under a different
 #    group to the default. Fix this.
 #    group to the default. Fix this.
-# 6. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
-# 7. Ensure all files are world-readable by default. It should be possible to
+# 8. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
+# 9. Ensure all files are world-readable by default. It should be possible to
 #    examine the contents of the image under any UID:GID
 #    examine the contents of the image under any UID:GID
-RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /usr/share/elasticsearch/bin/elasticsearch-env && \\
+RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \\
     mkdir -p config/jvm.options.d data logs plugins && \\
     mkdir -p config/jvm.options.d data logs plugins && \\
     chmod 0775 config config/jvm.options.d data logs plugins && \\
     chmod 0775 config config/jvm.options.d data logs plugins && \\
-    chmod 0660 config/elasticsearch.yml config/log4j2.properties && \\
+    mv config/log4j2.properties config/log4j2.file.properties && \\
+    jdk/bin/java -jar /tmp/transform-log4j-config-${version}.jar config/log4j2.file.properties > config/log4j2.properties && \\
+    chmod 0660 config/elasticsearch.yml config/log4j2*.properties && \\
     find ./jdk -type d -exec chmod 0755 {} + && \\
     find ./jdk -type d -exec chmod 0755 {} + && \\
     find . -xdev -perm -4000 -exec chmod ug-s {} + && \\
     find . -xdev -perm -4000 -exec chmod ug-s {} + && \\
     find . -type f -exec chmod o+r {} +
     find . -type f -exec chmod o+r {} +
@@ -255,7 +260,7 @@ FROM ${base_image}
 
 
 <% if (docker_base == "ubi") { %>
 <% if (docker_base == "ubi") { %>
 
 
-RUN <%= retry_loop(
+RUN <%= retry.loop(
     package_manager,
     package_manager,
       "${package_manager} update --setopt=tsflags=nodocs -y && \n" +
       "${package_manager} update --setopt=tsflags=nodocs -y && \n" +
       "      ${package_manager} install --setopt=tsflags=nodocs -y \n" +
       "      ${package_manager} install --setopt=tsflags=nodocs -y \n" +

+ 15 - 0
distribution/docker/src/docker/bin/docker-entrypoint.sh

@@ -57,6 +57,21 @@ if [[ -f bin/elasticsearch-users ]]; then
   fi
   fi
 fi
 fi
 
 
+if [[ -n "$ES_LOG_STYLE" ]]; then
+  case "$ES_LOG_STYLE" in
+    console)
+      # This is the default. Nothing to do.
+      ;;
+    file)
+      # Overwrite the default config with the stack config
+      mv /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties
+      ;;
+    *)
+      echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. Expected [console] or [file]" >&2
+      exit 1 ;;
+  esac
+fi
+
 # Signal forwarding and child reaping is handled by `tini`, which is the
 # Signal forwarding and child reaping is handled by `tini`, which is the
 # actual entrypoint of the container
 # actual entrypoint of the container
 exec /usr/share/elasticsearch/bin/elasticsearch <<<"$KEYSTORE_PASSWORD"
 exec /usr/share/elasticsearch/bin/elasticsearch <<<"$KEYSTORE_PASSWORD"

+ 0 - 128
distribution/docker/src/docker/config/log4j2.properties

@@ -1,128 +0,0 @@
-status = error
-
-appender.rolling.type = Console
-appender.rolling.name = rolling
-appender.rolling.layout.type = ECSJsonLayout
-appender.rolling.layout.type_name = server
-
-rootLogger.level = info
-rootLogger.appenderRef.rolling.ref = rolling
-
-appender.deprecation_rolling.type = Console
-appender.deprecation_rolling.name = deprecation_rolling
-appender.deprecation_rolling.layout.type = ECSJsonLayout
-appender.deprecation_rolling.layout.type_name = deprecation
-appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
-
-appender.header_warning.type = HeaderWarningAppender
-appender.header_warning.name = header_warning
-
-logger.deprecation.name = org.elasticsearch.deprecation
-logger.deprecation.level = deprecation
-logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
-logger.deprecation.appenderRef.header_warning.ref = header_warning
-logger.deprecation.additivity = false
-
-appender.index_search_slowlog_rolling.type = Console
-appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
-appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
-appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
-
-logger.index_search_slowlog_rolling.name = index.search.slowlog
-logger.index_search_slowlog_rolling.level = trace
-logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
-logger.index_search_slowlog_rolling.additivity = false
-
-appender.index_indexing_slowlog_rolling.type = Console
-appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
-appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
-appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
-
-logger.index_indexing_slowlog.name = index.indexing.slowlog.index
-logger.index_indexing_slowlog.level = trace
-logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
-logger.index_indexing_slowlog.additivity = false
-
-appender.audit_rolling.type = Console
-appender.audit_rolling.name = audit_rolling
-appender.audit_rolling.layout.type = PatternLayout
-appender.audit_rolling.layout.pattern = {\
-"type":"audit", \
-"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
-%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
-%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
-%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
-%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
-%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
-%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
-%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
-%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
-%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
-%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
-%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
-%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
-%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
-%varsNotEmpty{, "user.roles":%map{user.roles}}\
-%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
-%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
-%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
-%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
-%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
-%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
-%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
-%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
-%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
-%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
-%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
-%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
-%varsNotEmpty{, "indices":%map{indices}}\
-%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
-%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
-%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
-%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
-%varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
-}%n
-# "node.name" node name from the `elasticsearch.yml` settings
-# "node.id" node id which should not change between cluster restarts
-# "host.name" unresolved hostname of the local node
-# "host.ip" the local bound ip (i.e. the ip listening for connections)
-# "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
-# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
-# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
-# "user.name" the subject name as authenticated by a realm
-# "user.run_by.name" the original authenticated subject name that is impersonating another one.
-# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
-# "user.realm" the name of the realm that authenticated "user.name"
-# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
-# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
-# "user.roles" the roles array of the user; these are the roles that are granting privileges
-# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
-# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
-# "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter"
-# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
-# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
-# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
-# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
-# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
-# "request.body" the content of the request body entity, JSON escaped
-# "request.id" a synthentic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
-# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
-# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
-# "indices" the array of indices that the "action" is acting upon
-# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
-# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
-# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
-# "rule" name of the applied rule if the "origin.type" is "ip_filter"
-# "event.category" fixed value "elasticsearch-audit"
-
-logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
-logger.xpack_security_audit_logfile.level = info
-logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
-logger.xpack_security_audit_logfile.additivity = false
-
-logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
-logger.xmlsig.level = error
-logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
-logger.samlxml_decrypt.level = fatal
-logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
-logger.saml2_decrypt.level = fatal

+ 29 - 0
distribution/docker/transform-log4j-config/build.gradle

@@ -0,0 +1,29 @@
+apply plugin: 'elasticsearch.build'
+
+repositories {
+  jcenter()
+}
+
+dependencies {
+  testImplementation "junit:junit:${versions.junit}"
+  testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
+}
+
+tasks.named('jar').configure {
+  manifest {
+    attributes 'Main-Class': 'org.elasticsearch.transform.log4j.TransformLog4jConfig'
+  }
+}
+
+// These tests depend on ES core
+disableTasks('forbiddenApisMain', 'forbiddenApisTest')
+
+tasks.named('testingConventions').configure {
+  naming.clear()
+  naming {
+    Tests {
+      baseClass 'junit.framework.TestCase'
+    }
+  }
+}
+

+ 126 - 0
distribution/docker/transform-log4j-config/src/main/java/org/elasticsearch/transform/log4j/TransformLog4jConfig.java

@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transform.log4j;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class takes in a log4j configuration file, and transforms it into a config that
+ * writes everything to the console. This is useful when running Elasticsearch in a Docker
+ * container, where the Docker convention is to log to stdout / stderr and let the
+ * orchestration layer direct the output.
+ */
+public class TransformLog4jConfig {
+
+    public static void main(String[] args) throws IOException {
+        List<String> lines = getConfigFile(args);
+
+        final List<String> output = transformConfig(lines);
+
+        output.forEach(System.out::println);
+    }
+
+    private static List<String> getConfigFile(String[] args) throws IOException {
+        if (args.length != 1) {
+            System.err.println("ERROR: Must supply a single argument, the file to process");
+            System.exit(1);
+        }
+
+        Path configPath = Path.of(args[0]);
+
+        if (Files.exists(configPath) == false) {
+            System.err.println("ERROR: [" + configPath + "] does not exist");
+            System.exit(1);
+        }
+
+        if (Files.isReadable(configPath) == false) {
+            System.err.println("ERROR: [" + configPath + "] exists but is not readable");
+            System.exit(1);
+        }
+
+        return Files.readAllLines(configPath);
+    }
+
+    public static List<String> transformConfig(List<String> lines) {
+        final List<String> output = new ArrayList<>(lines.size());
+
+        // This flag provides a way to handle properties whose values are split
+        // over multiple lines, so that those properties can be omitted entirely.
+        boolean skipNext = false;
+
+        for (final String line : lines) {
+            if (skipNext) {
+                if (line.endsWith("\\") == false) {
+                    skipNext = false;
+                }
+                continue;
+            }
+            if (line.startsWith("appender.")) {
+                String[] parts = line.split("\\s*=\\s*");
+                String key = parts[0];
+                String[] keyParts = key.split("\\.");
+                String value = parts[1];
+
+                // We don't need to explicitly define a console appender because the
+                // "rolling" appender will become a console appender. We also don't
+                // carry over "rolling_old"
+                if (keyParts[1].equals("console") || keyParts[1].equals("rolling_old")) {
+                    continue;
+                }
+
+                switch (keyParts[2]) {
+                    case "type":
+                        if (value.equals("RollingFile")) {
+                            value = "Console";
+                        }
+                        output.add(key + " = " + value);
+                        break;
+
+                    case "fileName":
+                    case "filePattern":
+                    case "policies":
+                    case "strategy":
+                        // No longer applicable. Omit it.
+                        skipNext = line.endsWith("\\");
+                        break;
+
+                    default:
+                        output.add(line);
+                        break;
+                }
+            } else if (line.startsWith("rootLogger.appenderRef")) {
+                String[] parts = line.split("\\s*=\\s*");
+
+                // The root logger only needs this appender
+                if (parts[1].equals("rolling")) {
+                    output.add(line);
+                }
+            } else {
+                output.add(line);
+            }
+        }
+
+        return output;
+    }
+}

+ 135 - 0
distribution/docker/transform-log4j-config/src/test/java/org/elasticsearch/transform/log4j/TransformLog4jConfigTests.java

@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transform.log4j;
+
+import junit.framework.TestCase;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+public class TransformLog4jConfigTests extends TestCase {
+
+    /**
+     * Check that the transformer doesn't explode when given an empty file.
+     */
+    public void testTransformEmptyConfig() {
+        runTest(List.of(), List.of());
+    }
+
+    /**
+     * Check that the transformer leaves non-appender lines alone.
+     */
+    public void testTransformEchoesNonAppenderLines() {
+        List<String> input = List.of(
+            "status = error",
+            "",
+            "##############################",
+            "rootLogger.level = info",
+            "example = \"broken\\",
+            "    line\""
+        );
+
+        runTest(input, input);
+    }
+
+    /**
+     * Check that the root logger appenders are filtered to just the "rolling" appender.
+     */
+    public void testTransformFiltersRootLogger() {
+        List<String> input = List.of(
+            "rootLogger.appenderRef.console.ref = console",
+            "rootLogger.appenderRef.rolling.ref = rolling",
+            "rootLogger.appenderRef.rolling_old.ref = rolling_old"
+        );
+        List<String> expected = List.of("rootLogger.appenderRef.rolling.ref = rolling");
+
+        runTest(input, expected);
+    }
+
+    /**
+     * Check that any explicit 'console' or 'rolling_old' appenders are removed.
+     */
+    public void testTransformRemoveExplicitConsoleAndRollingOldAppenders() {
+        List<String> input = List.of(
+            "appender.console.type = Console",
+            "appender.console.name = console",
+            "appender.console.layout.type = PatternLayout",
+            "appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n",
+            "appender.rolling_old.type = RollingFile",
+            "appender.rolling_old.name = rolling_old",
+            "appender.rolling_old.layout.type = PatternLayout",
+            "appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n"
+        );
+
+        runTest(input, List.of());
+    }
+
+    /**
+     * Check that rolling file appenders are converted to console appenders.
+     */
+    public void testTransformConvertsRollingToConsole() {
+        List<String> input = List.of("appender.rolling.type = RollingFile", "appender.rolling.name = rolling");
+
+        List<String> expected = List.of("appender.rolling.type = Console", "appender.rolling.name = rolling");
+
+        runTest(input, expected);
+    }
+
+    /**
+     * Check that rolling file appenders have redundant properties removed.
+     */
+    public void testTransformRemovedRedundantProperties() {
+        List<String> input = List.of(
+            "appender.rolling.fileName = ${sys:es.logs.base_path}/${sys:es.logs.cluster_name}_server.json",
+            "appender.rolling.layout.type = ECSJsonLayout",
+            "appender.rolling.layout.type_name = server",
+            "appender.rolling.filePattern = ${sys:es.logs.base_path}/${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz",
+            "appender.rolling.policies.type = Policies",
+            "appender.rolling.strategy.type = DefaultRolloverStrategy"
+        );
+
+        List<String> expected = List.of("appender.rolling.layout.type = ECSJsonLayout", "appender.rolling.layout.type_name = server");
+
+        runTest(input, expected);
+    }
+
+    /**
+     * Check that a removed property that is split over multiple lines (backslash continuation) is skipped in its entirety.
+     */
+    public void testTransformSkipsPropertiesWithLineBreaks() {
+        List<String> input = List.of(
+            "appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}\\",
+            "    ${sys:es.logs.cluster_name}_server.json",
+            "appender.rolling.layout.type = ECSJsonLayout"
+        );
+
+        List<String> expected = List.of("appender.rolling.layout.type = ECSJsonLayout");
+
+        runTest(input, expected);
+    }
+
+    // Runs the transformer over `input` and asserts the result equals `expected`.
+    private void runTest(List<String> input, List<String> expected) {
+        final List<String> transformed = TransformLog4jConfig.transformConfig(input);
+
+        assertThat(transformed, equalTo(expected));
+    }
+}

+ 3 - 1
docs/reference/setup/install/docker.asciidoc

@@ -122,7 +122,9 @@ curl -X GET "localhost:9200/_cat/nodes?v&pretty"
 // NOTCONSOLE
 // NOTCONSOLE
 
 
 Log messages go to the console and are handled by the configured Docker logging driver.
 Log messages go to the console and are handled by the configured Docker logging driver.
-By default you can access logs with `docker logs`.
+By default you can access logs with `docker logs`. If you would prefer the {es}
+container to write logs to disk, set the `ES_LOG_STYLE` environment variable to `file`.
+This causes {es} to use the same logging configuration as other {es} distribution formats.
 
 
 To stop the cluster, run `docker-compose down`.
 To stop the cluster, run `docker-compose down`.
 The data in the Docker volumes is preserved and loaded
 The data in the Docker volumes is preserved and loaded

+ 47 - 0
qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java

@@ -36,8 +36,10 @@ import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.Set;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 
 import static java.nio.file.attribute.PosixFilePermissions.fromString;
 import static java.nio.file.attribute.PosixFilePermissions.fromString;
 import static org.elasticsearch.packaging.util.Docker.chownWithPrivilegeEscalation;
 import static org.elasticsearch.packaging.util.Docker.chownWithPrivilegeEscalation;
@@ -68,8 +70,10 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.matchesPattern;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.startsWith;
 import static org.junit.Assume.assumeFalse;
 import static org.junit.Assume.assumeFalse;
 import static org.junit.Assume.assumeTrue;
 import static org.junit.Assume.assumeTrue;
 
 
@@ -639,6 +643,49 @@ public class DockerTests extends PackagingTestCase {
         assertThat("Container logs don't contain INFO level messages", containerLogs.stdout, containsString("INFO"));
         assertThat("Container logs don't contain INFO level messages", containerLogs.stdout, containsString("INFO"));
     }
     }
 
 
+    /**
+     * Check that Elasticsearch writes logs to disk when ES_LOG_STYLE is set to `file`.
+     */
+    public void test121CanUseStackLoggingConfig() throws Exception {
+        runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "file")));
+
+        waitForElasticsearch(installation);
+
+        final Result containerLogs = getContainerLogs();
+        final List<String> stdout = containerLogs.stdout.lines().collect(Collectors.toList());
+
+        assertThat(
+            "Container logs should be formatted using the stack config",
+            stdout.get(stdout.size() - 1),
+            matchesPattern("^\\[\\d\\d\\d\\d-.*")
+        );
+        assertThat("[logs/docker-cluster.log] should exist but it doesn't", existsInContainer("logs/docker-cluster.log"), is(true));
+    }
+
+    /**
+     * Check that the default (console/JSON) logging config can be explicitly selected via ES_LOG_STYLE=console.
+     */
+    public void test122CanUseDockerLoggingConfig() throws Exception {
+        runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "console")));
+
+        waitForElasticsearch(installation);
+
+        final Result containerLogs = getContainerLogs();
+        final List<String> stdout = containerLogs.stdout.lines().collect(Collectors.toList());
+
+        assertThat("Container logs should be formatted using the docker config", stdout.get(stdout.size() - 1), startsWith("{\""));
+        assertThat("[logs/docker-cluster.log] shouldn't exist but it does", existsInContainer("logs/docker-cluster.log"), is(false));
+    }
+
+    /**
+     * Check that an unrecognised ES_LOG_STYLE value causes the container to exit with an error.
+     */
+    public void test123CannotUseUnknownLoggingConfig() {
+        final Result result = runContainerExpectingFailure(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "unknown")));
+
+        assertThat(result.stderr, containsString("ERROR: ES_LOG_STYLE set to [unknown]. Expected [console] or [file]"));
+    }
+
     /**
     /**
      * Check that the Java process running inside the container has the expected UID, GID and username.
      * Check that the Java process running inside the container has the expected UID, GID and username.
      */
      */

+ 1 - 1
qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java

@@ -200,7 +200,7 @@ public class Docker {
 
 
         if (isElasticsearchRunning) {
         if (isElasticsearchRunning) {
             final Shell.Result dockerLogs = getContainerLogs();
             final Shell.Result dockerLogs = getContainerLogs();
-            fail("Elasticsearch container did exit.\n\nStdout:\n" + dockerLogs.stdout + "\n\nStderr:\n" + dockerLogs.stderr);
+            fail("Elasticsearch container didn't exit.\n\nStdout:\n" + dockerLogs.stdout + "\n\nStderr:\n" + dockerLogs.stderr);
         }
         }
     }
     }
 
 

+ 1 - 0
settings.gradle

@@ -42,6 +42,7 @@ List projects = [
   'distribution:docker:oss-docker-aarch64-export',
   'distribution:docker:oss-docker-aarch64-export',
   'distribution:docker:oss-docker-build-context',
   'distribution:docker:oss-docker-build-context',
   'distribution:docker:oss-docker-export',
   'distribution:docker:oss-docker-export',
+  'distribution:docker:transform-log4j-config',
   'distribution:docker:ubi-docker-aarch64-export',
   'distribution:docker:ubi-docker-aarch64-export',
   'distribution:docker:ubi-docker-build-context',
   'distribution:docker:ubi-docker-build-context',
   'distribution:docker:ubi-docker-export',
   'distribution:docker:ubi-docker-export',