
Introduce Kerberos Test Fixture for Repository HDFS Security Tests (#24493)

This PR introduces a subproject under test/fixtures that contains a Vagrantfile for standing up a
KRB5 KDC (Kerberos Key Distribution Center). The PR also includes helper scripts for provisioning
principals, a few changes to the HDFS fixture that allow it to interface with the KDC, and a new
suite of integration tests for the HDFS Repository plugin.

The HDFS Repository plugin build detects whether the local environment can support the HDFS
fixture (on Windows the fixture will not start without the Hadoop native libraries). If the regular
fixture is usable, the build then checks whether a compatible version of Vagrant is installed to
determine whether the secure test fixtures should be enabled. If the secure tests are enabled, the
build creates a Kerberos KDC fixture, tasks for adding the required principals, and an HDFS fixture
configured for security. A new integration test task is also configured to use the KDC and the
secure HDFS fixture and to run a test suite that exercises authentication. At the end of the secure
integration test the fixtures are torn down.
James Baiera · 8 years ago · commit 6a113ae499
17 changed files with 930 additions and 11 deletions
  1. +114 -0  plugins/repository-hdfs/build.gradle
  2. +30 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yaml
  3. +29 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_create.yaml
  4. +54 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_delete.yaml
  5. +25 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_verify.yaml
  6. +50 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yaml
  7. +72 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yaml
  8. +81 -0   plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yaml
  9. +1 -0    settings.gradle
  10. +54 -11 test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
  11. +53 -0  test/fixtures/krb5kdc-fixture/Vagrantfile
  12. +84 -0  test/fixtures/krb5kdc-fixture/build.gradle
  13. +20 -0  test/fixtures/krb5kdc-fixture/src/main/resources/env.properties
  14. +58 -0  test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh
  15. +120 -0 test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh
  16. +35 -0  test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template
  17. +50 -0  test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template

+ 114 - 0
plugins/repository-hdfs/build.gradle

@@ -18,6 +18,8 @@
  */
 
 import org.apache.tools.ant.taskdefs.condition.Os
+import org.elasticsearch.gradle.test.RestIntegTestTask
+
 import java.nio.file.Files
 import java.nio.file.Path
 import java.nio.file.Paths
@@ -68,6 +70,61 @@ task hdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) {
        baseDir
 }
 
+// MIT Kerberos Vagrant Testing Fixture
+String box = "krb5kdc"
+Map<String,String> vagrantEnvVars = [
+        'VAGRANT_CWD'           : "${project(':test:fixtures:krb5kdc-fixture').projectDir}",
+        'VAGRANT_VAGRANTFILE'   : 'Vagrantfile',
+        'VAGRANT_PROJECT_DIR'   : "${project(':test:fixtures:krb5kdc-fixture').projectDir}"
+]
+
+task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+  command 'box'
+  subcommand 'update'
+  boxName box
+  environmentVars vagrantEnvVars
+}
+
+task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) {
+  command 'up'
+  args '--provision', '--provider', 'virtualbox'
+  boxName box
+  environmentVars vagrantEnvVars
+  dependsOn krb5kdcUpdate
+}
+
+task krb5AddPrincipals {
+  dependsOn krb5kdcFixture
+}
+
+List<String> principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ]
+String realm = "BUILD.ELASTIC.CO"
+
+for (String principal : principals) {
+  Task create = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+    command 'ssh'
+    args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal"
+    boxName box
+    environmentVars vagrantEnvVars
+    dependsOn krb5kdcFixture
+  }
+  krb5AddPrincipals.dependsOn(create)
+}
+
+task secureHdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) {
+  dependsOn project.configurations.hdfsFixture, krb5kdcFixture, krb5AddPrincipals
+  executable = new File(project.javaHome, 'bin/java')
+  env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+
+  Path keytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab")
+  Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf")
+
+  args "-Djava.security.krb5.conf=${krb5Config}", 'hdfs.MiniHDFS',
+          baseDir,
+          "hdfs/hdfs.build.elastic.co@${realm}",
+          "${keytabPath}"
+}
+
 boolean fixtureSupported = false;
 if (Os.isFamily(Os.FAMILY_WINDOWS)) {
   // hdfs fixture will not start without hadoop native libraries on windows
@@ -89,12 +146,69 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) {
 
 if (fixtureSupported) {
   integTestCluster.dependsOn hdfsFixture
+  integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository'
 } else {
   logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
   // just tests that the plugin loads
   integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
 }
 
+boolean secureFixtureSupported = false;
+if (fixtureSupported) {
+  // Only do secure fixture support if the regular fixture is supported,
+  // and if vagrant is installed. The ignoreExitValue on exec only matters
+  // in cases where the command can be found and successfully started. In
+  // situations where the vagrant command isn't able to be started at all
+  // (it's not installed) then Gradle still throws ExecException.
+  ByteArrayOutputStream pipe = new ByteArrayOutputStream()
+  try {
+    ExecResult runResult = exec {
+      commandLine 'vagrant', '--version'
+      standardOutput pipe
+      ignoreExitValue true
+    }
+    String output = pipe.toString().trim()
+    if (runResult.exitValue == 0) {
+      secureFixtureSupported = (output ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/)
+    } else {
+      logger.warn("Could not read installed vagrant version:\n" + output)
+    }
+  } catch (org.gradle.process.internal.ExecException e) {
+    logger.warn("Could not find vagrant: " + e.message)
+    // Swallow error. Vagrant isn't installed. Leave secure fixture support off.
+  }
+}
+
+// Create an Integration Test suite just for security-based tests
+if (secureFixtureSupported) {
+  // This must execute before the afterEvaluate block from integTestSecure
+  project.afterEvaluate {
+    Path elasticsearchKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("elasticsearch.keytab").toAbsolutePath()
+    Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath()
+
+    project.integTestSecureCluster.dependsOn(project.bundlePlugin)
+    project.integTestSecure.clusterConfig.plugin(project.path)
+    project.integTestSecure.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}")
+    project.integTestSecure.clusterConfig.jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
+            " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
+            " " + "-Djava.security.krb5.conf=${krb5conf}" +
+            " " + System.getProperty('tests.jvm.argline', '')
+  }
+
+  RestIntegTestTask integTestSecure = project.tasks.create('integTestSecure', RestIntegTestTask.class) {
+    description = "Runs rest tests against an elasticsearch cluster with HDFS secured by MIT Kerberos."
+  }
+
+  integTestSecure.mustRunAfter(project.integTest)
+  project.check.dependsOn(integTestSecure)
+
+  // Fixture dependencies
+  integTestSecureCluster.dependsOn secureHdfsFixture, krb5kdcFixture
+  integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
+} else {
+  logger.warn("secured hdfsFixture is unsupported, please install Vagrant 1.8.6+ to enable")
+}
+
 thirdPartyAudit.excludes = [
   // classes are missing, because we added hadoop jars one by one until tests pass.
   'com.google.gson.stream.JsonReader', 

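The Vagrant gate above can be reproduced by hand before running the build. A sketch of an
equivalent shell check, using the same version pattern the build script matches against (the grep
expression is an assumption; the build uses a Groovy regex):

    # The build enables the secure fixtures only for Vagrant 1.8.6 through 1.9.x.
    if vagrant --version | grep -qE '^Vagrant 1\.(8\.[6-9]|9\.[0-9])'; then
      echo "secure fixture supported"
    else
      echo "install Vagrant 1.8.6+ to enable the secure tests"
    fi
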
+ 30 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yaml

@@ -0,0 +1,30 @@
+# Integration tests for HDFS Repository plugin
+#
+# Check plugin is installed
+#
+"Plugin loaded":
+    - do:
+        cluster.state: {}
+
+    # Get master node id
+    - set: { master_node: master }
+
+    - do:
+        nodes.info: {}
+
+    - match:  { nodes.$master.plugins.0.name: repository-hdfs  }
+---
+#
+# Check that we can't use file:// repositories or anything like that
+# We only test this plugin against hdfs://
+#
+"HDFS only":
+    - do:
+        catch: /Invalid scheme/
+        snapshot.create_repository:
+          repository: misconfigured_repository
+          body:
+            type: hdfs
+            settings:
+              uri: "file://bogus"
+              path: "foo/bar"

+ 29 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_create.yaml

@@ -0,0 +1,29 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests creating a repository
+#
+"HDFS Repository Creation":
+    # Create repository
+    - do:
+        snapshot.create_repository:
+          repository: test_repository_create
+          body:
+            type: hdfs
+            settings:
+              uri: "hdfs://localhost:9998"
+              path: "/user/elasticsearch/test/repository_create"
+              security:
+                principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+    # Get repository
+    - do:
+        snapshot.get_repository:
+          repository: test_repository_create
+
+    - is_true: test_repository_create
+    - match: {test_repository_create.settings.path : "/user/elasticsearch/test/repository_create"}
+
+    # Remove our repository
+    - do:
+       snapshot.delete_repository:
+         repository: test_repository_create

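The same registration can be exercised against a running node outside the YAML runner. A
hypothetical curl equivalent of the test above, assuming a node listening on localhost:9200 and the
secure fixture's namenode on port 9998:

    # Register a Kerberos-secured HDFS repository (mirrors 20_repository_create.yaml).
    curl -XPUT -H 'Content-Type: application/json' \
      'localhost:9200/_snapshot/test_repository_create' -d '{
      "type": "hdfs",
      "settings": {
        "uri": "hdfs://localhost:9998",
        "path": "/user/elasticsearch/test/repository_create",
        "security": { "principal": "elasticsearch@BUILD.ELASTIC.CO" }
      }
    }'

    # Read the stored settings back, then clean up.
    curl -XGET 'localhost:9200/_snapshot/test_repository_create'
    curl -XDELETE 'localhost:9200/_snapshot/test_repository_create'
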
+ 54 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_delete.yaml

@@ -0,0 +1,54 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests creating a repository, then deleting it and creating it again.
+#
+"HDFS Delete Repository":
+    # Create repository
+    - do:
+        snapshot.create_repository:
+          repository: test_repo_hdfs_1
+          body:
+            type: hdfs
+            settings:
+              uri: "hdfs://localhost:9998"
+              path: "/user/elasticsearch/foo/bar"
+              security:
+                principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+    # Get repository
+    - do:
+        snapshot.get_repository:
+          repository: test_repo_hdfs_1
+
+    - is_true: test_repo_hdfs_1
+    - match: {test_repo_hdfs_1.settings.path : "/user/elasticsearch/foo/bar"}
+
+    # Delete repository
+    - do:
+        snapshot.delete_repository:
+          repository: test_repo_hdfs_1
+
+    # Get repository: It should be gone
+    - do:
+        catch:    /repository_missing_exception/
+        snapshot.get_repository:
+          repository: test_repo_hdfs_1
+
+    # Create it again
+    - do:
+        snapshot.create_repository:
+          repository: test_repo_hdfs_1
+          body:
+            type: hdfs
+            settings:
+              uri: "hdfs://localhost:9998"
+              path: "/user/elasticsearch/foo/bar"
+              security:
+                principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+    # Get repository again
+    - do:
+        snapshot.get_repository:
+          repository: test_repo_hdfs_1
+
+    - is_true: test_repo_hdfs_1

+ 25 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_verify.yaml

@@ -0,0 +1,25 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests explicit verify
+#
+"HDFS Repository Verify":
+    - do:
+        snapshot.create_repository:
+          repository: test_repository_verify
+          body:
+            type: hdfs
+            settings:
+              uri: "hdfs://localhost:9998"
+              path: "/user/elasticsearch/test/repository_verify"
+              security:
+                principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+    # Verify repository
+    - do:
+        snapshot.verify_repository:
+          repository: test_repository_verify
+
+    # Remove our repository
+    - do:
+       snapshot.delete_repository:
+         repository: test_repository_verify

+ 50 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yaml

@@ -0,0 +1,50 @@
+# Integration tests for HDFS Repository plugin
+#
+# Actually perform a snapshot to hdfs
+#
+---
+"take snapshot":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/test/snapshot"
+            security:
+              principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+  # Create index
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          settings:
+            number_of_shards:   1
+            number_of_replicas: 1
+
+  # Create snapshot
+  - do:
+      snapshot.create:
+        repository: test_snapshot_repository
+        snapshot: test_snapshot
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: test_snapshot }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.successful: 1 }
+  - match: { snapshot.shards.failed : 0 }
+
+  # Remove our snapshot
+  - do: 
+      snapshot.delete:
+        repository: test_snapshot_repository
+        snapshot: test_snapshot
+
+  # Remove our repository
+  - do:
+     snapshot.delete_repository:
+       repository: test_snapshot_repository
+

+ 72 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yaml

@@ -0,0 +1,72 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_get_repository
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/test/snapshot_get"
+            security:
+              principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+  # Create index
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          settings:
+            number_of_shards:   1
+            number_of_replicas: 0
+
+  # Wait for green
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  # Create snapshot
+  - do:
+      snapshot.create:
+        repository: test_snapshot_get_repository
+        snapshot: test_snapshot_get
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: test_snapshot_get }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.successful: 1 }
+  - match: { snapshot.shards.failed : 0 }
+
+  # Get snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_get_repository
+        snapshot: test_snapshot_get
+
+  - length: { snapshots: 1 }
+  - match: { snapshots.0.snapshot : test_snapshot_get }
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_get_repository
+        snapshot: "*"
+
+  - length: { snapshots: 1 }
+  - match: { snapshots.0.snapshot : test_snapshot_get }
+
+  # Remove our snapshot
+  - do: 
+      snapshot.delete:
+        repository: test_snapshot_get_repository
+        snapshot: test_snapshot_get
+
+  # Remove our repository
+  - do:
+     snapshot.delete_repository:
+       repository: test_snapshot_get_repository

+ 81 - 0
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yaml

@@ -0,0 +1,81 @@
+# Integration tests for HDFS Repository plugin
+#
+# Actually perform a snapshot to hdfs, then restore it
+#
+---
+"Create a snapshot and then restore it":
+
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_restore_repository
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/test/restore"
+            security:
+              principal: "elasticsearch@BUILD.ELASTIC.CO"
+
+  # Create index
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          settings:
+            number_of_shards:   1
+            number_of_replicas: 0
+
+  # Wait for green
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  # Take snapshot
+  - do:
+      snapshot.create:
+        repository: test_restore_repository
+        snapshot: test_restore
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: test_restore }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.successful: 1 }
+  - match: { snapshot.shards.failed : 0 }
+  - is_true: snapshot.version
+  - gt: { snapshot.version_id: 0}
+
+  # Close index
+  - do:
+      indices.close:
+        index : test_index
+
+  # Restore index
+  - do:
+      snapshot.restore:
+        repository: test_restore_repository
+        snapshot: test_restore
+        wait_for_completion: true
+
+  # Check recovery stats
+  - do:
+      indices.recovery:
+        index: test_index
+
+  - match: { test_index.shards.0.type: SNAPSHOT }
+  - match: { test_index.shards.0.stage: DONE }
+  - match: { test_index.shards.0.index.files.recovered: 0}
+  - match: { test_index.shards.0.index.size.recovered_in_bytes: 0}
+  - match: { test_index.shards.0.index.files.reused: 1}
+  - gt: { test_index.shards.0.index.size.reused_in_bytes: 0}
+
+  # Remove our snapshot
+  - do:
+      snapshot.delete:
+        repository: test_restore_repository
+        snapshot: test_restore
+
+  # Remove our repository
+  - do:
+     snapshot.delete_repository:
+       repository: test_restore_repository

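For manual experimentation, the snapshot-and-restore flow above maps onto a short curl sequence.
A sketch, under the same localhost:9200 assumption as the repository-create example:

    # Take a snapshot synchronously (mirrors 40_restore.yaml).
    curl -XPUT 'localhost:9200/_snapshot/test_restore_repository/test_restore?wait_for_completion=true'

    # An index must be closed before restoring over it.
    curl -XPOST 'localhost:9200/test_index/_close'
    curl -XPOST 'localhost:9200/_snapshot/test_restore_repository/test_restore/_restore?wait_for_completion=true'

    # Recovery stats should then report the SNAPSHOT recovery type.
    curl -XGET 'localhost:9200/test_index/_recovery'
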
+ 1 - 0
settings.gradle

@@ -25,6 +25,7 @@ List projects = [
   'test:framework',
   'test:fixtures:example-fixture',
   'test:fixtures:hdfs-fixture',
+  'test:fixtures:krb5kdc-fixture',
   'test:logger-usage',
   'modules:aggs-matrix-stats',
   'modules:analysis-common',

+ 54 - 11
test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java

@@ -19,19 +19,24 @@
 
 package hdfs;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-
-import java.io.IOException;
+import java.lang.management.ManagementFactory;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
-import java.util.Locale;
+import java.util.ArrayList;
+import java.util.List;
 
-import java.lang.management.ManagementFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * MiniHDFS test fixture. There is a CLI tool, but here we can
@@ -43,9 +48,11 @@ public class MiniHDFS {
     private static String PID_FILE_NAME = "pid";
 
     public static void main(String[] args) throws Exception {
-        if (args.length != 1) {
-           throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
+        if (args.length != 1 && args.length != 3) {
+            throw new IllegalArgumentException("MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>]");
         }
+        boolean secure = args.length == 3;
+
         // configure Paths
         Path baseDir = Paths.get(args[0]);
         // hadoop-home/, so logs will not complain
@@ -57,13 +64,49 @@ public class MiniHDFS {
         // hdfs-data/, where any data is going
         Path hdfsHome = baseDir.resolve("hdfs-data");
 
-        // start cluster
+        // configure cluster
         Configuration cfg = new Configuration();
         cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
         // lower default permission: TODO: needed?
         cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
+
+        // optionally configure security
+        if (secure) {
+            String kerberosPrincipal = args[1];
+            String keytabFile = args[2];
+
+            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
+            cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
+            cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
+        }
+
+        UserGroupInformation.setConfiguration(cfg);
+
         // TODO: remove hardcoded port!
-        MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();
+        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
+        if (secure) {
+            builder.nameNodePort(9998);
+        } else {
+            builder.nameNodePort(9999);
+        }
+        MiniDFSCluster dfs = builder.build();
+
+        // Set the elasticsearch user directory up
+        if (UserGroupInformation.isSecurityEnabled()) {
+            FileSystem fs = dfs.getFileSystem();
+            org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+            fs.mkdirs(esUserPath);
+            List<AclEntry> acls = new ArrayList<>();
+            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
+            fs.modifyAclEntries(esUserPath, acls);
+            fs.close();
+        }
 
         // write our PID file
         Path tmp = Files.createTempFile(baseDir, null, null);

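The fixture's new argument handling can also be exercised directly. A sketch, where
HDFS_FIXTURE_CLASSPATH is a hypothetical stand-in for the jars resolved by the hdfsFixture
configuration, and the keytab and krb5.conf paths are the ones the KDC fixture renders into its
build directory:

    # Unsecured mode: a single base directory argument, namenode on port 9999.
    java -cp "$HDFS_FIXTURE_CLASSPATH" hdfs.MiniHDFS /tmp/minihdfs

    # Secure mode: principal and keytab as extra arguments, namenode on port 9998.
    java -Djava.security.krb5.conf=test/fixtures/krb5kdc-fixture/build/conf/krb5.conf \
      -cp "$HDFS_FIXTURE_CLASSPATH" hdfs.MiniHDFS /tmp/minihdfs \
      'hdfs/hdfs.build.elastic.co@BUILD.ELASTIC.CO' \
      test/fixtures/krb5kdc-fixture/build/keytabs/hdfs_hdfs.build.elastic.co.keytab
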
+ 53 - 0
test/fixtures/krb5kdc-fixture/Vagrantfile

@@ -0,0 +1,53 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This Vagrantfile exists to define a virtual machine running MIT's Kerberos 5
+# for usage as a testing fixture for the build process.
+#
+# In order to connect to the KDC process on this virtual machine, find and use
+# the rendered krb5.conf file in the build output directory (build/conf).
+#
+# In order to provision principals in the KDC, use the provided addprinc.sh
+# script with vagrant's ssh facility:
+#
+#    vagrant ssh -c /vagrant/src/main/resources/provision/addprinc.sh principal
+#
+# You will find the newly created principal's keytab file in the build output
+# directory (build/keytabs). Principal creation is idempotent, and will recopy
+# existing user keytabs from the KDC if they already exist.
+
+Vagrant.configure("2") do |config|
+
+  config.vm.define "krb5kdc" do |config|
+    config.vm.box = "elastic/ubuntu-14.04-x86_64"
+  end
+
+  config.vm.hostname = "kerberos.build.elastic.co"
+
+  if Vagrant.has_plugin?("vagrant-cachier")
+    config.cache.scope = :box
+  end
+
+  config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "tcp"
+  config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "udp"
+
+  config.vm.provision "shell", path: "src/main/resources/provision/installkdc.sh"
+
+end

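The VM can also be managed without Gradle. A sketch, assuming the commands are run from
test/fixtures/krb5kdc-fixture so that this Vagrantfile is picked up:

    # Bring the KDC up; the first boot runs installkdc.sh as a provisioner.
    vagrant up --provision --provider virtualbox

    # Add a principal; its keytab is copied out to build/keytabs/.
    vagrant ssh -c "sudo bash /vagrant/src/main/resources/provision/addprinc.sh elasticsearch"

    # Stop the VM when finished.
    vagrant halt
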
+ 84 - 0
test/fixtures/krb5kdc-fixture/build.gradle

@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+apply plugin: 'elasticsearch.build'
+
+Map<String, String> vagrantEnvVars = [
+        'VAGRANT_CWD'           : "${project.projectDir.absolutePath}",
+        'VAGRANT_VAGRANTFILE'   : 'Vagrantfile',
+        'VAGRANT_PROJECT_DIR'   : "${project.projectDir.absolutePath}"
+]
+
+String box = "krb5kdc"
+
+List<String> defaultPrincipals = [ "elasticsearch" ]
+
+task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+    command 'box'
+    subcommand 'update'
+    boxName box
+    environmentVars vagrantEnvVars
+}
+
+task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+    command 'up'
+    args '--provision', '--provider', 'virtualbox'
+    boxName box
+    environmentVars vagrantEnvVars
+    dependsOn update
+}
+
+task addDefaultPrincipals {
+    dependsOn up
+}
+
+for (String principal : defaultPrincipals) {
+    Task addTask = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+        command 'ssh'
+        args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal"
+        boxName box
+        environmentVars vagrantEnvVars
+        dependsOn up
+    }
+    addDefaultPrincipals.dependsOn(addTask)
+}
+
+task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+    command 'halt'
+    boxName box
+    environmentVars vagrantEnvVars
+}
+
+task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
+    command 'destroy'
+    args '-f'
+    boxName box
+    environmentVars vagrantEnvVars
+    dependsOn halt
+}
+
+thirdPartyAudit.enabled = false
+licenseHeaders.enabled = false
+test.enabled = false
+
+// installKDC uses tabs in it for the Kerberos ACL file.
+// Ignore it for pattern checking.
+forbiddenPatterns {
+    exclude "**/installkdc.sh"
+}

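A minimal sketch of driving these tasks from the repository root, assuming the standard Gradle
wrapper (the task names are the ones defined above):

    # Boot the KDC and create the default principals (currently just "elasticsearch").
    ./gradlew :test:fixtures:krb5kdc-fixture:addDefaultPrincipals

    # Shut the VM down, or halt and destroy it entirely.
    ./gradlew :test:fixtures:krb5kdc-fixture:halt
    ./gradlew :test:fixtures:krb5kdc-fixture:destroy
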
+ 20 - 0
test/fixtures/krb5kdc-fixture/src/main/resources/env.properties

@@ -0,0 +1,20 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+realm=BUILD.ELASTIC.CO
+kdc=kerberos.build.elastic.co
+zone=build.elastic.co

+ 58 - 0
test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh

@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+
+if [[ $# -lt 1 ]]; then
+  echo 'Usage: addprinc.sh <principalNameNoRealm>'
+  exit 1
+fi
+
+PRINC="$1"
+USER=$(echo $PRINC | tr "/" "_")
+
+VDIR=/vagrant
+RESOURCES=$VDIR/src/main/resources
+PROV_DIR=$RESOURCES/provision
+ENVPROP_FILE=$RESOURCES/env.properties
+BUILD_DIR=$VDIR/build
+CONF_DIR=$BUILD_DIR/conf
+KEYTAB_DIR=$BUILD_DIR/keytabs
+LOCALSTATEDIR=/etc
+LOGDIR=/var/log/krb5
+
+mkdir -p $KEYTAB_DIR
+
+REALM=$(cat $ENVPROP_FILE | grep realm= | head -n 1 | cut -d '=' -f 2)
+
+ADMIN_PRIN=admin/admin@$REALM
+ADMIN_KTAB=$LOCALSTATEDIR/admin.keytab
+
+USER_PRIN=$PRINC@$REALM
+USER_KTAB=$LOCALSTATEDIR/$USER.keytab
+
+if [ -f $USER_KTAB ]; then
+  echo "Principal '${PRINC}@${REALM}' already exists. Re-copying keytab..."
+else
+  echo "Provisioning '${PRINC}@${REALM}' principal and keytab..."
+  sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN"
+  sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN"
+fi
+
+sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab

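After provisioning, the exported keytab can be inspected from the host. A sketch, assuming the MIT
Kerberos client tools are installed locally:

    # List the principal entries and key timestamps in the copied keytab.
    klist -kt test/fixtures/krb5kdc-fixture/build/keytabs/elasticsearch.keytab
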
+ 120 - 0
test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh

@@ -0,0 +1,120 @@
+#!/bin/bash
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+
+# KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html
+# and helpful input from https://help.ubuntu.com/community/Kerberos
+
+VDIR=/vagrant
+RESOURCES=$VDIR/src/main/resources
+PROV_DIR=$RESOURCES/provision
+ENVPROP_FILE=$RESOURCES/env.properties
+BUILD_DIR=$VDIR/build
+CONF_DIR=$BUILD_DIR/conf
+KEYTAB_DIR=$BUILD_DIR/keytabs
+LOCALSTATEDIR=/etc
+LOGDIR=/var/log/krb5
+
+MARKER_FILE=/etc/marker
+
+# Output location for our rendered configuration files and keytabs
+mkdir -p $BUILD_DIR
+rm -rf $BUILD_DIR/*
+mkdir -p $CONF_DIR
+mkdir -p $KEYTAB_DIR
+
+if [ -f $MARKER_FILE ]; then
+  echo "Already provisioned..."
+  echo "Recopying configuration files..."
+  cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf
+  cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf
+  exit 0;
+fi
+
+# Pull environment information
+REALM_NAME=$(cat $ENVPROP_FILE | grep realm= | cut -d '=' -f 2)
+KDC_NAME=$(cat $ENVPROP_FILE | grep kdc= | cut -d '=' -f 2)
+BUILD_ZONE=$(cat $ENVPROP_FILE | grep zone= | cut -d '=' -f 2)
+ELASTIC_ZONE=$(echo $BUILD_ZONE | cut -d '.' -f 1,2)
+
+# Transfer and interpolate krb5.conf
+cp $PROV_DIR/krb5.conf.template $LOCALSTATEDIR/krb5.conf
+sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf
+sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf
+sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf
+sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf
+cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf
+
+# Transfer and interpolate the kdc.conf
+mkdir -p $LOCALSTATEDIR/krb5kdc
+cp $PROV_DIR/kdc.conf.template $LOCALSTATEDIR/krb5kdc/kdc.conf
+sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
+sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
+sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
+sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
+cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf
+
+# Touch logging locations
+mkdir -p $LOGDIR
+touch $LOGDIR/kadmin.log
+touch $LOGDIR/krb5kdc.log
+touch $LOGDIR/krb5lib.log
+
+# Update package manager
+apt-get update -qqy
+
+# Installation asks a bunch of questions via debconf. Set the answers ahead of time
+debconf-set-selections <<< "krb5-config krb5-config/read_conf boolean true"
+debconf-set-selections <<< "krb5-config krb5-config/kerberos_servers string $KDC_NAME"
+debconf-set-selections <<< "krb5-config krb5-config/add_servers boolean true"
+debconf-set-selections <<< "krb5-config krb5-config/admin_server string $KDC_NAME"
+debconf-set-selections <<< "krb5-config krb5-config/add_servers_realm string $REALM_NAME"
+debconf-set-selections <<< "krb5-config krb5-config/default_realm string $REALM_NAME"
+debconf-set-selections <<< "krb5-admin-server krb5-admin-server/kadmind boolean true"
+debconf-set-selections <<< "krb5-admin-server krb5-admin-server/newrealm note"
+debconf-set-selections <<< "krb5-kdc krb5-kdc/debconf boolean true"
+debconf-set-selections <<< "krb5-kdc krb5-kdc/purge_data_too boolean false"
+
+# Install krb5 packages
+apt-get install -qqy krb5-{admin-server,kdc}
+
+# /dev/random produces output very slowly on Ubuntu VMs. Install haveged to increase entropy.
+apt-get install -qqy haveged
+haveged
+
+# Create kerberos database with stash file and garbage password
+kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876
+
+# Set up admin acls
+cat << EOF > /etc/krb5kdc/kadm5.acl
+*/admin@$REALM_NAME	*
+*/*@$REALM_NAME		i
+EOF
+
+# Create admin principal
+kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME"
+kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME"
+
+# Start Kerberos Services
+krb5kdc
+kadmind
+
+# Mark that the vm is already provisioned
+touch $MARKER_FILE

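Once provisioning completes, the KDC can be sanity-checked from the host using the admin principal
the script creates (its password, "elastic", is set by the kadmin.local call above). A sketch,
assuming MIT Kerberos client tools and the rendered configuration from build/conf:

    # Point the client tools at the fixture's rendered configuration.
    export KRB5_CONFIG=test/fixtures/krb5kdc-fixture/build/conf/krb5.conf

    # Authenticate as admin/admin (password: elastic) and show the ticket cache.
    kinit admin/admin@BUILD.ELASTIC.CO
    klist
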
+ 35 - 0
test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template

@@ -0,0 +1,35 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[kdcdefaults]
+    kdc_listen = 88
+    kdc_tcp_listen = 88
+
+[realms]
+    ${REALM_NAME} = {
+        kadmind_port = 749
+        max_life = 12h 0m 0s
+        max_renewable_life = 7d 0h 0m 0s
+        master_key_type = aes256-cts
+        # remove aes256-cts:normal since the unlimited strength policy needs to be installed for java to use it.
+        supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
+    }
+
+[logging]
+    kdc = FILE:/var/log/krb5kdc.log
+    admin_server = FILE:/var/log/kadmin.log
+    default = FILE:/var/log/krb5lib.log

+ 50 - 0
test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template

@@ -0,0 +1,50 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[libdefaults]
+    default_realm = ${REALM_NAME}
+    dns_canonicalize_hostname = false
+    dns_lookup_kdc = false
+    dns_lookup_realm = false
+    dns_uri_lookup = false
+    forwardable = true
+    ignore_acceptor_hostname = true
+    rdns = false
+    default_tgs_enctypes = rc4-hmac
+    default_tkt_enctypes = rc4-hmac
+    permitted_enctypes = rc4-hmac
+    # udp_preference_limit = 1
+    kdc_timeout = 3000
+
+[realms]
+    ${REALM_NAME} = {
+        kdc = ${KDC_NAME}:88
+        kdc = ${KDC_NAME}:60088
+        kdc = localhost:60088
+        kdc = localhost:88
+        kdc = 127.0.0.1:60088
+        kdc = 127.0.0.1:88
+        admin_server = ${KDC_NAME}:749
+        default_domain = ${BUILD_ZONE}
+    }
+
+[domain_realm]
+    ${BUILD_ZONE} = ${REALM_NAME}
+    .${BUILD_ZONE} = ${REALM_NAME}
+    ${ELASTIC_ZONE} = ${REALM_NAME}
+    .${ELASTIC_ZONE} = ${REALM_NAME}
+