
Upgrade repository-hdfs to Hadoop 3 (#76897)

This upgrades the repository-hdfs plugin to Hadoop 3. Tests are performed against both Hadoop 2 and Hadoop 3 HDFS. The advantages of using the Hadoop 3 client are:
- Over-the-wire encryption works (tests coming in an upcoming PR).
- We don't have to add (or ask customers to add) additional JVM permissions to the Elasticsearch JVM.
- It's compatible with Java versions higher than Java 8.
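
For reference, a minimal sketch of the per-version namenode port scheme used throughout the diff below; it mirrors the getSecureNamenodePortForVersion/getNonSecureNamenodePortForVersion helpers added to plugins/repository-hdfs/build.gradle and shows why the Hadoop 2 ports match the values previously hardcoded in the REST test YAML (9998 secure, 9999 non-secure):

  // Sketch (Groovy): the port helpers as added in plugins/repository-hdfs/build.gradle.
  def getSecureNamenodePortForVersion(hadoopVersion) {
    return 10002 - (2 * hadoopVersion)
  }
  def getNonSecureNamenodePortForVersion(hadoopVersion) {
    return 10003 - (2 * hadoopVersion)
  }
  assert getSecureNamenodePortForVersion(2) == 9998     // previously hardcoded secure port
  assert getNonSecureNamenodePortForVersion(2) == 9999  // previously hardcoded non-secure port
  assert getSecureNamenodePortForVersion(3) == 9996     // Hadoop 3 secure fixture port
  assert getNonSecureNamenodePortForVersion(3) == 9997  // Hadoop 3 non-secure fixture port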
Keith Massey, 4 years ago
parent
commit
a02e8ad90b
39 changed files with 461 additions and 155 deletions
  1. 240 121
      plugins/repository-hdfs/build.gradle
  2. 1 1
      plugins/repository-hdfs/hadoop-client-api/build.gradle
  3. 0 0
      plugins/repository-hdfs/hadoop-client-api/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
  4. 0 1
      plugins/repository-hdfs/licenses/commons-configuration-1.6.jar.sha1
  5. 1 0
      plugins/repository-hdfs/licenses/commons-configuration2-2.7.jar.sha1
  6. 0 0
      plugins/repository-hdfs/licenses/commons-configuration2-LICENSE.txt
  7. 1 1
      plugins/repository-hdfs/licenses/commons-configuration2-NOTICE.txt
  8. 0 1
      plugins/repository-hdfs/licenses/guava-11.0.2.jar.sha1
  9. 1 0
      plugins/repository-hdfs/licenses/guava-27.1-jre.jar.sha1
  10. 0 1
      plugins/repository-hdfs/licenses/hadoop-annotations-2.8.5.jar.sha1
  11. 0 1
      plugins/repository-hdfs/licenses/hadoop-auth-2.8.5.jar.sha1
  12. 0 1
      plugins/repository-hdfs/licenses/hadoop-client-2.8.5.jar.sha1
  13. 1 0
      plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1
  14. 0 1
      plugins/repository-hdfs/licenses/hadoop-hdfs-2.8.5.jar.sha1
  15. 1 0
      plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1
  16. 0 1
      plugins/repository-hdfs/licenses/hadoop-hdfs-client-2.8.5.jar.sha1
  17. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_create.yml
  18. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_delete.yml
  19. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_verify.yml
  20. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml
  21. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml
  22. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
  23. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/40_restore.yml
  24. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_create.yml
  25. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_delete.yml
  26. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_verify.yml
  27. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml
  28. 2 2
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml
  29. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
  30. 1 1
      plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml
  31. 2 1
      settings.gradle
  32. 0 0
      test/fixtures/hdfs2-fixture/build.gradle
  33. 0 0
      test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java
  34. 0 0
      test/fixtures/hdfs2-fixture/src/main/resources/readonly-repository.tar.gz
  35. 13 0
      test/fixtures/hdfs3-fixture/build.gradle
  36. 176 0
      test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java
  37. BIN
      test/fixtures/hdfs3-fixture/src/main/resources/readonly-repository.tar.gz
  38. 3 3
      x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle
  39. 1 1
      x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle

+ 240 - 121
plugins/repository-hdfs/build.gradle

@@ -6,6 +6,7 @@
  * Side Public License, v 1.
  */
 
+import org.apache.tools.ant.filters.ReplaceTokens
 import org.apache.tools.ant.taskdefs.condition.Os
 import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.internal.test.RestIntegTestTask
@@ -26,45 +27,43 @@ esplugin {
 }
 
 versions << [
-  'hadoop2': '2.8.5'
+  'hadoop': '3.3.1'
 ]
 
+final int minTestedHadoopVersion = 2;
+final int maxTestedHadoopVersion = 3;
+
 testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs"
 
-configurations {
-  hdfsFixture
+for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) {
+  configurations.create("hdfs" + hadoopVersion + "Fixture")
 }
 
 dependencies {
-  api "org.apache.hadoop:hadoop-client:${versions.hadoop2}"
-  api project(path: 'hadoop-common', configuration: 'shadow')
+  api project(path: 'hadoop-client-api', configuration: 'shadow')
   if (isEclipse) {
     /*
      * Eclipse can't pick up the shadow dependency so we point it at *something*
      * so it can compile things.
      */
-    api project(path: 'hadoop-common')
+    api project(path: 'hadoop-client-api')
   }
-  api "org.apache.hadoop:hadoop-annotations:${versions.hadoop2}"
-  api "org.apache.hadoop:hadoop-auth:${versions.hadoop2}"
-  api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop2}"
-  api "org.apache.hadoop:hadoop-hdfs-client:${versions.hadoop2}"
+  runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop}"
+  implementation "org.apache.hadoop:hadoop-hdfs:${versions.hadoop}"
   api 'org.apache.htrace:htrace-core4:4.0.1-incubating'
-  runtimeOnly 'com.google.guava:guava:11.0.2'
+  runtimeOnly 'com.google.guava:guava:27.1-jre'
   api 'com.google.protobuf:protobuf-java:2.5.0'
   api 'commons-logging:commons-logging:1.1.3'
   api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
   api 'commons-cli:commons-cli:1.2'
   api "commons-codec:commons-codec:${versions.commonscodec}"
   api 'commons-collections:commons-collections:3.2.2'
-  api 'commons-configuration:commons-configuration:1.6'
+  api 'org.apache.commons:commons-configuration2:2.7'
   api 'commons-io:commons-io:2.4'
   api 'commons-lang:commons-lang:2.6'
   api 'javax.servlet:servlet-api:2.5'
   api "org.slf4j:slf4j-api:${versions.slf4j}"
   api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
-
-  hdfsFixture project(':test:fixtures:hdfs-fixture')
   // Set the keytab files in the classpath so that we can access them from test code without the security manager
   // freaking out.
   if (isEclipse == false) {
@@ -73,6 +72,9 @@ dependencies {
     }
   }
 }
+for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) {
+  dependencies.add("hdfs" + hadoopVersion + "Fixture", project(':test:fixtures:hdfs' + hadoopVersion + '-fixture'))
+}
 
 restResources {
   restApi {
@@ -95,6 +97,7 @@ tasks.named("dependencyLicenses").configure {
 
 tasks.named("integTest").configure {
   dependsOn(project.tasks.named("bundlePlugin"))
+  enabled = false
 }
 
 testClusters.matching { it.name == "integTest" }.configureEach {
@@ -105,79 +108,96 @@ String realm = "BUILD.ELASTIC.CO"
 String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
 
 // Create HDFS File System Testing Fixtures for HA/Secure combinations
-for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) {
-  project.tasks.register(fixtureName, org.elasticsearch.gradle.internal.test.AntFixture) {
-    dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
-    executable = "${BuildParams.runtimeJavaHome}/bin/java"
-    env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}"
-    maxWaitInSeconds 60
-    onlyIf { BuildParams.inFipsJvm == false }
-    waitCondition = { fixture, ant ->
-      // the hdfs.MiniHDFS fixture writes the ports file when
-      // it's ready, so we can just wait for the file to exist
-      return fixture.portsFile.exists()
-    }
-    final List<String> miniHDFSArgs = []
+for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) {
+  for (String fixtureName : ['hdfs' + hadoopVersion + 'Fixture', 'haHdfs' + hadoopVersion + 'Fixture', 'secureHdfs' + hadoopVersion + 'Fixture', 'secureHaHdfs' + hadoopVersion + 'Fixture']) {
+    final int hadoopVer = hadoopVersion
+    project.tasks.register(fixtureName, org.elasticsearch.gradle.internal.test.AntFixture) {
+      executable = "${BuildParams.runtimeJavaHome}/bin/java"
+      dependsOn project.configurations.getByName("hdfs" + hadoopVer + "Fixture"), project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
+      env 'CLASSPATH', "${-> project.configurations.getByName("hdfs" + hadoopVer + "Fixture").asPath}"
+
+      maxWaitInSeconds 60
+      onlyIf { BuildParams.inFipsJvm == false }
+      waitCondition = { fixture, ant ->
+        // the hdfs.MiniHDFS fixture writes the ports file when
+        // it's ready, so we can just wait for the file to exist
+        return fixture.portsFile.exists()
+      }
+      final List<String> miniHDFSArgs = []
 
-    // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
-    if (name.equals('secureHdfsFixture') || name.equals('secureHaHdfsFixture')) {
-      miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5conf}")
-      onlyIf { BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16 }
-    }
-    // If it's an HA fixture, set a nameservice to use in the JVM options
-    if (name.equals('haHdfsFixture') || name.equals('secureHaHdfsFixture')) {
-      miniHDFSArgs.add("-Dha-nameservice=ha-hdfs")
-    }
+      // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
+      if (name.startsWith('secure')) {
+        miniHDFSArgs.addAll(["--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"])
+        miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5conf}")
+        miniHDFSArgs.add("-Dhdfs.config.port=" + getSecureNamenodePortForVersion(hadoopVer))
+      } else {
+        miniHDFSArgs.add("-Dhdfs.config.port=" + getNonSecureNamenodePortForVersion(hadoopVer))
+      }
+      // If it's an HA fixture, set a nameservice to use in the JVM options
+      if (name.startsWith('haHdfs') || name.startsWith('secureHaHdfs')) {
+        miniHDFSArgs.add("-Dha-nameservice=ha-hdfs")
+      }
 
-    // Common options
-    miniHDFSArgs.add('hdfs.MiniHDFS')
-    miniHDFSArgs.add(baseDir)
+      // Common options
+      miniHDFSArgs.add('hdfs.MiniHDFS')
+      miniHDFSArgs.add(baseDir)
 
-    // If it's a secure fixture, then set the principal name and keytab locations to use for auth.
-    if (name.equals('secureHdfsFixture') || name.equals('secureHaHdfsFixture')) {
-      miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}")
-      miniHDFSArgs.add(
-        project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
-      )
-    }
+      // If it's a secure fixture, then set the principal name and keytab locations to use for auth.
+      if (name.startsWith('secure')) {
+        miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}")
+        miniHDFSArgs.add(
+          project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
+        )
+      }
 
-    args miniHDFSArgs.toArray()
+      args miniHDFSArgs.toArray()
+    }
   }
 }
 
+def getSecureNamenodePortForVersion(hadoopVersion) {
+  return 10002 - (2 * hadoopVersion)
+}
+
+def getNonSecureNamenodePortForVersion(hadoopVersion) {
+  return 10003 - (2 * hadoopVersion)
+}
+
 Set disabledIntegTestTaskNames = []
 
-for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
-  def testTask = tasks.register(integTestTaskName, RestIntegTestTask) {
-    description = "Runs rest tests against an elasticsearch cluster with HDFS."
-    dependsOn("bundlePlugin")
+for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) {
+  final int hadoopVer = hadoopVersion
+  for (String integTestTaskName : ['integTest' + hadoopVersion, 'integTestHa' + hadoopVersion, 'integTestSecure' + hadoopVersion,
+                                   'integTestSecureHa' + hadoopVersion]) {
+    def testTask = tasks.register(integTestTaskName, RestIntegTestTask) {
+      description = "Runs rest tests against an elasticsearch cluster with HDFS" + hadoopVer
+      dependsOn("bundlePlugin")
 
-    if (disabledIntegTestTaskNames.contains(name)) {
-      enabled = false;
-    }
+      if (disabledIntegTestTaskNames.contains(name)) {
+        enabled = false;
+      }
 
-    if (name.contains("Secure")) {
-      onlyIf { BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16 }
-      if (name.contains("Ha")) {
-        dependsOn "secureHaHdfsFixture"
-      } else {
-        dependsOn "secureHdfsFixture"
+      if (name.contains("Secure")) {
+        if (name.contains("Ha")) {
+          dependsOn "secureHaHdfs" + hadoopVer + "Fixture"
+        } else {
+          dependsOn "secureHdfs" + hadoopVer + "Fixture"
+        }
       }
-    }
 
       onlyIf { BuildParams.inFipsJvm == false }
       if (name.contains("Ha")) {
         Path portsFile
-        File portsFileDir = file("${workingDir}/hdfsFixture")
+        File portsFileDir = file("${workingDir}/hdfs" + hadoopVer + "Fixture")
         if (name.contains("Secure")) {
           portsFile = buildDir.toPath()
             .resolve("fixtures")
-            .resolve("secureHaHdfsFixture")
+            .resolve("secureHaHdfs" + hadoopVer + "Fixture")
             .resolve("ports")
         } else {
           portsFile = buildDir.toPath()
             .resolve("fixtures")
-            .resolve("haHdfsFixture")
+            .resolve("haHdfs" + hadoopVer + "Fixture")
             .resolve("ports")
         }
         nonInputProperties.systemProperty "test.hdfs-fixture.ports", file("$portsFileDir/ports")
@@ -196,26 +216,47 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec
         if (disabledIntegTestTaskNames.contains(name) == false) {
           nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
           nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
-          jvmArgs "-Djava.security.krb5.conf=${krb5conf}",
-                  "--add-exports=java.security.jgss/sun.security.krb5=ALL-UNNAMED"
+          jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
           nonInputProperties.systemProperty(
             "test.krb5.keytab.hdfs",
             project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
           )
         }
       }
+    }
+
+    testClusters.matching { it.name == testTask.name }.configureEach {
+      plugin(bundlePlugin.archiveFile)
+      if (integTestTaskName.contains("Secure")) {
+        systemProperty "java.security.krb5.conf", krb5conf
+        extraConfigFile(
+          "repository-hdfs/krb5.keytab",
+          file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE
+        )
+      }
+    }
   }
 
-  testClusters.matching { it.name == testTask.name}.configureEach {
-    plugin(bundlePlugin.archiveFile)
-    if (integTestTaskName.contains("Secure")) {
-      systemProperty "java.security.krb5.conf", krb5conf
-      extraConfigFile(
-        "repository-hdfs/krb5.keytab",
-        file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE
-      )
+  def processHadoopTestResources = tasks.register("processHadoop" + hadoopVer + "TestResources", Copy)
+  processHadoopTestResources.configure {
+    Map<String, Object> expansions = [
+      'hdfs_port': getNonSecureNamenodePortForVersion(hadoopVer),
+      'secure_hdfs_port': getSecureNamenodePortForVersion(hadoopVer),
+    ]
+    inputs.properties(expansions)
+    filter("tokens" : expansions.collectEntries {k, v -> [k, v
+      .toString()]} /* must be a map of strings */, ReplaceTokens.class)
+    it.into("build/resources/test/rest-api-spec/test")
+    it.into("hdfs_repository_" + hadoopVer) {
+      from "src/test/resources/rest-api-spec/test/hdfs_repository"
+    }
+    it.into("secure_hdfs_repository_" + hadoopVer) {
+      from "src/test/resources/rest-api-spec/test/secure_hdfs_repository"
     }
   }
+  tasks.named("processTestResources").configure {
+    dependsOn (processHadoopTestResources)
+  }
 }
 
 // Determine HDFS Fixture compatibility for the current build environment.
@@ -243,63 +284,66 @@ if (legalPath == false) {
   fixtureSupported = false
 }
 
-// Always ignore HA integration tests in the normal integration test runner, they are included below as
-// part of their own HA-specific integration test tasks.
-tasks.named("integTest").configure {
-  onlyIf { BuildParams.inFipsJvm == false }
-  exclude('**/Ha*TestSuiteIT.class')
-}
-
-if (fixtureSupported) {
-  // Check depends on the HA test. Already depends on the standard test.
-  tasks.named("check").configure {
-    dependsOn("integTestHa")
+for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) {
+  final int hadoopVer = hadoopVersion
+  // Always ignore HA integration tests in the normal integration test runner, they are included below as
+  // part of their own HA-specific integration test tasks.
+  tasks.named("integTest" + hadoopVer).configure {
+    onlyIf { BuildParams.inFipsJvm == false }
+    exclude('**/Ha*TestSuiteIT.class')
   }
 
-  // Both standard and HA tests depend on their respective HDFS fixtures
-  tasks.named("integTest").configure {
-    dependsOn "hdfsFixture"
+  if (fixtureSupported) {
+    // Check depends on the HA test. Already depends on the standard test.
+    tasks.named("check").configure {
+      dependsOn("integTestHa" + hadoopVer)
+    }
 
-    // The normal test runner only runs the standard hdfs rest tests
-    systemProperty 'tests.rest.suite', 'hdfs_repository'
-  }
-  tasks.named("integTestHa").configure {
-    dependsOn "haHdfsFixture"
-    // Only include the HA integration tests for the HA test task
-    setIncludes(['**/Ha*TestSuiteIT.class'])
+    // Both standard and HA tests depend on their respective HDFS fixtures
+    tasks.named("integTest" + hadoopVer).configure {
+      dependsOn "hdfs" + hadoopVer + "Fixture"
 
-  }
+      // The normal test runner only runs the standard hdfs rest tests
+      systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer
+    }
+    tasks.named("integTestHa" + hadoopVer).configure {
+      dependsOn "haHdfs" + hadoopVer + "Fixture"
+      // Only include the HA integration tests for the HA test task
+      setIncludes(['**/Ha*TestSuiteIT.class'])
+
+    }
 
-} else {
-  if (legalPath) {
-    logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
   } else {
-    logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'")
-  }
+    if (legalPath) {
+      logger.warn("hdfs" + hadoopVer + "Fixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
+    } else {
+      logger.warn("hdfs" + hadoopVer + "Fixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'")
+    }
 
-  // The normal integration test runner will just test that the plugin loads
-  tasks.named("integTest").configure {
-    systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
-  }
-  // HA fixture is unsupported. Don't run them.
-  tasks.named("integTestHa").configure {
-    setEnabled(false)
+    // The normal integration test runner will just test that the plugin loads
+    tasks.named("integTest" + hadoopVer).configure {
+      systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer + '/10_basic'
+    }
+    // HA fixture is unsupported. Don't run them.
+    tasks.named("integTestHa" + hadoopVer).configure {
+      setEnabled(false)
+    }
   }
-}
 
-tasks.named("check").configure {
-  dependsOn("integTestSecure", "integTestSecureHa")
-}
+  tasks.named("check").configure {
+    dependsOn("integTest" + hadoopVer, "integTestSecure" + hadoopVer, "integTestSecureHa" + hadoopVer)
+  }
 
 // Run just the secure hdfs rest test suite.
-tasks.named("integTestSecure").configure {
-  systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
-  // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner.
-  exclude('**/Ha*TestSuiteIT.class')
-}
+  tasks.named("integTestSecure" + hadoopVer).configure {
+    systemProperty 'tests.rest.suite', 'secure_hdfs_repository_' + hadoopVer
+    // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner.
+    exclude('**/Ha*TestSuiteIT.class')
+  }
 // Only include the HA integration tests for the HA test task
-tasks.named("integTestSecureHa").configure {
-  setIncludes(['**/Ha*TestSuiteIT.class'])
+  tasks.named("integTestSecureHa" + hadoopVer).configure {
+    setIncludes(['**/Ha*TestSuiteIT.class'])
+  }
 }
 
 tasks.named("thirdPartyAudit").configure {
@@ -308,7 +352,82 @@ tasks.named("thirdPartyAudit").configure {
     // internal java api: sun.misc.Unsafe
     'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
     'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
-    'org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm',
-    'org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm$Slot',
+    'com.google.common.cache.Striped64',
+    'com.google.common.cache.Striped64$1',
+    'com.google.common.cache.Striped64$Cell',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
+    'com.google.common.hash.Striped64',
+    'com.google.common.hash.Striped64$1',
+    'com.google.common.hash.Striped64$Cell',
+    'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
+    'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
+    'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper',
+    'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper$1',
+    'org.apache.hadoop.shaded.com.google.common.cache.Striped64',
+    'org.apache.hadoop.shaded.com.google.common.cache.Striped64$1',
+    'org.apache.hadoop.shaded.com.google.common.cache.Striped64$Cell',
+    'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
+    'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
+    'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
+    'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
+    'org.apache.hadoop.shaded.com.google.common.hash.Striped64',
+    'org.apache.hadoop.shaded.com.google.common.hash.Striped64$1',
+    'org.apache.hadoop.shaded.com.google.common.hash.Striped64$Cell',
+    'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+    'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+    'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
+    'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField',
+    'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$1',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$Cell',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$1',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$Cell',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
+    'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
+    'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor',
+    'org.apache.hadoop.shaded.org.xerial.snappy.pure.PureJavaSnappy',
+    'org.apache.hadoop.shaded.org.xerial.snappy.pure.SnappyRawCompressor',
+    'org.apache.hadoop.shaded.org.xerial.snappy.pure.SnappyRawDecompressor',
+    'org.apache.hadoop.shaded.org.xerial.snappy.pure.UnsafeUtil',
+    'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64',
+    'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$1',
+    'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$Cell',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$1',
+    'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$Cell',
+    'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+    'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+    'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
+    'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
+    'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil',
+    'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$1',
+    'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$JvmMemoryAccessor',
+    'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$MemoryAccessor'
   )
 }
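
To illustrate how the @hdfs_port@ and @secure_hdfs_port@ tokens in the REST test YAML further down get their values, here is a stand-alone sketch of what the generated processHadoop3TestResources task effectively does (task name, ports, and paths are taken from the build.gradle diff above; treat this as an illustration rather than the exact task wiring):

  import org.apache.tools.ant.filters.ReplaceTokens

  // Sketch: for Hadoop 3, "hdfs://localhost:@hdfs_port@" in the copied YAML becomes
  // "hdfs://localhost:9997", and "@secure_hdfs_port@" becomes 9996.
  tasks.register("processHadoop3TestResources", Copy) {
    Map<String, Object> expansions = ['hdfs_port': 9997, 'secure_hdfs_port': 9996]
    inputs.properties(expansions)
    // ReplaceTokens requires string values, hence the toString() conversion.
    filter("tokens": expansions.collectEntries { k, v -> [k, v.toString()] }, ReplaceTokens.class)
    into("build/resources/test/rest-api-spec/test")
    into("hdfs_repository_3") {
      from "src/test/resources/rest-api-spec/test/hdfs_repository"
    }
    into("secure_hdfs_repository_3") {
      from "src/test/resources/rest-api-spec/test/secure_hdfs_repository"
    }
  }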

+ 1 - 1
plugins/repository-hdfs/hadoop-common/build.gradle → plugins/repository-hdfs/hadoop-client-api/build.gradle

@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.java'
 apply plugin: 'com.github.johnrengelman.shadow'
 
 dependencies {
-  implementation "org.apache.hadoop:hadoop-common:${project.parent.versions.hadoop2}"
+  implementation "org.apache.hadoop:hadoop-client-api:${project.parent.versions.hadoop}"
 }
 
 tasks.named('shadowJar').configure {

+ 0 - 0
plugins/repository-hdfs/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java → plugins/repository-hdfs/hadoop-client-api/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java


+ 0 - 1
plugins/repository-hdfs/licenses/commons-configuration-1.6.jar.sha1

@@ -1 +0,0 @@
-32cadde23955d7681b0d94a2715846d20b425235

+ 1 - 0
plugins/repository-hdfs/licenses/commons-configuration2-2.7.jar.sha1

@@ -0,0 +1 @@
+593326399e5fb5e1f986607f06f63c1250ab36b4

+ 0 - 0
plugins/repository-hdfs/licenses/commons-configuration-LICENSE.txt → plugins/repository-hdfs/licenses/commons-configuration2-LICENSE.txt


+ 1 - 1
plugins/repository-hdfs/licenses/commons-configuration-NOTICE.txt → plugins/repository-hdfs/licenses/commons-configuration2-NOTICE.txt

@@ -1,4 +1,4 @@
-Apache Commons Configuration
+Apache Commons Configuration 2
 Copyright 2001-2015 The Apache Software Foundation
 
 This product includes software developed at

+ 0 - 1
plugins/repository-hdfs/licenses/guava-11.0.2.jar.sha1

@@ -1 +0,0 @@
-35a3c69e19d72743cac83778aecbee68680f63eb

+ 1 - 0
plugins/repository-hdfs/licenses/guava-27.1-jre.jar.sha1

@@ -0,0 +1 @@
+e47b59c893079b87743cdcfb6f17ca95c08c592c

+ 0 - 1
plugins/repository-hdfs/licenses/hadoop-annotations-2.8.5.jar.sha1

@@ -1 +0,0 @@
-3a301159daf9368b05364577c985498857b5c48f

+ 0 - 1
plugins/repository-hdfs/licenses/hadoop-auth-2.8.5.jar.sha1

@@ -1 +0,0 @@
-63fe1f9d9ef6bdf2cb52dfeb28ed8faf78e4b85c

+ 0 - 1
plugins/repository-hdfs/licenses/hadoop-client-2.8.5.jar.sha1

@@ -1 +0,0 @@
-45e11f6004581e53959bc3d38c4d01dbeb5f4b22

+ 1 - 0
plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1

@@ -0,0 +1 @@
+f3a55d882328ee87a1054f99d62ba987fa9029a4

+ 0 - 1
plugins/repository-hdfs/licenses/hadoop-hdfs-2.8.5.jar.sha1

@@ -1 +0,0 @@
-36914392fd3e77d46e54c3bb092dfc32d3f4a32b

+ 1 - 0
plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1

@@ -0,0 +1 @@
+5da7f270cb6564e099e0d2d424285a24fca62bd2

+ 0 - 1
plugins/repository-hdfs/licenses/hadoop-hdfs-client-2.8.5.jar.sha1

@@ -1 +0,0 @@
-9d72fa62b01c32f1c0587d53c5005fc49f2bd11c

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_create.yml

@@ -10,7 +10,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9999"
+              uri: "hdfs://localhost:@hdfs_port@"
               path: "test/repository_create"
 
     # Get repository

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_delete.yml

@@ -10,7 +10,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9999"
+              uri: "hdfs://localhost:@hdfs_port@"
               path: "foo/bar"
 
     # Get repository
@@ -39,7 +39,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9999"
+              uri: "hdfs://localhost:@hdfs_port@"
               path: "foo/bar"
 
     # Get repository again

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository_verify.yml

@@ -9,7 +9,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9999"
+              uri: "hdfs://localhost:@hdfs_port@"
               path: "test/repository_verify"
 
     # Verify repository

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9999"
+            uri: "hdfs://localhost:@hdfs_port@"
             path: "test/snapshot"
 
   # Create index
@@ -36,7 +36,7 @@
   - match: { snapshot.shards.failed : 0 }
 
   # Remove our snapshot
-  - do: 
+  - do:
       snapshot.delete:
         repository: test_snapshot_repository
         snapshot: test_snapshot

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9999"
+            uri: "hdfs://localhost:@hdfs_port@"
             path: "test/snapshot_get"
 
   # Create index
@@ -59,7 +59,7 @@
   - match: { snapshots.0.snapshot : test_snapshot_get }
 
   # Remove our snapshot
-  - do: 
+  - do:
       snapshot.delete:
         repository: test_snapshot_get_repository
         snapshot: test_snapshot_get

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9999"
+            uri: "hdfs://localhost:@hdfs_port@"
             path: "/user/elasticsearch/existing/readonly-repository"
             readonly: true
 

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/40_restore.yml

@@ -14,7 +14,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9999"
+            uri: "hdfs://localhost:@hdfs_port@"
             path: "test/restore"
 
   # Create index

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_create.yml

@@ -10,7 +10,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9998"
+              uri: "hdfs://localhost:@secure_hdfs_port@"
               path: "/user/elasticsearch/test/repository_create"
               security:
                 principal: "elasticsearch@BUILD.ELASTIC.CO"

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_delete.yml

@@ -10,7 +10,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9998"
+              uri: "hdfs://localhost:@secure_hdfs_port@"
               path: "/user/elasticsearch/foo/bar"
               security:
                 principal: "elasticsearch@BUILD.ELASTIC.CO"
@@ -41,7 +41,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9998"
+              uri: "hdfs://localhost:@secure_hdfs_port@"
               path: "/user/elasticsearch/foo/bar"
               security:
                 principal: "elasticsearch@BUILD.ELASTIC.CO"

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/20_repository_verify.yml

@@ -9,7 +9,7 @@
           body:
             type: hdfs
             settings:
-              uri: "hdfs://localhost:9998"
+              uri: "hdfs://localhost:@secure_hdfs_port@"
               path: "/user/elasticsearch/test/repository_verify"
               security:
                 principal: "elasticsearch@BUILD.ELASTIC.CO"

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9998"
+            uri: "hdfs://localhost:@secure_hdfs_port@"
             path: "/user/elasticsearch/test/snapshot"
             security:
               principal: "elasticsearch@BUILD.ELASTIC.CO"
@@ -38,7 +38,7 @@
   - match: { snapshot.shards.failed : 0 }
 
   # Remove our snapshot
-  - do: 
+  - do:
       snapshot.delete:
         repository: test_snapshot_repository
         snapshot: test_snapshot

+ 2 - 2
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9998"
+            uri: "hdfs://localhost:@secure_hdfs_port@"
             path: "/user/elasticsearch/test/snapshot_get"
             security:
               principal: "elasticsearch@BUILD.ELASTIC.CO"
@@ -61,7 +61,7 @@
   - match: { snapshots.0.snapshot : test_snapshot_get }
 
   # Remove our snapshot
-  - do: 
+  - do:
       snapshot.delete:
         repository: test_snapshot_get_repository
         snapshot: test_snapshot_get

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml

@@ -11,7 +11,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9998"
+            uri: "hdfs://localhost:@secure_hdfs_port@"
             path: "/user/elasticsearch/existing/readonly-repository"
             security:
               principal: "elasticsearch@BUILD.ELASTIC.CO"

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml

@@ -14,7 +14,7 @@
         body:
           type: hdfs
           settings:
-            uri: "hdfs://localhost:9998"
+            uri: "hdfs://localhost:@secure_hdfs_port@"
             path: "/user/elasticsearch/test/restore"
             security:
               principal: "elasticsearch@BUILD.ELASTIC.CO"

+ 2 - 1
settings.gradle

@@ -65,7 +65,8 @@ List projects = [
   'test:framework',
   'test:fixtures:azure-fixture',
   'test:fixtures:gcs-fixture',
-  'test:fixtures:hdfs-fixture',
+  'test:fixtures:hdfs2-fixture',
+  'test:fixtures:hdfs3-fixture',
   'test:fixtures:krb5kdc-fixture',
   'test:fixtures:minio-fixture',
   'test:fixtures:old-elasticsearch',

+ 0 - 0
test/fixtures/hdfs-fixture/build.gradle → test/fixtures/hdfs2-fixture/build.gradle


+ 0 - 0
test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java → test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java


+ 0 - 0
test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz → test/fixtures/hdfs2-fixture/src/main/resources/readonly-repository.tar.gz


+ 13 - 0
test/fixtures/hdfs3-fixture/build.gradle

@@ -0,0 +1,13 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+apply plugin: 'elasticsearch.java'
+
+dependencies {
+  api "org.apache.hadoop:hadoop-minicluster:3.3.1"
+}

+ 176 - 0
test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java

@@ -0,0 +1,176 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package hdfs;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.File;
+import java.lang.management.ManagementFactory;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * MiniHDFS test fixture. There is a CLI tool, but here we can
+ * easily properly setup logging, avoid parsing JSON, etc.
+ */
+public class MiniHDFS {
+
+    private static String PORT_FILE_NAME = "ports";
+    private static String PID_FILE_NAME = "pid";
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 1 && args.length != 3) {
+            throw new IllegalArgumentException(
+                "Expected: MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>], got: " + Arrays.toString(args)
+            );
+        }
+        boolean secure = args.length == 3;
+
+        // configure Paths
+        Path baseDir = Paths.get(args[0]);
+        // hadoop-home/, so logs will not complain
+        if (System.getenv("HADOOP_HOME") == null) {
+            Path hadoopHome = baseDir.resolve("hadoop-home");
+            Files.createDirectories(hadoopHome);
+            System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
+        }
+        // hdfs-data/, where any data is going
+        Path hdfsHome = baseDir.resolve("hdfs-data");
+
+        // configure cluster
+        Configuration cfg = new Configuration();
+        cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
+        // lower default permission: TODO: needed?
+        cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
+
+        // optionally configure security
+        if (secure) {
+            String kerberosPrincipal = args[1];
+            String keytabFile = args[2];
+
+            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
+            cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
+            cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
+            cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true");
+            cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
+            cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");
+            cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, "AES/CTR/NoPadding");
+        }
+
+        UserGroupInformation.setConfiguration(cfg);
+
+        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
+        String explicitPort = System.getProperty("hdfs.config.port");
+        if (explicitPort != null) {
+            builder.nameNodePort(Integer.parseInt(explicitPort));
+        } else {
+            if (secure) {
+                builder.nameNodePort(9998);
+            } else {
+                builder.nameNodePort(9999);
+            }
+        }
+
+        // Configure HA mode
+        String haNameService = System.getProperty("ha-nameservice");
+        boolean haEnabled = haNameService != null;
+        if (haEnabled) {
+            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0);
+            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0);
+            MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2);
+            MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice);
+            builder.nnTopology(namenodeTopology);
+        }
+
+        MiniDFSCluster dfs = builder.build();
+
+        // Configure contents of the filesystem
+        org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+
+        FileSystem fs;
+        if (haEnabled) {
+            dfs.transitionToActive(0);
+            fs = HATestUtil.configureFailoverFs(dfs, cfg);
+        } else {
+            fs = dfs.getFileSystem();
+        }
+
+        try {
+            // Set the elasticsearch user directory up
+            fs.mkdirs(esUserPath);
+            if (UserGroupInformation.isSecurityEnabled()) {
+                List<AclEntry> acls = new ArrayList<>();
+                acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
+                fs.modifyAclEntries(esUserPath, acls);
+            }
+
+            // Install a pre-existing repository into HDFS
+            String directoryName = "readonly-repository";
+            String archiveName = directoryName + ".tar.gz";
+            URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
+            if (readOnlyRepositoryArchiveURL != null) {
+                Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
+                File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
+                FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
+                FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());
+
+                fs.copyFromLocalFile(
+                    true,
+                    true,
+                    new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
+                    esUserPath.suffix("/existing/" + directoryName)
+                );
+
+                FileUtils.deleteDirectory(tempDirectory.toFile());
+            }
+        } finally {
+            fs.close();
+        }
+
+        // write our PID file
+        Path tmp = Files.createTempFile(baseDir, null, null);
+        String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
+        Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
+        Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
+
+        // write our port file
+        String portFileContent = Integer.toString(dfs.getNameNodePort(0));
+        if (haEnabled) {
+            portFileContent = portFileContent + "\n" + Integer.toString(dfs.getNameNodePort(1));
+        }
+        tmp = Files.createTempFile(baseDir, null, null);
+        Files.write(tmp, portFileContent.getBytes(StandardCharsets.UTF_8));
+        Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
+    }
+
+}

BIN
test/fixtures/hdfs3-fixture/src/main/resources/readonly-repository.tar.gz


+ 3 - 3
x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle

@@ -21,7 +21,7 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.rest-resources'
 apply plugin: 'elasticsearch.internal-available-ports'
 
-final Project hdfsFixtureProject = project(':test:fixtures:hdfs-fixture')
+final Project hdfsFixtureProject = project(':test:fixtures:hdfs2-fixture')
 final Project krbFixtureProject = project(':test:fixtures:krb5kdc-fixture')
 final Project hdfsRepoPluginProject = project(':plugins:repository-hdfs')
 
@@ -81,7 +81,6 @@ for (String fixtureName : ['hdfsFixture', 'secureHdfsFixture']) {
 
     // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
     if (name.equals('secureHdfsFixture')) {
-      onlyIf { BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16 }
       miniHDFSArgs.addAll(["--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"])
       miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5conf}")
     }
@@ -123,7 +122,7 @@ tasks.register("integTestSecure", RestIntegTestTask) {
     "test.krb5.keytab.hdfs",
     project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
   )
-  onlyIf { BuildParams.inFipsJvm == false && BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16}
+  onlyIf { BuildParams.inFipsJvm == false }
 }
 tasks.named("check").configure { dependsOn("integTestSecure") }
 
@@ -140,6 +139,7 @@ testClusters.configureEach {
 
 testClusters.matching { it.name == "integTestSecure" }.configureEach {
   systemProperty "java.security.krb5.conf", krb5conf
+  jvmArgs "--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"
   extraConfigFile(
     "repository-hdfs/krb5.keytab",
     file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE

+ 1 - 1
x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle

@@ -22,7 +22,7 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.rest-resources'
 apply plugin: 'elasticsearch.internal-available-ports'
 
-final Project hdfsFixtureProject = project(':test:fixtures:hdfs-fixture')
+final Project hdfsFixtureProject = project(':test:fixtures:hdfs2-fixture')
 final Project krbFixtureProject = project(':test:fixtures:krb5kdc-fixture')
 final Project hdfsRepoPluginProject = project(':plugins:repository-hdfs')