
fix and enable repository-hdfs secure tests (#44044)

Due to the recent changes made to convert `repository-hdfs` to test
clusters (#41252), the `integTestSecure*` tasks no longer depended on
`secureHdfsFixture`, so running them would fail because the fixture
was not available. This commit adds the fixture as a dependency of
those tasks.

The `secureHdfsFixture` is an `AntFixture`, which is spawned as a process.
Internally it waits up to 30 seconds for its resources to become available.
On my local machine it took almost 45 seconds, so I have made the wait time
an input on `AntFixture` (defaulting to 30 seconds) and set it to 60 seconds
for the secure HDFS fixtures.

The integration tests for secure HDFS had been disabled for a long time,
so the changes made in #42090 to fix these tests are also applied in this commit.
Yogesh Gaikwad · 6 years ago
commit 8d87ab7573

+ 4 - 1
buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy

@@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture {
     @Input
     boolean useShell = false
 
+    @Input
+    int maxWaitInSeconds = 30
+
     /**
      * A flag to indicate whether the fixture should be run in the foreground, or spawned.
      * It is protected so subclasses can override (eg RunTask).
@@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture {
 
         String failedProp = "failed${name}"
         // first wait for resources, or the failure marker from the wrapper script
-        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
+        ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
             or {
                 resourceexists {
                     file(file: failureMarker.toString())
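For context, the `ant.waitfor` above polls every 500 ms until either the fixture's ports file appears or the wrapper script drops a failure marker, giving up after `maxWaitInSeconds`. Roughly the same logic in plain Java (an illustrative sketch, not code from this change):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.concurrent.TimeUnit;

    class FixtureWait {
        /** Illustrative equivalent of the ant.waitfor block above. */
        static boolean waitForFixture(Path portsFile, Path failureMarker, int maxWaitInSeconds)
                throws InterruptedException {
            long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(maxWaitInSeconds); // maxwait
            while (System.nanoTime() < deadline) {
                if (Files.exists(failureMarker)) {
                    return false; // the wrapper script reported that the fixture failed to start
                }
                if (Files.exists(portsFile)) {
                    return true;  // the fixture wrote its ports file, so it is ready
                }
                TimeUnit.MILLISECONDS.sleep(500); // checkevery: 500 ms
            }
            return false; // ant would set the timeoutproperty here
        }
    }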

+ 28 - 3
plugins/repository-hdfs/build.gradle

@@ -91,13 +91,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
     dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
     executable = new File(project.runtimeJavaHome, 'bin/java')
     env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+    maxWaitInSeconds 60
     onlyIf {  project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled }
     waitCondition = { fixture, ant ->
       // the hdfs.MiniHDFS fixture writes the ports file when
       // it's ready, so we can just wait for the file to exist
       return fixture.portsFile.exists()
     }
-
     final List<String> miniHDFSArgs = []
 
     // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
@@ -125,7 +125,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   }
 }
 
-Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa']
+Set disabledIntegTestTaskNames = []
 
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
@@ -136,10 +136,35 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec
       enabled = false;
     }
 
+    if (integTestTaskName.contains("Secure")) {
+      if (integTestTaskName.contains("Ha")) {
+        dependsOn secureHaHdfsFixture
+      } else {
+        dependsOn secureHdfsFixture
+      }
+    }
+
     runner {
+      if (integTestTaskName.contains("Ha")) {
+        if (integTestTaskName.contains("Secure")) {
+          Path path = buildDir.toPath()
+                  .resolve("fixtures")
+                  .resolve("secureHaHdfsFixture")
+                  .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        } else {
+          Path path = buildDir.toPath()
+                  .resolve("fixtures")
+                  .resolve("haHdfsFixture")
+                  .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        }
+      }
+
       if (integTestTaskName.contains("Secure")) {
         if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) {
-            dependsOn secureHdfsFixture
             nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
             nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
             jvmArgs "-Djava.security.krb5.conf=${krb5conf}"

+ 25 - 14
plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java

@@ -19,16 +19,6 @@
 
 package org.elasticsearch.repositories.hdfs;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -46,6 +36,16 @@ import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.Assert;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Integration test that runs against an HA-Enabled HDFS instance
  */
@@ -57,13 +57,24 @@ public class HaHdfsFailoverTestSuiteIT extends ESRestTestCase {
         String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
         String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
         String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
+        String ports = System.getProperty("test.hdfs-fixture.ports");
+        String nn1Port = "10001";
+        String nn2Port = "10002";
+        if (ports.length() > 0) {
+             final Path path = PathUtils.get(ports);
+             final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> {
+                return Files.readAllLines(path);
+             });
+             nn1Port = lines.get(0);
+             nn2Port = lines.get(1);
+        }
         boolean securityEnabled = hdfsKerberosPrincipal != null;
 
         Configuration hdfsConfiguration = new Configuration();
         hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
         hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port);
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port);
         hdfsConfiguration.set(
             "dfs.client.failover.proxy.provider.ha-hdfs",
             "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
@@ -110,8 +121,8 @@ public class HaHdfsFailoverTestSuiteIT extends ESRestTestCase {
                         securityCredentials(securityEnabled, esKerberosPrincipal) +
                         "\"conf.dfs.nameservices\": \"ha-hdfs\"," +
                         "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," +
-                        "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," +
-                        "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," +
+                        "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:"+nn1Port+"\"," +
+                        "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:"+nn2Port+"\"," +
                         "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " +
                             "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" +
                     "}" +

+ 4 - 4
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml

@@ -48,8 +48,8 @@
         repository: test_snapshot_get_repository
         snapshot: test_snapshot_get
 
-  - length: { snapshots: 1 }
-  - match: { snapshots.0.snapshot : test_snapshot_get }
+  - length: { responses.0.snapshots: 1 }
+  - match: { responses.0.snapshots.0.snapshot : test_snapshot_get }
 
   # List snapshot info
   - do:
@@ -57,8 +57,8 @@
         repository: test_snapshot_get_repository
         snapshot: "*"
 
-  - length: { snapshots: 1 }
-  - match: { snapshots.0.snapshot : test_snapshot_get }
+  - length: { responses.0.snapshots: 1 }
+  - match: { responses.0.snapshots.0.snapshot : test_snapshot_get }
 
   # Remove our snapshot
   - do: 

+ 1 - 1
plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml

@@ -23,7 +23,7 @@
         repository: test_snapshot_repository_ro
         snapshot: "_all"
 
-  - length: { snapshots: 1 }
+  - length: { responses.0.snapshots: 1 }
 
   # Remove our repository
   - do:
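These path changes track the response-shape change from #42090: get-snapshots results are now grouped per repository under a top-level `responses` array. In a Java REST test the same assertion might look like this (a hedged sketch; the repository name and helper usage are illustrative):

    import java.io.IOException;
    import java.util.List;

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;
    import org.elasticsearch.test.rest.yaml.ObjectPath;

    class SnapshotGetAssertions {
        /** Fetch all snapshots and navigate the per-repository response introduced by #42090. */
        static void assertSingleSnapshot(RestClient client) throws IOException {
            Response response = client.performRequest(
                new Request("GET", "/_snapshot/test_snapshot_get_repository/_all"));
            ObjectPath path = ObjectPath.createFromResponse(response);
            List<?> snapshots = path.evaluate("responses.0.snapshots"); // was just "snapshots" before #42090
            if (snapshots.size() != 1) {
                throw new AssertionError("expected exactly one snapshot, got " + snapshots.size());
            }
        }
    }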

+ 2 - 2
test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java

@@ -109,8 +109,8 @@ public class MiniHDFS {
         String haNameService = System.getProperty("ha-nameservice");
         boolean haEnabled = haNameService != null;
         if (haEnabled) {
-            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001);
-            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002);
+            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0);
+            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0);
             MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2);
             MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice);
             builder.nnTopology(namenodeTopology);
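
Setting the IPC ports to 0 lets the OS assign free ephemeral ports at bind time, avoiding collisions on busy CI hosts; the fixture then records whatever ports were actually assigned in the `ports` file that the build script waits on and the test reads (one port per line, matching the `lines.get(0)`/`lines.get(1)` reads above). A minimal sketch of that hand-off, assuming the fixture's real code differs in details:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class PortsFile {
        /** Write the ports the OS assigned to nn1 and nn2, one per line, once the cluster is up. */
        static void writePortsFile(MiniDFSCluster cluster, Path baseDir) throws IOException {
            String contents = cluster.getNameNodePort(0) + "\n" + cluster.getNameNodePort(1) + "\n";
            Path tmp = Files.createTempFile(baseDir, "ports", null);
            Files.write(tmp, contents.getBytes(StandardCharsets.UTF_8));
            // rename atomically so the waiting build never observes a half-written file
            Files.move(tmp, baseDir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE);
        }
    }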