
HDFS-9858. RollingFileSystemSink can throw an NPE on non-secure clusters. (Daniel Templeton via kasha)

(cherry picked from commit c2460dad642feee1086442d33c30c24ec77236b9)
Karthik Kambatla, 9 years ago
parent commit 6722e17f6d
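
The fix below moves the file-system setup out of init(): init() now only records the sink properties, and a new initFs() method performs the HDFS connection and base-directory creation lazily from the write path, retrying on later writes until it succeeds. The following is a minimal, self-contained sketch of that pattern, not the Hadoop class itself; all names in it are illustrative.

```java
/**
 * Minimal sketch of the lazy-initialization pattern this patch applies:
 * init() only records settings, and the expensive file-system setup is
 * attempted on the first write and retried on later writes until it
 * succeeds, so the sink can be initialized even while HDFS is unreachable.
 */
public class LazyInitSinkSketch {
  /** Stands in for the real cluster being up or down. */
  static boolean fsAvailable = false;

  private boolean initialized = false;
  private final boolean ignoreError;

  LazyInitSinkSketch(boolean ignoreError) {
    this.ignoreError = ignoreError;
  }

  /** Cheap: no file-system or login calls happen here any more. */
  void init() {
    // store properties only
  }

  /** Called for each batch of metrics; performs the deferred setup first. */
  void putMetrics(String record) {
    if (!initialized) {
      initialized = tryInitFs();
    }
    if (initialized) {
      System.out.println("wrote: " + record);
    }
    // not initialized and ignoreError set: drop the record silently
  }

  /** Attempt the expensive setup; report success instead of throwing. */
  private boolean tryInitFs() {
    try {
      if (!fsAvailable) {
        throw new IllegalStateException("file system unreachable");
      }
      return true; // in the real sink: get the FileSystem and mkdirs(basePath)
    } catch (IllegalStateException ex) {
      if (!ignoreError) {
        throw ex;
      }
      return false;
    }
  }

  public static void main(String[] args) {
    LazyInitSinkSketch sink = new LazyInitSinkSketch(true);
    sink.init();                  // succeeds even though the FS is "down"
    sink.putMetrics("metric=1");  // dropped: setup fails, error ignored
    fsAvailable = true;           // the FS comes back
    sink.putMetrics("metric=2");  // setup retried and succeeds; record written
  }
}
```
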

+ 96 - 47
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java

@@ -132,6 +132,9 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
   private static final FastDateFormat DATE_FORMAT =
       FastDateFormat.getInstance("yyyyMMddHH", TimeZone.getTimeZone("GMT"));
   private final Object lock = new Object();
+  private boolean initialized = false;
+  private SubsetConfiguration properties;
+  private Configuration conf;
   private String source;
   private boolean ignoreError;
   private boolean allowAppend;
@@ -163,63 +166,102 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
   protected static FileSystem suppliedFilesystem = null;
 
   @Override
-  public void init(SubsetConfiguration conf) {
-    basePath = new Path(conf.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
-    source = conf.getString(SOURCE_KEY, SOURCE_DEFAULT);
-    ignoreError = conf.getBoolean(IGNORE_ERROR_KEY, false);
-    allowAppend = conf.getBoolean(ALLOW_APPEND_KEY, false);
+  public void init(SubsetConfiguration metrics2Properties) {
+    properties = metrics2Properties;
+    basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
+    source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
+    ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, false);
+    allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, false);
 
-    Configuration configuration = loadConf();
-
-    UserGroupInformation.setConfiguration(configuration);
+    conf = loadConf();
+    UserGroupInformation.setConfiguration(conf);
 
     // Don't do secure setup if it's not needed.
     if (UserGroupInformation.isSecurityEnabled()) {
       // Validate config so that we don't get an NPE
-      checkForProperty(conf, KEYTAB_PROPERTY_KEY);
-      checkForProperty(conf, USERNAME_PROPERTY_KEY);
+      checkForProperty(properties, KEYTAB_PROPERTY_KEY);
+      checkForProperty(properties, USERNAME_PROPERTY_KEY);
 
 
       try {
         // Login as whoever we're supposed to be and let the hostname be pulled
         // from localhost. If security isn't enabled, this does nothing.
-        SecurityUtil.login(configuration, conf.getString(KEYTAB_PROPERTY_KEY),
-            conf.getString(USERNAME_PROPERTY_KEY));
+        SecurityUtil.login(conf, properties.getString(KEYTAB_PROPERTY_KEY),
+            properties.getString(USERNAME_PROPERTY_KEY));
       } catch (IOException ex) {
         throw new MetricsException("Error logging in securely: ["
             + ex.toString() + "]", ex);
       }
     }
+  }
+
+  /**
+   * Initialize the connection to HDFS and create the base directory. Also
+   * launch the flush thread.
+   */
+  private boolean initFs() {
+    boolean success = false;
 
-    fileSystem = getFileSystem(configuration);
+    fileSystem = getFileSystem();
 
     // This step isn't strictly necessary, but it makes debugging issues much
     // easier. We try to create the base directory eagerly and fail with
     // copious debug info if it fails.
     try {
       fileSystem.mkdirs(basePath);
+      success = true;
     } catch (Exception ex) {
-      throw new MetricsException("Failed to create " + basePath + "["
-          + SOURCE_KEY + "=" + source + ", "
-          + IGNORE_ERROR_KEY + "=" + ignoreError + ", "
-          + ALLOW_APPEND_KEY + "=" + allowAppend + ", "
-          + KEYTAB_PROPERTY_KEY + "="
-          + conf.getString(KEYTAB_PROPERTY_KEY) + ", "
-          + conf.getString(KEYTAB_PROPERTY_KEY) + "="
-          + configuration.get(conf.getString(KEYTAB_PROPERTY_KEY)) + ", "
-          + USERNAME_PROPERTY_KEY + "="
-          + conf.getString(USERNAME_PROPERTY_KEY) + ", "
-          + conf.getString(USERNAME_PROPERTY_KEY) + "="
-          + configuration.get(conf.getString(USERNAME_PROPERTY_KEY))
-          + "] -- " + ex.toString(), ex);
+      if (!ignoreError) {
+        throw new MetricsException("Failed to create " + basePath + "["
+            + SOURCE_KEY + "=" + source + ", "
+            + ALLOW_APPEND_KEY + "=" + allowAppend + ", "
+            + stringifySecurityProperty(KEYTAB_PROPERTY_KEY) + ", "
+            + stringifySecurityProperty(USERNAME_PROPERTY_KEY)
+            + "] -- " + ex.toString(), ex);
+      }
     }
 
-    // If we're permitted to append, check if we actually can
-    if (allowAppend) {
-      allowAppend = checkAppend(fileSystem);
+    if (success) {
+      // If we're permitted to append, check if we actually can
+      if (allowAppend) {
+        allowAppend = checkAppend(fileSystem);
+      }
+
+      flushTimer = new Timer("RollingFileSystemSink Flusher", true);
     }
 
-    flushTimer = new Timer("RollingFileSystemSink Flusher", true);
+    return success;
+  }
+
+  /**
+   * Turn a security property into a nicely formatted set of <i>name=value</i>
+   * strings, allowing for either the property or the configuration not to be
+   * set.
+   *
+   * @param property the property to stringify
+   * @return the stringified property
+   */
+  private String stringifySecurityProperty(String property) {
+    String securityProperty;
+
+    if (properties.containsKey(property)) {
+      String propertyValue = properties.getString(property);
+      String confValue = conf.get(properties.getString(property));
+
+      if (confValue != null) {
+        securityProperty = property + "=" + propertyValue
+            + ", " + properties.getString(property) + "=" + confValue;
+      } else {
+        securityProperty = property + "=" + propertyValue
+            + ", " + properties.getString(property) + "=<NOT SET>";
+      }
+    } else {
+      securityProperty = property + "=<NOT SET>";
+    }
+
+    return securityProperty;
   }
 
   /**
@@ -242,17 +284,17 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * @return the configuration to use
    */
   private Configuration loadConf() {
-    Configuration conf;
+    Configuration c;
 
     if (suppliedConf != null) {
-      conf = suppliedConf;
+      c = suppliedConf;
     } else {
       // The config we're handed in init() isn't the one we want here, so we
       // create a new one to pick up the full settings.
-      conf = new Configuration();
+      c = new Configuration();
     }
 
-    return conf;
+    return c;
   }
 
   /**
@@ -263,7 +305,7 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * @return the file system to use
    * @throws MetricsException thrown if the file system could not be retrieved
    */
-  private FileSystem getFileSystem(Configuration conf) throws MetricsException {
+  private FileSystem getFileSystem() throws MetricsException {
     FileSystem fs = null;
 
     if (suppliedFilesystem != null) {
@@ -317,22 +359,29 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
     // because if currentDirPath is null, then currentOutStream is null, but
     // currentOutStream can be null for other reasons.
     if ((currentOutStream == null) || !path.equals(currentDirPath)) {
-      // Close the stream. This step could have been handled already by the
-      // flusher thread, but if it has, the PrintStream will just swallow the
-      // exception, which is fine.
-      if (currentOutStream != null) {
-        currentOutStream.close();
+      // If we're not yet connected to HDFS, create the connection
+      if (!initialized) {
+        initialized = initFs();
       }
 
-      currentDirPath = path;
+      if (initialized) {
+        // Close the stream. This step could have been handled already by the
+        // flusher thread, but if it has, the PrintStream will just swallow the
+        // exception, which is fine.
+        if (currentOutStream != null) {
+          currentOutStream.close();
+        }
 
-      try {
-        rollLogDir();
-      } catch (IOException ex) {
-        throwMetricsException("Failed to create new log file", ex);
-      }
+        currentDirPath = path;
 
-      scheduleFlush(now);
+        try {
+          rollLogDir();
+        } catch (IOException ex) {
+          throwMetricsException("Failed to create new log file", ex);
+        }
+
+        scheduleFlush(now);
+      }
     }
   }
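
The new stringifySecurityProperty() helper only looks a value up in the Hadoop configuration when the corresponding sink property is actually present, which keeps the error-message construction safe on non-secure clusters where no keytab or principal is configured. Below is a self-contained sketch of that logic, with plain Maps standing in for SubsetConfiguration and Configuration; it is an illustration of the guard, not the production code.

```java
import java.util.HashMap;
import java.util.Map;

/**
 * Sketch of the NPE-safe formatting of a security property: the
 * configuration is only consulted with a non-null key, and missing
 * values are rendered as <NOT SET> instead of being dereferenced.
 */
public class SecurityPropertySketch {

  static String stringifySecurityProperty(Map<String, String> sinkProps,
                                          Map<String, String> conf,
                                          String property) {
    if (!sinkProps.containsKey(property)) {
      return property + "=<NOT SET>";
    }
    String propertyValue = sinkProps.get(property);  // never null past the check
    String confValue = conf.get(propertyValue);      // may be null; handled below
    return property + "=" + propertyValue + ", " + propertyValue + "="
        + (confValue != null ? confValue : "<NOT SET>");
  }

  public static void main(String[] args) {
    Map<String, String> sinkProps = new HashMap<>();
    Map<String, String> conf = new HashMap<>();

    // Non-secure cluster: neither property is configured.
    System.out.println(stringifySecurityProperty(sinkProps, conf, "keytab-key"));

    // Secure cluster: the sink property names a conf key holding the keytab path.
    sinkProps.put("keytab-key", "my.keytab.property");
    conf.put("my.keytab.property", "/etc/security/sink.keytab");
    System.out.println(stringifySecurityProperty(sinkProps, conf, "keytab-key"));
  }
}
```
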
 

+ 5 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java

@@ -175,7 +175,7 @@ public class RollingFileSystemSinkTestBase {
     String prefix = methodName.getMethodName().toLowerCase();
 
     ConfigBuilder builder = new ConfigBuilder().add("*.period", 10000)
-        .add(prefix + ".sink.mysink0.class", ErrorSink.class.getName())
+        .add(prefix + ".sink.mysink0.class", MockSink.class.getName())
         .add(prefix + ".sink.mysink0.basepath", path)
         .add(prefix + ".sink.mysink0.source", "testsrc")
         .add(prefix + ".sink.mysink0.context", "test1")
@@ -503,8 +503,9 @@ public class RollingFileSystemSinkTestBase {
    * This class is a {@link RollingFileSystemSink} wrapper that tracks whether
    * an exception has been thrown during operations.
    */
-  public static class ErrorSink extends RollingFileSystemSink {
+  public static class MockSink extends RollingFileSystemSink {
     public static volatile boolean errored = false;
+    public static volatile boolean initialized = false;
 
     @Override
     public void init(SubsetConfiguration conf) {
@@ -515,6 +516,8 @@ public class RollingFileSystemSinkTestBase {
 
         throw new MetricsException(ex);
       }
+
+      initialized = true;
     }
 
     @Override

+ 6 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java

@@ -23,6 +23,8 @@ import org.apache.hadoop.metrics2.MetricsSystem;
 import org.junit.Test;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of the local file
@@ -106,7 +108,7 @@ public class TestRollingFileSystemSink extends RollingFileSystemSinkTestBase {
     new MyMetrics1().registerWith(ms);
 
     methodDir.setWritable(false);
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     try {
       // publish the metrics
@@ -114,7 +116,7 @@ public class TestRollingFileSystemSink extends RollingFileSystemSinkTestBase {
 
       assertTrue("No exception was generated while writing metrics "
           + "even though the target directory was not writable",
-          ErrorSink.errored);
+          MockSink.errored);
 
       ms.stop();
       ms.shutdown();
@@ -135,7 +137,7 @@ public class TestRollingFileSystemSink extends RollingFileSystemSinkTestBase {
     new MyMetrics1().registerWith(ms);
 
     methodDir.setWritable(false);
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     try {
       // publish the metrics
@@ -144,7 +146,7 @@ public class TestRollingFileSystemSink extends RollingFileSystemSinkTestBase {
       assertFalse("An exception was generated while writing metrics "
           + "when the target directory was not writable, even though the "
           + "sink is set to ignore errors",
-          ErrorSink.errored);
+          MockSink.errored);
 
       ms.stop();
       ms.shutdown();

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -86,6 +86,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9608. Disk IO imbalance in HDFS with heterogeneous storages.
     (Wei Zhou via wang)
 
+    HDFS-9858. RollingFileSystemSink can throw an NPE on non-secure clusters. 
+    (Daniel Templeton via kasha)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 25 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java

@@ -153,12 +153,12 @@ public class TestRollingFileSystemSinkWithHdfs
     new MyMetrics1().registerWith(ms);
 
     shutdownHdfs();
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     ms.publishMetricsNow(); // publish the metrics
 
     assertTrue("No exception was generated while writing metrics "
-        + "even though HDFS was unavailable", ErrorSink.errored);
+        + "even though HDFS was unavailable", MockSink.errored);
 
     ms.stop();
     ms.shutdown();
@@ -180,7 +180,7 @@ public class TestRollingFileSystemSinkWithHdfs
     ms.publishMetricsNow(); // publish the metrics
 
     shutdownHdfs();
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     try {
       ms.stop();
@@ -208,13 +208,13 @@ public class TestRollingFileSystemSinkWithHdfs
     new MyMetrics1().registerWith(ms);
 
     shutdownHdfs();
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     ms.publishMetricsNow(); // publish the metrics
 
     assertFalse("An exception was generated writing metrics "
         + "while HDFS was unavailable, even though the sink is set to "
-        + "ignore errors", ErrorSink.errored);
+        + "ignore errors", MockSink.errored);
 
     ms.stop();
     ms.shutdown();
@@ -236,13 +236,13 @@ public class TestRollingFileSystemSinkWithHdfs
     ms.publishMetricsNow(); // publish the metrics
 
     shutdownHdfs();
-    ErrorSink.errored = false;
+    MockSink.errored = false;
 
     ms.stop();
 
     assertFalse("An exception was generated stopping sink "
         + "while HDFS was unavailable, even though the sink is set to "
-        + "ignore errors", ErrorSink.errored);
+        + "ignore errors", MockSink.errored);
 
     ms.shutdown();
   }
@@ -288,4 +288,22 @@ public class TestRollingFileSystemSinkWithHdfs
 
     ms.stop();
   }
+
+  /**
+   * Test that a failure to connect to HDFS does not cause the init() method
+   * to fail.
+   */
+  @Test
+  public void testInitWithNoHDFS() {
+    String path = "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
+
+    shutdownHdfs();
+    MockSink.errored = false;
+    initMetricsSystem(path, true, false);
+
+    assertTrue("The sink was not initialized as expected",
+        MockSink.initialized);
+    assertFalse("The sink threw an unexpected error on initialization",
+        MockSink.errored);
+  }
 }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.junit.Test;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of HDFS with
@@ -147,7 +148,7 @@ public class TestRollingFileSystemSinkWithSecureHdfs
 
       assertTrue("No exception was generated initializing the sink against a "
           + "secure cluster even though the principal and keytab properties "
-          + "were missing", ErrorSink.errored);
+          + "were missing", MockSink.errored);
     } finally {
       if (cluster != null) {
         cluster.shutdown();