HDDS-1469. Generate default configuration fragments based on annotations

Closes #773
Márton Elek 6 years ago
parent
commit
e2f0f72677
24 changed files with 703 additions and 49 deletions
  1. +5 -0     hadoop-hdds/common/pom.xml
  2. +25 -4    hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
  3. +0 -12    hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
  4. +0 -20    hadoop-hdds/common/src/main/resources/ozone-default.xml
  5. +10 -5    hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
  6. +2 -2     hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
  7. +66 -0    hadoop-hdds/config/pom.xml
  8. +12 -0    hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
  9. +127 -0   hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
  10. +113 -0  hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
  11. +0 -0    hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
  12. +44 -0   hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
  13. +0 -0    hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
  14. +1 -1    hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
  15. +22 -0   hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
  16. +16 -0   hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
  17. +89 -0   hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
  18. +48 -0   hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
  19. +24 -0   hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
  20. +7 -0    hadoop-hdds/pom.xml
  21. +30 -5   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
  22. +14 -0   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
  23. +24 -0   hadoop-ozone/integration-test/src/test/resources/core-site.xml
  24. +24 -0   hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml

+ 5 - 0
hadoop-hdds/common/pom.xml

@@ -36,6 +36,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </properties>
 
   <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-config</artifactId>
+    </dependency>
+
     <dependency>
       <groupId>javax.annotation</groupId>
       <artifactId>javax.annotation-api</artifactId>

+ 25 - 4
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java

@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hdds.conf;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
@@ -28,6 +25,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URL;
@@ -36,6 +34,9 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * Configuration for ozone.
  */
@@ -47,12 +48,33 @@ public class OzoneConfiguration extends Configuration {
 
   public OzoneConfiguration() {
     OzoneConfiguration.activate();
+    loadDefaults();
   }
 
   public OzoneConfiguration(Configuration conf) {
     super(conf);
     //load the configuration from the classloader of the original conf.
     setClassLoader(conf.getClassLoader());
+    if (!(conf instanceof OzoneConfiguration)) {
+      loadDefaults();
+    }
+  }
+
+  private void loadDefaults() {
+    try {
+      //there could be multiple ozone-default-generated.xml files on the
+      // classpath, which are generated by the annotation processor.
+      // Here we add all of them to the list of the available configuration.
+      Enumeration<URL> generatedDefaults =
+          OzoneConfiguration.class.getClassLoader().getResources(
+              "ozone-default-generated.xml");
+      while (generatedDefaults.hasMoreElements()) {
+        addResource(generatedDefaults.nextElement());
+      }
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    addResource("ozone-site.xml");
   }
 
   public List<Property> readPropertyFromXml(URL url) throws JAXBException {
@@ -265,7 +287,6 @@ public class OzoneConfiguration extends Configuration {
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
     Configuration.addDefaultResource("ozone-default.xml");
-    Configuration.addDefaultResource("ozone-site.xml");
   }
 
   /**
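
With loadDefaults() in place, every ozone-default-generated.xml fragment on the classpath is registered before ozone-site.xml, so annotated defaults are visible at runtime without any entry in the hand-written XML, while explicit site settings still win because addResource("ozone-site.xml") is added last. A minimal sketch of the consumer side, assuming the ReplicationManagerConfiguration group uses the hdds.scm.replication prefix (consistent with the keys removed from ScmConfigKeys below):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;

public final class GeneratedDefaultsDemo {
  public static void main(String[] args) {
    // Picks up every ozone-default-generated.xml fragment on the classpath,
    // then layers ozone-site.xml on top of them.
    OzoneConfiguration conf = new OzoneConfiguration();

    // Injects the annotated defaults: event.timeout "10m" -> 600000 ms.
    ReplicationManagerConfiguration rmConf =
        conf.getObject(ReplicationManagerConfiguration.class);
    System.out.println(rmConf.getEventTimeout());

    // An explicitly set value (normally coming from ozone-site.xml) overrides
    // the generated default. The full key is prefix + "." + key; the
    // hdds.scm.replication prefix is an assumption here.
    conf.set("hdds.scm.replication.event.timeout", "5m");
    System.out.println(
        conf.getObject(ReplicationManagerConfiguration.class).getEventTimeout());
  }
}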

+ 0 - 12
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java

@@ -348,18 +348,6 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
       "10m";
 
-  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL =
-      "hdds.scm.replication.thread.interval";
-
-  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL_DEFAULT =
-      "5m";
-
-  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT =
-      "hdds.scm.replication.event.timeout";
-
-  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT_DEFAULT =
-      "10m";
-
   public static final String
       HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
       "hdds.scm.http.kerberos.principal";

+ 0 - 20
hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -2385,26 +2385,6 @@
       Request to flush the OM DB before taking checkpoint snapshot.
     </description>
   </property>
-  <property>
-    <name>hdds.scm.replication.thread.interval</name>
-    <value>5m</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      There is a replication monitor thread running inside SCM which
-      takes care of replicating the containers in the cluster. This
-      property is used to configure the interval in which that thread
-      runs.
-    </description>
-  </property>
-  <property>
-    <name>hdds.scm.replication.event.timeout</name>
-    <value>10m</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Timeout for the container replication/deletion commands sent
-      to datanodes. After this timeout the command will be retried.
-    </description>
-  </property>
   <property>
     <name>hdds.tracing.enabled</name>
     <value>true</value>

+ 10 - 5
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java

@@ -35,28 +35,33 @@ public class SimpleConfiguration {
 
   private long waitTime = 1;
 
-  @Config(key = "address")
+  @Config(key = "address", defaultValue = "localhost", description = "Just "
+      + "for testing", tags = ConfigTag.MANAGEMENT)
   public void setClientAddress(String clientAddress) {
     this.clientAddress = clientAddress;
   }
 
-  @Config(key = "bind.host")
+  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just "
+      + "for testing", tags = ConfigTag.MANAGEMENT)
   public void setBindHost(String bindHost) {
     this.bindHost = bindHost;
   }
 
-  @Config(key = "enabled")
+  @Config(key = "enabled", defaultValue = "true", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setEnabled(boolean enabled) {
     this.enabled = enabled;
   }
 
-  @Config(key = "port")
+  @Config(key = "port", defaultValue = "9878", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setPort(int port) {
     this.port = port;
   }
 
   @Config(key = "wait", type = ConfigType.TIME, timeUnit =
-      TimeUnit.SECONDS)
+      TimeUnit.SECONDS, defaultValue = "10m", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setWaitTime(long waitTime) {
     this.waitTime = waitTime;
   }

+ 2 - 2
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java

@@ -124,8 +124,8 @@ public class TestOzoneConfiguration {
     SimpleConfiguration configuration =
         ozoneConfiguration.getObject(SimpleConfiguration.class);
 
-    Assert.assertEquals(false, configuration.isEnabled());
-    Assert.assertEquals(9860, configuration.getPort());
+    Assert.assertEquals(true, configuration.isEnabled());
+    Assert.assertEquals(9878, configuration.getPort());
   }
 
 

+ 66 - 0
hadoop-hdds/config/pom.xml

@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>0.5.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-hdds-config</artifactId>
+  <version>0.5.0-SNAPSHOT</version>
+  <description>Apache Hadoop Distributed Data Store Config Tools</description>
+  <name>Apache Hadoop HDDS Config</name>
+  <packaging>jar</packaging>
+
+  <properties>
+
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <executions>
+        <execution>
+          <id>default-compile</id>
+          <phase>compile</phase>
+          <goals>
+            <goal>compile</goal>
+          </goals>
+          <configuration>
+            <!-- don't need to activate annotation processor (which may not be available yet) for compilation -->
+            <compilerArgument>-proc:none</compilerArgument>
+          </configuration>
+        </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

+ 12 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java → hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java

@@ -35,6 +35,16 @@ public @interface Config {
    */
   String key();
 
+  /**
+   * Default value to use if not set.
+   */
+  String defaultValue();
+
+  /**
+   * Custom description as a help.
+   */
+  String description();
+
   /**
    * Type of configuration. Use AUTO to decide it based on the java type.
    */
@@ -44,4 +54,6 @@ public @interface Config {
    * If type == TIME the unit should be defined with this attribute.
    */
   TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+
+  ConfigTag[] tags();
 }
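
Because defaultValue, description and tags have no default values on the annotation, every annotated setter now has to carry the full metadata the generator needs. A small illustrative sketch of a config group using the required attributes (the prefix, key and values below are hypothetical, not part of this change):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.ConfigType;

@ConfigGroup(prefix = "ozone.example")  // hypothetical prefix
public class ExampleClientConfig {

  private long connectTimeout;

  // The processor emits an ozone.example.connect.timeout property with this
  // default, description and tag list into ozone-default-generated.xml.
  @Config(key = "connect.timeout",
      type = ConfigType.TIME,
      timeUnit = TimeUnit.MILLISECONDS,
      defaultValue = "30s",
      description = "Illustrative timeout used to show the required "
          + "annotation attributes.",
      tags = {ConfigTag.CLIENT, ConfigTag.OZONE})
  public void setConnectTimeout(long connectTimeout) {
    this.connectTimeout = connectTimeout;
  }

  public long getConnectTimeout() {
    return connectTimeout;
  }
}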

+ 127 - 0
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java

@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.InputStream;
+import java.io.Writer;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simple DOM based config file writer.
+ * <p>
+ * This class can init/load existing ozone-default-generated.xml fragments
+ * and append new entries and write to the file system.
+ */
+public class ConfigFileAppender {
+
+  private Document document;
+
+  private final DocumentBuilder builder;
+
+  public ConfigFileAppender() {
+    try {
+      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+      builder = factory.newDocumentBuilder();
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can initialize new configuration", ex);
+    }
+  }
+
+  /**
+   * Initialize a new ozone-site.xml structure with empty content.
+   */
+  public void init() {
+    try {
+      document = builder.newDocument();
+      document.appendChild(document.createElement("configuration"));
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can initialize new configuration", ex);
+    }
+  }
+
+  /**
+   * Load existing ozone-site.xml content and parse the DOM tree.
+   */
+  public void load(InputStream stream) {
+    try {
+      document = builder.parse(stream);
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can't load existing configuration", ex);
+    }
+  }
+
+  /**
+   * Add configuration fragment.
+   */
+  public void addConfig(String key, String defaultValue, String description,
+      ConfigTag[] tags) {
+    Element root = document.getDocumentElement();
+    Element propertyElement = document.createElement("property");
+
+    addXmlElement(propertyElement, "name", key);
+
+    addXmlElement(propertyElement, "value", defaultValue);
+
+    addXmlElement(propertyElement, "description", description);
+
+    String tagsAsString = Arrays.stream(tags).map(tag -> tag.name())
+        .collect(Collectors.joining(", "));
+
+    addXmlElement(propertyElement, "tag", tagsAsString);
+
+    root.appendChild(propertyElement);
+  }
+
+  private void addXmlElement(Element parentElement, String tagValue,
+      String textValue) {
+    Element element = document.createElement(tagValue);
+    element.appendChild(document.createTextNode(textValue));
+    parentElement.appendChild(element);
+  }
+
+  /**
+   * Write out the XML content to a writer.
+   */
+  public void write(Writer writer) {
+    try {
+      TransformerFactory transformerFactory = TransformerFactory.newInstance();
+      Transformer transf = transformerFactory.newTransformer();
+
+      transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+      transf.setOutputProperty(OutputKeys.INDENT, "yes");
+      transf
+          .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
+
+      transf.transform(new DOMSource(document), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new ConfigurationException("Can't write the configuration xml", e);
+    }
+  }
+}
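
The appender can also be driven outside the annotation processor; a short sketch of the load-or-init pattern the generator relies on (the fragment path below is a placeholder):

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.conf.ConfigFileAppender;
import org.apache.hadoop.hdds.conf.ConfigTag;

public final class AppenderDemo {
  public static void main(String[] args) throws Exception {
    ConfigFileAppender appender = new ConfigFileAppender();

    // Reuse an existing fragment if present, otherwise start a fresh
    // <configuration/> document.
    try (InputStream in =
        new FileInputStream("target/classes/ozone-default-generated.xml")) {
      appender.load(in);
    } catch (FileNotFoundException e) {
      appender.init();
    }

    appender.addConfig("ozone.example.enabled", "true",
        "Illustrative property.", new ConfigTag[] {ConfigTag.OZONE});

    // Write the merged document back out.
    try (Writer out = new OutputStreamWriter(
        new FileOutputStream("target/classes/ozone-default-generated.xml"),
        StandardCharsets.UTF_8)) {
      appender.write(out);
    }
  }
}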

+ 113 - 0
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import javax.annotation.processing.AbstractProcessor;
+import javax.annotation.processing.Filer;
+import javax.annotation.processing.RoundEnvironment;
+import javax.annotation.processing.SupportedAnnotationTypes;
+import javax.lang.model.element.Element;
+import javax.lang.model.element.ElementKind;
+import javax.lang.model.element.TypeElement;
+import javax.tools.Diagnostic.Kind;
+import javax.tools.FileObject;
+import javax.tools.StandardLocation;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.util.Set;
+
+/**
+ * Annotation processor to generate config fragments from Config annotations.
+ */
+@SupportedAnnotationTypes("org.apache.hadoop.hdds.conf.ConfigGroup")
+public class ConfigFileGenerator extends AbstractProcessor {
+
+  public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml";
+
+  @Override
+  public boolean process(Set<? extends TypeElement> annotations,
+      RoundEnvironment roundEnv) {
+    if (roundEnv.processingOver()) {
+      return false;
+    }
+
+    Filer filer = processingEnv.getFiler();
+
+    try {
+
+      //load existing generated config (if exists)
+      ConfigFileAppender appender = new ConfigFileAppender();
+      try (InputStream input = filer
+          .getResource(StandardLocation.CLASS_OUTPUT, "",
+              OUTPUT_FILE_NAME).openInputStream()) {
+        appender.load(input);
+      } catch (FileNotFoundException ex) {
+        appender.init();
+      }
+
+      Set<? extends Element> annotatedElements =
+          roundEnv.getElementsAnnotatedWith(ConfigGroup.class);
+      for (Element annotatedElement : annotatedElements) {
+        TypeElement configGroup = (TypeElement) annotatedElement;
+
+        //check if any of the setters are annotated with @Config
+        for (Element element : configGroup.getEnclosedElements()) {
+          if (element.getKind() == ElementKind.METHOD) {
+            processingEnv.getMessager()
+                .printMessage(Kind.WARNING, element.getSimpleName().toString());
+            if (element.getSimpleName().toString().startsWith("set")
+                && element.getAnnotation(Config.class) != null) {
+
+              //update the ozone-site-generated.xml
+              Config configAnnotation = element.getAnnotation(Config.class);
+              ConfigGroup configGroupAnnotation =
+                  configGroup.getAnnotation(ConfigGroup.class);
+
+              String key = configGroupAnnotation.prefix() + "."
+                  + configAnnotation.key();
+
+              appender.addConfig(key,
+                  configAnnotation.defaultValue(),
+                  configAnnotation.description(),
+                  configAnnotation.tags());
+            }
+          }
+
+        }
+        FileObject resource = filer
+            .createResource(StandardLocation.CLASS_OUTPUT, "",
+                OUTPUT_FILE_NAME);
+
+        try (Writer writer = new OutputStreamWriter(
+            resource.openOutputStream(), StandardCharsets.UTF_8)) {
+          appender.write(writer);
+        }
+      }
+    } catch (IOException e) {
+      processingEnv.getMessager().printMessage(Kind.ERROR,
+          "Can't generate the config file from annotation: " + e.getMessage());
+    }
+    return false;
+  }
+
+
+}
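
During a normal build javac discovers the processor automatically via the META-INF/services registration added below; for experimentation it can also be invoked explicitly. A rough sketch using the JDK compiler API, with all paths as placeholders:

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public final class RunProcessorDemo {
  public static void main(String[] args) {
    JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
    // -processor forces ConfigFileGenerator to run; the fragment is written
    // to StandardLocation.CLASS_OUTPUT, i.e. the directory given with -d,
    // as ozone-default-generated.xml.
    int result = javac.run(null, null, null,
        "-classpath", "path/to/hadoop-hdds-config.jar",
        "-processor", "org.apache.hadoop.hdds.conf.ConfigFileGenerator",
        "-d", "target/classes",
        "path/to/SomeConfigGroup.java");  // any @ConfigGroup-annotated source
    System.out.println(result == 0 ? "generated" : "compilation failed");
  }
}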

+ 0 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java → hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java


+ 44 - 0
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+/**
+ * Available config tags.
+ * <p>
+ * Note: the values are defined in ozone-default.xml by hadoop.tags.custom.
+ */
+public enum ConfigTag {
+  OZONE,
+  MANAGEMENT,
+  SECURITY,
+  PERFORMANCE,
+  DEBUG,
+  CLIENT,
+  SERVER,
+  OM,
+  SCM,
+  CRITICAL,
+  RATIS,
+  CONTAINER,
+  REQUIRED,
+  REST,
+  STORAGE,
+  PIPELINE,
+  STANDALONE,
+  S3GATEWAY
+}

+ 0 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java → hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java


+ 1 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java → hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdds.conf;
 
 /**
- * Exeception to throw in case of a configuration problem.
+ * Exception to throw in case of a configuration problem.
  */
 public class ConfigurationException extends RuntimeException {
   public ConfigurationException() {

+ 22 - 0
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Generic configuration annotations, tools and generators.
+ */
+package org.apache.hadoop.hdds.conf;

+ 16 - 0
hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor

@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdds.conf.ConfigFileGenerator
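
This services entry is what ServiceLoader-based discovery keys on. A quick sketch for checking that the processor is visible on a given classpath (no assumptions beyond hadoop-hdds-config being on it):

import java.util.ServiceLoader;

import javax.annotation.processing.Processor;

public final class ProcessorDiscoveryDemo {
  public static void main(String[] args) {
    // javac uses the same mechanism: every Processor listed in
    // META-INF/services/javax.annotation.processing.Processor is loaded.
    for (Processor processor : ServiceLoader.load(Processor.class)) {
      // Expected to include org.apache.hadoop.hdds.conf.ConfigFileGenerator.
      System.out.println(processor.getClass().getName());
    }
  }
}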

+ 89 - 0
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Example configuration to test the configuration injection.
+ */
+@ConfigGroup(prefix = "ozone.scm.client")
+public class ConfigurationExample {
+
+  private String clientAddress;
+
+  private String bindHost;
+
+  private boolean compressionEnabled;
+
+  private int port = 1234;
+
+  private long waitTime = 1;
+
+  @Config(key = "address", defaultValue = "localhost", description = "Client "
+      + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT)
+  public void setClientAddress(String clientAddress) {
+    this.clientAddress = clientAddress;
+  }
+
+  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind "
+      + "host(To test string injection).", tags = ConfigTag.MANAGEMENT)
+  public void setBindHost(String bindHost) {
+    this.bindHost = bindHost;
+  }
+
+  @Config(key = "compression.enabled", defaultValue = "true", description =
+      "Compression enabled. (Just to test boolean flag)", tags =
+      ConfigTag.MANAGEMENT)
+  public void setCompressionEnabled(boolean compressionEnabled) {
+    this.compressionEnabled = compressionEnabled;
+  }
+
+  @Config(key = "port", defaultValue = "1234", description = "Port number "
+      + "config (To test in injection)", tags = ConfigTag.MANAGEMENT)
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  @Config(key = "wait", type = ConfigType.TIME, timeUnit =
+      TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To "
+      + "test TIME config type)", tags = ConfigTag.MANAGEMENT)
+  public void setWaitTime(long waitTime) {
+    this.waitTime = waitTime;
+  }
+
+  public String getClientAddress() {
+    return clientAddress;
+  }
+
+  public String getBindHost() {
+    return bindHost;
+  }
+
+  public boolean isCompressionEnabled() {
+    return compressionEnabled;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public long getWaitTime() {
+    return waitTime;
+  }
+}

+ 48 - 0
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.io.StringWriter;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test the utility which loads/writes the config file fragments.
+ */
+public class TestConfigFileAppender {
+
+  @Test
+  public void testInit() {
+    ConfigFileAppender appender = new ConfigFileAppender();
+
+    appender.init();
+
+    appender.addConfig("hadoop.scm.enabled", "true", "desc",
+        new ConfigTag[] {ConfigTag.OZONE, ConfigTag.SECURITY});
+
+    StringWriter builder = new StringWriter();
+    appender.write(builder);
+
+    Assert.assertTrue("Generated config should contain property key entry",
+        builder.toString().contains("<name>hadoop.scm.enabled</name>"));
+
+    Assert.assertTrue("Generated config should contain tags",
+        builder.toString().contains("<tag>OZONE, SECURITY</tag>"));
+  }
+}

+ 24 - 0
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java

@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * <p>
+ * Testing configuration tools.
+ */
+
+/**
+ * Testing configuration tools.
+ */
+package org.apache.hadoop.hdds.conf;

+ 7 - 0
hadoop-hdds/pom.xml

@@ -38,6 +38,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>server-scm</module>
     <module>tools</module>
     <module>docs</module>
+    <module>config</module>
 
   </modules>
 
@@ -115,6 +116,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <version>${hdds.version}</version>
       </dependency>
 
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-config</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-container-service</artifactId>

+ 30 - 5
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm.container;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.GeneratedMessage;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.Config;
@@ -40,6 +41,8 @@ import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.Time;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.*;
 import org.apache.ratis.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -54,7 +57,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
@@ -760,14 +762,37 @@ public class ReplicationManager {
      */
     private long eventTimeout = 10 * 60 * 1000;
 
-    @Config(key = "thread.interval", type = ConfigType.TIME, timeUnit =
-        TimeUnit.MILLISECONDS)
+    @Config(key = "thread.interval",
+        type = ConfigType.TIME,
+        defaultValue = "3s",
+        tags = {SCM, OZONE},
+        description = "When a heartbeat from the data node arrives on SCM, "
+            + "It is queued for processing with the time stamp of when the "
+            + "heartbeat arrived. There is a heartbeat processing thread "
+            + "inside "
+            + "SCM that runs at a specified interval. This value controls how "
+            + "frequently this thread is run.\n\n"
+            + "There are some assumptions build into SCM such as this "
+            + "value should allow the heartbeat processing thread to run at "
+            + "least three times more frequently than heartbeats and at least "
+            + "five times more than stale node detection time. "
+            + "If you specify a wrong value, SCM will gracefully refuse to "
+            + "run. "
+            + "For more info look at the node manager tests in SCM.\n"
+            + "\n"
+            + "In short, you don't need to change this."
+    )
     public void setInterval(long interval) {
       this.interval = interval;
     }
 
-    @Config(key = "event.timeout", type = ConfigType.TIME, timeUnit =
-        TimeUnit.MILLISECONDS)
+    @Config(key = "event.timeout",
+        type = ConfigType.TIME,
+        defaultValue = "10m",
+        tags = {SCM, OZONE},
+        description = "Timeout for the container replication/deletion commands "
+            + "sent  to datanodes. After this timeout the command will be "
+            + "retried.")
     public void setEventTimeout(long eventTimeout) {
       this.eventTimeout = eventTimeout;
     }

+ 14 - 0
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java

@@ -575,6 +575,20 @@ public class TestReplicationManager {
     Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
   }
 
+  @Test
+  public void testGeneratedConfig() {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+
+    ReplicationManagerConfiguration rmc =
+        ozoneConfiguration.getObject(ReplicationManagerConfiguration.class);
+
+    //default is not included in ozone-site.xml but generated from annotation
+    //to the ozone-site-generated.xml which should be loaded by the
+    // OzoneConfiguration.
+    Assert.assertEquals(600000, rmc.getEventTimeout());
+
+  }
+
   @After
   public void teardown() throws IOException {
     containerStateManager.close();

+ 24 - 0
hadoop-ozone/integration-test/src/test/resources/core-site.xml

@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>

+ 24 - 0
hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml

@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>