Просмотр исходного кода

AMBARI-17769. property_description is null for some properties in config page (dlysnichenko)

Lisnichenko Dmitro 9 лет назад
Родитель
Commit
77c2dc6e5a
17 измененных файлов с 156 добавлено и 18 удалено
  1. 3 1
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/core-site.xml
  2. 12 1
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml
  3. 6 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
  4. 22 4
      ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
  5. 6 0
      ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
  6. 18 4
      ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
  7. 8 0
      ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
  8. 19 0
      ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
  9. 1 0
      ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/capacity-scheduler.xml
  10. 2 0
      ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/yarn-site.xml
  11. 3 1
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/configuration/core-site.xml
  12. 8 1
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/configuration/hdfs-site.xml
  13. 6 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
  14. 23 4
      ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
  15. 8 0
      ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
  16. 3 1
      ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
  17. 8 1
      ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/core-site.xml

@@ -21,7 +21,9 @@
   <property>
     <name>hadoop.http.authentication.simple.anonymous.allowed</name>
     <value>true</value>
-    <description/>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

+ 12 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml

@@ -21,7 +21,14 @@
   <property>
     <name>dfs.namenode.startup.delay.block.deletion.sec</name>
     <value>3600</value>
-    <description/>
+    <description>
+      The delay in seconds at which we will pause the blocks deletion
+      after Namenode startup. By default it's disabled.
+      In the case a directory has large number of directories and files are
+      deleted, suggested delay is one hour to give the administrator enough time
+      to notice large number of pending deletion blocks and take corrective
+      action.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -44,6 +51,10 @@
   </property>
   <property>
     <name>dfs.encryption.key.provider.uri</name>
+    <description>
+      The KeyProvider to use when interacting with encryption keys used
+      when reading and writing to an encryption zone.
+    </description>
     <value/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>

+ 6 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml

@@ -18,6 +18,12 @@
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses
+      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
+    </description>
     <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
     <display-name>CPU Scheduling</display-name>
     <value-attributes>

+ 22 - 4
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml

@@ -210,7 +210,14 @@
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
     <value>8</value>
-    <description/>
+    <description>Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, number of vcores is 8 by default.
+    </description>
     <display-name>Number of virtual cores</display-name>
     <value-attributes>
       <type>int</type>
@@ -247,7 +254,11 @@
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
-    <description/>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -292,7 +303,7 @@
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
-    <description/>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -421,7 +432,9 @@
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
-    <description/>
+    <description>
+      URI for NodeLabelManager.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -526,6 +539,11 @@
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
     <value>false</value>
     <display-name>Pre-emption</display-name>
     <value-attributes>

+ 6 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml

@@ -18,6 +18,12 @@
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses
+      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
+    </description>
     <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
     <description/>
     <on-ambari-upgrade add="true"/>

+ 18 - 4
ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml

@@ -206,7 +206,15 @@
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
     <value>8</value>
-    <description/>
+    <description>
+      Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, number of vcores is 8 by default.
+    </description>
     <display-name>Total NM CPU vCores available to Containers</display-name>
     <value-attributes>
       <type>int</type>
@@ -236,7 +244,11 @@
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
-    <description/>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -281,7 +293,7 @@
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
-    <description/>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -381,7 +393,9 @@
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
-    <description/>
+    <description>
+      URI for NodeLabelManager.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

+ 8 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml

@@ -36,6 +36,14 @@
   <property>
     <name>nfs.exports.allowed.hosts</name>
     <value>* rw</value>
+    <description>
+      By default, the export can be mounted by any client. To better control the access,
+      users can update the following property. The value string contains machine name and access privilege,
+      separated by whitespace characters. Machine name format can be single host, wildcards, and IPv4
+      networks.The access privilege uses rw or ro to specify readwrite or readonly access of the machines
+      to exports. If the access privilege is not provided, the default is read-only. Entries are separated
+      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
+    </description>
     <display-name>Allowed hosts</display-name>
     <on-ambari-upgrade add="true"/>
   </property>

+ 19 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml

@@ -25,6 +25,10 @@
   </property>
   <property>
     <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
     <value>true</value>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -97,24 +101,39 @@
   <!-- advanced ats v1.5 properties-->
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
     <!-- Use rolling leveldb, advanced -->
     <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage reader.This
+      value controls how frequent the reader will scan the HDFS active directory
+      for application status.
+    </description>
     <!-- Default is 60 seconds, advanced -->
     <value>60</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage cleaner.This
+      value controls how frequent the reader will scan the HDFS done directory
+      for stale application data.
+    </description>
     <!-- 3600 is default, advanced -->
     <value>3600</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
     <!-- 7 days is default, advanced -->
     <value>604800</value>
     <on-ambari-upgrade add="true"/>

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/capacity-scheduler.xml

@@ -18,6 +18,7 @@
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>capacity-scheduler</name>
+    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
     <depends-on>
       <property>
         <type>hive-interactive-env</type>

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/yarn-site.xml

@@ -32,11 +32,13 @@
   <property>
     <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark 2</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

+ 3 - 1
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/configuration/core-site.xml

@@ -21,7 +21,9 @@
   <property>
     <name>hadoop.http.authentication.simple.anonymous.allowed</name>
     <value>true</value>
-    <description/>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

+ 8 - 1
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HDFS/configuration/hdfs-site.xml

@@ -21,7 +21,14 @@
   <property>
     <name>dfs.namenode.startup.delay.block.deletion.sec</name>
     <value>3600</value>
-    <description/>
+    <description>
+      The delay in seconds at which we will pause the blocks deletion
+      after Namenode startup. By default it's disabled.
+      In the case a directory has large number of directories and files are
+      deleted, suggested delay is one hour to give the administrator enough time
+      to notice large number of pending deletion blocks and take corrective
+      action.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

+ 6 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml

@@ -19,6 +19,12 @@
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
     <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses
+      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
+    </description>
     <display-name>CPU Scheduling</display-name>
     <value-attributes>
       <type>value-list</type>

+ 23 - 4
ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml

@@ -210,7 +210,15 @@
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
     <value>8</value>
-    <description/>
+    <description>
+      Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, number of vcores is 8 by default.
+    </description>
     <display-name>Number of virtual cores</display-name>
     <value-attributes>
       <type>int</type>
@@ -247,7 +255,11 @@
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
-    <description/>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -292,7 +304,7 @@
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
-    <description/>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -392,7 +404,9 @@
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
-    <description/>
+    <description>
+      URI for NodeLabelManager.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -458,6 +472,11 @@
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
     <value>false</value>
     <display-name>Pre-emption</display-name>
     <value-attributes>

+ 8 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml

@@ -32,6 +32,14 @@
   </property>
   <property>
     <name>nfs.exports.allowed.hosts</name>
+    <description>
+      By default, the export can be mounted by any client. To better control the access,
+      users can update the following property. The value string contains machine name and access privilege,
+      separated by whitespace characters. Machine name format can be single host, wildcards, and IPv4
+      networks.The access privilege uses rw or ro to specify readwrite or readonly access of the machines
+      to exports. If the access privilege is not provided, the default is read-only. Entries are separated
+      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
+    </description>
     <value>* rw</value>
     <display-name>Allowed hosts</display-name>
     <on-ambari-upgrade add="true"/>

+ 3 - 1
ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml

@@ -21,7 +21,9 @@
   <property>
     <name>hadoop.http.authentication.simple.anonymous.allowed</name>
     <value>true</value>
-    <description/>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

+ 8 - 1
ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml

@@ -21,7 +21,14 @@
   <property>
     <name>dfs.namenode.startup.delay.block.deletion.sec</name>
     <value>3600</value>
-    <description/>
+    <description>
+      The delay in seconds at which we will pause the blocks deletion
+      after Namenode startup. By default it's disabled.
+      In the case a directory has large number of directories and files are
+      deleted, suggested delay is one hour to give the administrator enough time
+      to notice large number of pending deletion blocks and take corrective
+      action.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>