
AMBARI-2888. Cleaned up Hadoop2 configs (ncole)

Nate Cole, 12 years ago
commit 06d53ed972

+ 0 - 63
ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml

@@ -45,12 +45,6 @@
                  for compression/decompression.</description>
   </property>
 
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
 <!-- file system properties -->
 
   <property>
@@ -98,14 +92,6 @@
   </description>
   </property>
 
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the dfs.namenode.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
     <name>ipc.client.idlethreshold</name>
@@ -206,53 +192,4 @@ DEFAULT
     </description>
   </property>
 
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
 </configuration>
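
Both removals above track the Hadoop 1 to Hadoop 2 transition: the LZO codec class is GPL-licensed and ships outside the stack, and fs.checkpoint.size was the Hadoop 1 size-based checkpoint trigger. A minimal sketch of the Hadoop 2 equivalent, which is transaction-based and lives in hdfs-site.xml rather than core-site.xml (the 1000000 value is the stock Hadoop 2 default, assumed here; this snippet is not part of the commit):

<!-- Hadoop 2 checkpointing triggers on accumulated transactions, not bytes. -->
<property>
  <name>dfs.namenode.checkpoint.txns</name>
  <value>1000000</value>
  <description>Checkpoint once this many uncheckpointed transactions have
  accumulated, even if dfs.namenode.checkpoint.period has not expired.</description>
</property>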

+ 1 - 66
ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml

@@ -49,14 +49,6 @@
     <final>true</final>
   </property>
 
-<!--
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
--->
-
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
@@ -64,15 +56,6 @@
     <final>true</final>
   </property>
 
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
   <property>
     <name>dfs.datanode.data.dir</name>
     <value></value>
@@ -105,15 +88,6 @@
   </property>
 -->
 
-  <property>
-    <name>dfs.checksum.type</name>
-    <value>CRC32</value>
-    <description>The checksum method to be used by default. To maintain
-    compatibility, it is being set to CRC32. Once all migration steps
-    are complete, we can change it to CRC32C and take advantage of the
-    additional performance benefit.</description>
-  </property>
-
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
@@ -208,13 +182,6 @@ If the port is 0 then the server will start on a free port.
 <description>Delay for first block report in seconds.</description>
 </property>
 
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
 <property>
 <name>dfs.namenode.handler.count</name>
 <value>40</value>
@@ -237,15 +204,6 @@ The octal umask used when creating files and directories.
 </description>
 </property>
 
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
 <property>
 <name>dfs.permissions.enabled</name>
 <value>true</value>
@@ -259,7 +217,7 @@ owner or group of files or directories.
 </property>
 
 <property>
-<name>dfs.permissions.enabled.superusergroup</name>
+<name>dfs.permissions.superusergroup</name>
 <value>hdfs</value>
 <description>The name of the group of super-users.</description>
 </property>
@@ -270,10 +228,6 @@ owner or group of files or directories.
 <description>Added to grow Queue size so that more client connections are allowed</description>
 </property>
 
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
 <property>
 <name>dfs.block.access.token.enable</name>
 <value>true</value>
@@ -324,12 +278,6 @@ Kerberos principal name for the NameNode
     <description>Address of secondary namenode web server</description>
   </property>
 
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
     <value></value>
@@ -381,13 +329,6 @@ Kerberos principal name for the NameNode
     </description>
   </property>
 
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
   <property>
     <name>dfs.namenode.https-address</name>
     <value></value>
@@ -419,12 +360,6 @@ don't exist, they will be created with this permission.</description>
    <description>ACL for who all can view the default servlets in the HDFS</description>
   </property>
 
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
     <value>true</value>
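
Two of the changes above deserve a note. The rename to dfs.permissions.superusergroup matches the key HDFS actually reads; the old dfs.permissions.enabled.superusergroup spelling would have been silently ignored, leaving the super-user group at its default. And dfs.block.local-path-access.user was the older HDFS-2246 short-circuit read mechanism; on Hadoop 2 the domain-socket mechanism from HDFS-347 replaces it. A hedged sketch of that replacement, where the socket path is an assumed example and not part of this commit:

<property>
  <name>dfs.client.read.shortcircuit</name>
  <value>true</value>
  <description>Enable short-circuit local reads over a UNIX domain socket,
  superseding the removed dfs.block.local-path-access.user allow-list.</description>
</property>
<property>
  <name>dfs.domain.socket.path</name>
  <!-- Assumed example; must match the path the DataNode is configured with. -->
  <value>/var/lib/hadoop-hdfs/dn_socket</value>
</property>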

+ 2 - 40
ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml

@@ -30,12 +30,6 @@
     <description>No description</description>
   </property>
 
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
     <value>0.1</value>
@@ -177,17 +171,6 @@
   </description>
   </property>
 
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
     <value>-Xmx512m</value>
@@ -227,33 +210,12 @@
     <value>60000</value>
   </property>
 
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
     <value></value>
     <description>The filename of the keytab for the task tracker</description>
   </property>
 
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
-  </property>
-
  <property>
    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
@@ -280,13 +242,13 @@
 </property>
 
 <property>       
-  <name>mapreduce.jobhistory.address</name>       
+  <name>mapreduce.jobhistory.address</name>
   <value>localhost:10020</value>  
   <description>Enter your JobHistoryServer hostname.</description>
 </property>
 
 <property>       
-  <name>mapreduce.jobhistory.webapp.address</name>       
+  <name>mapreduce.jobhistory.webapp.address</name>
   <value>localhost:19888</value>  
   <description>Enter your JobHistoryServer hostname.</description>
 </property>
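
The knobs removed from this file (io.sort.record.percent, jetty.connector, mapred.child.root.logger, mapred.task.maxvmem, and the JobTracker split-metainfo limit) are MRv1-era settings that MRv2 ignores; in particular, the unified sort buffer from MAPREDUCE-64 made io.sort.record.percent a no-op. A sketch of the surviving MRv2 sort knob that pairs with the mapreduce.map.sort.spill.percent entry kept above (the 100 MB value is the stock Hadoop 2 default, assumed here; not part of this commit):

<property>
  <name>mapreduce.task.io.sort.mb</name>
  <value>100</value>
  <description>Buffer size, in MB, for sorting map output; together with
  mapreduce.map.sort.spill.percent it covers the tuning that
  io.sort.record.percent provided in MRv1.</description>
</property>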

+ 0 - 11
ambari-server/src/main/resources/stacks/HDP/2.0.5/services/YARN/configuration/yarn-site.xml

@@ -100,11 +100,6 @@
     <description>Auxilliary services of NodeManager</description>
   </property>
 
-  <property>
-    <name>yarn.nodemanager.aux-services.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
@@ -120,12 +115,6 @@
     <value></value>
   </property>
 
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
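
yarn.nodemanager.aux-services.class is not a key the NodeManager reads; the per-service form kept just below the deletion (yarn.nodemanager.aux-services.mapreduce.shuffle.class) is what wires in the shuffle handler. Likewise, the linux-container-executor group only matters when LinuxContainerExecutor is in use, and this stack defaults to DefaultContainerExecutor. A sketch of the pair YARN actually consumes, assuming this stack registers the service under the name mapreduce.shuffle (the Hadoop 2.0.x-era name):

<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce.shuffle</value>
</property>
<property>
  <!-- The segment after "aux-services." must equal the registered service name. -->
  <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>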

+ 0 - 63
ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml

@@ -45,12 +45,6 @@
                  for compression/decompression.</description>
   </property>
 
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
 <!-- file system properties -->
 
   <property>
@@ -98,14 +92,6 @@
   </description>
   </property>
 
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
     <name>ipc.client.idlethreshold</name>
@@ -205,53 +191,4 @@ DEFAULT
     </description>
   </property>
 
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
 </configuration>

+ 1 - 66
ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml

@@ -49,14 +49,6 @@
     <final>true</final>
   </property>
 
-<!--
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
--->
-
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
@@ -64,15 +56,6 @@
     <final>true</final>
   </property>
 
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
   <property>
     <name>dfs.datanode.data.dir</name>
     <value></value>
@@ -105,15 +88,6 @@
   </property>
 -->
 
-  <property>
-    <name>dfs.checksum.type</name>
-    <value>CRC32</value>
-    <description>The checksum method to be used by default. To maintain
-    compatibility, it is being set to CRC32. Once all migration steps
-    are complete, we can change it to CRC32C and take advantage of the
-    additional performance benefit.</description>
-  </property>
-
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
@@ -208,13 +182,6 @@ If the port is 0 then the server will start on a free port.
 <description>Delay for first block report in seconds.</description>
 </property>
 
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
 <property>
 <name>dfs.namenode.handler.count</name>
 <value>40</value>
@@ -237,15 +204,6 @@ The octal umask used when creating files and directories.
 </description>
 </property>
 
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
 <property>
 <name>dfs.permissions.enabled</name>
 <value>true</value>
@@ -259,7 +217,7 @@ owner or group of files or directories.
 </property>
 
 <property>
-<name>dfs.permissions.enabled.superusergroup</name>
+<name>dfs.permissions.superusergroup</name>
 <value>hdfs</value>
 <description>The name of the group of super-users.</description>
 </property>
@@ -270,10 +228,6 @@ owner or group of files or directories.
 <description>Added to grow Queue size so that more client connections are allowed</description>
 </property>
 
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
 <property>
 <name>dfs.block.access.token.enable</name>
 <value>true</value>
@@ -324,12 +278,6 @@ Kerberos principal name for the NameNode
     <description>Address of secondary namenode web server</description>
   </property>
 
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
     <value></value>
@@ -381,13 +329,6 @@ Kerberos principal name for the NameNode
     </description>
   </property>
 
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
   <property>
     <name>dfs.namenode.https-address</name>
     <value></value>
@@ -419,12 +360,6 @@ don't exist, they will be created with this permission.</description>
    <description>ACL for who all can view the default servlets in the HDFS</description>
   </property>
 
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
     <value>true</value>

+ 0 - 38
ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml

@@ -30,12 +30,6 @@
     <description>No description</description>
   </property>
 
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
     <value>0.1</value>
@@ -177,17 +171,6 @@
   </description>
   </property>
 
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
     <value>-Xmx512m</value>
@@ -227,33 +210,12 @@
     <value>60000</value>
   </property>
 
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
     <value></value>
     <description>The filename of the keytab for the task tracker</description>
   </property>
 
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
-  </property>
-
  <property>
    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->

+ 0 - 5
ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/yarn-site.xml

@@ -100,11 +100,6 @@
     <description>Auxilliary services of NodeManager</description>
   </property>
 
-  <property>
-    <name>yarn.nodemanager.aux-services.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
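
The four HDPLocal/2.0.5 files mirror their HDP/2.0.5 counterparts one-for-one, so the same Hadoop 2 replacements apply to them: transaction-based checkpointing (dfs.namenode.checkpoint.txns), domain-socket short-circuit reads, the unified MRv2 sort buffer, and the per-service aux-services key.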