HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang via aw)

Allen Wittenauer 10 years ago
parent commit 0a05ae1782

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -798,6 +798,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in
     the cluster if a matching name is not found. (stevel)
 
+    HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
+    via aw)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 0 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -30,16 +30,6 @@
   <description>version of this configuration file</description>
 </property>
 
-<property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
 <property>
   <name>dfs.namenode.rpc-address</name>
   <value></value>
@@ -154,14 +144,6 @@
   </description>
 </property>
 
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
 <property>
   <name>dfs.http.policy</name>
   <value>HTTP_ONLY</value>
@@ -1244,14 +1226,6 @@
   </description>
 </property>
 
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>
-    Does HDFS allow appends to files?
-  </description>
-</property>
-
 <property>
   <name>dfs.client.use.datanode.hostname</name>
   <value>false</value>
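
Of the three keys deleted above, only dfs.https.enable has a direct replacement: the dfs.http.policy property that follows it in this diff. dfs.support.append needs no substitute (append is always available in Hadoop 2.x, where the key was already ignored), and NameNode log levels are controlled through log4j configuration rather than hdfs-site.xml. A minimal migration sketch for a site file that previously set dfs.https.enable=true; HTTPS_ONLY and HTTP_AND_HTTPS are the documented alternatives to the HTTP_ONLY default:

<!-- hdfs-site.xml: replaces the removed dfs.https.enable=true -->
<property>
  <name>dfs.http.policy</name>
  <!-- use HTTP_AND_HTTPS instead if plain HTTP must stay reachable -->
  <value>HTTPS_ONLY</value>
</property>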

+ 0 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml

@@ -113,7 +113,6 @@
 <property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
 <property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><!--Loaded from job.xml--><name>dfs.https.enable</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>
@@ -209,7 +208,6 @@
 <property><!--Loaded from job.xml--><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
 <property><!--Loaded from job.xml--><name>io.map.index.skip</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
-<property><!--Loaded from job.xml--><name>dfs.namenode.logging.level</name><value>info</value></property>
 <property><!--Loaded from job.xml--><name>fs.s3.maxRetries</name><value>4</value></property>
 <property><!--Loaded from job.xml--><name>s3native.client-write-packet-size</name><value>65536</value></property>
 <property><!--Loaded from job.xml--><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>

+ 0 - 6
hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json

@@ -4657,7 +4657,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -4674,7 +4673,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -4783,7 +4781,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -9770,7 +9767,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -9787,7 +9783,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -9896,7 +9891,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",