
HDFS-631. Rename configuration keys towards API standardization and backward compatibility. Contributed by Jitendra Pandey.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@820533 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 16 years ago
Commit d7d239662d
100 files changed, 840 additions, 399 deletions
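
The pattern repeated across most of the files below is mechanical: construct an HdfsConfiguration instead of a bare Configuration, and look keys up through the new DFSConfigKeys constants instead of raw strings. A minimal before/after sketch of that pattern, assuming the classes introduced by this patch are on the classpath (the KeyRenamePattern class itself is illustrative, not part of the change):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only; not a file touched by this commit.
public class KeyRenamePattern {
  public static void main(String[] args) {
    // Before: plain Configuration and a raw key string (the "-" lines in the hunks below).
    Configuration before = new Configuration();
    before.setBoolean("dfs.permissions", false);

    // After: HdfsConfiguration and a DFSConfigKeys constant (the "+" lines below).
    Configuration after = new HdfsConfiguration();
    after.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

    System.out.println(after.get(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY)); // prints: false
  }
}
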
  1. 4 1
      CHANGES.txt
  2. 3 1
      src/ant/org/apache/hadoop/ant/DfsTask.java
  3. 2 2
      src/contrib/fuse-dfs/src/test/TestFuseDFS.java
  4. 4 3
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
  5. 3 1
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java
  6. 3 1
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
  7. 8 6
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
  8. 2 1
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java
  9. 3 2
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java
  10. 2 1
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java
  11. 3 1
      src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
  12. 4 3
      src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
  13. 2 1
      src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
  14. 3 1
      src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
  15. 1 1
      src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java
  16. 87 52
      src/java/hdfs-default.xml
  17. 9 6
      src/java/org/apache/hadoop/hdfs/DFSClient.java
  18. 184 0
      src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  19. 85 0
      src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  20. 3 3
      src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
  21. 2 2
      src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  22. 2 1
      src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
  23. 2 1
      src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  24. 13 9
      src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  25. 5 2
      src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
  26. 2 1
      src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
  27. 5 4
      src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  28. 10 7
      src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
  29. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
  30. 5 2
      src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
  31. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
  32. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
  33. 4 2
      src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  34. 5 4
      src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  35. 28 18
      src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  36. 5 3
      src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  37. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
  38. 11 8
      src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  39. 13 7
      src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  40. 3 2
      src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
  41. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
  42. 2 1
      src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
  43. 5 2
      src/java/org/apache/hadoop/hdfs/tools/DFSck.java
  44. 2 1
      src/test/aop/org/apache/hadoop/fi/FiConfig.java
  45. 23 21
      src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java
  46. 5 3
      src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
  47. 2 1
      src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
  48. 2 1
      src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java
  49. 2 1
      src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
  50. 3 2
      src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java
  51. 5 3
      src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
  52. 9 7
      src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java
  53. 2 1
      src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
  54. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java
  55. 3 2
      src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
  56. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
  57. 13 12
      src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
  58. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
  59. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
  60. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
  61. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  62. 2 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
  63. 6 6
      src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
  64. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  65. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
  66. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java
  67. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
  68. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
  69. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
  70. 16 16
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
  71. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
  72. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
  73. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
  74. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
  75. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  76. 4 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  77. 6 6
      src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  78. 8 8
      src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
  79. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
  80. 4 4
      src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
  81. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
  82. 41 0
      src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
  83. 8 8
      src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  84. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
  85. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  86. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
  87. 5 5
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
  88. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
  89. 3 3
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
  90. 21 21
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
  91. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
  92. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
  93. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
  94. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
  95. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
  96. 1 1
      src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
  97. 25 24
      src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
  98. 2 2
      src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java
  99. 9 7
      src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
  100. 4 4
      src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

+ 4 - 1
CHANGES.txt

@@ -75,7 +75,7 @@ Release 0.21.0 - Unreleased
 
     HDFS-567. Add block forensics contrib tool to print history of corrupt and
     missing blocks from the HDFS logs.
-    (Bill Zeller, Jithendra Pandey via suresh).
+    (Bill Zeller, Jitendra Nath Pandey via suresh).
 
     HDFS-610. Support o.a.h.fs.FileContext.  (Sanjay Radia via szetszwo)
 
@@ -110,6 +110,9 @@ Release 0.21.0 - Unreleased
 
     HDFS-642. Support pipeline close and close error recovery. (hairong)
 
+    HDFS-631. Rename configuration keys towards API standardization and
+    backward compatibility. (Jitendra Nath Pandey via suresh)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

+ 3 - 1
src/ant/org/apache/hadoop/ant/DfsTask.java

@@ -34,6 +34,8 @@ import org.apache.tools.ant.Project;
 import org.apache.tools.ant.types.Path;
 import org.apache.hadoop.util.ToolRunner;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /**
  * {@link org.apache.hadoop.fs.FsShell FsShell} wrapper for ant Task.
  */
@@ -180,7 +182,7 @@ public class DfsTask extends Task {
     try {
       pushContext();
 
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       conf.setClassLoader(confloader);
       exit_code = ToolRunner.run(conf, shell,
           argv.toArray(new String[argv.size()]));

+ 2 - 2
src/contrib/fuse-dfs/src/test/TestFuseDFS.java

@@ -113,8 +113,8 @@ public class TestFuseDFS extends TestCase {
 
   static public void startStuff() {
     try {
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions",false);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
       cluster = new MiniDFSCluster(conf, 1, true, null);
       fileSys = (DistributedFileSystem)cluster.getFileSystem();
       assertTrue(fileSys.getFileStatus(new Path("/")).isDir());

+ 4 - 3
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java

@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /**
  * A HTTPS/SSL proxy to HDFS, implementing certificate based access control.
@@ -57,7 +58,7 @@ public class HdfsProxy {
     InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
     LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());
 
-    Configuration sslConf = new Configuration(false);
+    Configuration sslConf = new HdfsConfiguration(false);
     sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
         "ssl-server.xml"));
     // unit testing
@@ -67,7 +68,7 @@ public class HdfsProxy {
     this.server = new ProxyHttpServer(sslAddr, sslConf);
     this.server.setAttribute("proxy.https.port", server.getPort());
     this.server.setAttribute("name.node.address", nnAddr);
-    this.server.setAttribute("name.conf", new Configuration());
+    this.server.setAttribute("name.conf", new HdfsConfiguration());
     this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
     this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
     this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
@@ -129,7 +130,7 @@ public class HdfsProxy {
       return null;
     }
     if (conf == null) {
-      conf = new Configuration(false);
+      conf = new HdfsConfiguration(false);
       conf.addResource("hdfsproxy-default.xml");
     }
    

+ 3 - 1
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java

@@ -48,6 +48,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 public class LdapIpDirFilter implements Filter {
   public static final Log LOG = LogFactory.getLog(LdapIpDirFilter.class);
 
@@ -89,7 +91,7 @@ public class LdapIpDirFilter implements Filter {
   /** {@inheritDoc} */
   public void init(FilterConfig filterConfig) throws ServletException {
     ServletContext context = filterConfig.getServletContext();
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("hdfsproxy-default.xml");
     conf.addResource("hdfsproxy-site.xml");
     // extract namenode from source conf.

+ 3 - 1
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java

@@ -31,6 +31,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /** {@inheritDoc} */
 public class ProxyFileDataServlet extends FileDataServlet {
   /** For java.io.Serializable */
@@ -41,7 +43,7 @@ public class ProxyFileDataServlet extends FileDataServlet {
   public void init() throws ServletException {
     ServletContext context = getServletContext();
     if (context.getAttribute("name.conf") == null) {
-      context.setAttribute("name.conf", new Configuration());
+      context.setAttribute("name.conf", new HdfsConfiguration());
     }
   }
 

+ 8 - 6
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java

@@ -50,6 +50,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.net.NetUtils;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 public class ProxyFilter implements Filter {
   public static final Log LOG = LogFactory.getLog(ProxyFilter.class);
 
@@ -73,7 +75,7 @@ public class ProxyFilter implements Filter {
   private static volatile Map<String, Set<Path>> permsMap;
   private static volatile Map<String, Set<BigInteger>> certsMap;
   static {
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("hdfsproxy-default.xml");
     Map<String, Set<Path>> pMap = getPermMap(conf);
     permsMap = pMap != null ? pMap : new HashMap<String, Set<Path>>();
@@ -85,7 +87,7 @@ public class ProxyFilter implements Filter {
   /** {@inheritDoc} */
   public void init(FilterConfig filterConfig) throws ServletException {
     ServletContext context = filterConfig.getServletContext();
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("hdfsproxy-default.xml");
     conf.addResource("ssl-server.xml");
     conf.addResource("hdfsproxy-site.xml");
@@ -95,7 +97,7 @@ public class ProxyFilter implements Filter {
     }
     InetSocketAddress nAddr = NetUtils.createSocketAddr(nn);
     context.setAttribute("name.node.address", nAddr);
-    context.setAttribute("name.conf", new Configuration());   
+    context.setAttribute("name.conf", new HdfsConfiguration());   
     
     context.setAttribute("org.apache.hadoop.hdfsproxy.conf", conf);
     LOG.info("proxyFilter initialization success: " + nn);
@@ -108,7 +110,7 @@ public class ProxyFilter implements Filter {
       LOG.warn("HdfsProxy user permissions file not found");
       return null;
     }
-    Configuration permConf = new Configuration(false);
+    Configuration permConf = new HdfsConfiguration(false);
     permConf.addResource(permLoc);
     Map<String, Set<Path>> map = new HashMap<String, Set<Path>>();
     for (Map.Entry<String, String> e : permConf) {
@@ -135,7 +137,7 @@ public class ProxyFilter implements Filter {
       LOG.warn("HdfsProxy user certs file not found");
       return null;
     }
-    Configuration certsConf = new Configuration(false);
+    Configuration certsConf = new HdfsConfiguration(false);
     certsConf.addResource(certsLoc);
     Map<String, Set<BigInteger>> map = new HashMap<String, Set<BigInteger>>();
     for (Map.Entry<String, String> e : certsConf) {
@@ -284,7 +286,7 @@ public class ProxyFilter implements Filter {
         }
       } else if (RELOAD_PATTERN.matcher(servletPath).matches()
           && checkUser("Admin", certs[0])) {
-        Configuration conf = new Configuration(false);
+        Configuration conf = new HdfsConfiguration(false);
         conf.addResource("hdfsproxy-default.xml");
         Map<String, Set<Path>> permsMap = getPermMap(conf);
         Map<String, Set<BigInteger>> certsMap = getCertsMap(conf);

+ 2 - 1
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java

@@ -22,6 +22,7 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
@@ -35,7 +36,7 @@ public class ProxyListPathsServlet extends ListPathsServlet {
   public void init() throws ServletException {
     ServletContext context = getServletContext();
     if (context.getAttribute("name.conf") == null) {
-      context.setAttribute("name.conf", new Configuration());
+      context.setAttribute("name.conf", new HdfsConfiguration());
     }
   }
 

+ 3 - 2
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java

@@ -26,6 +26,7 @@ import javax.servlet.http.HttpServletRequest;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
@@ -39,7 +40,7 @@ public class ProxyStreamFile extends StreamFile {
   public void init() throws ServletException {
     ServletContext context = getServletContext();
     if (context.getAttribute("name.conf") == null) {
-      context.setAttribute("name.conf", new Configuration());
+      context.setAttribute("name.conf", new HdfsConfiguration());
     }
   }
 
@@ -48,7 +49,7 @@ public class ProxyStreamFile extends StreamFile {
   protected DFSClient getDFSClient(HttpServletRequest request)
       throws IOException {
     ServletContext context = getServletContext();
-    Configuration conf = new Configuration((Configuration) context
+    Configuration conf = new HdfsConfiguration((Configuration) context
         .getAttribute("name.conf"));
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));

+ 2 - 1
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java

@@ -25,6 +25,7 @@ import java.util.Map;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.Shell;
 
@@ -37,7 +38,7 @@ public class ProxyUgiManager {
   static final int CLEANUP_THRESHOLD = 1000;
 
   static {
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("hdfsproxy-default.xml");
     ugiLifetime = conf.getLong("hdfsproxy.ugi.cache.ugi.lifetime", 15) * 60 * 1000L;
   }

+ 3 - 1
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java

@@ -51,6 +51,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /**
  * Proxy Utility .
  */
@@ -312,7 +314,7 @@ public class ProxyUtil {
           + UtilityOption.CHECKCERTS.getName() + " <hostname> <#port> ]");
       System.exit(0);
     }
-    Configuration conf = new Configuration(false);
+    Configuration conf = new HdfsConfiguration(false);
     conf.addResource("ssl-client.xml");
     conf.addResource("hdfsproxy-default.xml");
 

+ 4 - 3
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -121,7 +122,7 @@ public class TestHdfsProxy extends TestCase {
 
   private static MyFile[] createFiles(URI fsname, String topdir)
       throws IOException {
-    return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
+    return createFiles(FileSystem.get(fsname, new HdfsConfiguration()), topdir);
   }
 
   /**
@@ -203,13 +204,13 @@ public class TestHdfsProxy extends TestCase {
     HdfsProxy proxy = null;
     try {
 
-      final Configuration dfsConf = new Configuration();
+      final Configuration dfsConf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(dfsConf, 2, true, null);
       cluster.waitActive();
 
       final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
       final FileSystem hdfs = cluster.getFileSystem();
-      final Configuration proxyConf = new Configuration(false);
+      final Configuration proxyConf = new HdfsConfiguration(false);
       proxyConf.set("hdfsproxy.dfs.namenode.address", hdfs.getUri().getHost() + ":"
           + hdfs.getUri().getPort());
       proxyConf.set("hdfsproxy.https.address", "localhost:0");

+ 2 - 1
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfsproxy;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /** Unit tests for ProxyUtil */
 public class TestProxyUtil extends TestCase {
@@ -30,7 +31,7 @@ public class TestProxyUtil extends TestCase {
 
   public void testSendCommand() throws Exception {
       
-    Configuration conf = new Configuration(false);  
+    Configuration conf = new HdfsConfiguration(false);  
     conf.addResource("ssl-client.xml");
     conf.addResource("hdfsproxy-default.xml");
     String address = "localhost:" + TEST_PROXY_HTTPS_PORT;

+ 3 - 1
src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java

@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
 /**
  * ThriftHadoopFileSystem
  * A thrift wrapper around the Hadoop File System
@@ -122,7 +124,7 @@ public class HadoopThriftServer extends ThriftHadoopFileSystem {
      * @param name - the name of this handler
      */
     public HadoopThriftHandler(String name) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       now = now();
       try {
         inactivityThread = new Daemon(new InactivityMonitor());

+ 1 - 1
src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java

@@ -35,7 +35,7 @@ public class TestThriftfs extends TestCase
 
   public void testServer() throws IOException
   {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
     cluster.waitActive();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

+ 87 - 52
src/java/hdfs-default.xml

@@ -7,6 +7,12 @@
 
 <configuration>
 
+<property>
+  <name>hadoop.hdfs.configuration.version</name>
+  <value>1</value>
+  <description>version of this configuration file</description>
+</property>
+
 <property>
   <name>dfs.namenode.logging.level</name>
   <value>info</value>
@@ -16,7 +22,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.secondary.http.address</name>
+  <name>dfs.namenode.secondary.http-address</name>
   <value>0.0.0.0:50090</value>
   <description>
     The secondary namenode http server address and port.
@@ -58,7 +64,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.http.address</name>
+  <name>dfs.namenode.http-address</name>
   <value>0.0.0.0:50070</value>
   <description>
     The address and the base port where the dfs namenode web ui will listen on.
@@ -74,7 +80,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.https.need.client.auth</name>
+  <name>dfs.client.https.need-auth</name>
   <value>false</value>
   <description>Whether SSL client certificate authentication is required
   </description>
@@ -89,7 +95,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.https.client.keystore.resource</name>
+  <name>dfs.client.https.keystore.resource</name>
   <value>ssl-client.xml</value>
   <description>Resource file from which ssl client keystore
   information will be extracted
@@ -102,7 +108,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.https.address</name>
+  <name>dfs.namenode.https-address</name>
   <value>0.0.0.0:50470</value>
 </property>
 
@@ -124,7 +130,7 @@ creations/deletions), or "all".</description>
  </property>
  
  <property>
-  <name>dfs.backup.address</name>
+  <name>dfs.namenode.backup.address</name>
   <value>0.0.0.0:50100</value>
   <description>
     The backup node server address and port.
@@ -133,7 +139,7 @@ creations/deletions), or "all".</description>
 </property>
  
  <property>
-  <name>dfs.backup.http.address</name>
+  <name>dfs.namenode.backup.http-address</name>
   <value>0.0.0.0:50105</value>
   <description>
     The backup node http server address and port.
@@ -142,7 +148,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.replication.considerLoad</name>
+  <name>dfs.namenode.replication.considerLoad</name>
   <value>true</value>
   <description>Decide if chooseTarget considers the target's load or not
   </description>
@@ -162,7 +168,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.name.dir</name>
+  <name>dfs.namenode.name.dir</name>
   <value>${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
       should store the name table(fsimage).  If this is a comma-delimited list
@@ -171,8 +177,8 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.name.edits.dir</name>
-  <value>${dfs.name.dir}</value>
+  <name>dfs.namenode.edits.dir</name>
+  <value>${dfs.namenode.name.dir}</value>
   <description>Determines where on the local filesystem the DFS name node
       should store the transaction (edits) file. If this is a comma-delimited list
       of directories then the transaction file is replicated in all of the 
@@ -188,7 +194,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.permissions</name>
+  <name>dfs.permissions.enabled</name>
   <value>true</value>
   <description>
     If "true", enable permission checking in HDFS.
@@ -200,36 +206,13 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.permissions.supergroup</name>
+  <name>dfs.permissions.superusergroup</name>
   <value>supergroup</value>
   <description>The name of the group of super-users.</description>
 </property>
 
 <property>
-  <name>dfs.access.token.enable</name>
-  <value>false</value>
-  <description>
-    If "true", access tokens are used as capabilities for accessing datanodes.
-    If "false", no access tokens are checked on accessing datanodes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.access.key.update.interval</name>
-  <value>600</value>
-  <description>
-    Interval in minutes at which namenode updates its access keys.
-  </description>
-</property>
-
-<property>
-  <name>dfs.access.token.lifetime</name>
-  <value>600</value>
-  <description>The lifetime of access tokens in minutes.</description>
-</property>
-
-<property>
-  <name>dfs.data.dir</name>
+  <name>dfs.datanode.data.dir</name>
   <value>${hadoop.tmp.dir}/dfs/data</value>
   <description>Determines where on the local filesystem an DFS data node
   should store its blocks.  If this is a comma-delimited
@@ -256,24 +239,18 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.replication.min</name>
+  <name>dfs.namenode.replication.min</name>
   <value>1</value>
   <description>Minimal block replication. 
   </description>
 </property>
 
 <property>
-  <name>dfs.block.size</name>
+  <name>dfs.blocksize</name>
   <value>67108864</value>
   <description>The default block size for new files.</description>
 </property>
 
-<property>
-  <name>dfs.df.interval</name>
-  <value>60000</value>
-  <description>Disk usage statistics refresh interval in msec.</description>
-</property>
-
 <property>
   <name>dfs.client.block.write.retries</name>
   <value>3</value>
@@ -314,18 +291,18 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.safemode.threshold.pct</name>
+  <name>dfs.namenode.safemode.threshold-pct</name>
   <value>0.999f</value>
   <description>
     Specifies the percentage of blocks that should satisfy 
-    the minimal replication requirement defined by dfs.replication.min.
+    the minimal replication requirement defined by dfs.namenode.replication.min.
     Values less than or equal to 0 mean not to start in safe mode.
     Values greater than 1 will make safe mode permanent.
   </description>
 </property>
 
 <property>
-  <name>dfs.safemode.extension</name>
+  <name>dfs.namenode.safemode.extension</name>
   <value>30000</value>
   <description>
     Determines extension of safe mode in milliseconds 
@@ -334,7 +311,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.balance.bandwidthPerSec</name>
+  <name>dfs.datanode.balance.bandwidthPerSec</name>
   <value>1048576</value>
   <description>
         Specifies the maximum amount of bandwidth that each datanode
@@ -362,7 +339,7 @@ creations/deletions), or "all".</description>
 </property> 
 
 <property>
-  <name>dfs.max.objects</name>
+  <name>dfs.namenode.max.objects</name>
   <value>0</value>
   <description>The maximum number of files, directories and blocks
   dfs supports. A value of zero indicates no limit to the number
@@ -385,14 +362,14 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.replication.interval</name>
+  <name>dfs.namenode.replication.interval</name>
   <value>3</value>
   <description>The periodicity in seconds with which the namenode computes 
   repliaction work for datanodes. </description>
 </property>
 
 <property>
-  <name>dfs.access.time.precision</name>
+  <name>dfs.namenode.accesstime.precision</name>
   <value>3600000</value>
   <description>The access time for HDFS file is precise upto this value. 
                The default value is 1 hour. Setting a value of 0 disables
@@ -423,4 +400,62 @@ creations/deletions), or "all".</description>
   </description>
 </property>
 
+<property>
+  <name>dfs.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>dfs.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  dfs.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>dfs.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>dfs.namenode.checkpoint.dir</name>
+  <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
+  <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.checkpoint.edits.dir</name>
+  <value>${dfs.namenode.checkpoint.dir}</value>
+  <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directoires then teh edits is
+      replicated in all of the directoires for redundancy.
+      Default value is same as fs.checkpoint.dir
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.checkpoint.period</name>
+  <value>3600</value>
+  <description>The number of seconds between two periodic checkpoints.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.checkpoint.size</name>
+  <value>67108864</value>
+  <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+</property>
+
 </configuration>

+ 9 - 6
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -248,13 +248,14 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     throws IOException {
     this.conf = conf;
     this.stats = stats;
-    this.socketTimeout = conf.getInt("dfs.socket.timeout", 
+    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 
                                      HdfsConstants.READ_TIMEOUT);
     this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
                                             HdfsConstants.WRITE_TIMEOUT);
     this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
     // dfs.write.packet.size is an internal config variable
-    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
+    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
+		                       DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
     this.maxBlockAcquireFailures = 
                           conf.getInt("dfs.client.max.block.acquire.failures",
                                       MAX_BLOCK_ACQUIRE_FAILURES);
@@ -273,7 +274,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     } else {
       this.clientName = "DFSClient_" + r.nextInt();
     }
-    defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    defaultBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     defaultReplication = (short) conf.getInt("dfs.replication", 3);
 
     if (nameNodeAddr != null && rpcNamenode == null) {
@@ -569,7 +570,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     LOG.debug(src + ": masked=" + masked);
     OutputStream result = new DFSOutputStream(src, masked,
         flag, createParent, replication, blockSize, progress, buffersize,
-        conf.getInt("io.bytes.per.checksum", 512));
+        conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
+                    DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
     leasechecker.put(src, result);
     return result;
   }
@@ -628,7 +630,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
                                      DSQuotaExceededException.class);
     }
     OutputStream result = new DFSOutputStream(src, buffersize, progress,
-        lastBlock, stat, conf.getInt("io.bytes.per.checksum", 512));
+        lastBlock, stat, conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
+                                     DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
     leasechecker.put(src, result);
     return result;
   }
@@ -1635,7 +1638,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       this.verifyChecksum = verifyChecksum;
       this.buffersize = buffersize;
       this.src = src;
-      prefetchSize = conf.getLong("dfs.read.prefetch.size", prefetchSize);
+      prefetchSize = conf.getLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, prefetchSize);
       openInfo();
     }
 

+ 184 - 0
src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+/** 
+ * This class contains constants for configuration keys used
+ * in hdfs.
+ *
+ */
+
+public class DFSConfigKeys extends CommonConfigurationKeys {
+
+  public static final String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
+  public static final long    DFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
+  public static final String  DFS_REPLICATION_KEY = "dfs.replication";
+  public static final short   DFS_REPLICATION_DEFAULT = 3;
+  public static final String  DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
+  public static final int     DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
+  public static final String  DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
+  public static final int     DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  
+  public static final String  DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
+  public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
+  public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
+  public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
+  public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
+  public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+  public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
+  public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
+  public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
+  public static final int     DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT = 30000;
+  public static final String  DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY = "dfs.namenode.safemode.threshold-pct";
+  public static final float   DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.999f;
+  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
+  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+  public static final String  DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
+  public static final long    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
+  public static final String  DFS_NAMENODE_CHECKPOINT_SIZE_KEY = "dfs.namenode.checkpoint.size";
+  public static final long    DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT = 4194304;
+  public static final String  DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
+  public static final int     DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
+  public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
+  public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
+  public static final String  DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
+  public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
+  public static final String  DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = "dfs.namenode.accesstime.precision";
+  public static final long    DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = "dfs.namenode.replication.considerLoad";
+  public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+  public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY = "dfs.namenode.replication.interval";
+  public static final int     DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
+  public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
+  public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
+  public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+  public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
+  public static final int     DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
+  public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
+  public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
+  public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
+  public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
+  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
+  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
+  public static final String  DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
+  public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
+
+  //Following keys have no defaults
+  public static final String  DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
+  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50470";
+  public static final String  DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir";
+  public static final String  DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
+  public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
+  public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
+  public static final String  DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
+  public static final String  DFS_DATANODE_STORAGEID_KEY = "dfs.datanode.StorageId";
+  public static final String  DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
+  public static final String  DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
+  public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+  public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
+  public static final String  DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
+
+  //Code in hdfs is not updated to use these keys.
+  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+  public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
+  public static final String  DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
+  public static final int     DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+  public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
+  public static final int     DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000;
+  public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
+  public static final String  DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:50010";
+  public static final String  DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY = "dfs.datanode.directoryscan.interval";
+  public static final int     DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT = 21600;
+  public static final String  DFS_DATANODE_DNS_INTERFACE_KEY = "dfs.datanode.dns.interface";
+  public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
+  public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
+  public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
+  public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
+  public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 3;
+  public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
+  public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50075";
+  public static final String  DFS_DATANODE_MAX_XCIEVERS_KEY = "dfs.datanode.max.xcievers";
+  public static final int     DFS_DATANODE_MAX_XCIEVERS_DEFAULT = 256;
+  public static final String  DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
+  public static final int     DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
+  public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
+  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_KEY = "dfs.datanode.simulateddatastorage";
+  public static final boolean DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT = false;
+  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY = "dfs.datanode.simulateddatastorage.capacity";
+  public static final long    DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT = 2L<<40;
+  public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
+  public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
+  public static final String  DFS_HEARTBEAT_INTERVAL_KEY = "dfs.heartbeat.interval";
+  public static final long    DFS_HEARTBEAT_INTERVAL_DEFAULT = 3;
+  public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
+  public static final String  DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+  public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
+  public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+  public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
+  public static final boolean DFS_SUPPORT_APPEND_DEFAULT = false;
+  public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
+  public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
+  public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
+  public static final int     DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
+  public static final String  DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
+  public static final String  DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50475";
+  public static final String  DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
+  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:50020";
+
+  public static final String  DFS_ACCESS_TOKEN_ENABLE_KEY = "dfs.access.token.enable";
+  public static final boolean DFS_ACCESS_TOKEN_ENABLE_DEFAULT = false;
+  public static final String  DFS_ACCESS_KEY_UPDATE_INTERVAL_KEY = "dfs.access.key.update.interval";
+  public static final int     DFS_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT = 600;
+  public static final String  DFS_ACCESS_TOKEN_LIFETIME_KEY = "dfs.access.token.lifetime";
+  public static final int     DFS_ACCESS_TOKEN_LIFETIME_DEFAULT = 600;
+
+  public static final String  DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
+  public static final int     DFS_REPLICATION_MAX_DEFAULT = 512;
+  public static final String  DFS_DF_INTERVAL_KEY = "dfs.df.interval";
+  public static final int     DFS_DF_INTERVAL_DEFAULT = 60000;
+  public static final String  DFS_BLOCKREPORT_INTERVAL_MSEC_KEY = "dfs.blockreport.intervalMsec";
+  public static final long    DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
+  public static final String  DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
+  public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
+
+  //Keys with no defaults
+  public static final String  DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
+  public static final String  DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
+  public static final String  DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
+  public static final String  DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
+  public static final String  DFS_WEB_UGI_KEY = "dfs.web.ugi";
+  public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
+}
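
Each constant above pairs a new key name with a compile-time default, so call sites can drop their magic numbers along with their string literals. A small hedged example of how code in this patch consumes such a pair (mirroring the DFSClient hunks earlier; the class name here is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only; not a file touched by this commit.
public class DfsConfigKeysUsage {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // Key/default pairs replace the old string-plus-literal pattern.
    int bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
                                       DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    int writePacketSize  = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
                                       DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    System.out.println("bytes per checksum = " + bytesPerChecksum);  // 512 unless overridden
    System.out.println("write packet size  = " + writePacketSize);   // 65536 unless overridden
  }
}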

+ 85 - 0
src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Adds deprecated keys into the configuration.
+ */
+public class HdfsConfiguration extends Configuration {
+  static {
+    addDeprecatedKeys();
+  }
+
+  public HdfsConfiguration() {
+    super();
+  }
+
+  public HdfsConfiguration(boolean loadDefaults) {
+    super(loadDefaults);
+  }
+
+  public HdfsConfiguration(Configuration conf) {
+    super(conf);
+  }
+
+  private static void deprecate(String oldKey, String newKey) {
+    Configuration.addDeprecation(oldKey, new String[]{newKey});
+  }
+
+  private static void addDeprecatedKeys() {
+    deprecate("dfs.backup.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    deprecate("dfs.backup.http.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY);
+    deprecate("dfs.balance.bandwidthPerSec", DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY);
+    deprecate("dfs.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+    deprecate("dfs.http.address", DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    deprecate("dfs.https.address", DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    deprecate("dfs.max.objects", DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY);
+    deprecate("dfs.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+    deprecate("dfs.name.dir.restore", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY);
+    deprecate("dfs.name.edits.dir", DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+    deprecate("dfs.read.prefetch.size", DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY);
+    deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
+    deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
+    deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
+    deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
+    deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
+    deprecate("fs.checkpoint.size", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY);
+    deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
+    deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
+    deprecate("StorageId", DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY);
+    deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
+    deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
+    deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
+    deprecate("session.id", DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    deprecate("dfs.access.time.precision", DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY);
+    deprecate("dfs.replication.considerLoad", DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY);
+    deprecate("dfs.replication.interval", DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY);
+    deprecate("dfs.replication.min", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY);
+    deprecate("dfs.replication.pending.timeout.sec", DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY);
+    deprecate("dfs.max-repl-streams", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
+    deprecate("dfs.permissions", DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY);
+    deprecate("dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
+    deprecate("dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY);
+    deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
+  }
+}
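
The static deprecation table above is what provides the "backward compatibility" half of this change: configurations written against the old key names keep working once they are loaded through HdfsConfiguration. A hedged sketch of the expected behaviour (the DeprecatedKeyDemo class is illustrative; TestDeprecatedKeys.java, added by this commit, is the authoritative test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only; not a file touched by this commit.
public class DeprecatedKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // A value set under the pre-rename name...
    conf.set("dfs.permissions", "false");

    // ...should be visible under the new name, because the static block above
    // registered "dfs.permissions" as deprecated in favour of
    // DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY ("dfs.permissions.enabled").
    boolean enabled = conf.getBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
                                      DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
    System.out.println("dfs.permissions.enabled = " + enabled);  // expected: false
  }
}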

+ 3 - 3
src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java

@@ -65,9 +65,9 @@ public class HsftpFileSystem extends HftpFileSystem {
    * @throws IOException
    */
   private static void setupSsl(Configuration conf) throws IOException {
-    Configuration sslConf = new Configuration(false);
-    sslConf.addResource(conf.get("dfs.https.client.keystore.resource",
-        "ssl-client.xml"));
+    Configuration sslConf = new HdfsConfiguration(false);
+    sslConf.addResource(conf.get(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+                             DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
     FileInputStream fis = null;
     try {
       SSLContext sc = SSLContext.getInstance("SSL");

+ 2 - 2
src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -366,7 +366,7 @@ public interface ClientProtocol extends VersionedProtocol {
    * percentage called threshold of blocks, which satisfy the minimal 
    * replication condition.
    * The minimal replication condition is that each block must have at least
-   * <tt>dfs.replication.min</tt> replicas.
+   * <tt>dfs.namenode.replication.min</tt> replicas.
    * When the threshold is reached the name node extends safe mode
    * for a configurable amount of time
    * to let the remaining data nodes to check in before it
@@ -382,7 +382,7 @@ public interface ClientProtocol extends VersionedProtocol {
    * <h4>Configuration parameters:</h4>
    * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
    * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
-   * <tt>dfs.replication.min</tt> is the minimal replication parameter.
+   * <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
    * 
    * <h4>Special cases:</h4>
    * The name node does not enter safe mode at startup if the threshold is 

+ 2 - 1
src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /************************************
  * Some handy constants
@@ -48,7 +49,7 @@ public interface FSConstants {
   public static int MAX_PATH_LENGTH = 8000;
   public static int MAX_PATH_DEPTH = 1000;
     
-  public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096);
+  public static final int BUFFER_SIZE = new HdfsConfiguration().getInt("io.file.buffer.size", 4096);
   //Used for writing header etc.
   public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
   //TODO mb@media-style.com: should be conf injected?

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -36,6 +36,7 @@ import javax.servlet.jsp.JspWriter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
@@ -47,7 +48,7 @@ import org.apache.hadoop.util.VersionInfo;
 public class JspHelper {
   final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
 
-  public static final Configuration conf = new Configuration();
+  public static final Configuration conf = new HdfsConfiguration();
   public static final UnixUserGroupInformation webUGI
   = UnixUserGroupInformation.createImmutable(
       conf.getStrings(WEB_UGI_PROPERTY_NAME));

+ 13 - 9
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -80,6 +80,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
@@ -251,8 +253,8 @@ public class DataNode extends Configured
                      AbstractList<File> dataDirs
                      ) throws IOException {
     // use configured nameserver & interface to get local hostname
-    if (conf.get("slave.host.name") != null) {
-      machineName = conf.get("slave.host.name");   
+    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
+      machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);   
     }
     if (machineName == null) {
       machineName = DNS.getDefaultHost(
@@ -261,7 +263,7 @@ public class DataNode extends Configured
     }
     this.nameNodeAddr = NameNode.getAddress(conf);
     
-    this.socketTimeout =  conf.getInt("dfs.socket.timeout",
+    this.socketTimeout =  conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
                                       HdfsConstants.READ_TIMEOUT);
     this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
                                           HdfsConstants.WRITE_TIMEOUT);
@@ -269,7 +271,8 @@ public class DataNode extends Configured
      * to false on some of them. */
     this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", 
                                              true);
-    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
+    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
+                                       DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
     InetSocketAddress socAddr = NetUtils.createSocketAddr(
         conf.get("dfs.datanode.address", "0.0.0.0:50010"));
     int tmpPort = socAddr.getPort();
@@ -296,7 +299,7 @@ public class DataNode extends Configured
         dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
         // it would have been better to pass storage as a parameter to
         // constructor below - need to augment ReflectionUtils used below.
-        conf.set("StorageId", dnRegistration.getStorageID());
+        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
         try {
           //Equivalent of following (can't do because Simulated is in test dir)
           //  this.data = new SimulatedFSDataset(conf);
@@ -365,10 +368,11 @@ public class DataNode extends Configured
     this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort,
         tmpInfoPort == 0, conf);
     if (conf.getBoolean("dfs.https.enable", false)) {
-      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+      boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                                               DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
           "dfs.datanode.https.address", infoHost + ":" + 0));
-      Configuration sslConf = new Configuration(false);
+      Configuration sslConf = new HdfsConfiguration(false);
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
       this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
@@ -1345,7 +1349,7 @@ public class DataNode extends Configured
   public static DataNode instantiateDataNode(String args[],
                                       Configuration conf) throws IOException {
     if (conf == null)
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     
     if (args != null) {
       // parse generic hadoop options
@@ -1362,7 +1366,7 @@ public class DataNode extends Configured
           " anymore. RackID resolution is handled by the NameNode.");
       System.exit(-1);
     }
-    String[] dataDirs = conf.getStrings("dfs.data.dir");
+    String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
     dnThreadName = "DataNode: [" +
                         StringUtils.arrayToString(dataDirs) + "]";
     return makeInstance(dataDirs, conf);
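
For reference, the datanode-side keys touched above can also be set programmatically; a hedged sketch reusing the imports from the earlier snippet (host name and paths are placeholders, not recommendations):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "dn1.example.com");        // was slave.host.name
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/data/1/dfs,/data/2/dfs"); // was dfs.data.dir
    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
                DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);              // was dfs.write.packet.size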

+ 5 - 2
src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java

@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.balancer.Balancer;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
 
 /**
  * Server used for receiving/sending a block of data.
@@ -115,11 +117,12 @@ class DataXceiverServer implements Runnable, FSConstants {
     this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers",
         MAX_XCEIVER_COUNT);
     
-    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     
     //set up parameter for cluster balancing
     this.balanceThrottler = new BlockBalanceThrottler(
-      conf.getLong("dfs.balance.bandwidthPerSec", 1024L*1024));
+      conf.getLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 
+                   DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT));
   }
 
   /**

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 
 /**
@@ -91,7 +92,7 @@ public class DataNodeMetrics implements Updater {
 
     
   public DataNodeMetrics(Configuration conf, String datanodeName) {
-    String sessionId = conf.get("session.id"); 
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); 
     // Initiate reporting of Java VM metrics
     JvmMetrics.init("DataNode", sessionId);
     

+ 5 - 4
src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.DNS;
@@ -52,10 +53,10 @@ import org.apache.hadoop.util.Daemon;
  * </ol>
  */
 public class BackupNode extends NameNode {
-  private static final String BN_ADDRESS_NAME_KEY = "dfs.backup.address";
-  private static final String BN_ADDRESS_DEFAULT = "localhost:50100";
-  private static final String BN_HTTP_ADDRESS_NAME_KEY = "dfs.backup.http.address";
-  private static final String BN_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+  private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
+  private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
+  private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
 
   /** Name-node proxy */
   NamenodeProtocol namenode;

+ 10 - 7
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
 import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
@@ -128,8 +129,8 @@ public class BlockManager {
       throws IOException {
     namesystem = fsn;
     pendingReplications = new PendingReplicationBlocks(
-        conf.getInt("dfs.replication.pending.timeout.sec",
-                    -1) * 1000L);
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
     setConfigurationParameters(conf);
     blocksMap = new BlocksMap(capacity, DEFAULT_MAP_LOAD_FACTOR);
   }
@@ -142,10 +143,11 @@ public class BlockManager {
 
     this.defaultReplication = conf.getInt("dfs.replication", 3);
     this.maxReplication = conf.getInt("dfs.replication.max", 512);
-    this.minReplication = conf.getInt("dfs.replication.min", 1);
+    this.minReplication = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+                                      DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
     if (minReplication <= 0)
       throw new IOException(
-                            "Unexpected configuration parameters: dfs.replication.min = "
+                            "Unexpected configuration parameters: dfs.namenode.replication.min = "
                             + minReplication
                             + " must be greater than 0");
     if (maxReplication >= (int)Short.MAX_VALUE)
@@ -154,12 +156,13 @@ public class BlockManager {
                             + maxReplication + " must be less than " + (Short.MAX_VALUE));
     if (maxReplication < minReplication)
       throw new IOException(
-                            "Unexpected configuration parameters: dfs.replication.min = "
+                            "Unexpected configuration parameters: dfs.namenode.replication.min = "
                             + minReplication
                             + " must be less than dfs.replication.max = "
                             + maxReplication);
-    this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2);
-    this.shouldCheckForEnoughRacks = conf.get("topology.script.file.name") == null ? false
+    this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
+                                             DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
+    this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ? false
                                                                              : true;
     FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
     FSNamesystem.LOG.info("maxReplication = " + maxReplication);
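
The checks above require 0 < dfs.namenode.replication.min <= dfs.replication.max < Short.MAX_VALUE. An illustrative configuration that satisfies them (a sketch only, using the same imports as before):

    Configuration conf = new HdfsConfiguration();
    conf.setInt("dfs.replication", 3);                                      // key unchanged by this patch
    conf.setInt("dfs.replication.max", 512);                                // key unchanged by this patch
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 1);         // was dfs.replication.min
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2); // was dfs.max-repl-streams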

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import java.util.*;
 
 /** The class is responsible for choosing the desired number of targets
@@ -52,7 +53,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   /** {@inheritDoc} */
   public void initialize(Configuration conf,  FSClusterStats stats,
                          NetworkTopology clusterMap) {
-    this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true);
+    this.considerLoad = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
     this.stats = stats;
     this.clusterMap = clusterMap;
   }

+ 5 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer;
 
@@ -86,8 +87,10 @@ class Checkpointer implements Runnable {
     shouldRun = true;
 
     // Initialize other scheduling parameters from the configuration
-    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
-    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
+    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
+                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     HttpServer httpServer = backupNode.httpServer;
     httpServer.setAttribute("name.system.image", getFSImage());
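
A short sketch of tuning the renamed checkpoint scheduling keys read above (values are examples only; the defaults shown in the constants mirror the old 3600/4194304):

    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1800);          // was fs.checkpoint.period
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 8L * 1024 * 1024); // was fs.checkpoint.size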

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -66,7 +67,7 @@ abstract class DfsServlet extends HttpServlet {
       ) throws IOException {
     ServletContext context = getServletContext();
     InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
-    Configuration conf = new Configuration(
+    Configuration conf = new HdfsConfiguration(
         (Configuration)context.getAttribute("name.conf"));
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java

@@ -23,6 +23,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -79,7 +80,7 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
     try {
       this.backupNode =
         (NamenodeProtocol) RPC.getProxy(NamenodeProtocol.class,
-            NamenodeProtocol.versionID, bnAddress, new Configuration());
+            NamenodeProtocol.versionID, bnAddress, new HdfsConfiguration());
     } catch(IOException e) {
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
       throw e;

+ 4 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
@@ -62,7 +63,8 @@ class FSDirectory implements Closeable {
   /** Access an existing dfs name directory. */
   FSDirectory(FSNamesystem ns, Configuration conf) {
     this(new FSImage(), ns, conf);
-    if(conf.getBoolean("dfs.name.dir.restore", false)) {
+    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, 
+                       DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
       NameNode.LOG.info("set FSImage.restoreFailedStorage");
       fsImage.setRestoreFailedStorage(true);
     }
@@ -90,7 +92,7 @@ class FSDirectory implements Closeable {
   private void initialize(Configuration conf) {
     MetricsContext metricsContext = MetricsUtil.getContext("dfs");
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
-    directoryMetrics.setTag("sessionId", conf.get("session.id"));
+    directoryMetrics.setTag("sessionId", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
   }
 
   void loadFSImage(Collection<URI> dataDirs,

+ 5 - 4
src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Writable;
 
 /**
@@ -373,12 +374,12 @@ public class FSImage extends Storage {
     if(startOpt == StartupOption.IMPORT 
         && (checkpointDirs == null || checkpointDirs.isEmpty()))
       throw new IOException("Cannot import image from a checkpoint. "
-                          + "\"fs.checkpoint.dir\" is not set." );
+                          + "\"dfs.namenode.checkpoint.dir\" is not set." );
 
     if(startOpt == StartupOption.IMPORT 
         && (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()))
       throw new IOException("Cannot import image from a checkpoint. "
-                          + "\"fs.checkpoint.edits.dir\" is not set." );
+                          + "\"dfs.namenode.checkpoint.edits.dir\" is not set." );
     
     setStorageDirectories(dataDirs, editsDirs);
     // 1. For each data directory calculate its state and 
@@ -1901,7 +1902,7 @@ public class FSImage extends Storage {
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
-    Collection<String> dirNames = conf.getStringCollection("fs.checkpoint.dir");
+    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
@@ -1927,7 +1928,7 @@ public class FSImage extends Storage {
   static Collection<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
     Collection<String> dirNames = 
-      conf.getStringCollection("fs.checkpoint.edits.dir");
+      conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }
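
Both checkpoint directory keys are read with getStringCollection, so they accept a comma-separated list; a hedged example (paths are placeholders):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
             "/var/hdfs/checkpoint1,/var/hdfs/checkpoint2");   // was fs.checkpoint.dir
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
             "/var/hdfs/checkpoint.edits");                    // was fs.checkpoint.edits.dir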

+ 28 - 18
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -53,6 +53,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -291,7 +293,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     dnthread.start();
 
     this.dnsToSwitchMapping = ReflectionUtils.newInstance(
-        conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
+        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
+                      ScriptBasedMapping.class,
             DNSToSwitchMapping.class), conf);
     
    /* If the dns to switch mapping supports cache, resolve network 
@@ -305,7 +308,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
   }
 
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
-    return getStorageDirs(conf, "dfs.name.dir");
+    return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
   }
 
   public static Collection<URI> getStorageDirs(Configuration conf,
@@ -317,7 +320,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
       // but will retain directories specified in hdfs-site.xml
       // When importing image from a checkpoint, the name-node can
       // start with empty set of storage directories.
-      Configuration cE = new Configuration(false);
+      Configuration cE = new HdfsConfiguration(false);
       cE.addResource("core-default.xml");
       cE.addResource("core-site.xml");
       cE.addResource("hdfs-default.xml");
@@ -356,7 +359,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
   }
 
   public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
-    return getStorageDirs(conf, "dfs.name.edits.dir");
+    return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
   }
 
   /**
@@ -400,31 +403,37 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     }
     LOG.info("fsOwner=" + fsOwner);
 
-    this.supergroup = conf.get("dfs.permissions.supergroup", "supergroup");
-    this.isPermissionEnabled = conf.getBoolean("dfs.permissions", true);
+    this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
+                               DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+    this.isPermissionEnabled = conf.getBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
+                                               DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
     LOG.info("supergroup=" + supergroup);
     LOG.info("isPermissionEnabled=" + isPermissionEnabled);
-    short filePermission = (short)conf.getInt("dfs.upgrade.permission", 00777);
+    short filePermission = (short)conf.getInt(DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
+                                              DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
     this.defaultPermission = PermissionStatus.createImmutable(
         fsOwner.getUserName(), supergroup, new FsPermission(filePermission));
 
     long heartbeatInterval = conf.getLong("dfs.heartbeat.interval", 3) * 1000;
     this.heartbeatRecheckInterval = conf.getInt(
-        "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
       10 * heartbeatInterval;
     this.replicationRecheckInterval = 
-      conf.getInt("dfs.replication.interval", 3) * 1000L;
+      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
+                  DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE),
-        conf.getInt("io.bytes.per.checksum", DEFAULT_BYTES_PER_CHECKSUM),
-        conf.getInt("dfs.write.packet.size", DEFAULT_WRITE_PACKET_SIZE),
+        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE),
+        conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BYTES_PER_CHECKSUM),
+        conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DEFAULT_WRITE_PACKET_SIZE),
         (short) conf.getInt("dfs.replication", DEFAULT_REPLICATION_FACTOR),
         conf.getInt("io.file.buffer.size", DEFAULT_FILE_BUFFER_SIZE));
-    this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
+    this.maxFsObjects = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY, 
+                                     DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
     this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
                                          20*(int)(heartbeatInterval/1000));
-    this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
+    this.accessTimePrecision = conf.getLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
     this.supportAppends = conf.getBoolean("dfs.support.append", false);
     this.isAccessTokenEnabled = conf.getBoolean(
         AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false);
@@ -3007,7 +3016,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     // Reread the config to get dfs.hosts and dfs.hosts.exclude filenames.
     // Update the file names and refresh internal includes and excludes list
     if (conf == null)
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     hostsReader.updateFileNames(conf.get("dfs.hosts",""), 
                                 conf.get("dfs.hosts.exclude", ""));
     hostsReader.refresh();
@@ -3160,9 +3169,10 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
      * @param conf configuration
      */
     SafeModeInfo(Configuration conf) {
-      this.threshold = conf.getFloat("dfs.safemode.threshold.pct", 0.95f);
-      this.extension = conf.getInt("dfs.safemode.extension", 0);
-      this.safeReplication = conf.getInt("dfs.replication.min", 1);
+      this.threshold = conf.getFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 0.95f);
+      this.extension = conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+      this.safeReplication = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 
+                                         DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
       this.blockTotal = 0; 
       this.blockSafe = 0;
     }
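
SafeModeInfo now reads its thresholds through the renamed keys; for illustration (values are examples only, not recommendations from this patch):

    Configuration conf = new HdfsConfiguration();
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 0.999f); // was dfs.safemode.threshold.pct
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);        // was dfs.safemode.extension
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 1);               // was dfs.replication.min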

+ 5 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -35,6 +35,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -81,8 +83,8 @@ public class FileChecksumServlets {
       final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
       xml.declaration();
 
-      final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
-      final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
+      final Configuration conf = new HdfsConfiguration(DataNode.getDataNode().getConf());
+      final int socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
       final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
       UnixUserGroupInformation.saveToConf(conf,
           UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
@@ -99,4 +101,4 @@ public class FileChecksumServlets {
       xml.endDocument();
     }
   }
-}
+}

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -26,6 +26,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -48,7 +49,7 @@ public class FsckServlet extends DfsServlet {
     UserGroupInformation.setCurrentUser(ugi);
 
     final ServletContext context = getServletContext();
-    final Configuration conf = new Configuration((Configuration) context.getAttribute("name.conf"));
+    final Configuration conf = new HdfsConfiguration((Configuration) context.getAttribute("name.conf"));
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
 

+ 11 - 8
src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -63,6 +63,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.RPC;
@@ -244,11 +246,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
-        conf.get("dfs.http.address", "0.0.0.0:50070"));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
   }
 
   protected void setHttpServerAddress(Configuration conf){
-    conf.set("dfs.http.address", getHostPortString(httpAddress));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, getHostPortString(httpAddress));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -337,10 +339,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     this.httpServer = new HttpServer("hdfs", infoHost, infoPort, 
         infoPort == 0, conf);
     if (conf.getBoolean("dfs.https.enable", false)) {
-      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+      boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                                               DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-          "dfs.https.address", infoHost + ":" + 0));
-      Configuration sslConf = new Configuration(false);
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
+      Configuration sslConf = new HdfsConfiguration(false);
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
       this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
@@ -828,11 +831,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
 
   /**
    * Refresh the list of datanodes that the namenode should allow to  
-   * connect.  Re-reads conf by creating new Configuration object and 
+   * connect.  Re-reads conf by creating new HdfsConfiguration object and 
    * uses the files list in the configuration to update the list. 
    */
   public void refreshNodes() throws IOException {
-    namesystem.refreshNodes(new Configuration());
+    namesystem.refreshNodes(new HdfsConfiguration());
   }
 
   /**
@@ -1163,7 +1166,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
   public static NameNode createNameNode(String argv[], 
                                  Configuration conf) throws IOException {
     if (conf == null)
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
       printUsage();
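
The name node's HTTP/HTTPS endpoints are likewise configured through DFSConfigKeys now; an illustrative sketch (addresses are placeholders):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070");   // was dfs.http.address
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "0.0.0.0:50470");  // was dfs.https.address
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, false);     // was dfs.https.need.client.auth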

+ 13 - 7
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -38,6 +38,8 @@ import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -117,7 +119,7 @@ public class SecondaryNameNode implements Runnable {
    */
   private void initialize(Configuration conf) throws IOException {
     // initiate Java VM metrics
-    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
+    JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
     
     // Create connection to the namenode.
     shouldRun = true;
@@ -138,12 +140,15 @@ public class SecondaryNameNode implements Runnable {
     checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
 
     // Initialize other scheduling parameters from the configuration
-    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
-    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
+    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
+                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     // initialize the webserver for uploading files.
     InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
-        conf.get("dfs.secondary.http.address", "0.0.0.0:50090"));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+                 DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
     infoBindAddress = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
     infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
@@ -156,7 +161,7 @@ public class SecondaryNameNode implements Runnable {
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
-    conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort); 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
     LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
              "(" + checkpointPeriod/60 + " min)");
@@ -280,7 +285,8 @@ public class SecondaryNameNode implements Runnable {
     if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
-    String configuredAddress = conf.get("dfs.http.address", "0.0.0.0:50070");
+    String configuredAddress = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+                                        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     if (sockAddr.getAddress().isAnyLocalAddress()) {
       return fsName.getHost() + ":" + sockAddr.getPort();
@@ -455,7 +461,7 @@ public class SecondaryNameNode implements Runnable {
    */
   public static void main(String[] argv) throws Exception {
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
-    Configuration tconf = new Configuration();
+    Configuration tconf = new HdfsConfiguration();
     if (argv.length >= 1) {
       SecondaryNameNode secondary = new SecondaryNameNode(tconf);
       int ret = secondary.processArgs(argv);

+ 3 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.mortbay.jetty.InclusiveByteRange;
@@ -40,7 +41,7 @@ public class StreamFile extends DfsServlet {
 
   static InetSocketAddress nameNodeAddr;
   static DataNode datanode = null;
-  private static final Configuration masterConf = new Configuration();
+  private static final Configuration masterConf = new HdfsConfiguration();
   static {
     if ((datanode = DataNode.getDataNode()) != null) {
       nameNodeAddr = datanode.getNameNodeAddr();
@@ -50,7 +51,7 @@ public class StreamFile extends DfsServlet {
   /** getting a client for connecting to dfs */
   protected DFSClient getDFSClient(HttpServletRequest request)
       throws IOException {
-    Configuration conf = new Configuration(masterConf);
+    Configuration conf = new HdfsConfiguration(masterConf);
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
     return new DFSClient(nameNodeAddr, conf);

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java

@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.util.MetricsBase;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
@@ -67,7 +68,7 @@ public class FSNamesystemMetrics implements Updater {
 
   public FSNamesystemMetrics(FSNamesystem fsNameSystem, Configuration conf) {
     this.fsNameSystem = fsNameSystem;
-    String sessionId = conf.get("session.id");
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
      
     // Create a record for FSNamesystem metrics
     MetricsContext metricsContext = MetricsUtil.getContext("dfs");

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -21,6 +21,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsBase;
@@ -86,7 +87,7 @@ public class NameNodeMetrics implements Updater {
 
       
     public NameNodeMetrics(Configuration conf, NamenodeRole nameNodeRole) {
-      String sessionId = conf.get("session.id");
+      String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
       // Initiate Java VM metrics
       String processName = nameNodeRole.toString();
       JvmMetrics.init(processName, sessionId);

+ 5 - 2
src/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -30,6 +30,8 @@ import javax.security.auth.login.LoginException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -106,7 +108,8 @@ public class DFSck extends Configured implements Tool {
     }
 
     final StringBuffer url = new StringBuffer("http://");
-    url.append(getConf().get("dfs.http.address", "0.0.0.0:50070"));
+    url.append(getConf().get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, 
+                             DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
     url.append("/fsck?ugi=").append(ugi).append("&path=");
 
     String dir = "/";
@@ -162,7 +165,7 @@ public class DFSck extends Configured implements Tool {
     if ((args.length == 0 ) || ("-files".equals(args[0]))) 
       printUsage();
     else
-      res = ToolRunner.run(new DFSck(new Configuration()), args);
+      res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
     System.exit(res);
   }
 }
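
A hedged usage sketch for the updated DFSck, pointing it at an explicit namenode web address before invoking it through ToolRunner (the address and path are placeholders):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "namenode.example.com:50070");
    int res = ToolRunner.run(new DFSck(conf), new String[] { "/" });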

+ 2 - 1
src/test/aop/org/apache/hadoop/fi/FiConfig.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fi;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /**
  * This class wraps the logic around fault injection configuration file
@@ -37,7 +38,7 @@ public class FiConfig {
   
   protected static void init () {
     if (conf == null) {
-      conf = new Configuration(false);
+      conf = new HdfsConfiguration(false);
       String configName = System.getProperty(CONFIG_PARAMETER, DEFAULT_CONFIG);
       conf.addResource(configName);
     }

+ 23 - 21
src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs;
 
 import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fi.FiHFlushTestUtil;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fi.FiHFlushTestUtil.DerrAction;
@@ -62,7 +64,7 @@ public class TestFiHFlush {
   @Test
   public void hFlushFi01_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName, 
+    runDiskErrorTest(new HdfsConfiguration(), methodName, 
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 0));
   }
 
@@ -75,11 +77,11 @@ public class TestFiHFlush {
   @Test(expected = IOException.class)
   public void hFlushFi01_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName, 
         customBlockSize, new DerrAction(methodName, 0));
   }
@@ -90,11 +92,11 @@ public class TestFiHFlush {
   @Test(expected = IOException.class)
   public void hFlushFi01_c() throws IOException { 
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName, 
         customBlockSize, new DerrAction(methodName, 0));
   }
@@ -104,7 +106,7 @@ public class TestFiHFlush {
   @Test
   public void hFlushFi02_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName,
+    runDiskErrorTest(new HdfsConfiguration(), methodName,
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 1));
   }
 
@@ -113,11 +115,11 @@ public class TestFiHFlush {
 @Test(expected = IOException.class)
   public void hFlushFi02_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 1));
   }
@@ -127,11 +129,11 @@ public class TestFiHFlush {
   @Test(expected = IOException.class)
   public void hFlushFi02_c() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 1));
   }
@@ -141,7 +143,7 @@ public class TestFiHFlush {
   @Test
   public void hFlushFi03_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName,
+    runDiskErrorTest(new HdfsConfiguration(), methodName,
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 2));
   }
   
@@ -150,11 +152,11 @@ public class TestFiHFlush {
   @Test(expected = IOException.class)
   public void hFlushFi03_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 2));
   }
@@ -164,11 +166,11 @@ public class TestFiHFlush {
   @Test(expected = IOException.class)
   public void hFlushFi03_c() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 2));
   }
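
The *_b and *_c test cases above repeat the same two-key setup; one way it could be factored out (a sketch only, the helper name is illustrative and not part of the patch):

    private static Configuration customChecksumConf(int bytesPerChecksum) {
      Configuration conf = new HdfsConfiguration();
      conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
      conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, bytesPerChecksum * 3L);
      return conf;
    }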

+ 5 - 3
src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java

@@ -34,6 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 import org.junit.Assert;
 import org.junit.Test;
@@ -43,11 +45,11 @@ public class TestFiDataTransferProtocol {
   static final short REPLICATION = 3;
   static final long BLOCKSIZE = 1L * (1L << 20);
 
-  static final Configuration conf = new Configuration();
+  static final Configuration conf = new HdfsConfiguration();
   static {
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
@@ -298,4 +300,4 @@ public class TestFiDataTransferProtocol {
     final String methodName = FiTestUtil.getMethodName();
     runCallReceivePacketTest(methodName, 2, new DoosAction(methodName, 2));
   }
-}
+}

+ 2 - 1
src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.cli.util.CLITestData.TestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -53,7 +54,7 @@ public class TestHDFSCLI extends TestCLI{
                        "host5", "host6", "host7", "host8" };
     dfsCluster = new MiniDFSCluster(conf, 8, true, racks, hosts);
     
-    namenode = conf.get("fs.default.name", "file:///");
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     
     username = System.getProperty("user.name");
     dfsAdmCmdExecutor = new DFSAdminCmdExecutor(namenode);

+ 2 - 1
src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 import junit.framework.TestCase;
@@ -48,7 +49,7 @@ public class TestGlobPaths extends TestCase {
   
   protected void setUp() throws Exception {
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       dfsCluster = new MiniDFSCluster(conf, 1, true, null);
       fs = FileSystem.get(conf);
     } catch (IOException e) {

+ 2 - 1
src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -23,6 +23,7 @@ import java.io.IOException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.junit.After;
@@ -40,7 +41,7 @@ public class TestHDFSFileContextMainOperations extends
   @BeforeClass
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException  {
-    cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+    cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
     fc = FileContext.getFileContext(cluster.getFileSystem());
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UnixUserGroupInformation.login().getUserName()));

+ 3 - 2
src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java

@@ -28,6 +28,7 @@ import java.net.URL;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
@@ -48,7 +49,7 @@ public class TestUrlStreamHandler extends TestCase {
    */
   public void testDfsUrls() throws IOException {
 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
 
@@ -106,7 +107,7 @@ public class TestUrlStreamHandler extends TestCase {
    */
   public void testFileUrls() throws IOException, URISyntaxException {
     // URLStreamHandler is already set in JVM by testDfsUrls() 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     // Locate the test temporary directory.
     File tmpDir = new File(conf.get("hadoop.tmp.dir"));

+ 5 - 3
src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java

@@ -23,6 +23,8 @@ import java.io.FileReader;
 import java.io.FileWriter;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 import junit.framework.TestCase;
@@ -30,7 +32,7 @@ import junit.framework.TestCase;
  * This class tests the load generator.
  */
 public class TestLoadGenerator extends TestCase {
-  private static final Configuration CONF = new Configuration();
+  private static final Configuration CONF = new HdfsConfiguration();
   private static final int DEFAULT_BLOCK_SIZE = 10;
   private static final String OUT_DIR = 
     System.getProperty("test.build.data","build/test/data");
@@ -47,8 +49,8 @@ public class TestLoadGenerator extends TestCase {
   
 
   static {
-    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
     CONF.setLong("dfs.heartbeat.interval", 1L);
   }
 

+ 9 - 7
src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java

@@ -27,7 +27,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -160,8 +162,8 @@ public class TestStickyBit extends TestCase {
   public void testGeneralSBBehavior() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       conf.setBoolean("dfs.support.append", true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
 
@@ -200,8 +202,8 @@ public class TestStickyBit extends TestCase {
 
     try {
       // Set up cluster for testing
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem hdfs = cluster.getFileSystem();
 
@@ -246,8 +248,8 @@ public class TestStickyBit extends TestCase {
   public void testStickyBitPersistence() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem hdfs = cluster.getFileSystem();
 
@@ -293,7 +295,7 @@ public class TestStickyBit extends TestCase {
    */
   static private FileSystem logonAs(UnixUserGroupInformation user,
       Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new Configuration(conf);
+    Configuration conf2 = new HdfsConfiguration(conf);
     UnixUserGroupInformation.saveToConf(conf2,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
 

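Note on the TestStickyBit hunks above: besides the dfs.permissions rename, every new Configuration(conf) copy becomes new HdfsConfiguration(conf), so derived configurations also pick up whatever HDFS-specific setup the new class performs. A minimal sketch, assuming HdfsConfiguration is simply a Configuration subclass mirroring both constructors; the real class added by this commit may load additional resources or key mappings not shown here.

    // A minimal sketch only; what the real HdfsConfiguration does in the
    // commented hooks (resource loading, deprecated-key registration) is not
    // visible in this part of the diff.
    package org.apache.hadoop.hdfs.example;

    import org.apache.hadoop.conf.Configuration;

    public class ExampleHdfsConfiguration extends Configuration {
      public ExampleHdfsConfiguration() {
        super();
        // HDFS-specific initialization would go here
      }

      public ExampleHdfsConfiguration(Configuration conf) {
        super(conf);  // keep the caller's settings
        // the same HDFS-specific initialization applies to the copy
      }
    }
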
+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /** Utilities for append-related tests */ 
 public class AppendTestUtil {
@@ -94,7 +95,7 @@ public class AppendTestUtil {
    */
   public static FileSystem createHdfsWithDifferentUsername(Configuration conf
       ) throws IOException {
-    Configuration conf2 = new Configuration(conf);
+    Configuration conf2 = new HdfsConfiguration(conf);
     String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
     UnixUserGroupInformation.saveToConf(conf2,
         UnixUserGroupInformation.UGI_PROPERTY_NAME,

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java

@@ -226,7 +226,7 @@ public class BenchmarkThroughput extends Configured implements Tool {
    * @param args
    */
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(),
+    int res = ToolRunner.run(new HdfsConfiguration(),
         new BenchmarkThroughput(), args);
     System.exit(res);
   }

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -155,7 +156,7 @@ public class DFSTestUtil {
   /** check if the files have been copied correctly. */
   public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
     
-    //Configuration conf = new Configuration();
+    //Configuration conf = new HdfsConfiguration();
     Path root = new Path(topdir);
     
     for (int idx = 0; idx < nFiles; idx++) {
@@ -284,7 +285,7 @@ public class DFSTestUtil {
 
   static public Configuration getConfigurationWithDifferentUsername(Configuration conf
       ) throws IOException {
-    final Configuration c = new Configuration(conf);
+    final Configuration c = new HdfsConfiguration(conf);
     final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
     final String username = ugi.getUserName()+"_XXX";
     final String[] groups = {ugi.getGroupNames()[0] + "_XXX"};

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java

@@ -98,7 +98,7 @@ public class DataNodeCluster {
     int numBlocksPerDNtoInject = 0;
     int replication = 1;
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("-n")) {

+ 13 - 12
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
@@ -254,17 +255,17 @@ public class MiniDFSCluster {
     
     // Setup the NameNode configuration
     FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
-    conf.set("dfs.http.address", "127.0.0.1:0");  
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");  
     if (manageNameDfsDirs) {
-      conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name1").getPath()+","+
                new File(base_dir, "name2").getPath());
-      conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").
+      conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(base_dir, "namesecondary1").
                 getPath()+"," + new File(base_dir, "namesecondary2").getPath());
     }
     
     int replication = conf.getInt("dfs.replication", 3);
     conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
-    conf.setInt("dfs.safemode.extension", 0);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
     conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second
     
     // Format and clean out DataNode directories
@@ -280,7 +281,7 @@ public class MiniDFSCluster {
                      operation == StartupOption.FORMAT ||
                      operation == StartupOption.REGULAR) ?
       new String[] {} : new String[] {operation.getName()};
-    conf.setClass("topology.node.switch.mapping.impl", 
+    conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                    StaticMapping.class, DNSToSwitchMapping.class);
     nameNode = NameNode.createNameNode(args, conf);
     
@@ -386,7 +387,7 @@ public class MiniDFSCluster {
     
     
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
-      Configuration dnConf = new Configuration(conf);
+      Configuration dnConf = new HdfsConfiguration(conf);
       if (manageDfsDirs) {
         File dir1 = new File(data_dir, "data"+(2*i+1));
         File dir2 = new File(data_dir, "data"+(2*i+2));
@@ -396,7 +397,7 @@ public class MiniDFSCluster {
           throw new IOException("Mkdirs failed to create directory for DataNode "
                                 + i + ": " + dir1 + " or " + dir2);
         }
-        dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 
+        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dir1.getPath() + "," + dir2.getPath()); 
       }
       if (simulatedCapacities != null) {
         dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
@@ -404,11 +405,11 @@ public class MiniDFSCluster {
             simulatedCapacities[i-curDatanodesNum]);
       }
       System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
-                         + dnConf.get("dfs.data.dir"));
+                         + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
       if (hosts != null) {
-        dnConf.set("slave.host.name", hosts[i - curDatanodesNum]);
+        dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
         System.out.println("Starting DataNode " + i + " with hostname set to: " 
-                           + dnConf.get("slave.host.name"));
+                           + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
@@ -417,7 +418,7 @@ public class MiniDFSCluster {
         StaticMapping.addNodeToRack(name,
                                     racks[i-curDatanodesNum]);
       }
-      Configuration newconf = new Configuration(dnConf); // save config
+      Configuration newconf = new HdfsConfiguration(dnConf); // save config
       if (hosts != null) {
         NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
       }
@@ -709,7 +710,7 @@ public class MiniDFSCluster {
       boolean keepPort) throws IOException {
     Configuration conf = dnprop.conf;
     String[] args = dnprop.dnArgs;
-    Configuration newconf = new Configuration(conf); // save cloned config
+    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
     if (keepPort) {
       InetSocketAddress addr = dnprop.datanode.getSelfAddr();
       conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"

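Note on the MiniDFSCluster hunks above: the cluster helper now wires its NameNode and DataNode settings through DFSConfigKeys as well. A small usage sketch follows, assuming the same test classpath used throughout this diff (HdfsConfiguration, DFSConfigKeys, MiniDFSCluster); the key values chosen are illustrative only.

    // Usage sketch: start a single-DataNode mini cluster with the renamed keys.
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterKeyExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new HdfsConfiguration();
        // the same keys MiniDFSCluster now sets through DFSConfigKeys
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);

        MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          fs.mkdirs(new Path("/example"));
          System.out.println("cluster up, created /example");
        } finally {
          cluster.shutdown();
        }
      }
    }
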
+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 public class TestAbandonBlock extends junit.framework.TestCase {
   public static final Log LOG = LogFactory.getLog(TestAbandonBlock.class);
   
-  private static final Configuration CONF = new Configuration();
+  private static final Configuration CONF = new HdfsConfiguration();
   static final String FILE_NAME_PREFIX
       = "/" + TestAbandonBlock.class.getSimpleName() + "_"; 
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java

@@ -53,7 +53,7 @@ public class TestBlockMissingException extends TestCase {
     LOG.info("Test testBlockMissingException started.");
     long blockSize = 1024L;
     int numBlocks = 4;
-    conf = new Configuration();
+    conf = new HdfsConfiguration();
     try {
       dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
       dfs.waitActive();

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java

@@ -65,11 +65,11 @@ public class TestBlockReport {
   private static Configuration conf;
 
   static {
-    conf = new Configuration();
+    conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     conf.setLong("dfs.datanode.directoryscan.interval", DN_RESCAN_INTERVAL);
   }
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -38,7 +38,7 @@ public class TestBlocksScheduledCounter extends TestCase {
 
   public void testBlocksScheduledCounter() throws IOException {
     
-    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
+    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, 
                                                 true, null);
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();

+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 
 import org.junit.Test;
@@ -39,7 +40,7 @@ public class TestClientProtocolForPipelineRecovery {
   
   @Test public void testGetNewStamp() throws IOException {
     int numDataNodes = 1;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {

+ 6 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -43,8 +43,8 @@ import org.apache.hadoop.fs.FileSystem;
  *  5. Swaps two meta files, i.e the format of the meta files 
  *     are valid but their CRCs do not match with their corresponding 
  *     data blocks
- * The above tests are run for varied values of io.bytes.per.checksum 
- * and dfs.block.size. It tests for the case when the meta file is 
+ * The above tests are run for varied values of dfs.bytes-per-checksum 
+ * and dfs.blocksize. It tests for the case when the meta file is 
  * multiple blocks.
  *
  * Another portion of the test is commented out till HADOOP-1557 
@@ -207,7 +207,7 @@ public class TestCrcCorruption extends TestCase {
     // default parameters
     //
     System.out.println("TestCrcCorruption with default parameters");
-    Configuration conf1 = new Configuration();
+    Configuration conf1 = new HdfsConfiguration();
     conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
     DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8*1024);
     thistest(conf1, util1);
@@ -216,9 +216,9 @@ public class TestCrcCorruption extends TestCase {
     // specific parameters
     //
     System.out.println("TestCrcCorruption with specific parameters");
-    Configuration conf2 = new Configuration();
-    conf2.setInt("io.bytes.per.checksum", 17);
-    conf2.setInt("dfs.block.size", 34);
+    Configuration conf2 = new HdfsConfiguration();
+    conf2.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17);
+    conf2.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34);
     DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
     thistest(conf2, util2);
   }

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -61,14 +61,14 @@ public class TestDFSClientRetries extends TestCase {
    */
   public void testWriteTimeoutAtDataNode() throws IOException,
                                                   InterruptedException { 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     final int writeTimeout = 100; //milliseconds.
     // set a very short write timeout for datanode, so that tests runs fast.
     conf.setInt("dfs.datanode.socket.write.timeout", writeTimeout); 
     // set a smaller block size
     final int blockSize = 10*1024*1024;
-    conf.setInt("dfs.block.size", blockSize);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt("dfs.client.max.block.acquire.failures", 1);
     // set a small buffer size
     final int bufferSize = 4096;
@@ -225,7 +225,7 @@ public class TestDFSClientRetries extends TestCase {
   
   public void testNotYetReplicatedErrors() throws IOException
   {   
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     // allow 1 retry (2 total calls)
     conf.setInt("dfs.client.block.write.locateFollowingBlock.retries", 1);

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -88,11 +88,11 @@ public class TestDFSFinalize extends TestCase {
        * For now disabling block verification so that the contents are 
        * not changed.
        */
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Finalize with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java

@@ -43,7 +43,7 @@ public class TestDFSMkdirs extends TestCase {
    * not create a subdirectory off a file.
    */
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -80,7 +80,7 @@ public class TestDFSMkdirs extends TestCase {
    * Tests mkdir will not create directory when parent is missing.
    */
   public void testMkdir() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     try {

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -38,7 +38,7 @@ import junit.framework.TestCase;
 /** Unit tests for permission */
 public class TestDFSPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
-  final private static Configuration conf = new Configuration();
+  final private static Configuration conf = new HdfsConfiguration();
   
   final private static String GROUP1_NAME = "group1";
   final private static String GROUP2_NAME = "group2";
@@ -79,7 +79,7 @@ public class TestDFSPermission extends TestCase {
       LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS);
       
       // explicitly turn on permission checking
-      conf.setBoolean("dfs.permissions", true);
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       
       // Initiate all four users
       SUPERUSER = UnixUserGroupInformation.login(conf);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java

@@ -39,7 +39,7 @@ public class TestDFSRename extends junit.framework.TestCase {
   }
 
   public void testRename() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     try {
       FileSystem fs = cluster.getFileSystem();

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -121,11 +121,11 @@ public class TestDFSRollback extends TestCase {
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Normal NameNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");

+ 16 - 16
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -93,7 +93,7 @@ public class TestDFSShell extends TestCase {
   }
 
   public void testZeroSizeFile() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -135,7 +135,7 @@ public class TestDFSShell extends TestCase {
   }
   
   public void testRecrusiveRm() throws IOException {
-	  Configuration conf = new Configuration();
+	  Configuration conf = new HdfsConfiguration();
 	  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
 	  FileSystem fs = cluster.getFileSystem();
 	  assertTrue("Not a HDFS: " + fs.getUri(), 
@@ -160,7 +160,7 @@ public class TestDFSShell extends TestCase {
   }
     
   public void testDu() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -209,7 +209,7 @@ public class TestDFSShell extends TestCase {
                                   
   }
   public void testPut() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -305,7 +305,7 @@ public class TestDFSShell extends TestCase {
 
   /** check command error outputs and exit statuses. */
   public void testErrOutPut() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
@@ -447,8 +447,8 @@ public class TestDFSShell extends TestCase {
   }
   
   public void testURIPaths() throws Exception {
-    Configuration srcConf = new Configuration();
-    Configuration dstConf = new Configuration();
+    Configuration srcConf = new HdfsConfiguration();
+    Configuration dstConf = new HdfsConfiguration();
     MiniDFSCluster srcCluster =  null;
     MiniDFSCluster dstCluster = null;
     String bak = System.getProperty("test.build.data");
@@ -539,7 +539,7 @@ public class TestDFSShell extends TestCase {
   }
 
   public void testText() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
@@ -583,7 +583,7 @@ public class TestDFSShell extends TestCase {
   }
 
   public void testCopyToLocal() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -680,7 +680,7 @@ public class TestDFSShell extends TestCase {
   }
 
   public void testCount() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
     FsShell shell = new FsShell();
@@ -836,14 +836,14 @@ public class TestDFSShell extends TestCase {
   }
   
   public void testFilePermissions() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     //test chmod on local fs
     FileSystem fs = FileSystem.getLocal(conf);
     testChmod(conf, fs, 
               (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
     
-    conf.set("dfs.permissions", "true");
+    conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
     
     //test chmod on DFS
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -901,7 +901,7 @@ public class TestDFSShell extends TestCase {
    * Tests various options of DFSShell.
    */
   public void testDFSShell() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     /* This tests some properties of ChecksumFileSystem as well.
      * Make sure that we create ChecksumDFS */
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -1127,7 +1127,7 @@ public class TestDFSShell extends TestCase {
     MiniDFSCluster dfs = null;
     PrintStream bak = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       dfs = new MiniDFSCluster(conf, 2, true, null);
       FileSystem fs = dfs.getFileSystem();
       Path p = new Path("/foo");
@@ -1160,7 +1160,7 @@ public class TestDFSShell extends TestCase {
   
   public void testGet() throws IOException {
     DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
@@ -1218,7 +1218,7 @@ public class TestDFSShell extends TestCase {
   }
 
   public void testLsr() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java

@@ -37,7 +37,7 @@ public class TestDFSShellGenericOptions extends TestCase {
     String namenode = null;
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 1, true, null);
       namenode = FileSystem.getDefaultUri(conf).toString();
       String [] args = new String[4];
@@ -70,7 +70,7 @@ public class TestDFSShellGenericOptions extends TestCase {
                "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
                "<configuration>\n"+
                " <property>\n"+
-               "   <name>fs.default.name</name>\n"+
+               "   <name>fs.defaultFS</name>\n"+
                "   <value>"+namenode+"</value>\n"+
                " </property>\n"+
                "</configuration>\n");
@@ -91,7 +91,7 @@ public class TestDFSShellGenericOptions extends TestCase {
   private void testPropertyOption(String[] args, String namenode) {
     // prepare arguments to create a directory /data
     args[0] = "-D";
-    args[1] = "fs.default.name="+namenode;
+    args[1] = "fs.defaultFS="+namenode;
     execute(args, namenode);        
   }
     

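Note on the TestDFSShellGenericOptions hunks above: the default-filesystem property moves from fs.default.name to fs.defaultFS, both in generated config files and in -D overrides. Below is a sketch of driving FsShell with the new name through ToolRunner; the namenode URI is a placeholder and the shell arguments are just an example.

    // Passing the renamed default-filesystem key via the generic -D option.
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class DefaultFsOptionExample {
      public static void main(String[] args) throws Exception {
        String[] shellArgs = {
          "-D", "fs.defaultFS=hdfs://localhost:8020",  // was fs.default.name
          "-mkdir", "/data"
        };
        // ToolRunner applies the -D override before FsShell runs
        int exit = ToolRunner.run(new HdfsConfiguration(), new FsShell(), shellArgs);
        System.exit(exit);
      }
    }
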
+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java

@@ -169,10 +169,10 @@ public class TestDFSStartupVersions extends TestCase {
   public void testVersions() throws Exception {
     UpgradeUtilities.initialize();
     Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
-                                                      new Configuration());
+                                                      new HdfsConfiguration());
     StorageInfo[] versions = initializeVersions();
     UpgradeUtilities.createStorageDirs(
-                                       NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
+                                       NAME_NODE, conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
     cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
     StorageInfo nameNodeVersion = new StorageInfo(
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
@@ -181,7 +181,7 @@ public class TestDFSStartupVersions extends TestCase {
     log("NameNode version info", NAME_NODE, null, nameNodeVersion);
     for (int i = 0; i < versions.length; i++) {
       File[] storage = UpgradeUtilities.createStorageDirs(
-                                                          DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+                                                          DATA_NODE, conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
       log("DataNode version info", DATA_NODE, i, versions[i]);
       UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
       try {

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java

@@ -111,8 +111,8 @@ public class TestDFSStorageStateRecovery extends TestCase {
    */
   String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
     String[] baseDirs = (nodeType == NAME_NODE ?
-                         conf.getStrings("dfs.name.dir") :
-                         conf.getStrings("dfs.data.dir"));
+                         conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY) :
+                         conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
     UpgradeUtilities.createEmptyDirs(baseDirs);
     if (state[0])  // current
       UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
@@ -179,7 +179,7 @@ public class TestDFSStorageStateRecovery extends TestCase {
     UpgradeUtilities.initialize();
 
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
       for (int i = 0; i < testCases.length; i++) {

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -128,11 +128,11 @@ public class TestDFSUpgrade extends TestCase {
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -177,7 +177,7 @@ public class TestDFSUpgradeFromImage extends TestCase {
   public void testUpgradeFromImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
         System.setProperty("test.build.data", "build/test/data");
       }

+ 4 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -185,7 +186,7 @@ public class TestDataTransferProtocol extends TestCase {
   
   @Test public void testOpWrite() throws IOException {
     int numDataNodes = 1;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {
@@ -316,7 +317,7 @@ public class TestDataTransferProtocol extends TestCase {
     Path file = new Path("dataprotocol.dat");
     int numDataNodes = 1;
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.replication", numDataNodes); 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {
@@ -328,7 +329,7 @@ public class TestDataTransferProtocol extends TestCase {
     dnAddr = NetUtils.createSocketAddr(datanode.getName());
     FileSystem fileSys = cluster.getFileSystem();
     
-    int fileLen = Math.min(conf.getInt("dfs.block.size", 4096), 4096);
+    int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
     
     createFile(fileSys, file, fileLen);
 

+ 6 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -91,7 +91,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     
     long startTime = System.currentTimeMillis();
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitActive();
     
@@ -152,7 +152,7 @@ public class TestDatanodeBlockScanner extends TestCase {
   }
 
   public void testBlockCorruptionPolicy() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setLong("dfs.blockreport.intervalMsec", 1000L);
     Random random = new Random();
     FileSystem fs = null;
@@ -262,11 +262,11 @@ public class TestDatanodeBlockScanner extends TestCase {
                                              short numReplicas,
                                              int numCorruptReplicas) 
                                              throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setLong("dfs.blockreport.intervalMsec", 30L);
-    conf.setLong("dfs.replication.interval", 30);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 30);
     conf.setLong("dfs.heartbeat.interval", 30L);
-    conf.setBoolean("dfs.replication.considerLoad", false);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -371,7 +371,7 @@ public class TestDatanodeBlockScanner extends TestCase {
   
   /** Test if NameNode handles truncated blocks in block report */
   public void testTruncatedBlockReport() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = (short)2;
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);

+ 8 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java

@@ -283,11 +283,11 @@ public class TestDatanodeDeath extends TestCase {
    * dies.
    */
   private void complexTest() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -338,11 +338,11 @@ public class TestDatanodeDeath extends TestCase {
    * close the file.
    */
   private void simpleTest(int datanodeToKill) throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     int myMaxNodes = 5;
     System.out.println("SimpleTest starting with DataNode to Kill " + 
                        datanodeToKill);

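Note on the TestDatanodeDeath hunks above: the renamed keys carry the component that reads them in the prefix (dfs.namenode.*, dfs.datanode.*, dfs.client.*), which is most visible in timing settings such as the heartbeat recheck interval and the client socket timeout. The snippet below only illustrates that scoping; the values mirror the test and are not tuning recommendations.

    // Illustrative only: component-scoped timing keys on one configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ScopedTimingKeys {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); // namenode-side
        conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);               // client-side
        System.out.println(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY
            + " = " + conf.get(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
        System.out.println(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY
            + " = " + conf.get(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY));
      }
    }
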
+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
  * This test ensures the all types of data node report work correctly.
  */
 public class TestDatanodeReport extends TestCase {
-  final static private Configuration conf = new Configuration();
+  final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
     
   /**
@@ -40,7 +40,7 @@ public class TestDatanodeReport extends TestCase {
    */
   public void testDatanodeReport() throws Exception {
     conf.setInt(
-        "heartbeat.recheck.interval", 500); // 0.5s
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
     conf.setLong("dfs.heartbeat.interval", 1L);
     MiniDFSCluster cluster = 
       new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);

+ 4 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java

@@ -240,8 +240,8 @@ public class TestDecommission extends TestCase {
    * Tests Decommission in DFS.
    */
   public void testDecommission() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setBoolean("dfs.replication.considerLoad", false);
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
 
     // Set up the hosts/exclude files.
     FileSystem localFileSys = FileSystem.getLocal(conf);
@@ -251,9 +251,9 @@ public class TestDecommission extends TestCase {
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
     conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.replication.pending.timeout.sec", 4);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     writeConfigFile(localFileSys, excludeFile, null);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java

@@ -41,7 +41,7 @@ public class TestDefaultNameNodePort extends TestCase {
   }
 
   public void testGetAddressFromConf() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
     assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");

+ 41 - 0
src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.Level;
+
+import junit.framework.TestCase;
+
+public class TestDeprecatedKeys extends TestCase {
+ 
+  //Tests a deprecated key
+  public void testDeprecatedKeys() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("topology.script.file.name", "xyz");
+    String scriptFile = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
+    assertTrue(scriptFile.equals("xyz")) ;
+    conf.setInt("dfs.replication.interval", 1);
+    String alpha = DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
+    int repInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3) ;
+    assertTrue(repInterval == 1) ;
+  }
+}

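Note on the new TestDeprecatedKeys above: it only asserts the observable behaviour, namely that a value written under an old name (topology.script.file.name, dfs.replication.interval) is readable through the new DFSConfigKeys constant. One plausible way to get that behaviour, and this is an assumption rather than something shown in this part of the diff, is Configuration's deprecation table; the sketch below assumes Configuration.addDeprecation(String, String[]) is available on this branch and that the new literal names match the constant names.

    // Hypothetical sketch of old-to-new key registration. Whether
    // HdfsConfiguration does it this way is an assumption; the literal new
    // names below are likewise assumed from the constant names in the test.
    import org.apache.hadoop.conf.Configuration;

    public class DeprecationSketch {
      public static void main(String[] args) {
        Configuration.addDeprecation("dfs.replication.interval",
            new String[] { "dfs.namenode.replication.interval" });
        Configuration.addDeprecation("topology.script.file.name",
            new String[] { "net.topology.script.file.name" });

        Configuration conf = new Configuration();
        conf.setInt("dfs.replication.interval", 1);   // written under the old name
        // read back under the new name; prints 1 if the mapping is in place
        System.out.println(conf.getInt("dfs.namenode.replication.interval", 3));
      }
    }
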
+ 8 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -36,14 +36,14 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
   private static final Random RAN = new Random();
 
   public void testFileSystemCloseAll() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
       FileSystem.closeAll();
 
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       FileSystem.setDefaultUri(conf, address);
       FileSystem.get(conf);
       FileSystem.get(conf);
@@ -59,7 +59,7 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
    * multiple files are open.
    */
   public void testDFSClose() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -76,7 +76,7 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
   }
 
   public void testDFSClient() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
 
     try {
@@ -165,19 +165,19 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    final Configuration conf = new Configuration();
-    conf.set("slave.host.name", "localhost");
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
-    final String hftpuri = "hftp://" + conf.get("dfs.http.address");
+    final String hftpuri = "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
     System.out.println("hftpuri=" + hftpuri);
     final FileSystem hftp = new Path(hftpuri).getFileSystem(conf);
 
     final String dir = "/filechecksum";
     final int block_size = 1024;
     final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
-    conf.setInt("io.bytes.per.checksum", 512);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
 
     //try different number of blocks
     for(int n = 0; n < 5; n++) {

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java

@@ -286,9 +286,9 @@ public class TestFSInputChecker extends TestCase {
   }
   
   public void testFSInputChecker() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_SUM);
     rand.nextBytes(expected);
 
     // test DFS

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -110,9 +110,9 @@ public class TestFSOutputSummer extends TestCase {
    * Test write opeation for output stream in DFS.
    */
   public void testFSOutputSummer() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", BYTES_PER_CHECKSUM);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
     MiniDFSCluster cluster = new MiniDFSCluster(
         conf, NUM_OF_DATANODES, true, null);
     fileSys = cluster.getFileSystem();

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -102,7 +102,7 @@ public class TestFileAppend extends TestCase {
    * @throws IOException an exception might be thrown
    */
   public void testCopyOnWrite() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -172,7 +172,7 @@ public class TestFileAppend extends TestCase {
    * @throws IOException an exception might be thrown
    */
   public void testSimpleFlush() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -227,7 +227,7 @@ public class TestFileAppend extends TestCase {
    * @throws IOException an exception might be thrown
    */
   public void testComplexFlush() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }

+ 5 - 5
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -80,7 +80,7 @@ public class TestFileAppend2 extends TestCase {
    * @throws IOException an exception might be thrown
    */ 
   public void testSimpleAppend() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -331,11 +331,11 @@ public class TestFileAppend2 extends TestCase {
    */
   public void testComplexAppend() throws IOException {
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 30000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
     conf.setInt("dfs.datanode.socket.write.timeout", 30000);
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -49,8 +49,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     return new TestSetup(new TestSuite(TestFileAppend3.class)) {
       protected void setUp() throws java.lang.Exception {
         AppendTestUtil.LOG.info("setUp()");
-        conf = new Configuration();
-        conf.setInt("io.bytes.per.checksum", 512);
+        conf = new HdfsConfiguration();
+        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
         conf.setBoolean("dfs.support.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -44,7 +44,7 @@ public class TestFileCorruption extends TestCase {
     MiniDFSCluster cluster = null;
     DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 3, true, null);
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
@@ -71,7 +71,7 @@ public class TestFileCorruption extends TestCase {
 
   /** check if local FS can handle corrupted blocks properly */
   public void testLocalFileCorruption() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
     FileSystem fs = FileSystem.getLocal(conf);
     DataOutputStream dos = fs.create(file);
@@ -99,7 +99,7 @@ public class TestFileCorruption extends TestCase {
   public void testArrayOutOfBoundsException() throws Exception {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 2, true, null);
       cluster.waitActive();
       

+ 21 - 21
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -177,10 +177,10 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test that server default values can be retrieved on the client side
    */
   public void testServerDefaults() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", FSConstants.DEFAULT_BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
-    conf.setInt("dfs.write.packet.size", FSConstants.DEFAULT_WRITE_PACKET_SIZE);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, FSConstants.DEFAULT_BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, FSConstants.DEFAULT_WRITE_PACKET_SIZE);
     conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
     conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf,
@@ -204,7 +204,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test that file data becomes available before file is closed.
    */
   public void testFileCreation() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -284,7 +284,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test deleteOnExit
    */
   public void testDeleteOnExit() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -346,8 +346,8 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test that file data does not become corrupted even in the face of errors.
    */
   public void testFileCreationError1() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -420,8 +420,8 @@ public class TestFileCreation extends junit.framework.TestCase {
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
     System.out.println("testFileCreationError2 start");
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -487,10 +487,10 @@ public class TestFileCreation extends junit.framework.TestCase {
    * is needed to handle persistent leases.
    */
   public void xxxtestFileCreationNamenodeRestart() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -619,7 +619,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test that all open files are closed when client dies abnormally.
    */
   public void testDFSClientDeath() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     System.out.println("Testing adbornal client death.");
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -655,7 +655,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test file creation with all supported flags.
    */
   public void testFileCreationWithFlags() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -740,7 +740,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test file creation using createNonRecursive().
    */
   public void testFileCreationNonRecursive() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -851,7 +851,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    * Test creating two files at the same time. 
    */
   public void testConcurrentFileCreation() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
 
     try {
@@ -889,8 +889,8 @@ public class TestFileCreation extends junit.framework.TestCase {
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster
@@ -946,7 +946,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     System.out.println("test file system close start");
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
@@ -974,8 +974,8 @@ public class TestFileCreation extends junit.framework.TestCase {
     System.out.println("test testFsCloseAfterClusterShutdown start");
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
-    conf.setInt("dfs.replication.min", 3);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
     conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
     conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java

@@ -47,7 +47,7 @@ public class TestFileCreationClient extends junit.framework.TestCase {
   /** Test lease recovery Triggered by DFSClient. */
   public void testClientTriggeredLeaseRecovery() throws Exception {
     final int REPLICATION = 3;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java

@@ -37,10 +37,10 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
   }
 
   public void testFileCreationDeleteParent() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setBoolean("dfs.support.append", true);
 

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java

@@ -50,8 +50,8 @@ public class TestFileCreationEmpty extends junit.framework.TestCase {
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
-    final Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java

@@ -61,7 +61,7 @@ public class TestFileStatus extends TestCase {
    * Tests various options of DFSShell.
    */
   public void testFileStatus() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -44,13 +44,13 @@ import junit.framework.TestCase;
 public class TestGetBlocks extends TestCase {
   /** test getBlocks */
   public void testGetBlocks() throws Exception {
-    final Configuration CONF = new Configuration();
+    final Configuration CONF = new HdfsConfiguration();
 
     final short REPLICATION_FACTOR = (short)2;
     final int DEFAULT_BLOCK_SIZE = 1024;
     final Random r = new Random();
     
-    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(
           CONF, REPLICATION_FACTOR, true, null );
     try {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -29,7 +29,7 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
 
   @Override
   protected void setUp() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 

+ 25 - 24
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNS;
 
 /**
@@ -90,10 +91,10 @@ public class TestHDFSServerPorts extends TestCase {
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
     }
-    config = new Configuration();
-    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
+    config = new HdfsConfiguration();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name1").getPath());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
-    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
     NameNode.format(config);
 
     String[] args = new String[] {};
@@ -119,8 +120,8 @@ public class TestHDFSServerPorts extends TestCase {
     assertTrue(currDir2.mkdirs());
     assertTrue(currDir3.mkdirs());
     
-    conf.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
-    conf.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
     
     // Start BackupNode
     String[] args = new String [] { StartupOption.BACKUP.getName() };
@@ -136,7 +137,7 @@ public class TestHDFSServerPorts extends TestCase {
   throws IOException {
     String dataDir = getTestingDir();
     File dataNodeDir = new File(dataDir, "data-" + index);
-    config.set("dfs.data.dir", dataNodeDir.getPath());
+    config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
 
     String[] args = new String[] {};
     // NameNode will modify config with the ports it bound to
@@ -244,8 +245,8 @@ public class TestHDFSServerPorts extends TestCase {
       nn = startNameNode();
 
       // start another namenode on the same port
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
       NameNode.format(conf2);
       boolean started = canStartNameNode(conf2);
       assertFalse(started); // should fail
@@ -258,7 +259,7 @@ public class TestHDFSServerPorts extends TestCase {
       // reset conf2 since NameNode modifies it
       FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
       // different http port
-      conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       started = canStartNameNode(conf2);
       assertTrue(started); // should start now
     } finally {
@@ -275,8 +276,8 @@ public class TestHDFSServerPorts extends TestCase {
       nn = startNameNode();
 
       // start data-node on the same port as name-node
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
       conf2.set("dfs.datanode.address",
                 FileSystem.getDefaultUri(config).getAuthority());
       conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
@@ -286,7 +287,7 @@ public class TestHDFSServerPorts extends TestCase {
       // bind http server to the same port as name-node
       conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
       conf2.set("dfs.datanode.http.address", 
-                config.get("dfs.http.address"));
+                config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
       started = canStartDataNode(conf2);
       assertFalse(started); // should fail
     
@@ -310,18 +311,18 @@ public class TestHDFSServerPorts extends TestCase {
       nn = startNameNode();
 
       // bind http server to the same port as name-node
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.secondary.http.address", 
-                config.get("dfs.http.address"));
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, 
+                config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
       LOG.info("= Starting 1 on: " + 
-                                 conf2.get("dfs.secondary.http.address"));
+                                 conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
       boolean started = canStartSecondaryNode(conf2);
       assertFalse(started); // should fail
 
       // bind http server to a different port
-      conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       LOG.info("= Starting 2 on: " + 
-                                 conf2.get("dfs.secondary.http.address"));
+                                 conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
       started = canStartSecondaryNode(conf2);
       assertTrue(started); // should start now
     } finally {
@@ -338,20 +339,20 @@ public class TestHDFSServerPorts extends TestCase {
         nn = startNameNode();
 
         // bind http server to the same port as name-node
-        Configuration backup_config = new Configuration(config);
-        backup_config.set("dfs.backup.http.address", 
-                                        backup_config.get("dfs.http.address"));
+        Configuration backup_config = new HdfsConfiguration(config);
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+                                        backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
 
         LOG.info("= Starting 1 on: " + 
-                                  backup_config.get("dfs.backup.http.address"));
+                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         assertFalse("Backup started on same port as Namenode", 
                            canStartBackupNode(backup_config)); // should fail
 
         // bind http server to a different port
-        backup_config.set("dfs.backup.http.address", NAME_NODE_HTTP_HOST + "0");
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
         LOG.info("= Starting 2 on: " + 
-                                  backup_config.get("dfs.backup.http.address"));
+                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         assertTrue(canStartBackupNode(backup_config)); // should start now
       } finally {

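The TestHDFSServerPorts hunks exercise both HdfsConfiguration constructors this changeset relies on: the no-argument form and the copy form "new HdfsConfiguration(config)". The HdfsConfiguration.java added by this commit is not shown in this excerpt, so the following is only a hedged sketch of the shape such a Configuration subclass conventionally takes (default-resource registration plus the constructors the tests call); do not read it as the committed code.

package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;

/** Hypothetical sketch only -- the committed HdfsConfiguration.java may differ. */
public class HdfsConfiguration extends Configuration {
  static {
    // Presumed: register hdfs-default.xml / hdfs-site.xml as default resources
    // so that lookups through DFSConfigKeys pick up HDFS-specific defaults.
    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");
  }

  public HdfsConfiguration() {
    super();
  }

  // Copy constructor used above as "new HdfsConfiguration(config)".
  public HdfsConfiguration(Configuration conf) {
    super(conf);
  }
}
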
+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java

@@ -37,7 +37,7 @@ public class TestHDFSTrash extends TestTrash {
   public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
       protected void setUp() throws Exception {
-        Configuration conf = new Configuration();
+        Configuration conf = new HdfsConfiguration();
         cluster = new MiniDFSCluster(conf, 2, true, null);
       }
       protected void tearDown() throws Exception {
@@ -57,7 +57,7 @@ public class TestHDFSTrash extends TestTrash {
   public void testNonDefaultFS() throws IOException {
     FileSystem fs = cluster.getFileSystem();
     Configuration conf = fs.getConf();
-    conf.set("fs.default.name", fs.getUri().toString());
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
     trashNonDefaultFS(conf);
   }
 

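Worth noting in the TestHDFSTrash hunk: DFSConfigKeys also carries the filesystem default-name key (spelled "fs.default.name" in the line it replaces), so even non-dfs.* settings can be referenced through constants. A small illustrative sketch of recording the default filesystem URI both ways follows; the URI is a placeholder, not a value from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DefaultFsSketch {                          // illustrative only
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // Either form records the default filesystem URI in the configuration;
    // TestHDFSServerPorts above uses the FileSystem.setDefaultUri variant.
    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
    FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");

    System.out.println("default fs = " + FileSystem.getDefaultUri(conf));
  }
}
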
+ 9 - 7
src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataInputStream;
 import static org.junit.Assert.assertEquals;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -36,7 +38,7 @@ public class TestHFlush {
    */
   @Test
   public void hFlush_01() throws IOException {
-    doTheJob(new Configuration(), fName, AppendTestUtil.BLOCK_SIZE, (short)2);
+    doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE, (short)2);
   }
 
   /** The test uses {@link #doTheJob(Configuration, String, long, short)
@@ -45,12 +47,12 @@ public class TestHFlush {
    */
   @Test
   public void hFlush_02() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
     // Modify default filesystem settings
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
 
     doTheJob(conf, fName, customBlockSize, (short)2);
   }
@@ -61,12 +63,12 @@ public class TestHFlush {
    */
  @Test
   public void hFlush_03() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
     // Modify default filesystem settings
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
 
     doTheJob(conf, fName, customBlockSize, (short)2);
   }

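The TestHFlush hunks keep the custom block size an exact multiple of the per-checksum chunk (512 * 3 and 400 * 3); the HDFS write path has historically rejected block sizes that are not a multiple of the checksum chunk, which is presumably why both tests derive one value from the other. A short sketch of that derivation using the same two DFSConfigKeys constants; the helper and its values are illustrative, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SmallBlockConfSketch {                     // illustrative only
  /** Build a configuration whose block size is an exact multiple of the checksum chunk. */
  public static Configuration smallBlockConf(int bytesPerChecksum, int chunksPerBlock) {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, (long) bytesPerChecksum * chunksPerBlock);
    return conf;
  }

  public static void main(String[] args) {
    Configuration conf = smallBlockConf(512, 3);        // mirrors hFlush_02 above
    System.out.println(conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, -1));  // prints 1536
  }
}
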
+ 4 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -133,9 +133,9 @@ public class TestInjectionForSimulatedStorage extends TestCase {
     }
     
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       conf.set("dfs.replication", Integer.toString(numDataNodes));
-      conf.setInt("io.bytes.per.checksum", checksumSize);
+      conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
       //first time format
       cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
@@ -165,9 +165,9 @@ public class TestInjectionForSimulatedStorage extends TestCase {
        */
       
       LOG.info("Restarting minicluster");
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
-      conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
       
       cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
                                    true, null, null);

Not all files are shown because a large number of files were changed in this diff.