
HADOOP-564. Replace uses of dfs:// with hdfs://. Contributed by Wendy.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@510275 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent commit 1b0662b3b1

+ 3 - 0
CHANGES.txt

@@ -102,6 +102,9 @@ Trunk (unreleased changes)
 30. HADOOP-990.  Improve HDFS support for full datanode volumes.
     (Raghu Angadi via cutting)
 
+31. HADOOP-564.  Replace uses of "dfs://" URIs with the more standard
+    "hdfs://".  (Wendy Chien via cutting)
+
 
 Release 0.11.2 - 2007-02-16
 

+ 1 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java

@@ -76,7 +76,7 @@ public class TestSymLink extends TestCase
             "-jobconf", strNamenode,
             "-jobconf", strJobtracker,
             "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
-            "-cacheFile", "dfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
+            "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
         };
 
         fileSys.delete(new Path(OUTPUT_DIR));
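
For context, a minimal standalone sketch (not part of this commit) of the URI shape the test now passes to -cacheFile: java.net.URI splits it into scheme, authority, path, and the "#linkname" fragment that names the symlink. The host, port, and path here are made-up stand-ins for the values the test computes at runtime:

    import java.net.URI;

    public class CacheUriDemo {
      public static void main(String[] args) throws Exception {
        URI u = new URI("hdfs://localhost:8020/testing/streaming/cache.txt#testlink");
        System.out.println(u.getScheme());    // hdfs
        System.out.println(u.getAuthority()); // localhost:8020
        System.out.println(u.getPath());      // /testing/streaming/cache.txt
        System.out.println(u.getFragment());  // testlink (the symlink name)
      }
    }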

+ 4 - 4
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -44,7 +44,7 @@ public class DistributedCache {
   /**
    * 
    * @param cache the cache to be localized, this should be specified as 
-   * new URI(dfs://hostname:port/absoulte_path_to_file#LINKNAME). If no schema 
+   * new URI(hdfs://hostname:port/absoulte_path_to_file#LINKNAME). If no schema 
    * or hostname:port is provided the file is assumed to be in the filesystem
    * being used in the Configuration
    * @param conf The Confguration file which contains the filesystem
@@ -137,7 +137,7 @@ public class DistributedCache {
   /*
    * Returns the relative path of the dir this cache will be localized in
    * relative path that this cache will be localized in. For
-   * dfs://hostname:port/absolute_path -- the relative path is
+   * hdfs://hostname:port/absolute_path -- the relative path is
    * hostname/absolute path -- if it is just /absolute_path -- then the
    * relative path is hostname of DFS this mapred cluster is running
    * on/absolute_path
@@ -147,7 +147,7 @@ public class DistributedCache {
     String fsname = cache.getScheme();
     String path;
     FileSystem dfs = FileSystem.get(conf);
-    if ("dfs".equals(fsname)) {
+    if ("hdfs".equals(fsname)) {
       path = cache.getHost() + cache.getPath();
     } else {
       String[] split = dfs.getName().split(":");
@@ -348,7 +348,7 @@ public class DistributedCache {
   
   private static String getFileSysName(URI url) {
     String fsname = url.getScheme();
-    if ("dfs".equals(fsname)) {
+    if ("hdfs".equals(fsname)) {
       String host = url.getHost();
       int port = url.getPort();
       return (port == (-1)) ? host : (host + ":" + port);
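
The two scheme checks above are the heart of this file's change: a cache URI is treated as a DFS path only when its scheme is "hdfs", and the host plus optional port become the filesystem name. A hedged, self-contained restatement of that logic (FileSysNameDemo, nn.example.com, and port 8020 are invented for illustration):

    import java.net.URI;

    public class FileSysNameDemo {
      // Mirrors the "hdfs" branch of getFileSysName: host, plus ":port" if one is set.
      static String getFileSysName(URI url) {
        if ("hdfs".equals(url.getScheme())) {
          int port = url.getPort();
          return (port == -1) ? url.getHost() : (url.getHost() + ":" + port);
        }
        return null; // non-hdfs URIs are resolved against the configured default elsewhere
      }

      public static void main(String[] args) throws Exception {
        System.out.println(getFileSysName(new URI("hdfs://nn.example.com:8020/user/x")));
        // -> nn.example.com:8020
        System.out.println(getFileSysName(new URI("hdfs://nn.example.com/user/x")));
        // -> nn.example.com
      }
    }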

+ 1 - 1
src/java/org/apache/hadoop/tools/Logalyzer.java

@@ -177,7 +177,7 @@ public class Logalyzer {
   doArchive(String logListURI, String archiveDirectory)
   throws IOException
   {
-    String destURL = new String("dfs://" + fsConfig.get("fs.default.name", "local") + 
+    String destURL = new String("hdfs://" + fsConfig.get("fs.default.name", "local") + 
         archiveDirectory);
     CopyFiles.copy(fsConfig, logListURI, destURL, true, false);
   }
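
doArchive simply prefixes the configured fs.default.name with the scheme and appends the archive directory. A small sketch of that concatenation; "nn.example.com:8020" and "/logs/archive" are assumed stand-ins for the config value and the method argument:

    public class ArchiveUrlDemo {
      public static void main(String[] args) {
        String fsDefault = "nn.example.com:8020";  // stand-in for fs.default.name
        String archiveDirectory = "/logs/archive"; // stand-in for the argument
        String destURL = "hdfs://" + fsDefault + archiveDirectory;
        System.out.println(destURL); // hdfs://nn.example.com:8020/logs/archive
      }
    }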

+ 4 - 5
src/java/org/apache/hadoop/util/CopyFiles.java

@@ -671,9 +671,8 @@ public class CopyFiles extends ToolBase {
     ArrayList<String> protocolURIs = new ArrayList<String>(uris.length);
     
     for(int i=0; i < uris.length; ++i) {
-      // uri must start w/ protocol or if protocol is dfs, allow hdfs as alias.
-      if(uris[i].startsWith(protocol) || 
-          (protocol.equalsIgnoreCase("dfs") && uris[i].startsWith("hdfs"))) {
+      // uri must start w/ protocol 
+      if(uris[i].startsWith(protocol)) {
         protocolURIs.add(uris[i]);
       }
     }
@@ -720,8 +719,8 @@ public class CopyFiles extends ToolBase {
       //Source paths
       srcPaths = fetchSrcURIs(conf, srcURI);  
       
-      // Protocol - 'dfs://'
-      String[] dfsUrls = parseInputFile("dfs", srcPaths);
+      // Protocol - 'hdfs://'
+      String[] dfsUrls = parseInputFile(HDFS, srcPaths);
       if(dfsUrls != null) {
         for(int i=0; i < dfsUrls.length; ++i) {
           copy(conf, dfsUrls[i], destPath, false, ignoreReadFailures);
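
With the "dfs" alias gone, grouping source URIs becomes a plain prefix test against the protocol name. A minimal sketch of that filter (filterByProtocol is a hypothetical name standing in for parseInputFile, and the sample URIs are invented):

    import java.util.ArrayList;
    import java.util.List;

    public class ProtocolFilterDemo {
      // Keep only the URIs that literally start with the given protocol.
      static List<String> filterByProtocol(String protocol, String[] uris) {
        List<String> out = new ArrayList<String>(uris.length);
        for (String u : uris) {
          if (u.startsWith(protocol)) {
            out.add(u);
          }
        }
        return out;
      }

      public static void main(String[] args) {
        String[] srcs = { "hdfs://nn:8020/a", "file:///tmp/b", "s3://bucket/c" };
        System.out.println(filterByProtocol("hdfs", srcs)); // [hdfs://nn:8020/a]
      }
    }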

+ 3 - 3
src/test/org/apache/hadoop/mapred/MRCaching.java

@@ -181,9 +181,9 @@ System.out.println("HERE:"+inDir);
       archive2 = "file://" + cachePath + "/test.zip";
       file1 = "file://" + cachePath + "/test.txt";
     } else {
-      archive1 = "dfs://" + fileSys + cachePath + "/test.jar";
-      archive2 = "dfs://" + fileSys + cachePath + "/test.zip";
-      file1 = "dfs://" + fileSys + cachePath + "/test.txt";
+      archive1 = "hdfs://" + fileSys + cachePath + "/test.jar";
+      archive2 = "hdfs://" + fileSys + cachePath + "/test.zip";
+      file1 = "hdfs://" + fileSys + cachePath + "/test.txt";
     }
     URI uri1 = null;
     URI uri2 = null;
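
The local and DFS branches of the test now differ only in the scheme prefix. A tiny sketch of the resulting names, with hypothetical values standing in for fileSys (the DFS name) and cachePath:

    public class CacheNamingDemo {
      public static void main(String[] args) {
        String fileSys = "localhost:8020"; // stand-in for fileSys.getName()
        String cachePath = "/cachedir";    // stand-in for the test's cache dir
        boolean localFs = false;
        String archive1 = localFs
            ? "file://" + cachePath + "/test.jar"
            : "hdfs://" + fileSys + cachePath + "/test.jar";
        System.out.println(archive1); // hdfs://localhost:8020/cachedir/test.jar
      }
    }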