
HDFS-10674. Optimize creating a full path from an inode. Contributed by Daryn Sharp.

(cherry picked from commit 22ef5286bc8511ddee9594b7cecc598bf41a850b)
(cherry picked from commit a5d12d9c1f2c0e6fcd918ee8e614dcaf203e77de)
(cherry picked from commit e53f6fde465e5a1a97d8c4eebe7f7897b0875f1c)
Kihwal Lee committed 9 years ago
commit 73ba5a0170

+ 1 - 54
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -775,57 +775,6 @@ public class FSDirectory implements Closeable {
     return typeSpaceDeltas;
   }
 
-  /** Return the name of the path represented by inodes at [0, pos] */
-  static String getFullPathName(INode[] inodes, int pos) {
-    StringBuilder fullPathName = new StringBuilder();
-    if (inodes[0].isRoot()) {
-      if (pos == 0) return Path.SEPARATOR;
-    } else {
-      fullPathName.append(inodes[0].getLocalName());
-    }
-    
-    for (int i=1; i<=pos; i++) {
-      fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName());
-    }
-    return fullPathName.toString();
-  }
-
-  /**
-   * @return the relative path of an inode from one of its ancestors,
-   *         represented by an array of inodes.
-   */
-  private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
-    // calculate the depth of this inode from the ancestor
-    int depth = 0;
-    for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) {
-      depth++;
-    }
-    INode[] inodes = new INode[depth];
-
-    // fill up the inodes in the path from this inode to root
-    for (int i = 0; i < depth; i++) {
-      if (inode == null) {
-        NameNode.stateChangeLog.warn("Could not get full path."
-            + " Corresponding file might have deleted already.");
-        return null;
-      }
-      inodes[depth-i-1] = inode;
-      inode = inode.getParent();
-    }
-    return inodes;
-  }
-  
-  private static INode[] getFullPathINodes(INode inode) {
-    return getRelativePathINodes(inode, null);
-  }
-  
-  /** Return the full path name of the specified inode */
-  static String getFullPathName(INode inode) {
-    INode[] inodes = getFullPathINodes(inode);
-    // inodes can be null only when its called without holding lock
-    return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
-  }
-
   /**
    * Add the given child to the namespace.
    * @param existing the INodesInPath containing all the ancestral INodes
@@ -877,9 +826,7 @@ public class FSDirectory implements Closeable {
         try {
           q.verifyQuota(deltas);
         } catch (QuotaExceededException e) {
-          List<INode> inodes = iip.getReadOnlyINodes();
-          final String path = getFullPathName(inodes.toArray(new INode[inodes.size()]), i);
-          e.setPathName(path);
+          e.setPathName(iip.getPath(i));
           throw e;
         }
       }
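
The removed helpers rebuilt the offending path by copying the read-only inode list into a fresh INode[] and stitching names with a StringBuilder; since INodesInPath already holds the resolved path components, the quota error message can be rendered from them directly via getPath(i). A rough, self-contained analogue of the new call shape, with hypothetical names (ResolvedPath stands in for INodesInPath and is not part of the patch):

import java.util.Arrays;

class ResolvedPath {
    private final String[] components;   // components[0] is the root, i.e. the empty name
    ResolvedPath(String... components) { this.components = components; }

    /** Path formed by components [0, pos], mirroring INodesInPath#getPath(int). */
    String getPath(int pos) {
        String joined = String.join("/", Arrays.copyOfRange(components, 0, pos + 1));
        return joined.isEmpty() ? "/" : joined;
    }
}

class QuotaCheckSketch {
    public static void main(String[] args) {
        ResolvedPath iip = new ResolvedPath("", "user", "daryn", "file1");
        int i = 2;   // index of the ancestor directory whose quota was exceeded
        // old: inodes.toArray(...) then getFullPathName(inodes, i)
        // new: ask the already-resolved path for the prefix directly
        System.out.println("Quota exceeded at " + iip.getPath(i));   // prints: Quota exceeded at /user/daryn
    }
}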

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6610,7 +6610,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         final INode inode = (INode)blockManager.getBlockCollection(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
-          String src = FSDirectory.getFullPathName(inode);
+          String src = inode.getFullPathName();
           if (src.startsWith(path)){
             corruptFiles.add(new CorruptFileBlockInfo(src, blk));
             count++;

+ 19 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -579,7 +579,25 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   public String getFullPathName() {
     // Get the full path name of this inode.
-    return FSDirectory.getFullPathName(this);
+    if (isRoot()) {
+      return Path.SEPARATOR;
+    }
+    // compute size of needed bytes for the path
+    int idx = 0;
+    for (INode inode = this; inode != null; inode = inode.getParent()) {
+      // add component + delimiter (if not tail component)
+      idx += inode.getLocalNameBytes().length + (inode != this ? 1 : 0);
+    }
+    byte[] path = new byte[idx];
+    for (INode inode = this; inode != null; inode = inode.getParent()) {
+      if (inode != this) {
+        path[--idx] = Path.SEPARATOR_CHAR;
+      }
+      byte[] name = inode.getLocalNameBytes();
+      idx -= name.length;
+      System.arraycopy(name, 0, path, idx, name.length);
+    }
+    return DFSUtil.bytes2String(path);
   }
   
   @Override
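
The replacement getFullPathName() walks the parent chain twice: a first pass sizes the byte array exactly, and a second pass fills it from the tail backwards, avoiding the intermediate INode[] and StringBuilder of the old FSDirectory helpers. A minimal standalone sketch of the same two-pass technique, using a hypothetical Node class in place of INode (names and byte handling are simplified for illustration):

import java.nio.charset.StandardCharsets;

class Node {
    final byte[] name;    // component name; empty for the root
    final Node parent;    // null for the root
    Node(String name, Node parent) {
        this.name = name.getBytes(StandardCharsets.UTF_8);
        this.parent = parent;
    }

    /** Build "/a/b/c" by sizing the buffer first, then filling it backwards. */
    String fullPath() {
        if (parent == null) {
            return "/";                                        // root
        }
        // pass 1: compute the exact number of bytes needed
        int len = 0;
        for (Node n = this; n != null; n = n.parent) {
            len += n.name.length + (n != this ? 1 : 0);        // '/' before every non-tail component
        }
        byte[] path = new byte[len];
        // pass 2: copy components from the tail toward the root
        int idx = len;
        for (Node n = this; n != null; n = n.parent) {
            if (n != this) {
                path[--idx] = '/';
            }
            idx -= n.name.length;
            System.arraycopy(n.name, 0, path, idx, n.name.length);
        }
        return new String(path, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        Node root = new Node("", null);
        Node dir  = new Node("dir", root);
        Node file = new Node("file1", dir);
        System.out.println(file.fullPath());                   // prints /dir/file1
    }
}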

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java

@@ -346,11 +346,11 @@ public class INodesInPath {
   }
 
   public String getParentPath() {
-    return getPath(path.length - 1);
+    return getPath(path.length - 2);
   }
 
   public String getPath(int pos) {
-    return DFSUtil.byteArray2PathString(path, 0, pos);
+    return DFSUtil.byteArray2PathString(path, 0, pos + 1); // it's a length...
   }
 
   /**
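
After this change, getPath(pos) treats pos as the index of the last component to include, so the helper receives pos + 1 as a length and getParentPath() must stop one component earlier (path.length - 2). A small stand-in for the assumed joining behaviour of DFSUtil.byteArray2PathString, shown only to illustrate the offset/length contract (the names below are illustrative, not the HDFS API):

class PathJoin {
    // Simplified stand-in: join `length` components starting at `offset`,
    // where components[0] is the root and has an empty name.
    static String join(String[] components, int offset, int length) {
        if (length == 1 && components[offset].isEmpty()) {
            return "/";                                   // just the root
        }
        StringBuilder sb = new StringBuilder();
        for (int i = offset; i < offset + length; i++) {
            if (i > offset) {
                sb.append('/');
            }
            sb.append(components[i]);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        String[] components = {"", "dir", "sub1", "file1"};      // root, then /dir/sub1/file1
        // getPath(pos) now passes pos + 1 as the length:
        System.out.println(join(components, 0, 0 + 1));           // "/"
        System.out.println(join(components, 0, 1 + 1));           // "/dir"
        System.out.println(join(components, 0, 3 + 1));           // "/dir/sub1/file1"
        // getParentPath() is getPath(path.length - 2), i.e. everything but the tail:
        System.out.println(join(components, 0, components.length - 1));  // "/dir/sub1"
    }
}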

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java

@@ -155,7 +155,12 @@ public class TestSnapshotPathINodes {
         sub1.toString());
     assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
-    
+
+    assertEquals(Path.SEPARATOR, nodesInPath.getPath(0));
+    assertEquals(dir.toString(), nodesInPath.getPath(1));
+    assertEquals(sub1.toString(), nodesInPath.getPath(2));
+    assertEquals(file1.toString(), nodesInPath.getPath(3));
+
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
     assertEquals(nodesInPath.length(), components.length);
     assertSnapshot(nodesInPath, false, null, -1);