
HDFS-6984. Serialize FileStatus via protobuf.

Chris Douglas · 7 years ago · parent commit 12e44e7bda
41 changed files with 1054 additions and 366 deletions
  1. +4 -0     hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  2. +1 -0     hadoop-common-project/hadoop-common/pom.xml
  3. +5 -1     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
  4. +100 -52  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
  5. +51 -13   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
  6. +12 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  7. +131 -0   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
  8. +18 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/package-info.java
  9. +5 -1     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
  10. +69 -0   hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
  11. +1 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
  12. +85 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java
  13. +6 -2    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
  14. +5 -0    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
  15. +107 -121 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
  16. +29 -13  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
  17. +6 -4    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
  18. +60 -5   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  19. +26 -24  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
  20. +7 -1    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
  21. +5 -11   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  22. +6 -1    hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
  23. +9 -7    hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
  24. +17 -28  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
  25. +10 -9   hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
  26. +12 -0   hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  27. +1 -1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
  28. +18 -0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java
  29. +25 -33  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
  30. +13 -7   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  31. +5 -2    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  32. +2 -1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  33. +5 -2    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
  34. +13 -15  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
  35. +4 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  36. +2 -1    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
  37. +153 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
  38. +5 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
  39. +10 -3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
  40. +3 -1    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  41. +8 -3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -323,6 +323,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.fs\.FSProto.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.

+ 1 - 0
hadoop-common-project/hadoop-common/pom.xml

@@ -393,6 +393,7 @@
                   <include>RefreshUserMappingsProtocol.proto</include>
                   <include>RefreshCallQueueProtocol.proto</include>
                   <include>GenericRefreshProtocol.proto</include>
+                  <include>FSProtos.proto</include>
                 </includes>
               </source>
             </configuration>

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.Serializable;
+
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.CipherSuite;
@@ -30,7 +32,9 @@ import static com.google.common.base.Preconditions.checkNotNull;
  * an encrypted file.
  */
 @InterfaceAudience.Private
-public class FileEncryptionInfo {
+public class FileEncryptionInfo implements Serializable {
+
+  private static final long serialVersionUID = 0x156abe03;
 
   private final CipherSuite cipherSuite;
   private final CryptoProtocolVersion version;

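FileEncryptionInfo picks up Serializable here (as do ECSchema and ErasureCodingPolicy below) so that HdfsFileStatus, which carries these as fields and now extends the Serializable FileStatus, can be serialized with plain java.io. A minimal round-trip sketch under that assumption; the class name is illustrative only, not part of the patch:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    import org.apache.hadoop.fs.FileStatus;

    // Illustrative helper, not part of the patch.
    class JavaSerdeSketch {
      static FileStatus copy(FileStatus stat)
          throws IOException, ClassNotFoundException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
          oos.writeObject(stat); // every field type must be Serializable
        }
        try (ObjectInputStream ois = new ObjectInputStream(
            new ByteArrayInputStream(bos.toByteArray()))) {
          return (FileStatus) ois.readObject();
        }
      }
    }
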
+ 100 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -23,11 +23,15 @@ import java.io.IOException;
 import java.io.InvalidObjectException;
 import java.io.ObjectInputValidation;
 import java.io.Serializable;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
 
+import org.apache.hadoop.fs.FSProtos.FileStatusProto;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.fs.protocolPB.PBHelper;
 import org.apache.hadoop.io.Writable;
 
 /** Interface that represents the client side information for a file.
@@ -50,7 +54,31 @@ public class FileStatus implements Writable, Comparable<Object>,
   private String owner;
   private String group;
   private Path symlink;
-  
+  private Set<AttrFlags> attr;
+
+  private enum AttrFlags {
+    HAS_ACL,
+    HAS_CRYPT,
+    HAS_EC,
+  };
+  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
+    if (!(acl || crypt || ec)) {
+      return NONE;
+    }
+    EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
+    if (acl) {
+      ret.add(AttrFlags.HAS_ACL);
+    }
+    if (crypt) {
+      ret.add(AttrFlags.HAS_CRYPT);
+    }
+    if (ec) {
+      ret.add(AttrFlags.HAS_EC);
+    }
+    return ret;
+  }
+
   public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
   
   //We should deprecate this soon?
@@ -79,6 +107,15 @@ public class FileStatus implements Writable, Comparable<Object>,
                     FsPermission permission, String owner, String group, 
                     Path symlink,
                     Path path) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        false, false, false);
+  }
+
+  public FileStatus(long length, boolean isdir, int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, Path symlink,
+      Path path, boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -89,7 +126,7 @@ public class FileStatus implements Writable, Comparable<Object>,
       this.permission = permission;
     } else if (isdir) {
       this.permission = FsPermission.getDirDefault();
-    } else if (symlink!=null) {
+    } else if (symlink != null) {
       this.permission = FsPermission.getDefault();
     } else {
       this.permission = FsPermission.getFileDefault();
@@ -98,6 +135,8 @@ public class FileStatus implements Writable, Comparable<Object>,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
+    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
     // 2. !isdir implies a file or symlink, symlink != null implies a
@@ -213,7 +252,7 @@ public class FileStatus implements Writable, Comparable<Object>,
    * @return true if the underlying file or directory has ACLs set.
    */
   public boolean hasAcl() {
-    return permission.getAclBit();
+    return attr.contains(AttrFlags.HAS_ACL);
   }
 
   /**
@@ -222,7 +261,7 @@ public class FileStatus implements Writable, Comparable<Object>,
    * @return true if the underlying file is encrypted.
    */
   public boolean isEncrypted() {
-    return permission.getEncryptedBit();
+    return attr.contains(AttrFlags.HAS_CRYPT);
   }
 
   /**
@@ -231,7 +270,7 @@ public class FileStatus implements Writable, Comparable<Object>,
    * @return true if the underlying file or directory is erasure coded.
    */
   public boolean isErasureCoded() {
-    return permission.getErasureCodedBit();
+    return attr.contains(AttrFlags.HAS_EC);
   }
 
   /**
@@ -304,47 +343,6 @@ public class FileStatus implements Writable, Comparable<Object>,
   public void setSymlink(final Path p) {
     symlink = p;
   }
-  
-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
-    out.writeLong(getLen());
-    out.writeBoolean(isDirectory());
-    out.writeShort(getReplication());
-    out.writeLong(getBlockSize());
-    out.writeLong(getModificationTime());
-    out.writeLong(getAccessTime());
-    getPermission().write(out);
-    Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
-    Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
-    out.writeBoolean(isSymlink());
-    if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    this.path = new Path(strPath);
-    this.length = in.readLong();
-    this.isdir = in.readBoolean();
-    this.block_replication = in.readShort();
-    blocksize = in.readLong();
-    modification_time = in.readLong();
-    access_time = in.readLong();
-    permission.readFields(in);
-    owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    group = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
-    } else {
-      this.symlink = null;
-    }
-  }
 
   /**
    * Compare this FileStatus to another FileStatus
@@ -377,15 +375,12 @@ public class FileStatus implements Writable, Comparable<Object>,
    */
   @Override
   public boolean equals(Object o) {
-    if (o == null) {
+    if (!(o instanceof FileStatus)) {
       return false;
     }
     if (this == o) {
       return true;
     }
-    if (!(o instanceof FileStatus)) {
-      return false;
-    }
     FileStatus other = (FileStatus)o;
     return this.getPath().equals(other.getPath());
   }
@@ -420,7 +415,11 @@ public class FileStatus implements Writable, Comparable<Object>,
     sb.append("; permission=" + permission);
     sb.append("; isSymlink=" + isSymlink());
     if(isSymlink()) {
-      sb.append("; symlink=" + symlink);
+      try {
+        sb.append("; symlink=" + getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
     sb.append("; hasAcl=" + hasAcl());
     sb.append("; isEncrypted=" + isEncrypted());
@@ -429,6 +428,55 @@ public class FileStatus implements Writable, Comparable<Object>,
     return sb.toString();
   }
 
+  /**
+   * Read instance encoded as protobuf from stream.
+   * @param in Input stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void readFields(DataInput in) throws IOException {
+    int size = in.readInt();
+    if (size < 0) {
+      throw new IOException("Can't read FileStatusProto with negative " +
+          "size of " + size);
+    }
+    byte[] buf = new byte[size];
+    in.readFully(buf);
+    FileStatusProto proto = FileStatusProto.parseFrom(buf);
+    FileStatus other = PBHelper.convert(proto);
+    isdir = other.isDirectory();
+    length = other.getLen();
+    block_replication = other.getReplication();
+    blocksize = other.getBlockSize();
+    modification_time = other.getModificationTime();
+    access_time = other.getAccessTime();
+    setPermission(other.getPermission());
+    setOwner(other.getOwner());
+    setGroup(other.getGroup());
+    setSymlink((other.isSymlink() ? other.getSymlink() : null));
+    setPath(other.getPath());
+    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    assert !(isDirectory() && isSymlink());
+  }
+
+  /**
+   * Write instance encoded as protobuf to stream.
+   * @param out Output stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void write(DataOutput out) throws IOException {
+    FileStatusProto proto = PBHelper.convert(this);
+    int size = proto.getSerializedSize();
+    out.writeInt(size);
+    out.write(proto.toByteArray());
+  }
+
   @Override
   public void validateObject() throws InvalidObjectException {
     if (null == path) {

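For reference, the deprecated Writable methods above define a simple wire format: a 4-byte length prefix followed by the serialized FileStatusProto. A sketch of an equivalent encoder built only from the generated FSProtos classes and the new PBHelper; the helper class is illustrative, not part of the patch:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.fs.FSProtos.FileStatusProto;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.protocolPB.PBHelper;

    // Illustrative helper, not part of the patch.
    class FileStatusWireFormat {
      /** Encode a FileStatus the way the deprecated write() does. */
      static byte[] encode(FileStatus stat) throws IOException {
        FileStatusProto proto = PBHelper.convert(stat);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(proto.getSerializedSize()); // length prefix
        out.write(proto.toByteArray());          // protobuf payload
        out.flush();
        return bytes.toByteArray();
      }
    }
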
+ 51 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java

@@ -30,6 +30,9 @@ import org.apache.hadoop.fs.permission.FsPermission;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class LocatedFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x17339920;
+
   private BlockLocation[] locations;
 
 
@@ -42,14 +45,18 @@ public class LocatedFileStatus extends FileStatus {
    * @param stat a file status
    * @param locations a file's block locations
    */
-  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
-  throws IOException {
+  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations) {
     this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
         stat.getBlockSize(), stat.getModificationTime(),
-        stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
-        stat.getGroup(), null, stat.getPath(), locations);
+        stat.getAccessTime(), stat.getPermission(),
+        stat.getOwner(), stat.getGroup(), null, stat.getPath(),
+        stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded(), locations);
     if (stat.isSymlink()) {
-      setSymlink(stat.getSymlink());
+      try {
+        setSymlink(stat.getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
   }
 
@@ -69,24 +76,55 @@ public class LocatedFileStatus extends FileStatus {
    * @param path the path's qualified name
    * @param locations a file's block locations
    */
+  @Deprecated
   public LocatedFileStatus(long length, boolean isdir,
           int block_replication,
           long blocksize, long modification_time, long access_time,
           FsPermission permission, String owner, String group, 
-          Path symlink,
-          Path path,
-          BlockLocation[] locations) {
-	  super(length, isdir, block_replication, blocksize, modification_time,
-			  access_time, permission, owner, group, symlink, path);
-	  this.locations = locations;
+          Path symlink, Path path, BlockLocation[] locations) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        permission.getAclBit(), permission.getEncryptedBit(),
+        permission.getErasureCodedBit(), locations);
   }
-  
+
+  /**
+   * Constructor.
+   *
+   * @param length a file's length
+   * @param isdir if the path is a directory
+   * @param block_replication the file's replication factor
+   * @param blocksize a file's block size
+   * @param modification_time a file's modification time
+   * @param access_time a file's access time
+   * @param permission a file's permission
+   * @param owner a file's owner
+   * @param group a file's group
+   * @param symlink symlink if the path is a symbolic link
+   * @param path the path's qualified name
+   * @param hasAcl entity has associated ACLs
+   * @param isEncrypted entity is encrypted
+   * @param isErasureCoded entity is erasure coded
+   * @param locations a file's block locations
+   */
+  public LocatedFileStatus(long length, boolean isdir,
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermission permission, String owner, String group,
+      Path symlink, Path path,
+      boolean hasAcl, boolean isEncrypted, boolean isErasureCoded,
+      BlockLocation[] locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        hasAcl, isEncrypted, isErasureCoded);
+    this.locations = locations;
+  }
+
   /**
    * Get the file's block locations
    * @return the file's block locations
    */
   public BlockLocation[] getBlockLocations() {
-	  return locations;
+    return locations;
   }
   
   /**

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -133,11 +133,13 @@ public class FsPermission implements Writable, Serializable,
   }
 
   @Override
+  @Deprecated
   public void write(DataOutput out) throws IOException {
     out.writeShort(toShort());
   }
 
   @Override
+  @Deprecated
   public void readFields(DataInput in) throws IOException {
     fromShort(in.readShort());
   }
@@ -184,6 +186,7 @@ public class FsPermission implements Writable, Serializable,
    *
    * @return short extended short representation of this permission
    */
+  @Deprecated
   public short toExtendedShort() {
     return toShort();
   }
@@ -299,7 +302,10 @@ public class FsPermission implements Writable, Serializable,
    * Returns true if there is also an ACL (access control list).
    *
    * @return boolean true if there is also an ACL (access control list).
+   * @deprecated Get acl bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
    */
+  @Deprecated
   public boolean getAclBit() {
     // File system subclasses that support the ACL bit would override this.
     return false;
@@ -307,14 +313,20 @@ public class FsPermission implements Writable, Serializable,
 
   /**
    * Returns true if the file is encrypted or directory is in an encryption zone
+   * @deprecated Get encryption bit from the
+   * {@link org.apache.hadoop.fs.FileStatus} object.
    */
+  @Deprecated
   public boolean getEncryptedBit() {
     return false;
   }
 
   /**
    * Returns true if the file or directory is erasure coded.
+   * @deprecated Get ec bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
    */
+  @Deprecated
   public boolean getErasureCodedBit() {
     return false;
   }

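With the bit getters deprecated, callers should read these attributes from FileStatus rather than FsPermission. A hedged before/after sketch, where fs and path stand in for any FileSystem and Path:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative migration, not part of the patch.
    class AttrFlagsMigration {
      static void show(FileSystem fs, Path path) throws IOException {
        FileStatus st = fs.getFileStatus(path);
        boolean acl = st.hasAcl();         // was: st.getPermission().getAclBit()
        boolean enc = st.isEncrypted();    // was: ...getEncryptedBit()
        boolean ec  = st.isErasureCoded(); // was: ...getErasureCodedBit()
        System.out.printf("acl=%b crypt=%b ec=%b%n", acl, enc, ec);
      }
    }
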
+ 131 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.fs.FSProtos.*;
+
+/**
+ * Utility methods aiding conversion of fs data structures.
+ */
+public final class PBHelper {
+
+  private PBHelper() {
+    // prevent construction
+  }
+
+  public static FsPermission convert(FsPermissionProto proto)
+      throws IOException {
+    return new FsPermission((short)proto.getPerm());
+  }
+
+  public static FsPermissionProto convert(FsPermission p) throws IOException {
+    FsPermissionProto.Builder bld = FsPermissionProto.newBuilder();
+    bld.setPerm(p.toShort());
+    return bld.build();
+  }
+
+  public static FileStatus convert(FileStatusProto proto) throws IOException {
+    final Path path;
+    final long length;
+    final boolean isdir;
+    final short blockReplication;
+    final long blocksize;
+    final long mtime;
+    final long atime;
+    final String owner;
+    final String group;
+    final FsPermission permission;
+    final Path symlink;
+    switch (proto.getFileType()) {
+    case FT_DIR:
+      isdir = true;
+      symlink = null;
+      blocksize = 0;
+      length = 0;
+      blockReplication = 0;
+      break;
+    case FT_SYMLINK:
+      isdir = false;
+      symlink = new Path(proto.getSymlink());
+      blocksize = 0;
+      length = 0;
+      blockReplication = 0;
+      break;
+    case FT_FILE:
+      isdir = false;
+      symlink = null;
+      blocksize = proto.getBlockSize();
+      length = proto.getLength();
+      int brep = proto.getBlockReplication();
+      if ((brep & 0xffff0000) != 0) {
+        throw new IOException(String.format("Block replication 0x%08x " +
+            "doesn't fit in 16 bits.", brep));
+      }
+      blockReplication = (short)brep;
+      break;
+    default:
+      throw new IllegalStateException("Unknown type: " + proto.getFileType());
+    }
+    path = new Path(proto.getPath());
+    mtime = proto.getModificationTime();
+    atime = proto.getAccessTime();
+    permission = convert(proto.getPermission());
+    owner = proto.getOwner();
+    group = proto.getGroup();
+    int flags = proto.getFlags();
+    return new FileStatus(length, isdir, blockReplication, blocksize,
+        mtime, atime, permission, owner, group, symlink, path,
+        (flags & FileStatusProto.Flags.HAS_ACL_VALUE)   != 0,
+        (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0,
+        (flags & FileStatusProto.Flags.HAS_EC_VALUE)    != 0);
+  }
+
+  public static FileStatusProto convert(FileStatus stat) throws IOException {
+    FileStatusProto.Builder bld = FileStatusProto.newBuilder();
+    bld.setPath(stat.getPath().toString());
+    if (stat.isDirectory()) {
+      bld.setFileType(FileStatusProto.FileType.FT_DIR);
+    } else if (stat.isSymlink()) {
+      bld.setFileType(FileStatusProto.FileType.FT_SYMLINK)
+         .setSymlink(stat.getSymlink().toString());
+    } else {
+      bld.setFileType(FileStatusProto.FileType.FT_FILE)
+         .setLength(stat.getLen())
+         .setBlockReplication(stat.getReplication())
+         .setBlockSize(stat.getBlockSize());
+    }
+    bld.setAccessTime(stat.getAccessTime())
+       .setModificationTime(stat.getModificationTime())
+       .setOwner(stat.getOwner())
+       .setGroup(stat.getGroup())
+       .setPermission(convert(stat.getPermission()));
+    int flags = 0;
+    flags |= stat.hasAcl()         ? FileStatusProto.Flags.HAS_ACL_VALUE   : 0;
+    flags |= stat.isEncrypted()    ? FileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
+    flags |= stat.isErasureCoded() ? FileStatusProto.Flags.HAS_EC_VALUE    : 0;
+    bld.setFlags(flags);
+    return bld.build();
+  }
+
+}

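A round trip through the new utility, assuming the FSProtos classes have been generated; this mirrors the testUtilitySerialization case added below and uses the extended FileStatus constructor:

    import java.io.IOException;

    import org.apache.hadoop.fs.FSProtos.FileStatusProto;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.fs.protocolPB.PBHelper;

    // Illustrative round trip, not part of the patch.
    class PBHelperRoundTrip {
      static FileStatus roundTrip() throws IOException {
        FileStatus stat = new FileStatus(1024L, false, 3, 1L << 26, 0L, 0L,
            FsPermission.getFileDefault(), "hadoop", "hadoop",
            null /*symlink*/, new Path("hdfs://nn:8020/d/f"),
            true /*acl*/, false /*crypt*/, false /*ec*/);
        FileStatusProto proto = PBHelper.convert(stat); // to protobuf
        FileStatus copy = PBHelper.convert(proto);      // and back
        assert stat.equals(copy) && copy.hasAcl();
        return copy;
      }
    }
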
+ 18 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/package-info.java

@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import java.io.Serializable;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -31,7 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ECSchema {
+public final class ECSchema implements Serializable {
+
+  private static final long serialVersionUID = 0x10953aa0;
+
   public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
   public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";

+ 69 - 0
hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto

@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.fs";
+option java_outer_classname = "FSProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.fs;
+
+message FsPermissionProto {
+  required uint32 perm = 1; // UNIX-style mode bits
+}
+
+/*
+ * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
+ * cross-serialization is not an explicitly supported use case. Unlike HDFS,
+ * most fields are optional and do not define defaults.
+ */
+message FileStatusProto {
+  enum FileType {
+    FT_DIR     = 1;
+    FT_FILE    = 2;
+    FT_SYMLINK = 3;
+  }
+  enum Flags {
+    HAS_ACL    = 0x01; // has ACLs
+    HAS_CRYPT  = 0x02; // encrypted
+    HAS_EC     = 0x04; // erasure coded
+  }
+  required FileType fileType            = 1;
+  required string path                  = 2;
+  optional uint64 length                = 3;
+  optional FsPermissionProto permission = 4;
+  optional string owner                 = 5;
+  optional string group                 = 6;
+  optional uint64 modification_time     = 7;
+  optional uint64 access_time           = 8;
+  optional string symlink               = 9;
+  optional uint32 block_replication     = 10;
+  optional uint64 block_size            = 11;
+  // locations                          = 12
+  // alias                              = 13
+  // childrenNum                        = 14
+  optional bytes encryption_data        = 15;
+  // storagePolicy                      = 16
+  optional bytes ec_data                = 17;
+  optional uint32 flags                 = 18 [default = 0];
+}

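As the comment above notes, most fields are optional and define no defaults, so readers should gate on the generated has*() accessors rather than trusting zero values. A sketch of that pattern under proto2 semantics; the class is illustrative only:

    import org.apache.hadoop.fs.FSProtos.FileStatusProto;

    // Illustrative reader, not part of the patch.
    class OptionalFieldSketch {
      static long lengthOrZero(FileStatusProto proto) {
        // Unset proto2 optionals return the type default; hasLength()
        // distinguishes "absent" (e.g. FT_DIR) from an explicit zero.
        return proto.hasLength() ? proto.getLength() : 0L;
      }
    }
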
+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java

@@ -36,6 +36,7 @@ import org.junit.Test;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

+ 85 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java

@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import static org.apache.hadoop.fs.FSProtos.*;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ * Verify PB serialization of FS data structures.
+ */
+public class TestFSSerialization {
+
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testWritableFlagSerialization() throws Exception {
+    final Path p = new Path("hdfs://yaks:4344/dingos/f");
+    for (int i = 0; i < 0x8; ++i) {
+      final boolean acl   = 0 != (i & 0x1);
+      final boolean crypt = 0 != (i & 0x2);
+      final boolean ec    = 0 != (i & 0x4);
+      FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
+          12345678L, 87654321L, FsPermission.getFileDefault(),
+          "hadoop", "unqbbc", null, p, acl, crypt, ec);
+      DataOutputBuffer dob = new DataOutputBuffer();
+      stat.write(dob);
+      DataInputBuffer dib = new DataInputBuffer();
+      dib.reset(dob.getData(), 0, dob.getLength());
+      FileStatus fstat = new FileStatus();
+      fstat.readFields(dib);
+      assertEquals(stat, fstat);
+      checkFields(stat, fstat);
+    }
+  }
+
+  @Test
+  public void testUtilitySerialization() throws Exception {
+    final Path p = new Path("hdfs://yaks:4344/dingos/f");
+    FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
+        12345678L, 87654321L, FsPermission.createImmutable((short)0111),
+        "hadoop", "unqbbc", null, p);
+    FileStatusProto fsp = PBHelper.convert(stat);
+    FileStatus stat2 = PBHelper.convert(fsp);
+    assertEquals(stat, stat2);
+    checkFields(stat, stat2);
+  }
+
+  private static void checkFields(FileStatus expected, FileStatus actual) {
+    assertEquals(expected.getPath(), actual.getPath());
+    assertEquals(expected.isDirectory(), actual.isDirectory());
+    assertEquals(expected.getLen(), actual.getLen());
+    assertEquals(expected.getPermission(), actual.getPermission());
+    assertEquals(expected.getOwner(), actual.getOwner());
+    assertEquals(expected.getGroup(), actual.getGroup());
+    assertEquals(expected.getModificationTime(), actual.getModificationTime());
+    assertEquals(expected.getAccessTime(), actual.getAccessTime());
+    assertEquals(expected.getReplication(), actual.getReplication());
+    assertEquals(expected.getBlockSize(), actual.getBlockSize());
+    assertEquals(expected.hasAcl(), actual.hasAcl());
+    assertEquals(expected.isEncrypted(), actual.isEncrypted());
+    assertEquals(expected.isErasureCoded(), actual.isErasureCoded());
+  }
+
+}

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.Serializable;
+
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
@@ -29,11 +31,13 @@ import org.apache.hadoop.io.erasurecode.ECSchema;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ErasureCodingPolicy {
+public final class ErasureCodingPolicy implements Serializable {
+
+  private static final long serialVersionUID = 0x0079fe4e;
 
+  private String name;
   private final ECSchema schema;
   private final int cellSize;
-  private String name;
   private byte id;
 
   public ErasureCodingPolicy(String name, ECSchema schema,

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java

@@ -27,6 +27,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
  * done for backwards compatibility in case any existing clients assume the
  * value of FsPermission is in a particular range.
  */
+
+/**
+ * @deprecated ACLs, encryption, and erasure coding are managed on FileStatus.
+ */
+@Deprecated
 @InterfaceAudience.Private
 public class FsPermissionExtension extends FsPermission {
   private static final long serialVersionUID = 0x13c298a4;

+ 107 - 121
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
 import java.net.URI;
+import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -31,24 +33,15 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HdfsFileStatus {
+public class HdfsFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x126eb82a;
 
   // local name of the inode that's encoded in java UTF8
-  private final byte[] path;
-  private final byte[] symlink; // symlink target encoded in java UTF8 or null
-  private final long length;
-  private final boolean isdir;
-  private final short block_replication;
-  private final long blocksize;
-  private final long modification_time;
-  private final long access_time;
-  private final FsPermission permission;
-  private final String owner;
-  private final String group;
+  private byte[] uPath;
+  private byte[] uSymlink; // symlink target encoded in java UTF8/null
   private final long fileId;
-
   private final FileEncryptionInfo feInfo;
-
   private final ErasureCodingPolicy ecPolicy;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
@@ -57,12 +50,22 @@ public class HdfsFileStatus {
 
   public static final byte[] EMPTY_NAME = new byte[0];
 
+  /**
+   * Set of features potentially active on an instance.
+   */
+  public enum Flags {
+    HAS_ACL,
+    HAS_CRYPT,
+    HAS_EC;
+  }
+  private final EnumSet<Flags> flags;
+
   /**
    * Constructor.
-   * @param length the number of bytes the file has
-   * @param isdir if the path is a directory
+   * @param length            the number of bytes the file has
+   * @param isdir             if the path is a directory
    * @param block_replication the replication factor
-   * @param blocksize the block size
+   * @param blocksize         the block size
    * @param modification_time modification time
    * @param access_time access time
    * @param permission permission
@@ -77,25 +80,18 @@ public class HdfsFileStatus {
    * @param ecPolicy the erasure coding policy
    */
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
-      long blocksize, long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] symlink,
-      byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
-    this.length = length;
-    this.isdir = isdir;
-    this.block_replication = (short) block_replication;
-    this.blocksize = blocksize;
-    this.modification_time = modification_time;
-    this.access_time = access_time;
-    this.permission = (permission == null) ?
-        ((isdir || symlink!=null) ?
-            FsPermission.getDefault() :
-            FsPermission.getFileDefault()) :
-        permission;
-    this.owner = (owner == null) ? "" : owner;
-    this.group = (group == null) ? "" : group;
-    this.symlink = symlink;
-    this.path = path;
+                        long blocksize, long modification_time,
+                        long access_time, FsPermission permission,
+                        EnumSet<Flags> flags, String owner, String group,
+                        byte[] symlink, byte[] path, long fileId,
+                        int childrenNum, FileEncryptionInfo feInfo,
+                        byte storagePolicy, ErasureCodingPolicy ecPolicy) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, convert(isdir, symlink != null, permission, flags),
+        owner, group, null, null);
+    this.flags = flags;
+    this.uSymlink = symlink;
+    this.uPath = path;
     this.fileId = fileId;
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
@@ -104,83 +100,48 @@ public class HdfsFileStatus {
   }
 
   /**
-   * Get the length of this file, in bytes.
-   * @return the length of this file, in bytes.
+   * Set redundant flags for compatibility with existing applications.
    */
-  public final long getLen() {
-    return length;
-  }
-
-  /**
-   * Is this a directory?
-   * @return true if this is a directory
-   */
-  public final boolean isDir() {
-    return isdir;
+  protected static FsPermission convert(boolean isdir, boolean symlink,
+      FsPermission p, EnumSet<Flags> f) {
+    if (p instanceof FsPermissionExtension) {
+      // verify flags are set consistently
+      assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
+      assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
+      assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
+      return p;
+    }
+    if (null == p) {
+      if (isdir) {
+        p = FsPermission.getDirDefault();
+      } else if (symlink) {
+        p = FsPermission.getDefault();
+      } else {
+        p = FsPermission.getFileDefault();
+      }
+    }
+    return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
+        f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
   }
 
-  /**
-   * Is this a symbolic link?
-   * @return true if this is a symbolic link
-   */
+  @Override
   public boolean isSymlink() {
-    return symlink != null;
+    return uSymlink != null;
   }
 
-  /**
-   * Get the block size of the file.
-   * @return the number of bytes
-   */
-  public final long getBlockSize() {
-    return blocksize;
+  @Override
+  public boolean hasAcl() {
+    return flags.contains(Flags.HAS_ACL);
   }
 
-  /**
-   * Get the replication factor of a file.
-   * @return the replication factor of a file.
-   */
-  public final short getReplication() {
-    return block_replication;
+  @Override
+  public boolean isEncrypted() {
+    return flags.contains(Flags.HAS_CRYPT);
   }
 
-  /**
-   * Get the modification time of the file.
-   * @return the modification time of file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getModificationTime() {
-    return modification_time;
-  }
-
-  /**
-   * Get the access time of the file.
-   * @return the access time of file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getAccessTime() {
-    return access_time;
-  }
-
-  /**
-   * Get FsPermission associated with the file.
-   * @return permission
-   */
-  public final FsPermission getPermission() {
-    return permission;
-  }
-
-  /**
-   * Get the owner of the file.
-   * @return owner of the file
-   */
-  public final String getOwner() {
-    return owner;
-  }
-
-  /**
-   * Get the group associated with the file.
-   * @return group for the file.
-   */
-  public final String getGroup() {
-    return group;
+  @Override
+  public boolean isErasureCoded() {
+    return flags.contains(Flags.HAS_EC);
   }
 
   /**
@@ -188,7 +149,7 @@ public class HdfsFileStatus {
    * @return true if the name is empty
    */
   public final boolean isEmptyLocalName() {
-    return path.length == 0;
+    return uPath.length == 0;
   }
 
   /**
@@ -196,7 +157,7 @@ public class HdfsFileStatus {
    * @return the local name in string
    */
   public final String getLocalName() {
-    return DFSUtilClient.bytes2String(path);
+    return DFSUtilClient.bytes2String(uPath);
   }
 
   /**
@@ -204,7 +165,7 @@ public class HdfsFileStatus {
    * @return the local name in java UTF8
    */
   public final byte[] getLocalNameInBytes() {
-    return path;
+    return uPath;
   }
 
   /**
@@ -238,16 +199,24 @@ public class HdfsFileStatus {
     return new Path(parent, getLocalName());
   }
 
-  /**
-   * Get the string representation of the symlink.
-   * @return the symlink as a string.
-   */
-  public final String getSymlink() {
-    return DFSUtilClient.bytes2String(symlink);
+  @Override
+  public Path getSymlink() throws IOException {
+    if (isSymlink()) {
+      return new Path(DFSUtilClient.bytes2String(uSymlink));
+    }
+    throw new IOException("Path " + getPath() + " is not a symbolic link");
+  }
+
+  @Override
+  public void setSymlink(Path sym) {
+    uSymlink = DFSUtilClient.string2Bytes(sym.toString());
   }
 
+  /**
+   * Opaque referent for the symlink, to be resolved at the client.
+   */
   public final byte[] getSymlinkInBytes() {
-    return symlink;
+    return uSymlink;
   }
 
   public final long getFileId() {
@@ -275,13 +244,30 @@ public class HdfsFileStatus {
     return storagePolicy;
   }
 
-  public final FileStatus makeQualified(URI defaultUri, Path path) {
-    return new FileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
-        getAccessTime(),
-        getPermission(), getOwner(), getGroup(),
-        isSymlink() ? new Path(getSymlink()) : null,
-        (getFullPath(path)).makeQualified(
-            defaultUri, null)); // fully-qualify path
+  @Override
+  public boolean equals(Object o) {
+    // satisfy findbugs
+    return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    // satisfy findbugs
+    return super.hashCode();
+  }
+
+  /**
+   * Resolve the short name of the Path against the given URI and parent.
+   * This FileStatus reference will not contain a valid Path until it is
+   * resolved by this method.
+   * @param defaultUri FileSystem to fully qualify HDFS path.
+   * @param parent Parent path of this element.
+   * @return Reference to this instance.
+   */
+  public final FileStatus makeQualified(URI defaultUri, Path parent) {
+    // fully-qualify path
+    setPath(getFullPath(parent).makeQualified(defaultUri, null));
+    return this; // API compatibility
   }
+
 }

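Since HdfsFileStatus is now itself a FileStatus, the old copying conversion disappears: makeQualified resolves the path on the instance and returns it. A sketch, with nnUri and parent as hypothetical values from a client call:

    import java.net.URI;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    // Illustrative use, not part of the patch.
    class QualifySketch {
      static FileStatus qualify(HdfsFileStatus status, URI nnUri, Path parent) {
        FileStatus qualified = status.makeQualified(nnUri, parent);
        assert qualified == status; // same instance; return kept for API compat
        return qualified;
      }
    }
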
+ 29 - 13
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.net.URI;
+import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,7 +35,14 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class HdfsLocatedFileStatus extends HdfsFileStatus {
-  private final LocatedBlocks locations;
+
+  private static final long serialVersionUID = 0x23c73328;
+
+  /**
+   * Left transient, because {@link #makeQualifiedLocated(URI,Path)}
+   * is the user-facing type.
+   */
+  private transient LocatedBlocks locations;
 
   /**
    * Constructor
@@ -56,12 +64,12 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
       int block_replication, long blocksize, long modification_time,
-      long access_time, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
-      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ErasureCodingPolicy ecPolicy) {
+      long access_time, FsPermission permission, EnumSet<Flags> flags,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path, fileId,
+        access_time, permission, flags, owner, group, symlink, path, fileId,
         childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
@@ -72,13 +80,21 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
 
   public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
       Path path) {
-    return new LocatedFileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
-        getAccessTime(),
-        getPermission(), getOwner(), getGroup(),
-        isSymlink() ? new Path(getSymlink()) : null,
-        (getFullPath(path)).makeQualified(
-            defaultUri, null), // fully-qualify path
+    makeQualified(defaultUri, path);
+    return new LocatedFileStatus(this,
         DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
   }
+
+  @Override
+  public boolean equals(Object o) {
+    // satisfy findbugs
+    return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    // satisfy findbugs
+    return super.hashCode();
+  }
+
 }

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java

@@ -21,6 +21,7 @@ import java.io.PrintStream;
 import java.text.SimpleDateFormat;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.EnumSet;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -57,11 +58,12 @@ public class SnapshottableDirectoryStatus {
   private final byte[] parentFullPath;
 
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] localName,
-      long inodeId, int childrenNum,
-      int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+      FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+      String owner, String group, byte[] localName, long inodeId,
+      int childrenNum, int snapshotNumber, int snapshotQuota,
+      byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
-        access_time, permission, owner, group, null, localName, inodeId,
+        access_time, permission, flags, owner, group, null, localName, inodeId,
         childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
         null);
     this.snapshotNumber = snapshotNumber;

+ 60 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntrySco
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
@@ -149,7 +150,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Sto
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
@@ -1142,7 +1142,7 @@ public class PBHelperClient {
   }
 
   public static FsPermission convert(FsPermissionProto p) {
-    return new FsPermissionExtension((short)p.getPerm());
+    return new FsPermission((short)p.getPerm());
   }
 
   private static Event.CreateEvent.INodeType createTypeConvert(
@@ -1501,10 +1501,14 @@ public class PBHelperClient {
       return null;
     }
     final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
+    EnumSet<HdfsFileStatus.Flags> flags = status.hasFlags()
+        ? convertFlags(status.getFlags())
+        : convertFlags(status.getPermission());
     return new SnapshottableDirectoryStatus(
         status.getModificationTime(),
         status.getAccessTime(),
         convert(status.getPermission()),
+        flags,
         status.getOwner(),
         status.getGroup(),
         status.getPath().toByteArray(),
@@ -1546,17 +1550,23 @@ public class PBHelperClient {
   }
 
   public static FsPermissionProto convert(FsPermission p) {
-    return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
+    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
   }
 
   public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
-    if (fs == null)
+    if (fs == null) {
       return null;
+    }
+    EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
+        ? convertFlags(fs.getFlags())
+        : convertFlags(fs.getPermission());
     return new HdfsLocatedFileStatus(
         fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
         fs.getBlockReplication(), fs.getBlocksize(),
         fs.getModificationTime(), fs.getAccessTime(),
-        convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
+        convert(fs.getPermission()),
+        flags,
+        fs.getOwner(), fs.getGroup(),
         fs.getFileType().equals(FileType.IS_SYMLINK) ?
             fs.getSymlink().toByteArray() : null,
         fs.getPath().toByteArray(),
@@ -1569,6 +1579,47 @@ public class PBHelperClient {
         fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
   }
 
+  private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    for (HdfsFileStatusProto.Flags pbf : HdfsFileStatusProto.Flags.values()) {
+      if ((pbf.getNumber() & flags) != 0) {
+        switch (pbf) {
+        case HAS_ACL:
+          f.add(HdfsFileStatus.Flags.HAS_ACL);
+          break;
+        case HAS_CRYPT:
+          f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+          break;
+        case HAS_EC:
+          f.add(HdfsFileStatus.Flags.HAS_EC);
+          break;
+        default:
+          // ignore unknown
+          break;
+        }
+      }
+    }
+    return f;
+  }
+
+  private static EnumSet<HdfsFileStatus.Flags> convertFlags(
+      FsPermissionProto pbp) {
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    FsPermission p = new FsPermissionExtension((short)pbp.getPerm());
+    if (p.getAclBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (p.getEncryptedBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (p.getErasureCodedBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_EC);
+    }
+    return f;
+  }
+
   public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
     if (c == null)
       return null;
@@ -2082,6 +2133,10 @@ public class PBHelperClient {
       builder.setEcPolicy(convertErasureCodingPolicy(
           fs.getErasureCodingPolicy()));
     }
+    int flags = fs.hasAcl()   ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE   : 0;
+    flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
+    flags |= fs.isErasureCoded() ? HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0;
+    builder.setFlags(flags);
     return builder.build();
   }
 

+ 26 - 24
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java

@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -61,6 +60,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 
@@ -97,17 +97,8 @@ class JsonUtilClient {
   }
 
   /** Convert a string to a FsPermission object. */
-  static FsPermission toFsPermission(
-      final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) {
-    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-    final boolean aBit = (aclBit != null) ? aclBit : false;
-    final boolean eBit = (encBit != null) ? encBit : false;
-    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
-    if (aBit || eBit || ecBit) {
-      return new FsPermissionExtension(perm, aBit, eBit, ecBit);
-    } else {
-      return perm;
-    }
+  static FsPermission toFsPermission(final String s) {
+    return null == s ? null : new FsPermission(Short.parseShort(s, 8));
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
@@ -128,10 +119,23 @@ class JsonUtilClient {
     final long len = ((Number) m.get("length")).longValue();
     final String owner = (String) m.get("owner");
     final String group = (String) m.get("group");
-    final FsPermission permission = toFsPermission((String) m.get("permission"),
-        (Boolean) m.get("aclBit"),
-        (Boolean) m.get("encBit"),
-        (Boolean) m.get("ecBit"));
+    final FsPermission permission = toFsPermission((String)m.get("permission"));
+
+    Boolean aclBit = (Boolean) m.get("aclBit");
+    Boolean encBit = (Boolean) m.get("encBit");
+    Boolean erasureBit = (Boolean) m.get("ecBit");
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    if (aclBit != null && aclBit) {
+      f.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (encBit != null && encBit) {
+      f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (erasureBit != null && erasureBit) {
+      f.add(HdfsFileStatus.Flags.HAS_EC);
+    }
+
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -143,11 +147,11 @@ class JsonUtilClient {
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) ((Number) m.get("storagePolicy")).longValue() :
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
-        replication, blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtilClient.string2Bytes(localName),
-        fileId, childrenNum, null,
-        storagePolicy, null);
+    return new HdfsFileStatus(len,
+        type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize,
+        mTime, aTime, permission, f, owner, group, symlink,
+        DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
+        null, storagePolicy, null);
   }
 
   static HdfsFileStatus[] toHdfsFileStatusArray(final Map<?, ?> json) {
@@ -465,9 +469,7 @@ class JsonUtilClient {
     aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
     String permString = (String) m.get("permission");
     if (permString != null) {
-      final FsPermission permission = toFsPermission(permString,
-          (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"),
-          (Boolean) m.get("ecBit"));
+      final FsPermission permission = toFsPermission(permString);
       aclStatusBuilder.setPermission(permission);
     }
     final List<?> entries = (List<?>) m.get("entries");

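For reviewers: WebHDFS only emits aclBit, encBit, and ecBit when they are true, so absent keys come back as null from the JSON map; that is what the null-tolerant checks above guard against. A compact equivalent using Boolean.TRUE.equals, sketched against a hand-built map rather than a real server response (the Flags enum is a stand-in for HdfsFileStatus.Flags):

    import java.util.EnumSet;
    import java.util.HashMap;
    import java.util.Map;

    public class WebHdfsFlagBits {
      enum Flags { HAS_ACL, HAS_CRYPT, HAS_EC }

      static EnumSet<Flags> toFlags(Map<?, ?> m) {
        EnumSet<Flags> f = EnumSet.noneOf(Flags.class);
        // Boolean.TRUE.equals treats a missing key (null) and an explicit
        // false the same way, so no separate null check is needed.
        if (Boolean.TRUE.equals(m.get("aclBit"))) { f.add(Flags.HAS_ACL); }
        if (Boolean.TRUE.equals(m.get("encBit"))) { f.add(Flags.HAS_CRYPT); }
        if (Boolean.TRUE.equals(m.get("ecBit"))) { f.add(Flags.HAS_EC); }
        return f;
      }

      public static void main(String[] args) {
        Map<String, Object> m = new HashMap<>();
        m.put("permission", "644");
        m.put("aclBit", true);           // the server omits the key when false
        System.out.println(toFlags(m));  // [HAS_ACL]
      }
    }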
+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java

@@ -32,7 +32,13 @@ public class WebHdfsConstants {
     FILE, DIRECTORY, SYMLINK;
 
     static PathType valueOf(HdfsFileStatus status) {
-      return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE;
+      if (status.isDirectory()) {
+        return DIRECTORY;
+      }
+      if (status.isSymlink()) {
+        return SYMLINK;
+      }
+      return FILE;
     }
   }
 }

+ 5 - 11
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1016,15 +1016,7 @@ public class WebHdfsFileSystem extends FileSystem
   public FileStatus getFileStatus(Path f) throws IOException {
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
-    return makeQualified(getHdfsFileStatus(f), f);
-  }
-
-  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
-    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
-        f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
-        f.getPermission(), f.getOwner(), f.getGroup(),
-        f.isSymlink() ? new Path(f.getSymlink()) : null,
-        f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
+    return getHdfsFileStatus(f).makeQualified(getUri(), f);
   }
 
   @Override
@@ -1507,6 +1499,7 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
 
+    final URI fsUri = getUri();
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     return new FsPathResponseRunner<FileStatus[]>(op, f) {
       @Override
@@ -1515,7 +1508,7 @@ public class WebHdfsFileSystem extends FileSystem
             JsonUtilClient.toHdfsFileStatusArray(json);
         final FileStatus[] statuses = new FileStatus[hdfsStatuses.length];
         for (int i = 0; i < hdfsStatuses.length; i++) {
-          statuses[i] = makeQualified(hdfsStatuses[i], f);
+          statuses[i] = hdfsStatuses[i].makeQualified(fsUri, f);
         }
 
         return statuses;
@@ -1541,10 +1534,11 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }.run();
     // Qualify the returned FileStatus array
+    final URI fsUri = getUri();
     final HdfsFileStatus[] statuses = listing.getPartialListing();
     FileStatus[] qualified = new FileStatus[statuses.length];
     for (int i = 0; i < statuses.length; i++) {
-      qualified[i] = makeQualified(statuses[i], f);
+      qualified[i] = statuses[i].makeQualified(fsUri, f);
     }
     return new DirectoryEntries(qualified, listing.getLastName(),
         listing.hasMore());

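For reviewers: qualification now lives on HdfsFileStatus itself instead of a per-filesystem private helper. A sketch of the new call pattern, built with the 17-argument constructor as it appears elsewhere in this patch; the URI and path names are made up:

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class MakeQualifiedSketch {
      public static void main(String[] args) {
        HdfsFileStatus stat = new HdfsFileStatus(1001L, false, 3, 1L << 26,
            0L, 0L, new FsPermission((short) 0644),
            EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group",
            null, DFSUtilClient.string2Bytes("zot"), -1L, 0, null,
            (byte) 0, null);
        // Qualify against the filesystem URI and the listing's parent path,
        // as the listStatus loop above now does.
        FileStatus qualified = stat.makeQualified(
            URI.create("webhdfs://nn:50070"), new Path("/dingos"));
        System.out.println(qualified.getPath());  // webhdfs://nn:50070/dingos/zot
      }
    }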
+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto

@@ -21,7 +21,12 @@ option java_outer_classname = "AclProtos";
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
-import "hdfs.proto";
+/**
+ * File or Directory permission - same spec as posix
+ */
+message FsPermissionProto {
+  required uint32 perm = 1;       // Actually a short - only 16 bits used
+}
 
 message AclEntryProto {
   enum AclEntryScopeProto {

+ 9 - 7
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto

@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
 import "Security.proto";
+import "acl.proto";
 
 /**
 * Extended block identifies a block
@@ -196,13 +197,6 @@ message CorruptFileBlocksProto {
  required string   cookie = 2;
 }
 
-/**
- * File or Directory permision - same spec as posix
- */
-message FsPermissionProto {
-  required uint32 perm = 1;       // Actually a short - only 16bits used
-}
-
 /**
  * Types of recognized storage media.
  */
@@ -388,6 +382,11 @@ message HdfsFileStatusProto {
     IS_FILE = 2;
     IS_SYMLINK = 3;
   }
+  enum Flags {
+    HAS_ACL   = 0x01; // has ACLs
+    HAS_CRYPT = 0x02; // encrypted
+    HAS_EC    = 0x04; // erasure coded
+  }
   required FileType fileType = 1;
   required bytes path = 2;          // local name of inode encoded java UTF8
   required uint64 length = 3;
@@ -415,6 +414,9 @@ message HdfsFileStatusProto {
 
   // Optional field for erasure coding
   optional ErasureCodingPolicyProto ecPolicy = 17;
+
+  // Set of flags
+  optional uint32 flags = 18 [default = 0];
 }
 
 /**

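For reviewers: because flags is optional with default = 0, a status written by a server that predates this change parses on a new client as "no ACL, not encrypted, not erasure coded", and the bitmask leaves room for new flags without another field. A sketch, assuming the generated HdfsProtos and AclProtos classes from this patch are on the classpath:

    import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;

    import com.google.protobuf.ByteString;

    public class FlagsDefaultSketch {
      public static void main(String[] args) throws Exception {
        // An "old" writer never sets field 18.
        HdfsFileStatusProto oldStatus = HdfsFileStatusProto.newBuilder()
            .setFileType(FileType.IS_FILE)
            .setPath(ByteString.copyFromUtf8("/old/writer"))
            .setLength(0)
            .setPermission(FsPermissionProto.newBuilder().setPerm(0644).build())
            .setOwner("hadoop").setGroup("hadoop")
            .setModificationTime(0L).setAccessTime(0L)
            .build();
        byte[] wire = oldStatus.toByteArray();
        // A new reader sees the declared default: 0, i.e. no flags set.
        System.out.println(HdfsFileStatusProto.parseFrom(wire).getFlags());  // 0
      }
    }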
+ 17 - 28
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -1047,18 +1047,7 @@ public class HttpFSFileSystem extends FileSystem
   /** Convert a string to a FsPermission object. */
   static FsPermission toFsPermission(JSONObject json) {
     final String s = (String) json.get(PERMISSION_JSON);
-    final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
-    final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
-    final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
-    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-    final boolean aBit = (aclBit != null) ? aclBit : false;
-    final boolean eBit = (encBit != null) ? encBit : false;
-    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
-    if (aBit || eBit || ecBit) {
-      return new FsPermissionExtension(perm, aBit, eBit, ecBit);
-    } else {
-      return perm;
-    }
+    return new FsPermission(Short.parseShort(s, 8));
   }
 
   private FileStatus createFileStatus(Path parent, JSONObject json) {
@@ -1073,23 +1062,23 @@ public class HttpFSFileSystem extends FileSystem
     long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
     long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
     short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
-    FileStatus fileStatus = null;
-
-    switch (type) {
-      case FILE:
-      case DIRECTORY:
-        fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
-                                    replication, blockSize, mTime, aTime,
-                                    permission, owner, group, path);
-        break;
-      case SYMLINK:
-        Path symLink = null;
-        fileStatus = new FileStatus(len, false,
-                                    replication, blockSize, mTime, aTime,
-                                    permission, owner, group, symLink,
-                                    path);
+
+    final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
+    final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
+    final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
+    final boolean aBit = (aclBit != null) ? aclBit : false;
+    final boolean eBit = (encBit != null) ? encBit : false;
+    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
+    if (aBit || eBit || ecBit) {
+      // include this for compatibility with 2.x
+      FsPermissionExtension deprecatedPerm =
+          new FsPermissionExtension(permission, aBit, eBit, ecBit);
+      return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+          replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
+          null, path, aBit, eBit, ecBit);
     }
-    return fileStatus;
+    return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+        replication, blockSize, mTime, aTime, permission, owner, group, path);
   }
 
   /**

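For reviewers: the HttpFS client keeps handing 2.x callers an FsPermissionExtension so code that inspects the permission bits still works, while 3.x callers read the attribute flags that are now first-class on FileStatus. A sketch of the 14-argument FileStatus constructor used above; the values are made up:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class AttrFlagsSketch {
      public static void main(String[] args) {
        FileStatus stat = new FileStatus(0L, true, 1, 0L, 0L, 0L,
            new FsPermission((short) 0750), "hadoop", "hadoop",
            null /* symlink */, new Path("/dir"),
            true /* hasAcl */, false /* isEncrypted */,
            false /* isErasureCoded */);
        // Attribute flags are read from FileStatus directly, independent of
        // the deprecated bits carried by FsPermissionExtension.
        System.out.println(stat.hasAcl());          // true
        System.out.println(stat.isEncrypted());     // false
        System.out.println(stat.isErasureCoded());  // false
      }
    }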
+ 10 - 9
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

@@ -852,10 +852,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     }
   }
 
-  private static void assertSameAclBit(FileSystem expected, FileSystem actual,
+  private static void assertSameAcls(FileSystem expected, FileSystem actual,
       Path path) throws IOException {
     FileStatus expectedFileStatus = expected.getFileStatus(path);
     FileStatus actualFileStatus = actual.getFileStatus(path);
+    assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl());
     assertEquals(actualFileStatus.getPermission().getAclBit(),
         expectedFileStatus.getPermission().getAclBit());
   }
@@ -888,31 +889,31 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     AclStatus proxyAclStat = proxyFs.getAclStatus(path);
     AclStatus httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
 
     httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
 
     httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
 
     httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
 
     httpfs.removeAcl(path);
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
   }
 
   /**
@@ -935,21 +936,21 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
     AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);
 
     /* Set a default ACL on the directory */
     httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);
 
     /* Remove the default ACL */
     httpfs.removeDefaultAcl(dir);
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);
   }
 
   private void testEncryption() throws Exception {

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -252,4 +252,16 @@
         <Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
         <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
     </Match>
+    <Match>
+        <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
+        <Method name="visitFile" />
+        <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
+    </Match>
+    <!-- HdfsFileStatus is user-facing, but HdfsLocatedFileStatus is not.
+         Defensible compatibility choices over time create odd corners. -->
+    <Match>
+        <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
+        <Field name="locations" />
+        <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED" />
+    </Match>
  </FindBugsFilter>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 /**

+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java

@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;

+ 25 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -47,6 +46,7 @@ import org.apache.hadoop.security.AccessControlException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;
 
 import static org.apache.hadoop.util.Time.now;
 
@@ -384,7 +384,6 @@ class FSDirStatAndListingOp {
    * @param child for a directory listing of the iip, else null
    * @param storagePolicy for the path or closest ancestor
    * @param needLocation if block locations need to be included or not
-   * @param includeStoragePolicy if storage policy should be returned
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
@@ -439,7 +438,19 @@ class FSDirStatAndListingOp {
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
+    EnumSet<HdfsFileStatus.Flags> flags =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
+    boolean hasAcl = nodeAttrs.getAclFeature() != null;
+    if (hasAcl) {
+      flags.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (isEncrypted) {
+      flags.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (isErasureCoded) {
+      flags.add(HdfsFileStatus.Flags.HAS_EC);
+    }
     return createFileStatus(
         size,
         node.isDirectory(),
@@ -447,7 +458,8 @@ class FSDirStatAndListingOp {
         blocksize,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
-        getPermissionForFileStatus(nodeAttrs, isEncrypted, isErasureCoded),
+        nodeAttrs.getFsPermission(),
+        flags,
         nodeAttrs.getUserName(),
         nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
@@ -460,42 +472,22 @@ class FSDirStatAndListingOp {
         loc);
   }
 
-  private static HdfsFileStatus createFileStatus(long length, boolean isdir,
-      int replication, long blocksize, long mtime,
-      long atime, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, int childrenNum,
-      FileEncryptionInfo feInfo, byte storagePolicy,
+  private static HdfsFileStatus createFileStatus(
+      long length, boolean isdir,
+      int replication, long blocksize, long mtime, long atime,
+      FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
       ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
     if (locations == null) {
       return new HdfsFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          childrenNum, feInfo, storagePolicy, ecPolicy);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
     } else {
       return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          locations, childrenNum, feInfo, storagePolicy, ecPolicy);
-    }
-  }
-
-  /**
-   * Returns an inode's FsPermission for use in an outbound FileStatus.  If the
-   * inode has an ACL or is for an encrypted file/dir, then this method will
-   * return an FsPermissionExtension.
-   *
-   * @param node INode to check
-   * @param isEncrypted boolean true if the file/dir is encrypted
-   * @return FsPermission from inode, with ACL bit on if the inode has an ACL
-   * and encrypted bit on if it represents an encrypted file/dir.
-   */
-  private static FsPermission getPermissionForFileStatus(
-      INodeAttributes node, boolean isEncrypted, boolean isErasureCoded) {
-    FsPermission perm = node.getFsPermission();
-    boolean hasAcl = node.getAclFeature() != null;
-    if (hasAcl || isEncrypted || isErasureCoded) {
-      perm = new FsPermissionExtension(perm, hasAcl,
-          isEncrypted, isErasureCoded);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
     }
-    return perm;
   }
 
   private static ContentSummary getContentSummaryInt(FSDirectory fsd,

+ 13 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -72,12 +72,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.RecursiveAction;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
@@ -135,11 +136,13 @@ public class FSDirectory implements Closeable {
 
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
-          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
+          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
 
   public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
-      new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+      new HdfsFileStatus(0, true, 0, 0, 0, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
 
@@ -383,12 +386,15 @@ public class FSDirectory implements Closeable {
    */
   void createReservedStatuses(long cTime) {
     HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null,
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
         DOT_INODES, -1L, 0, null,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
-        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
+        RAW, -1L, 0, null,
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     reservedStatuses = new HdfsFileStatus[] {inodes, raw};
   }
 

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -177,6 +177,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
@@ -371,9 +372,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     FileStatus status = null;
     if (stat != null) {
-      Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
+      Path symlink = stat.isSymlink()
+          ? new Path(DFSUtilClient.bytes2String(stat.getSymlinkInBytes()))
+          : null;
       Path path = new Path(src);
-      status = new FileStatus(stat.getLen(), stat.isDir(),
+      status = new FileStatus(stat.getLen(), stat.isDirectory(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
@@ -1430,7 +1431,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     } else if (!stat.isSymlink()) {
       throw new IOException("Path " + path + " is not a symbolic link");
     }
-    return stat.getSymlink();
+    return DFSUtilClient.bytes2String(stat.getSymlinkInBytes());
   }
 
 

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java

@@ -25,6 +25,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -35,6 +36,7 @@ import javax.management.ObjectName;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
@@ -345,8 +347,9 @@ public class SnapshotManager implements SnapshotStatsMXBean {
       if (userName == null || userName.equals(dir.getUserName())) {
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),
-            dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-            dir.getLocalNameBytes(), dir.getId(), 
+            dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            dir.getUserName(), dir.getGroupName(),
+            dir.getLocalNameBytes(), dir.getId(),
             dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
             dir.getDirectorySnapshottableFeature().getNumSnapshots(),
             dir.getDirectorySnapshottableFeature().getSnapshotQuota(),

+ 13 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.ipc.RemoteException;
@@ -110,21 +118,20 @@ public class JsonUtil {
     m.put("pathSuffix", status.getLocalName());
     m.put("type", WebHdfsConstants.PathType.valueOf(status));
     if (status.isSymlink()) {
-      m.put("symlink", status.getSymlink());
+      m.put("symlink", DFSUtilClient.bytes2String(status.getSymlinkInBytes()));
     }
-
     m.put("length", status.getLen());
     m.put("owner", status.getOwner());
     m.put("group", status.getGroup());
     FsPermission perm = status.getPermission();
     m.put("permission", toString(perm));
-    if (perm.getAclBit()) {
+    if (status.hasAcl()) {
       m.put("aclBit", true);
     }
-    if (perm.getEncryptedBit()) {
+    if (status.isEncrypted()) {
       m.put("encBit", true);
     }
-    if (perm.getErasureCodedBit()) {
+    if (status.isErasureCoded()) {
       m.put("ecBit", true);
     }
     m.put("accessTime", status.getAccessTime());
@@ -373,15 +380,6 @@ public class JsonUtil {
     FsPermission perm = status.getPermission();
     if (perm != null) {
       m.put("permission", toString(perm));
-      if (perm.getAclBit()) {
-        m.put("aclBit", true);
-      }
-      if (perm.getEncryptedBit()) {
-        m.put("encBit", true);
-      }
-      if (perm.getErasureCodedBit()) {
-        m.put("ecBit", true);
-      }
     }
     final Map<String, Map<String, Object>> finalMap =
         new TreeMap<String, Map<String, Object>>();

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -259,12 +259,14 @@ public class TestDFSClientRetries {
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -891,7 +891,8 @@ public class TestEncryptionZones {
       CipherSuite suite, CryptoProtocolVersion version) throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, new FileEncryptionInfo(suite,
             version, new byte[suite.getAlgorithmBlockSize()],
             new byte[suite.getAlgorithmBlockSize()],

+ 153 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java

@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.net.URI;
+
+import org.apache.hadoop.fs.FSProtos.FileStatusProto;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import com.google.protobuf.ByteString;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify compatible FileStatus/HdfsFileStatus serialization.
+ */
+public class TestFileStatusSerialization {
+
+  private static void checkFields(FileStatus expected, FileStatus actual) {
+    assertEquals(expected.getPath(), actual.getPath());
+    assertEquals(expected.isDirectory(), actual.isDirectory());
+    assertEquals(expected.getLen(), actual.getLen());
+    assertEquals(expected.getPermission(), actual.getPermission());
+    assertEquals(expected.getOwner(), actual.getOwner());
+    assertEquals(expected.getGroup(), actual.getGroup());
+    assertEquals(expected.getModificationTime(), actual.getModificationTime());
+    assertEquals(expected.getAccessTime(), actual.getAccessTime());
+    assertEquals(expected.getReplication(), actual.getReplication());
+    assertEquals(expected.getBlockSize(), actual.getBlockSize());
+  }
+
+  /**
+   * Test API backwards-compatibility with 2.x applications w.r.t. FsPermission.
+   */
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testFsPermissionCompatibility() throws Exception {
+    final int flagmask = 0x8;
+    // flags compatible with 2.x; fixed as constant in this test to ensure
+    // compatibility is maintained. New flags are not part of the contract this
+    // test verifies.
+    for (int i = 0; i < flagmask; ++i) {
+      FsPermission perm = FsPermission.createImmutable((short) 0013);
+      HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
+          .setFileType(FileType.IS_FILE)
+          .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos/zot"))
+          .setLength(4344)
+          .setPermission(PBHelperClient.convert(perm))
+          .setOwner("hadoop")
+          .setGroup("unqbbc")
+          .setModificationTime(12345678L)
+          .setAccessTime(87654321L)
+          .setBlockReplication(10)
+          .setBlocksize(1L << 33)
+          .setFlags(i);
+      HdfsFileStatus stat = PBHelperClient.convert(hspb.build());
+      stat.makeQualified(new URI("hdfs://foobar"), new Path("/dingos"));
+      assertEquals(new Path("hdfs://foobar/dingos/zot"), stat.getPath());
+
+      // verify deprecated FsPermissionExtension methods
+      FsPermission sp = stat.getPermission();
+      assertEquals(sp.getAclBit(), stat.hasAcl());
+      assertEquals(sp.getEncryptedBit(), stat.isEncrypted());
+      assertEquals(sp.getErasureCodedBit(), stat.isErasureCoded());
+
+      // verify Writable contract
+      DataOutputBuffer dob = new DataOutputBuffer();
+      stat.write(dob);
+      DataInputBuffer dib = new DataInputBuffer();
+      dib.reset(dob.getData(), 0, dob.getLength());
+      FileStatus fstat = new FileStatus();
+      fstat.readFields(dib);
+      checkFields(stat, fstat);
+
+      // FsPermissionExtension is used for HdfsFileStatus, not FileStatus;
+      // the attribute flags should still be preserved
+      assertEquals(sp.getAclBit(), fstat.hasAcl());
+      assertEquals(sp.getEncryptedBit(), fstat.isEncrypted());
+      assertEquals(sp.getErasureCodedBit(), fstat.isErasureCoded());
+    }
+  }
+  // TODO: parameterize this test for LocatedFileStatus, HttpFileStatus
+
+  @Test
+  public void testCrossSerializationProto() throws Exception {
+    FsPermission perm = FsPermission.getFileDefault();
+    for (FileType t : FileType.values()) {
+      HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
+          .setFileType(t)
+          .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos"))
+          .setLength(4344)
+          .setPermission(PBHelperClient.convert(perm))
+          .setOwner("hadoop")
+          .setGroup("unqbbc")
+          .setModificationTime(12345678L)
+          .setAccessTime(87654321L)
+          .setBlockReplication(10)
+          .setBlocksize(1L << 33);
+      if (FileType.IS_SYMLINK.equals(t)) {
+        hspb.setSymlink(ByteString.copyFromUtf8("hdfs://yaks/dingos"));
+      }
+      if (FileType.IS_FILE.equals(t)) {
+        hspb.setFileId(4544);
+      }
+      HdfsFileStatusProto hsp = hspb.build();
+      byte[] src = hsp.toByteArray();
+      FileStatusProto fsp = FileStatusProto.parseFrom(src);
+      assertEquals(hsp.getPath().toStringUtf8(), fsp.getPath());
+      assertEquals(hsp.getLength(), fsp.getLength());
+      assertEquals(hsp.getPermission().getPerm(),
+                   fsp.getPermission().getPerm());
+      assertEquals(hsp.getOwner(), fsp.getOwner());
+      assertEquals(hsp.getGroup(), fsp.getGroup());
+      assertEquals(hsp.getModificationTime(), fsp.getModificationTime());
+      assertEquals(hsp.getAccessTime(), fsp.getAccessTime());
+      assertEquals(hsp.getSymlink().toStringUtf8(), fsp.getSymlink());
+      assertEquals(hsp.getBlockReplication(), fsp.getBlockReplication());
+      assertEquals(hsp.getBlocksize(), fsp.getBlockSize());
+      assertEquals(hsp.getFileType().ordinal(), fsp.getFileType().ordinal());
+
+      // verify unknown fields preserved
+      byte[] dst = fsp.toByteArray();
+      HdfsFileStatusProto hsp2 = HdfsFileStatusProto.parseFrom(dst);
+      assertEquals(hsp, hsp2);
+      checkFields(PBHelperClient.convert(hsp), PBHelperClient.convert(hsp2));
+    }
+  }
+
+}

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java

@@ -30,6 +30,7 @@ import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -353,12 +354,14 @@ public class TestLease {
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

+ 10 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java

@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.io.IOException;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -141,6 +142,11 @@ public final class AclTestHelpers {
     }
   }
 
+  public static void assertPermission(FileSystem fs, Path pathToCheck,
+      short perm) throws IOException {
+    assertPermission(fs, pathToCheck, perm, (perm & (1 << 12)) != 0);
+  }
+
   /**
    * Asserts the value of the FsPermission bits on the inode of a specific path.
    *
@@ -150,10 +156,11 @@ public final class AclTestHelpers {
    * @throws IOException thrown if there is an I/O error
    */
   public static void assertPermission(FileSystem fs, Path pathToCheck,
-      short perm) throws IOException {
+      short perm, boolean hasAcl) throws IOException {
     short filteredPerm = (short)(perm & 01777);
-    FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
+    FileStatus stat = fs.getFileStatus(pathToCheck);
+    FsPermission fsPermission = stat.getPermission();
     assertEquals(filteredPerm, fsPermission.toShort());
-    assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
+    assertEquals(hasAcl, stat.hasAcl());
   }
 }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -48,6 +48,7 @@ import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -1355,7 +1356,8 @@ public class TestFsck {
     byte storagePolicy = 0;
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
-        blockSize, modTime, accessTime, perms, owner, group, symlink,
+        blockSize, modTime, accessTime, perms,
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, symlink,
         path, fileId, numChildren, null, storagePolicy, null);
     Result replRes = new ReplicationResult(conf);
     Result ecRes = new ErasureCodingResult(conf);

+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java

@@ -23,6 +23,7 @@ import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -51,10 +53,12 @@ import com.google.common.collect.Lists;
 
 public class TestJsonUtil {
   static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
-    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
+    return new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
         f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
         f.getPermission(), f.getOwner(), f.getGroup(),
-        f.isSymlink() ? new Path(f.getSymlink()) : null,
+        f.isSymlink()
+          ? new Path(DFSUtilClient.bytes2String(f.getSymlinkInBytes()))
+          : null,
         new Path(f.getFullName(parent)));
   }
 
@@ -63,7 +67,8 @@ public class TestJsonUtil {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
-        now, now + 10, new FsPermission((short) 0644), "user", "group",
+        now, now + 10, new FsPermission((short) 0644),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
         HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);