
HDFS-6302. Implement XAttr as a INode feature. Contributed by Yi Liu

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2006@1591345 13f79535-47bb-0310-9956-ffa450edef68
Uma Maheswara Rao G 11 years ago
parent
commit
a15ecb1975

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt

@@ -10,6 +10,8 @@ HDFS-2006 (Unreleased)
 
    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
 
+   HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -767,7 +767,7 @@ public class FSImageFormat {
       final long preferredBlockSize = in.readLong();
 
       return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
-          accessTime, replication, preferredBlockSize);
+          accessTime, replication, preferredBlockSize, null);
     }
 
     public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
@@ -787,10 +787,10 @@ public class FSImageFormat {
       final long nsQuota = in.readLong();
       final long dsQuota = in.readLong();
   
-      return nsQuota == -1L && dsQuota == -1L?
-          new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
+      return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
+          name, permissions, null, modificationTime, null)
         : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
-            null, modificationTime, nsQuota, dsQuota);
+            null, modificationTime, nsQuota, dsQuota, null);
     }
   
     private void loadFilesUnderConstruction(DataInput in,

+ 38 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -177,6 +177,44 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     nodeToUpdate.removeAclFeature();
     return nodeToUpdate;
   }
+
+  /**
+   * @param snapshotId
+   *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
+   *          from the given snapshot; otherwise, get the result from the
+   *          current inode.
+   * @return XAttrFeature
+   */  
+  abstract XAttrFeature getXAttrFeature(int snapshotId);
+  
+  @Override
+  public final XAttrFeature getXAttrFeature() {
+    return getXAttrFeature(Snapshot.CURRENT_STATE_ID);
+  }
+  
+  /**
+   * Set <code>XAttrFeature</code> 
+   */
+  abstract void addXAttrFeature(XAttrFeature xAttrFeature);
+  
+  final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) 
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latestSnapshotId);
+    nodeToUpdate.addXAttrFeature(xAttrFeature);
+    return nodeToUpdate;
+  }
+  
+  /**
+   * Remove <code>XAttrFeature</code> 
+   */
+  abstract void removeXAttrFeature();
+  
+  final INode removeXAttrFeature(int latestSnapshotId)
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latestSnapshotId);
+    nodeToUpdate.removeXAttrFeature();
+    return nodeToUpdate;
+  }
   
   /**
    * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
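
Outside the diff, the intended call pattern for these copy-on-write mutators can be sketched as follows. This is a hypothetical caller, not part of the patch; the class and method names other than the INode accessors added above are illustrative assumptions.

```java
// Hypothetical caller-side sketch (not part of this patch). It assumes it
// lives in org.apache.hadoop.hdfs.server.namenode, since the snapshot-aware
// mutators added above are package-private.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;

import com.google.common.collect.ImmutableList;

class XAttrUpdateSketch {
  /** Replace an inode's XAttrs, preserving the old state in the latest snapshot. */
  static void setXAttrs(INode inode, ImmutableList<XAttr> xAttrs,
      int latestSnapshotId) throws QuotaExceededException {
    if (inode.getXAttrFeature() != null) {
      // Features are replaced wholesale rather than mutated in place;
      // removeXAttrFeature() first calls recordModification() so the old
      // feature stays visible through the latest snapshot.
      inode.removeXAttrFeature(latestSnapshotId);
    }
    if (!xAttrs.isEmpty()) {
      inode.addXAttrFeature(new XAttrFeature(xAttrs), latestSnapshotId);
    }
  }
}
```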

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of an inode.
@@ -50,6 +51,9 @@ public interface INodeAttributes {
 
   /** @return the ACL feature. */
   public AclFeature getAclFeature();
+  
+  /** @return the XAttrs feature. */
+  public XAttrFeature getXAttrFeature();
 
   /** @return the modification time. */
   public long getModificationTime();
@@ -64,14 +68,17 @@ public interface INodeAttributes {
     private final AclFeature aclFeature;
     private final long modificationTime;
     private final long accessTime;
+    private XAttrFeature xAttrFeature;
 
     SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime, long accessTime) {
+        AclFeature aclFeature, long modificationTime, long accessTime, 
+        XAttrFeature xAttrFeature) {
       this.name = name;
       this.permission = PermissionStatusFormat.toLong(permissions);
       this.aclFeature = aclFeature;
       this.modificationTime = modificationTime;
       this.accessTime = accessTime;
+      this.xAttrFeature = xAttrFeature;
     }
 
     SnapshotCopy(INode inode) {
@@ -80,6 +87,7 @@ public interface INodeAttributes {
       this.aclFeature = inode.getAclFeature();
       this.modificationTime = inode.getModificationTime();
       this.accessTime = inode.getAccessTime();
+      this.xAttrFeature = inode.getXAttrFeature();
     }
 
     @Override
@@ -128,5 +136,10 @@ public interface INodeAttributes {
     public final long getAccessTime() {
       return accessTime;
     }
+    
+    @Override
+    public final XAttrFeature getXAttrFeature() {
+      return xAttrFeature;
+    }
   }
 }

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -35,8 +36,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
   public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
       implements INodeDirectoryAttributes {
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime) {
-      super(name, permissions, aclFeature, modificationTime, 0L);
+        AclFeature aclFeature, long modificationTime, 
+        XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
     }
 
     public SnapshotCopy(INodeDirectory dir) {
@@ -63,8 +65,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
 
     public CopyWithQuota(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long nsQuota,
-        long dsQuota) {
-      super(name, permissions, aclFeature, modificationTime);
+        long dsQuota, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
       this.nsQuota = nsQuota;
       this.dsQuota = dsQuota;
     }

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of a file.
@@ -42,8 +43,9 @@ public interface INodeFileAttributes extends INodeAttributes {
 
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize) {
-      super(name, permissions, aclFeature, modificationTime, accessTime);
+        short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, accessTime, 
+          xAttrsFeature);
 
       final long h = HeaderFormat.combineReplication(0L, replication);
       header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -228,6 +229,21 @@ public abstract class INodeReference extends INode {
   final void removeAclFeature() {
     referred.removeAclFeature();
   }
+  
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    return referred.getXAttrFeature(snapshotId);
+  }
+  
+  @Override
+  final void addXAttrFeature(XAttrFeature xAttrFeature) {
+    referred.addXAttrFeature(xAttrFeature);
+  }
+  
+  @Override
+  final void removeXAttrFeature() {
+    referred.removeXAttrFeature();
+  }
 
   @Override
   public final short getFsPermissionShort() {

+ 25 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -340,6 +341,30 @@ public abstract class INodeWithAdditionalFields extends INode
 
     addFeature(f);
   }
+  
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getXAttrFeature();
+    }
+
+    return getFeature(XAttrFeature.class);
+  }
+  
+  @Override
+  public void removeXAttrFeature() {
+    XAttrFeature f = getXAttrFeature();
+    Preconditions.checkNotNull(f);
+    removeFeature(f);
+  }
+  
+  @Override
+  public void addXAttrFeature(XAttrFeature f) {
+    XAttrFeature f1 = getXAttrFeature();
+    Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
+    
+    addFeature(f);
+  }
 
   public final Feature[] getFeatures() {
     return features;
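
The implementation above delegates to the generic feature machinery that INodeWithAdditionalFields already provides (getFeature, addFeature, removeFeature). As a rough illustration of that pattern only, not the actual Hadoop code, a linear scan over a small feature array looks like this:

```java
// Illustrative sketch of the feature-list pattern behind
// getFeature(XAttrFeature.class), addFeature(f) and removeFeature(f)
// used in the hunk above. Not the actual Hadoop implementation.
import java.util.Arrays;

class FeatureListSketch {
  interface Feature {}                     // stand-in for INode.Feature

  private Feature[] features = new Feature[0];

  @SuppressWarnings("unchecked")
  <T extends Feature> T getFeature(Class<T> clazz) {
    for (Feature f : features) {           // the list is tiny, so O(n) is fine
      if (clazz.isInstance(f)) {
        return (T) f;
      }
    }
    return null;
  }

  void addFeature(Feature f) {
    Feature[] next = Arrays.copyOf(features, features.length + 1);
    next[features.length] = f;
    features = next;
  }

  void removeFeature(Feature f) {
    // Assumes f is present, as the Preconditions check above guarantees.
    Feature[] next = new Feature[features.length - 1];
    int i = 0;
    for (Feature existing : features) {
      if (existing != f) {
        next[i++] = existing;
      }
    }
    features = next;
  }
}
```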

+ 43 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Feature for extended attributes.
+ */
+@InterfaceAudience.Private
+public class XAttrFeature implements INode.Feature {
+  public static final ImmutableList<XAttr> EMPTY_ENTRY_LIST =
+      ImmutableList.of();
+
+  private final ImmutableList<XAttr> xAttrs;
+
+  public XAttrFeature(ImmutableList<XAttr> xAttrs) {
+    this.xAttrs = xAttrs;
+  }
+
+  public ImmutableList<XAttr> getXAttrs() {
+    return xAttrs;
+  }
+}
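
For reference, building the new feature follows the same steps as the unit test added at the end of this patch. A minimal sketch, assuming the returned feature is later attached to an inode obtained elsewhere:

```java
// Usage sketch (not part of the patch): building an XAttrFeature from a
// single user-namespace attribute, mirroring the TestINodeFile case below.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.XAttr;

import com.google.common.collect.ImmutableList;

class XAttrFeatureUsageSketch {
  static XAttrFeature userXAttrFeature(String name, byte[] value) {
    XAttr xAttr = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)
        .setName(name)
        .setValue(value)
        .build();
    // The list is immutable; changing an inode's XAttrs means building a
    // new feature and swapping it in via addXAttrFeature/removeXAttrFeature.
    return new XAttrFeature(ImmutableList.of(xAttr));
  }
}
```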

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java

@@ -219,7 +219,7 @@ public class FSImageFormatPBSnapshot {
           copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
               .toByteArray(), permission, acl, fileInPb.getModificationTime(),
               fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
-              fileInPb.getPreferredBlockSize());
+              fileInPb.getPreferredBlockSize(), null);
         }
 
         FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
@@ -316,10 +316,10 @@ public class FSImageFormatPBSnapshot {
               && dirCopyInPb.getDsQuota() == -1;
 
           copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
-              permission, acl, modTime)
+              permission, acl, modTime, null)
               : new INodeDirectoryAttributes.CopyWithQuota(name, permission,
                   acl, modTime, dirCopyInPb.getNsQuota(),
-                  dirCopyInPb.getDsQuota());
+                  dirCopyInPb.getDsQuota(), null);
         }
         // load created list
         List<INode> clist = loadCreatedList(in, dir,

+ 15 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java

@@ -32,8 +32,10 @@ import org.apache.hadoop.hdfs.server.namenode.AclFeature;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
+import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
@@ -142,9 +144,20 @@ public class Snapshot implements Comparable<byte[]> {
   /** The root directory of the snapshot. */
   static public class Root extends INodeDirectory {
     Root(INodeDirectory other) {
-      // Always preserve ACL.
+      // Always preserve ACL, XAttr.
       super(other, false, Lists.newArrayList(
-        Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
+        Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {
+
+          @Override
+          public boolean apply(Feature input) {
+            if (AclFeature.class.isInstance(input) 
+                || XAttrFeature.class.isInstance(input)) {
+              return true;
+            }
+            return false;
+          }
+          
+        }))
         .toArray(new Feature[0]));
     }
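
The anonymous Predicate keeps a feature only if it is an AclFeature or an XAttrFeature. An equivalent, more compact formulation using Guava's built-in predicates would look like the sketch below; this is a possible simplification, not what the patch does, and it would additionally need an import of com.google.common.base.Predicates.

```java
// Possible simplification of the constructor body above using Guava's
// Predicates.or/instanceOf instead of an anonymous Predicate<Feature>.
super(other, false, Lists.newArrayList(
    Iterables.filter(Arrays.asList(other.getFeatures()),
        Predicates.or(
            Predicates.instanceOf(AclFeature.class),
            Predicates.instanceOf(XAttrFeature.class))))
    .toArray(new Feature[0]));
```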
 

+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -67,6 +68,8 @@ import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import com.google.common.collect.ImmutableList;
+
 public class TestINodeFile {
   public static final Log LOG = LogFactory.getLog(TestINodeFile.class);
 
@@ -1079,4 +1082,22 @@ public class TestINodeFile {
     file.toCompleteFile(Time.now());
     assertFalse(file.isUnderConstruction());
   }
+
+  @Test
+  public void testXAttrFeature() {
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
+    ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
+    XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    builder.add(xAttr);
+    XAttrFeature f = new XAttrFeature(builder.build());
+    inf.addXAttrFeature(f);
+    XAttrFeature f1 = inf.getXAttrFeature();
+    assertEquals(xAttr, f1.getXAttrs().get(0));
+    inf.removeXAttrFeature();
+    f1 = inf.getXAttrFeature();
+    assertEquals(f1, null);
+  }
 }