HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log. Contributed by Yi Liu.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2006@1592526 13f79535-47bb-0310-9956-ffa450edef68
Uma Maheswara Rao G, 11 years ago
commit
71bc04ddc8

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt

@@ -20,6 +20,9 @@ HDFS-2006 (Unreleased)
 
     HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
 
+    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
+    (Yi Liu via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 4 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -356,7 +356,6 @@ public class FSDirectory implements Closeable {
                             String path, 
                             PermissionStatus permissions,
                             List<AclEntry> aclEntries,
-                            List<XAttr> xAttrs,
                             short replication,
                             long modificationTime,
                             long atime,
@@ -383,10 +382,6 @@ public class FSDirectory implements Closeable {
           AclStorage.updateINodeAcl(newNode, aclEntries,
             Snapshot.CURRENT_STATE_ID);
         }
-        if (xAttrs != null) {
-          XAttrStorage.updateINodeXAttrs(newNode, 
-              xAttrs, Snapshot.CURRENT_STATE_ID);
-        }
         return newNode;
       }
     } catch (IOException e) {
@@ -2894,9 +2889,8 @@ public class FSDirectory implements Closeable {
   void removeXAttr(String src, XAttr xAttr) throws IOException {
     writeLock();
     try {
-      unprotectedRemoveXAttr(src, xAttr);
-      //TODO: Recording XAttrs modifications to edit log will be 
-      //implemented as part of HDFS-6301
+      List<XAttr> newXAttrs = unprotectedRemoveXAttr(src, xAttr);
+      fsImage.getEditLog().logSetXAttrs(src, newXAttrs);
     } finally {
       writeUnlock();
     }
@@ -2936,9 +2930,8 @@ public class FSDirectory implements Closeable {
       throws IOException {
     writeLock();
     try {
-      unprotectedSetXAttr(src, xAttr, flag);
-      //TODO: Recording XAttrs modifications to edit log will be 
-      //implemented as part of HDFS-6301
+      List<XAttr> newXAttrs = unprotectedSetXAttr(src, xAttr, flag);
+      fsImage.getEditLog().logSetXAttrs(src, newXAttrs);
     } finally {
       writeUnlock();
     }

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -80,6 +81,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrsOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -1050,6 +1052,13 @@ public class FSEditLog implements LogsPurgeable {
     op.aclEntries = entries;
     logEdit(op);
   }
+  
+  void logSetXAttrs(String src, List<XAttr> xAttrs) {
+    final SetXAttrsOp op = SetXAttrsOp.getInstance();
+    op.src = src;
+    op.xAttrs = xAttrs;
+    logEdit(op);
+  }
 
   /**
    * Get all the journals this edit log is currently operating on.

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrsOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -350,7 +351,7 @@ public class FSEditLogLoader {
         inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
             lastInodeId);
         newFile = fsDir.unprotectedAddFile(inodeId,
-            path, addCloseOp.permissions, addCloseOp.aclEntries, null,
+            path, addCloseOp.permissions, addCloseOp.aclEntries,
             replication, addCloseOp.mtime, addCloseOp.atime,
             addCloseOp.blockSize, true, addCloseOp.clientName,
             addCloseOp.clientMachine);
@@ -798,6 +799,11 @@ public class FSEditLogLoader {
       fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
       break;
     }
+    case OP_SET_XATTRS: {
+      SetXAttrsOp setXAttrsOp = (SetXAttrsOp) op;
+      fsDir.unprotectedUpdateXAttrs(setXAttrsOp.src, setXAttrsOp.xAttrs);
+      break;
+    }
     default:
       throw new IOException("Invalid operation read " + op.opCode);
     }
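
Because FSDirectory now logs the complete post-modification XAttr list (see the FSDirectory hunk above) rather than a delta, replaying OP_SET_XATTRS can simply overwrite whatever XAttrs the inode currently carries, which keeps replay idempotent. unprotectedUpdateXAttrs itself is not part of this diff; a minimal hedged sketch of such a method, reusing the XAttrStorage helper that unprotectedAddFile dropped above (getINode and hasWriteLock are assumed FSDirectory helpers):

  // Hedged sketch only -- the real FSDirectory method may differ.
  void unprotectedUpdateXAttrs(String src, List<XAttr> xAttrs)
      throws IOException {
    assert hasWriteLock();
    INode inode = getINode(src);                   // resolve target inode (assumed helper)
    XAttrStorage.updateINodeXAttrs(inode, xAttrs,  // overwrite, do not merge
        Snapshot.CURRENT_STATE_ID);
  }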

+ 89 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -54,6 +54,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWN
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTRS;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;
@@ -79,12 +80,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -95,6 +98,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.util.XMLUtils;
@@ -186,6 +190,7 @@ public abstract class FSEditLogOp {
           OP_ROLLING_UPGRADE_START, "start"));
       inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
           OP_ROLLING_UPGRADE_FINALIZE, "finalize"));
+      inst.put(OP_SET_XATTRS, new SetXAttrsOp());
     }
     
     public FSEditLogOp get(FSEditLogOpCodes opcode) {
@@ -3490,6 +3495,51 @@ public abstract class FSEditLogOp {
       return builder.toString();
     }
   }
+  
+  static class SetXAttrsOp extends FSEditLogOp {
+    List<XAttr> xAttrs = Lists.newArrayList();
+    String src;
+    
+    private SetXAttrsOp() {
+      super(OP_SET_XATTRS);
+    }
+    
+    static SetXAttrsOp getInstance() {
+      return new SetXAttrsOp();
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion) throws IOException {
+      XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
+      src = p.getSrc();
+      xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+    }
+
+    @Override
+    public void writeFields(DataOutputStream out) throws IOException {
+      XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
+      if (src != null) {
+        b.setSrc(src);
+      }
+      b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+      b.build().writeDelimitedTo(out);
+    }
+
+    @Override
+    protected void toXml(ContentHandler contentHandler) throws SAXException {
+      XMLUtils.addSaxString(contentHandler, "SRC", src);
+      appendXAttrsToXml(contentHandler, xAttrs);
+    }
+
+    @Override
+    void fromXml(Stanza st) throws InvalidXmlException {
+      src = st.getValue("SRC");
+      xAttrs = readXAttrsFromXml(st);
+      if (xAttrs == null) {
+        xAttrs = Lists.newArrayList();
+      }
+    }
+  }
 
   static class SetAclOp extends FSEditLogOp {
     List<AclEntry> aclEntries = Lists.newArrayList();
@@ -4106,4 +4156,43 @@ public abstract class FSEditLogOp {
     }
     return aclEntries;
   }
+  
+  private static void appendXAttrsToXml(ContentHandler contentHandler,
+      List<XAttr> xAttrs) throws SAXException {
+    for (XAttr a : xAttrs) {
+      contentHandler.startElement("", "", "XATTR", new AttributesImpl());
+      XMLUtils.addSaxString(contentHandler, "NAMESPACE", 
+          a.getNameSpace().toString());
+      XMLUtils.addSaxString(contentHandler, "NAME", a.getName());
+      try {
+        XMLUtils.addSaxString(contentHandler, "VALUE", 
+            XAttrCodec.encodeValue(a.getValue(), XAttrCodec.HEX));
+      } catch (IOException e) {
+        throw new SAXException(e);
+      }
+      contentHandler.endElement("", "", "XATTR");
+    }
+  }
+  
+  private static List<XAttr> readXAttrsFromXml(Stanza st) 
+      throws InvalidXmlException {
+    List<XAttr> xAttrs = Lists.newArrayList();
+    if (!st.hasChildren("XATTR")) {
+      return null;
+    }
+    
+    try {
+      List<Stanza> stanzas = st.getChildren("XATTR");
+      for (Stanza s : stanzas) {
+        XAttr a = new XAttr.Builder()
+          .setNameSpace(XAttr.NameSpace.valueOf(s.getValue("NAMESPACE")))
+          .setName(s.getValue("NAME"))
+          .setValue(XAttrCodec.decodeValue(s.getValue("VALUE"))).build();
+        xAttrs.add(a);
+      }
+      return xAttrs;
+    } catch (IOException e) {
+      throw new InvalidXmlException(e.toString());
+    }
+  }
 }
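
On disk the op body is a single length-delimited XAttrEditLogProto (writeDelimitedTo/parseDelimitedFrom above); in the offline XML edit-log dump, each value is instead hex-encoded through XAttrCodec. A small hedged example of that value round-trip (the "0x..." output form is assumed from XAttrCodec's HEX prefix):

  import java.io.IOException;
  import org.apache.hadoop.fs.XAttrCodec;

  class XAttrHexDemo {
    public static void main(String[] args) throws IOException {
      byte[] value = {0x31, 0x32, 0x33};
      String hex = XAttrCodec.encodeValue(value, XAttrCodec.HEX); // "0x313233" (assumed form)
      byte[] back = XAttrCodec.decodeValue(hex);                  // {0x31, 0x32, 0x33}
      System.out.println(hex);
    }
  }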

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java

@@ -70,6 +70,7 @@ public enum FSEditLogOpCodes {
   OP_SET_ACL                    ((byte) 40),
   OP_ROLLING_UPGRADE_START      ((byte) 41),
   OP_ROLLING_UPGRADE_FINALIZE   ((byte) 42),
+  OP_SET_XATTRS                 ((byte) 43),
 
   // Note that the current range of the valid OP code is 0~127
   OP_INVALID                    ((byte) -1);

+ 73 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
@@ -49,7 +50,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
@@ -74,6 +78,14 @@ public final class FSImageFormatPBINode {
       .values();
   private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
       .values();
+  
+  private static final int XATTR_NAMESPACE_MASK = 3;
+  private static final int XATTR_NAMESPACE_OFFSET = 30;
+  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
+  private static final int XATTR_NAME_OFFSET = 6;
+  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES = 
+      XAttr.NameSpace.values();
+  
 
   private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
 
@@ -103,6 +115,25 @@ public final class FSImageFormatPBINode {
       }
       return b.build();
     }
+    
+    public static ImmutableList<XAttr> loadXAttrs(
+        XAttrFeatureProto proto, final String[] stringTable) {
+      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
+      for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
+        int v = xAttrCompactProto.getName();
+        int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
+        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
+        String name = stringTable[nid];
+        byte[] value = null;
+        if (xAttrCompactProto.getValue() != null) {
+          value = xAttrCompactProto.getValue().toByteArray();
+        }
+        b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
+            .setName(name).setValue(value).build());
+      }
+      
+      return b.build();
+    }
 
     public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
         LoaderContext state) {
@@ -123,6 +154,10 @@ public final class FSImageFormatPBINode {
         dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
             state.getStringTable())));
       }
+      if (d.hasXAttrs()) {
+        dir.addXAttrFeature(new XAttrFeature(
+            loadXAttrs(d.getXAttrs(), state.getStringTable())));
+      }
       return dir;
     }
 
@@ -255,6 +290,11 @@ public final class FSImageFormatPBINode {
         file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
             state.getStringTable())));
       }
+      
+      if (f.hasXAttrs()) {
+        file.addXAttrFeature(new XAttrFeature(
+            loadXAttrs(f.getXAttrs(), state.getStringTable())));
+      }
 
       // under-construction information
       if (f.hasFileUC()) {
@@ -295,6 +335,11 @@ public final class FSImageFormatPBINode {
       }
       dir.rootDir.cloneModificationTime(root);
       dir.rootDir.clonePermissionStatus(root);
+      // root dir supports having extended attributes according to POSIX
+      final XAttrFeature f = root.getXAttrFeature();
+      if (f != null) {
+        dir.rootDir.addXAttrFeature(f);
+      }
     }
   }
 
@@ -320,6 +365,26 @@ public final class FSImageFormatPBINode {
       }
       return b;
     }
+    
+    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
+        final SaverContext.DeduplicationMap<String> stringMap) {
+      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
+      for (XAttr a : f.getXAttrs()) {
+        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
+            newBuilder();
+        int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) << 
+            XATTR_NAMESPACE_OFFSET) 
+            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) << 
+                XATTR_NAME_OFFSET);
+        xAttrCompactBuilder.setName(v);
+        if (a.getValue() != null) {
+          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
+        }
+        b.addXAttrs(xAttrCompactBuilder.build());
+      }
+      
+      return b;
+    }
 
     public static INodeSection.INodeFile.Builder buildINodeFile(
         INodeFileAttributes file, final SaverContext state) {
@@ -334,6 +399,10 @@ public final class FSImageFormatPBINode {
       if (f != null) {
         b.setAcl(buildAclEntries(f, state.getStringMap()));
       }
+      XAttrFeature xAttrFeature = file.getXAttrFeature();
+      if (xAttrFeature != null) {
+        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
+      }
       return b;
     }
 
@@ -350,6 +419,10 @@ public final class FSImageFormatPBINode {
       if (f != null) {
         b.setAcl(buildAclEntries(f, state.getStringMap()));
       }
+      XAttrFeature xAttrFeature = dir.getXAttrFeature();
+      if (xAttrFeature != null) {
+        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
+      }
       return b;
     }
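
The compact fsimage encoding above packs each XAttr name into one int: bits 30-31 carry the namespace ordinal and bits 6-29 carry the 24-bit id of the name in the deduplicated string table (bits 0-5 are currently unused). A worked example using the constants from this hunk; the concrete string-table id is illustrative:

  class XAttrPackingDemo {
    // Constants copied from the hunk above.
    static final int XATTR_NAMESPACE_MASK = 3;
    static final int XATTR_NAMESPACE_OFFSET = 30;
    static final int XATTR_NAME_MASK = (1 << 24) - 1;
    static final int XATTR_NAME_OFFSET = 6;

    public static void main(String[] args) {
      int ns = 0;   // e.g. XAttr.NameSpace.USER.ordinal()
      int nid = 42; // string-table id of the name (illustrative)
      int packed = ((ns & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
          | ((nid & XATTR_NAME_MASK) << XATTR_NAME_OFFSET);  // 42 << 6 = 0xA80
      // Unpack exactly as loadXAttrs() does:
      int nid2 = (packed >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;           // 42
      int ns2 = (packed >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;  // 0
      System.out.println("packed=" + packed + " ns=" + ns2 + " nid=" + nid2);
    }
  }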
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java

@@ -64,7 +64,8 @@ public class NameNodeLayoutVersion {
    */
   public static enum Feature implements LayoutFeature {
     ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
-    EDITLOG_LENGTH(-56, "Add length field to every edit log op");
+    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
+    XATTRS(-57, "Extended attributes");
     
     private final FeatureInfo info;
 

+ 14 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java

@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 
 import com.google.common.base.Preconditions;
@@ -215,11 +216,16 @@ public class FSImageFormatPBSnapshot {
             acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
                 fileInPb.getAcl(), state.getStringTable()));
           }
+          XAttrFeature xAttrs = null;
+          if (fileInPb.hasXAttrs()) {
+            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
+                fileInPb.getXAttrs(), state.getStringTable()));
+          }
 
           copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
               .toByteArray(), permission, acl, fileInPb.getModificationTime(),
               fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
-              fileInPb.getPreferredBlockSize(), null);
+              fileInPb.getPreferredBlockSize(), xAttrs);
         }
 
         FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
@@ -310,16 +316,21 @@ public class FSImageFormatPBSnapshot {
             acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
                 dirCopyInPb.getAcl(), state.getStringTable()));
           }
+          XAttrFeature xAttrs = null;
+          if (dirCopyInPb.hasXAttrs()) {
+            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
+                dirCopyInPb.getXAttrs(), state.getStringTable()));
+          }
 
           long modTime = dirCopyInPb.getModificationTime();
           boolean noQuota = dirCopyInPb.getNsQuota() == -1
               && dirCopyInPb.getDsQuota() == -1;
 
           copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
-              permission, acl, modTime, null)
+              permission, acl, modTime, xAttrs)
               : new INodeDirectoryAttributes.CopyWithQuota(name, permission,
                   acl, modTime, dirCopyInPb.getNsQuota(),
-                  dirCopyInPb.getDsQuota(), null);
+                  dirCopyInPb.getDsQuota(), xAttrs);
         }
         // load created list
         List<INode> clist = loadCreatedList(in, dir,

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -73,6 +73,7 @@ public class TestSafeMode {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();      
     fs = cluster.getFileSystem();
@@ -381,7 +382,19 @@ public class TestSafeMode {
       public void run(FileSystem fs) throws IOException {
         fs.setAcl(file1, Lists.<AclEntry>newArrayList());
       }});
-
+    
+    runFsFun("setXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.setXAttr(file1, "user.a1", null);
+      }});
+    
+    runFsFun("removeXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeXAttr(file1, "user.a1");
+      }});
+    
     try {
       DFSTestUtil.readFile(fs, file1);
     } catch (IOException ioe) {

+ 127 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java

@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 1) save xattrs, restart NN, assert xattrs reloaded from edit log, 
+ * 2) save xattrs, create new checkpoint, restart NN, assert xattrs 
+ * reloaded from fsimage
+ */
+public class TestFSImageWithXAttr {
+  private static Configuration conf;
+  private static MiniDFSCluster cluster;
+  
+  //xattrs
+  private static final String name1 = "user.a1";
+  private static final byte[] value1 = {0x31, 0x32, 0x33};
+  private static final byte[] newValue1 = {0x31, 0x31, 0x31};
+  private static final String name2 = "user.a2";
+  private static final byte[] value2 = {0x37, 0x38, 0x39};
+
+  @BeforeClass
+  public static void setUp() throws IOException {
+    conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    cluster.shutdown();
+  }
+
+  private void testXAttr(boolean persistNamespace) throws IOException {
+    Path path = new Path("/p");
+    DistributedFileSystem fs = cluster.getFileSystem();
+    fs.create(path).close();
+    
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    
+    restart(fs, persistNamespace);
+    
+    Map<String, byte[]> xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 2);
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    
+    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
+    
+    restart(fs, persistNamespace);
+    
+    xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 2);
+    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+    fs.removeXAttr(path, name1);
+    fs.removeXAttr(path, name2);
+
+    restart(fs, persistNamespace);
+    xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 0);
+  }
+
+  @Test
+  public void testPersistXAttr() throws IOException {
+    testXAttr(true);
+  }
+
+  @Test
+  public void testXAttrEditLog() throws IOException {
+    testXAttr(false);
+  }
+
+  /**
+   * Restart the NameNode, optionally saving a new checkpoint.
+   *
+   * @param fs DistributedFileSystem used for saving namespace
+   * @param persistNamespace boolean true to save a new checkpoint
+   * @throws IOException if restart fails
+   */
+  private void restart(DistributedFileSystem fs, boolean persistNamespace)
+      throws IOException {
+    if (persistNamespace) {
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.saveNamespace();
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    }
+
+    cluster.restartNameNode();
+    cluster.waitActive();
+  }
+
+}
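
For reference, a minimal hedged sketch of the client-side XAttr API these tests exercise (the path and attribute values are illustrative; assumes a running cluster with DFS_NAMENODE_XATTRS_ENABLED_KEY set as above):

  // Not part of this commit -- a usage sketch of the API under test.
  static void xattrDemo(DistributedFileSystem fs) throws IOException {
    Path p = new Path("/demo");
    fs.create(p).close();
    fs.setXAttr(p, "user.a1", new byte[]{0x31, 0x32, 0x33},
        EnumSet.of(XAttrSetFlag.CREATE));         // create
    Map<String, byte[]> xattrs = fs.getXAttrs(p); // read back
    fs.removeXAttr(p, "user.a1");                 // delete
  }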