Browse source code

HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA. Contributed by Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2006@1593717 13f79535-47bb-0310-9956-ffa450edef68
Uma Maheswara Rao G, 11 years ago — parent commit 298c659fa6

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt

@@ -42,3 +42,6 @@ HDFS-2006 (Unreleased)
     HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
 
     HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
+
+    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
+    (umamahesh)

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -2935,12 +2935,12 @@ public class FSDirectory implements Closeable {
     return xAttrs;
   }
   
-  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
-      throws IOException {
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+      boolean logRetryCache) throws IOException {
     writeLock();
     try {
       unprotectedSetXAttr(src, xAttr, flag);
-      fsImage.getEditLog().logSetXAttr(src, xAttr);
+      fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
     } finally {
       writeUnlock();
     }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -1054,10 +1054,11 @@ public class FSEditLog implements LogsPurgeable {
     logEdit(op);
   }
   
-  void logSetXAttr(String src, XAttr xAttr) {
+  void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) {
     final SetXAttrOp op = SetXAttrOp.getInstance();
     op.src = src;
     op.xAttr = xAttr;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
   

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -806,6 +806,9 @@ public class FSEditLogLoader {
       SetXAttrOp setXAttrOp = (SetXAttrOp) op;
       fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr, 
           EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
+      }
       break;
     }
     case OP_REMOVE_XATTR: {

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -3557,6 +3557,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttr = PBHelper.convertXAttr(p.getXAttr());
+      readRpcIds(in, logVersion);
     }
 
     @Override
@@ -3567,6 +3568,8 @@ public abstract class FSEditLogOp {
       }
       b.setXAttr(PBHelper.convertXAttrProto(xAttr));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
     @Override

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -7731,7 +7731,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     boolean success = false;
     try {
-      setXAttrInt(src, xAttr, flag);
+      setXAttrInt(src, xAttr, flag, cacheEntry != null);
       success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "setXAttr", src);
@@ -7741,8 +7741,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
   }
   
-  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
-      throws IOException {
+  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+      boolean logRetryCache) throws IOException {
     nnConf.checkXAttrsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -7757,7 +7757,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      dir.setXAttr(src, xAttr, flag);
+      dir.setXAttr(src, xAttr, flag, logRetryCache);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java

@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
     
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(20, cacheSet.size());
+    assertEquals(22, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();