
HDFS-2618. Implement protobuf service for NamenodeProtocol. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1210719 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 13 years ago
parent commit 0a713035f2

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -15,6 +15,8 @@ Trunk (unreleased changes)
 
     HDFS-2581. Implement protobuf service for JournalProtocol. (suresh)
 
+    HDFS-2618. Implement protobuf service for NamenodeProtocol. (suresh)
+
  IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple 

+ 91 - 266
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java

@@ -14903,15 +14903,10 @@ public final class HdfsProtos {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
    
-    // repeated .DatanodeIDProto datanodeIDs = 2;
-    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> 
-        getDatanodeIDsList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index);
+    // repeated string datanodeIDs = 2;
+    java.util.List<String> getDatanodeIDsList();
    int getDatanodeIDsCount();
-    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
-        getDatanodeIDsOrBuilderList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index);
+    String getDatanodeIDs(int index);
  }
  public static final class BlockWithLocationsProto extends
      com.google.protobuf.GeneratedMessage
@@ -14955,30 +14950,23 @@ public final class HdfsProtos {
      return block_;
    }
    
-    // repeated .DatanodeIDProto datanodeIDs = 2;
+    // repeated string datanodeIDs = 2;
    public static final int DATANODEIDS_FIELD_NUMBER = 2;
-    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_;
-    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-      return datanodeIDs_;
-    }
-    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
-        getDatanodeIDsOrBuilderList() {
+    private com.google.protobuf.LazyStringList datanodeIDs_;
+    public java.util.List<String>
+        getDatanodeIDsList() {
      return datanodeIDs_;
    }
    public int getDatanodeIDsCount() {
      return datanodeIDs_.size();
    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-      return datanodeIDs_.get(index);
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index) {
+    public String getDatanodeIDs(int index) {
      return datanodeIDs_.get(index);
    }
    
    private void initFields() {
      block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
-      datanodeIDs_ = java.util.Collections.emptyList();
+      datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
@@ -14993,12 +14981,6 @@ public final class HdfsProtos {
        memoizedIsInitialized = 0;
        return false;
      }
-      for (int i = 0; i < getDatanodeIDsCount(); i++) {
-        if (!getDatanodeIDs(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
      memoizedIsInitialized = 1;
      return true;
    }
@@ -15010,7 +14992,7 @@ public final class HdfsProtos {
        output.writeMessage(1, block_);
      }
      for (int i = 0; i < datanodeIDs_.size(); i++) {
-        output.writeMessage(2, datanodeIDs_.get(i));
+        output.writeBytes(2, datanodeIDs_.getByteString(i));
      }
      getUnknownFields().writeTo(output);
    }
@@ -15025,9 +15007,14 @@ public final class HdfsProtos {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, block_);
      }
-      for (int i = 0; i < datanodeIDs_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, datanodeIDs_.get(i));
+      {
+        int dataSize = 0;
+        for (int i = 0; i < datanodeIDs_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(datanodeIDs_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getDatanodeIDsList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
@@ -15185,7 +15172,6 @@ public final class HdfsProtos {
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getBlockFieldBuilder();
-          getDatanodeIDsFieldBuilder();
        }
      }
      private static Builder create() {
@@ -15200,12 +15186,8 @@ public final class HdfsProtos {
          blockBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      
@@ -15252,15 +15234,12 @@ public final class HdfsProtos {
        } else {
          result.block_ = blockBuilder_.build();
        }
-        if (datanodeIDsBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002)) {
-            datanodeIDs_ = java.util.Collections.unmodifiableList(datanodeIDs_);
-            bitField0_ = (bitField0_ & ~0x00000002);
-          }
-          result.datanodeIDs_ = datanodeIDs_;
-        } else {
-          result.datanodeIDs_ = datanodeIDsBuilder_.build();
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          datanodeIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              datanodeIDs_);
+          bitField0_ = (bitField0_ & ~0x00000002);
        }
+        result.datanodeIDs_ = datanodeIDs_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
@@ -15280,31 +15259,15 @@ public final class HdfsProtos {
        if (other.hasBlock()) {
          mergeBlock(other.getBlock());
        }
-        if (datanodeIDsBuilder_ == null) {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDs_.isEmpty()) {
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-            } else {
-              ensureDatanodeIDsIsMutable();
-              datanodeIDs_.addAll(other.datanodeIDs_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDsBuilder_.isEmpty()) {
-              datanodeIDsBuilder_.dispose();
-              datanodeIDsBuilder_ = null;
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-              datanodeIDsBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getDatanodeIDsFieldBuilder() : null;
-            } else {
-              datanodeIDsBuilder_.addAllMessages(other.datanodeIDs_);
-            }
+        if (!other.datanodeIDs_.isEmpty()) {
+          if (datanodeIDs_.isEmpty()) {
+            datanodeIDs_ = other.datanodeIDs_;
+            bitField0_ = (bitField0_ & ~0x00000002);
+          } else {
+            ensureDatanodeIDsIsMutable();
+            datanodeIDs_.addAll(other.datanodeIDs_);
          }
+          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
@@ -15319,12 +15282,6 @@ public final class HdfsProtos {
          
          return false;
        }
-        for (int i = 0; i < getDatanodeIDsCount(); i++) {
-          if (!getDatanodeIDs(i).isInitialized()) {
-            
-            return false;
-          }
-        }
        return true;
      }
      
@@ -15361,9 +15318,8 @@ public final class HdfsProtos {
              break;
            }
            case 18: {
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
-              input.readMessage(subBuilder, extensionRegistry);
-              addDatanodeIDs(subBuilder.buildPartial());
+              ensureDatanodeIDsIsMutable();
+              datanodeIDs_.add(input.readBytes());
              break;
            }
          }
@@ -15462,190 +15418,60 @@ public final class HdfsProtos {
        return blockBuilder_;
      }
      
-      // repeated .DatanodeIDProto datanodeIDs = 2;
-      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_ =
-        java.util.Collections.emptyList();
+      // repeated string datanodeIDs = 2;
+      private com.google.protobuf.LazyStringList datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      private void ensureDatanodeIDsIsMutable() {
        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
-          datanodeIDs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(datanodeIDs_);
+          datanodeIDs_ = new com.google.protobuf.LazyStringArrayList(datanodeIDs_);
          bitField0_ |= 0x00000002;
         }
      }
-      
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDsBuilder_;
-      
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-        if (datanodeIDsBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        } else {
-          return datanodeIDsBuilder_.getMessageList();
-        }
+      public java.util.List<String>
+          getDatanodeIDsList() {
+        return java.util.Collections.unmodifiableList(datanodeIDs_);
      }
      public int getDatanodeIDsCount() {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.size();
-        } else {
-          return datanodeIDsBuilder_.getCount();
-        }
+        return datanodeIDs_.size();
      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index);
-        } else {
-          return datanodeIDsBuilder_.getMessage(index);
-        }
+      public String getDatanodeIDs(int index) {
+        return datanodeIDs_.get(index);
      }
      public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(builderForValue.build());
-        }
+          int index, String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureDatanodeIDsIsMutable();
+        datanodeIDs_.set(index, value);
+        onChanged();
        return this;
      }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, builderForValue.build());
-        }
+      public Builder addDatanodeIDs(String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
        return this;
      }
      public Builder addAllDatanodeIDs(
-          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          super.addAll(values, datanodeIDs_);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addAllMessages(values);
-        }
+          java.lang.Iterable<String> values) {
+        ensureDatanodeIDsIsMutable();
+        super.addAll(values, datanodeIDs_);
+        onChanged();
        return this;
      }
      public Builder clearDatanodeIDs() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
-        return this;
-      }
-      public Builder removeDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.remove(index);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.remove(index);
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        onChanged();
        return this;
      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().getBuilder(index);
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-          int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index);  } else {
-          return datanodeIDsBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
-           getDatanodeIDsOrBuilderList() {
-        if (datanodeIDsBuilder_ != null) {
-          return datanodeIDsBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        }
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder() {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder> 
-           getDatanodeIDsBuilderList() {
-        return getDatanodeIDsFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
-          getDatanodeIDsFieldBuilder() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
-                  datanodeIDs_,
-                  ((bitField0_ & 0x00000002) == 0x00000002),
-                  getParentForChildren(),
-                  isClean());
-          datanodeIDs_ = null;
-        }
-        return datanodeIDsBuilder_;
+      void addDatanodeIDs(com.google.protobuf.ByteString value) {
+        ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
      }
      
      // @@protoc_insertion_point(builder_scope:BlockWithLocationsProto)
@@ -20348,29 +20174,28 @@ public final class HdfsProtos {
      "ature\030\001 \002(\0132\031.CheckpointSignatureProto\022\031" +
      "\n\021needToReturnImage\030\002 \002(\010\"A\n\nBlockProto\022" +
      "\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\020\n\010nu" +
-      "mBytes\030\003 \001(\004\"\\\n\027BlockWithLocationsProto\022",
-      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022%\n\013datanodeI" +
-      "Ds\030\002 \003(\0132\020.DatanodeIDProto\"D\n\030BlocksWith" +
-      "LocationsProto\022(\n\006blocks\030\001 \003(\0132\030.BlockWi" +
-      "thLocationsProto\"8\n\022RemoteEditLogProto\022\021" +
-      "\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\"?\n\032Re" +
-      "moteEditLogManifestProto\022!\n\004logs\030\001 \003(\0132\023" +
-      ".RemoteEditLogProto\"\203\001\n\022NamespaceInfoPro" +
-      "to\022\024\n\014buildVersion\030\001 \002(\t\022\032\n\022distUpgradeV" +
-      "ersion\030\002 \002(\r\022\023\n\013blockPoolID\030\003 \002(\t\022&\n\013sto" +
-      "rageInfo\030\004 \002(\0132\021.StorageInfoProto\"D\n\rBlo",
-      "ckKeyProto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpiryDate\030" +
-      "\002 \002(\004\022\020\n\010keyBytes\030\003 \002(\014\"\254\001\n\026ExportedBloc" +
-      "kKeysProto\022\033\n\023isBlockTokenEnabled\030\001 \002(\010\022" +
-      "\031\n\021keyUpdateInterval\030\002 \002(\004\022\025\n\rtokenLifeT" +
-      "ime\030\003 \002(\004\022\"\n\ncurrentKey\030\004 \002(\0132\016.BlockKey" +
-      "Proto\022\037\n\007allKeys\030\005 \003(\0132\016.BlockKeyProto\"N" +
-      "\n\024RecoveringBlockProto\022\023\n\013newGenStamp\030\001 " +
-      "\002(\004\022!\n\005block\030\002 \002(\0132\022.LocatedBlockProto*G" +
-      "\n\014ReplicaState\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007" +
-      "\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.a",
-      "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" +
-      "otos\240\001\001"
+      "mBytes\030\003 \001(\004\"J\n\027BlockWithLocationsProto\022",
+      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022\023\n\013datanodeI" +
+      "Ds\030\002 \003(\t\"D\n\030BlocksWithLocationsProto\022(\n\006" +
+      "blocks\030\001 \003(\0132\030.BlockWithLocationsProto\"8" +
+      "\n\022RemoteEditLogProto\022\021\n\tstartTxId\030\001 \002(\004\022" +
+      "\017\n\007endTxId\030\002 \002(\004\"?\n\032RemoteEditLogManifes" +
+      "tProto\022!\n\004logs\030\001 \003(\0132\023.RemoteEditLogProt" +
+      "o\"\203\001\n\022NamespaceInfoProto\022\024\n\014buildVersion" +
+      "\030\001 \002(\t\022\032\n\022distUpgradeVersion\030\002 \002(\r\022\023\n\013bl" +
+      "ockPoolID\030\003 \002(\t\022&\n\013storageInfo\030\004 \002(\0132\021.S" +
+      "torageInfoProto\"D\n\rBlockKeyProto\022\r\n\005keyI",
+      "d\030\001 \002(\r\022\022\n\nexpiryDate\030\002 \002(\004\022\020\n\010keyBytes\030" +
+      "\003 \002(\014\"\254\001\n\026ExportedBlockKeysProto\022\033\n\023isBl" +
+      "ockTokenEnabled\030\001 \002(\010\022\031\n\021keyUpdateInterv" +
+      "al\030\002 \002(\004\022\025\n\rtokenLifeTime\030\003 \002(\004\022\"\n\ncurre" +
+      "ntKey\030\004 \002(\0132\016.BlockKeyProto\022\037\n\007allKeys\030\005" +
+      " \003(\0132\016.BlockKeyProto\"N\n\024RecoveringBlockP" +
+      "roto\022\023\n\013newGenStamp\030\001 \002(\004\022!\n\005block\030\002 \002(\013" +
+      "2\022.LocatedBlockProto*G\n\014ReplicaState\022\r\n\t" +
+      "FINALIZED\020\000\022\007\n\003RBW\020\001\022\007\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r" +
+      "\n\tTEMPORARY\020\004B6\n%org.apache.hadoop.hdfs.",
+      "protocol.protoB\nHdfsProtos\240\001\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

File diff suppressed because the file is too large
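The regenerated HdfsProtos.java above reflects a .proto change: BlockWithLocationsProto.datanodeIDs is now a repeated string instead of a repeated DatanodeIDProto message. A minimal sketch (not part of this patch) of building the message through the new generated API; the block numbers and datanode names below are illustrative assumptions:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;

public class BlockWithLocationsSketch {
  public static BlockWithLocationsProto example() {
    // blockId and genStamp are required fields per the descriptor above; numBytes is optional.
    BlockProto block = BlockProto.newBuilder()
        .setBlockId(1L)
        .setGenStamp(1001L)
        .setNumBytes(0L)
        .build();
    // datanodeIDs now carries plain strings rather than DatanodeIDProto messages.
    return BlockWithLocationsProto.newBuilder()
        .setBlock(block)
        .addDatanodeIDs("datanode-1")  // hypothetical datanode names
        .addDatanodeIDs("datanode-2")
        .build();
  }
}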
+ 813 - 40
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java


+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java

@@ -53,7 +53,7 @@ public class JournalProtocolTranslatorPB implements JournalProtocol, Closeable {
      Configuration conf) throws IOException {
    RPC.setProtocolEngine(conf, JournalProtocolPB.class, ProtobufRpcEngine.class);
    rpcProxy = RPC.getProxy(JournalProtocolPB.class,
-        JournalProtocol.versionID, nameNodeAddr, conf);
+        RPC.getProtocolVersion(JournalProtocolPB.class), nameNodeAddr, conf);
  }
 
  @Override
@@ -64,7 +64,7 @@ public class JournalProtocolTranslatorPB implements JournalProtocol, Closeable {
  @Override
  public long getProtocolVersion(String protocolName, long clientVersion)
      throws IOException {
-    return 0;
+    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
  }
 
  @Override

+ 53 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java

@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * Protocol that a secondary NameNode uses to communicate with the NameNode.
+ * It's used to get part of the name node state
+ * 
+ * Note: This extends the protocolbuffer service based interface to
+ * add annotations required for security.
+ */
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+@ProtocolInfo(protocolName = 
+    "org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface NamenodeProtocolPB extends
+    NamenodeProtocolService.BlockingInterface, VersionedProtocol {
+  /**
+   * This method is defined to get the protocol signature using 
+   * the R23 protocol - hence we have added the suffix of 2 the method name
+   * to avoid conflict.
+   */
+  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException;
+}

+ 253 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java

@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Implementation for protobuf service that forwards requests
+ * received on {@link NamenodeProtocolPB} to the
+ * {@link NamenodeProtocol} server implementation.
+ */
+public class NamenodeProtocolServerSideTranslatorPB implements
+    NamenodeProtocolPB {
+  private final NamenodeProtocol impl;
+
+  public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
+    this.impl = impl;
+  }
+
+  @Override
+  public GetBlocksResponseProto getBlocks(RpcController unused,
+      GetBlocksRequestProto request) throws ServiceException {
+    DatanodeInfo dnInfo = new DatanodeInfo(PBHelper.convert(request
+        .getDatanode()));
+    BlocksWithLocations blocks;
+    try {
+      blocks = impl.getBlocks(dnInfo, request.getSize());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetBlocksResponseProto.newBuilder()
+        .setBlocks(PBHelper.convert(blocks)).build();
+  }
+
+  @Override
+  public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
+      GetBlockKeysRequestProto request) throws ServiceException {
+    ExportedBlockKeys keys;
+    try {
+      keys = impl.getBlockKeys();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetBlockKeysResponseProto.newBuilder()
+        .setKeys(PBHelper.convert(keys)).build();
+  }
+
+  @Override
+  public GetTransactionIdResponseProto getTransationId(RpcController unused,
+      GetTransactionIdRequestProto request) throws ServiceException {
+    long txid;
+    try {
+      txid = impl.getTransactionID();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetTransactionIdResponseProto.newBuilder().setTxId(txid).build();
+  }
+
+  @Override
+  public RollEditLogResponseProto rollEditLog(RpcController unused,
+      RollEditLogRequestProto request) throws ServiceException {
+    CheckpointSignature signature;
+    try {
+      signature = impl.rollEditLog();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return RollEditLogResponseProto.newBuilder()
+        .setSignature(PBHelper.convert(signature)).build();
+  }
+
+  @Override
+  public ErrorReportResponseProto errorReport(RpcController unused,
+      ErrorReportRequestProto request) throws ServiceException {
+    try {
+      impl.errorReport(PBHelper.convert(request.getRegistration()),
+          request.getErrorCode(), request.getMsg());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return ErrorReportResponseProto.newBuilder().build();
+  }
+
+  @Override
+  public RegisterResponseProto register(RpcController unused,
+      RegisterRequestProto request) throws ServiceException {
+    NamenodeRegistration reg;
+    try {
+      reg = impl.register(PBHelper.convert(request.getRegistration()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return RegisterResponseProto.newBuilder()
+        .setRegistration(PBHelper.convert(reg)).build();
+  }
+
+  @Override
+  public StartCheckpointResponseProto startCheckpoint(RpcController unused,
+      StartCheckpointRequestProto request) throws ServiceException {
+    NamenodeCommand cmd;
+    try {
+      cmd = impl.startCheckpoint(PBHelper.convert(request.getRegistration()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return StartCheckpointResponseProto.newBuilder()
+        .setCommand(PBHelper.convert(cmd)).build();
+  }
+
+  @Override
+  public EndCheckpointResponseProto endCheckpoint(RpcController unused,
+      EndCheckpointRequestProto request) throws ServiceException {
+    try {
+      impl.endCheckpoint(PBHelper.convert(request.getRegistration()),
+          PBHelper.convert(request.getSignature()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return EndCheckpointResponseProto.newBuilder().build();
+  }
+
+  @Override
+  public GetEditLogManifestResponseProto getEditLogManifest(
+      RpcController unused, GetEditLogManifestRequestProto request)
+      throws ServiceException {
+    RemoteEditLogManifest manifest;
+    try {
+      manifest = impl.getEditLogManifest(request.getSinceTxId());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetEditLogManifestResponseProto.newBuilder()
+        .setManifest(PBHelper.convert(manifest)).build();
+  }
+  
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    return RPC.getProtocolVersion(NamenodeProtocolPB.class);
+  }
+
+  /**
+   * The client side will redirect getProtocolSignature to
+   * getProtocolSignature2.
+   * 
+   * However the RPC layer below on the Server side will call getProtocolVersion
+   * and possibly in the future getProtocolSignature. Hence we still implement
+   * it even though the end client will never call this method.
+   */
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature is
+     * that of {@link NamenodeProtocol}
+     */
+    if (!protocol.equals(RPC.getProtocolName(NamenodeProtocolPB.class))) {
+      throw new IOException("Namenode Serverside implements " +
+          RPC.getProtocolName(NamenodeProtocolPB.class) +
+          ". The following requested protocol is unknown: " + protocol);
+    }
+
+    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class),
+        NamenodeProtocolPB.class);
+  }
+
+
+  @Override
+  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature is
+     * that of {@link NamenodePBProtocol}
+     */
+    return ProtocolSignatureWritable.convert(
+        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
+  }
+
+  @Override
+  public VersionResponseProto versionRequest(RpcController controller,
+      VersionRequestProto request) throws ServiceException {
+    NamespaceInfo info;
+    try {
+      info = impl.versionRequest();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VersionResponseProto.newBuilder()
+        .setInfo(convert(info)).build();
+  }
+
+  private NamespaceInfoProto convert(NamespaceInfo info) {
+    return NamespaceInfoProto.newBuilder()
+        .setBlockPoolID(info.getBlockPoolID())
+        .setBuildVersion(info.getBuildVersion())
+        .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
+        .setStorageInfo(PBHelper.convert(info)).build();
+  }
+}

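A minimal sketch (not part of this patch) of the request path through the server-side translator above: a protobuf request object is handed to the translator, which forwards the call to whatever NamenodeProtocol implementation the NameNode supplies. Registering the translator as an RPC blocking service is omitted here.

import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

import com.google.protobuf.ServiceException;

public class NamenodeProtocolServerSideSketch {
  // Forwards a getTransactionID call through the translator to the given implementation.
  public static long fetchTransactionId(NamenodeProtocol impl) throws ServiceException {
    NamenodeProtocolServerSideTranslatorPB translator =
        new NamenodeProtocolServerSideTranslatorPB(impl);
    // The translator ignores the RpcController argument, so null is passed here.
    GetTransactionIdResponseProto resp = translator.getTransationId(
        null, GetTransactionIdRequestProto.newBuilder().build());
    return resp.getTxId();
  }
}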
+ 269 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class is the client side translator to translate the requests made on
+ * {@link NamenodeProtocol} interfaces to the RPC server implementing
+ * {@link NamenodeProtocolPB}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
+    Closeable {
+  /** RpcController is not used and hence is set to null */
+  private final static RpcController NULL_CONTROLLER = null;
+  
+  /*
+   * Protobuf requests with no parameters instantiated only once
+   */
+  private static final GetBlockKeysRequestProto GET_BLOCKKEYS = 
+      GetBlockKeysRequestProto.newBuilder().build();
+  private static final GetTransactionIdRequestProto GET_TRANSACTIONID = 
+      GetTransactionIdRequestProto.newBuilder().build();
+  private static final RollEditLogRequestProto ROLL_EDITLOG = 
+      RollEditLogRequestProto.newBuilder().build();
+  private static final VersionRequestProto VERSION_REQUEST = 
+      VersionRequestProto.newBuilder().build();
+
+  final private NamenodeProtocolPB rpcProxy;
+
+
+
+  private static NamenodeProtocolPB createNamenode(
+      InetSocketAddress nameNodeAddr, Configuration conf,
+      UserGroupInformation ugi) throws IOException {
+    return RPC.getProxy(NamenodeProtocolPB.class,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
+        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
+  }
+
+  /** Create a {@link NameNode} proxy */
+  static NamenodeProtocolPB createNamenodeWithRetry(
+      NamenodeProtocolPB rpcNamenode) {
+    RetryPolicy createPolicy = RetryPolicies
+        .retryUpToMaximumCountWithFixedSleep(5,
+            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+        createPolicy);
+
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+            remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
+
+    methodNameToPolicyMap.put("create", methodPolicy);
+
+    return (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
+        rpcNamenode, methodNameToPolicyMap);
+  }
+
+  public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
+      Configuration conf, UserGroupInformation ugi) throws IOException {
+    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+  }
+
+  public void close() {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocolName,
+      long clientVersion, int clientMethodHash) throws IOException {
+    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
+        protocolName, clientVersion, clientMethodHash));
+  }
+
+  @Override
+  public long getProtocolVersion(String protocolName, long clientVersion)
+      throws IOException {
+    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
+  }
+
+  @Override
+  public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
+      throws IOException {
+    GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
+        .setDatanode(PBHelper.convert(datanode)).setSize(size)
+        .build();
+    try {
+      return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
+          .getBlocks());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public ExportedBlockKeys getBlockKeys() throws IOException {
+    try {
+      return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
+          GET_BLOCKKEYS).getKeys());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public long getTransactionID() throws IOException {
+    try {
+      return rpcProxy.getTransationId(NULL_CONTROLLER, GET_TRANSACTIONID)
+          .getTxId();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public CheckpointSignature rollEditLog() throws IOException {
+    try {
+      return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
+          ROLL_EDITLOG).getSignature());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public NamespaceInfo versionRequest() throws IOException {
+    try {
+      return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
+          VERSION_REQUEST).getInfo());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void errorReport(NamenodeRegistration registration, int errorCode,
+      String msg) throws IOException {
+    ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
+        .setErrorCode(errorCode).setMsg(msg)
+        .setRegistartion(PBHelper.convert(registration)).build();
+    try {
+      rpcProxy.errorReport(NULL_CONTROLLER, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public NamenodeRegistration register(NamenodeRegistration registration)
+      throws IOException {
+    RegisterRequestProto req = RegisterRequestProto.newBuilder()
+        .setRegistration(PBHelper.convert(registration)).build();
+    try {
+      return PBHelper.convert(rpcProxy.register(NULL_CONTROLLER, req)
+          .getRegistration());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
+      throws IOException {
+    StartCheckpointRequestProto req = StartCheckpointRequestProto.newBuilder()
+        .setRegistration(PBHelper.convert(registration)).build();
+    NamenodeCommandProto cmd;
+    try {
+      cmd = rpcProxy.startCheckpoint(NULL_CONTROLLER, req).getCommand();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    return PBHelper.convert(cmd);
+  }
+
+  @Override
+  public void endCheckpoint(NamenodeRegistration registration,
+      CheckpointSignature sig) throws IOException {
+    EndCheckpointRequestProto req = EndCheckpointRequestProto.newBuilder()
+        .setRegistration(PBHelper.convert(registration))
+        .setSignature(PBHelper.convert(sig)).build();
+    try {
+      rpcProxy.endCheckpoint(NULL_CONTROLLER, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
+      throws IOException {
+    GetEditLogManifestRequestProto req = GetEditLogManifestRequestProto
+        .newBuilder().setSinceTxId(sinceTxId).build();
+    try {
+      return PBHelper.convert(rpcProxy.getEditLogManifest(NULL_CONTROLLER, req)
+          .getManifest());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+}

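A minimal sketch (not part of this patch) of a checkpointing client using the translator above; the NameNode address and the use of the current user are illustrative assumptions:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class NamenodeProtocolClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode RPC address; normally taken from the configuration.
    InetSocketAddress nnAddr = NetUtils.createSocketAddr("nn.example.com:8020");
    Configuration conf = new HdfsConfiguration();
    NamenodeProtocolTranslatorPB namenode = new NamenodeProtocolTranslatorPB(
        nnAddr, conf, UserGroupInformation.getCurrentUser());
    try {
      // Calls go through the protobuf translator to the NameNode.
      long txid = namenode.getTransactionID();
      RemoteEditLogManifest manifest = namenode.getEditLogManifest(txid);
      System.out.println("txid=" + txid + " logs=" + manifest.getLogs().size());
    } finally {
      namenode.close();
    }
  }
}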
+ 209 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -17,28 +17,55 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockKey;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 
 import com.google.protobuf.ByteString;
 
 /**
- * Utilities for converting protobuf classes to and from 
- * implementation classes.
+ * Utilities for converting protobuf classes to and from implementation classes.
  */
 class PBHelper {
   private PBHelper() {
     /** Hidden constructor */
   }
-  
+
   public static ByteString getByteString(byte[] bytes) {
     return ByteString.copyFrom(bytes);
   }
-  
+
   public static NamenodeRole convert(NamenodeRoleProto role) {
     switch (role) {
     case NAMENODE:
@@ -50,7 +77,7 @@ class PBHelper {
     }
     return null;
   }
-  
+
   public static NamenodeRoleProto convert(NamenodeRole role) {
     switch (role) {
     case NAMENODE:
@@ -62,31 +89,196 @@ class PBHelper {
     }
     return null;
   }
-  
+
   public static StorageInfoProto convert(StorageInfo info) {
     return StorageInfoProto.newBuilder().setClusterID(info.getClusterID())
-        .setCTime(info.getCTime())
-        .setLayoutVersion(info.getLayoutVersion())
-        .setNamespceID(info.getNamespaceID())
-        .build();
+        .setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion())
+        .setNamespceID(info.getNamespaceID()).build();
   }
-  
+
   public static StorageInfo convert(StorageInfoProto info) {
     return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(),
         info.getClusterID(), info.getCTime());
   }
-  
-  
+
   public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
     return NamenodeRegistrationProto.newBuilder()
-        .setHttpAddress(reg.getHttpAddress())
-        .setRole(convert(reg.getRole()))
+        .setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole()))
         .setRpcAddress(reg.getAddress())
         .setStorageInfo(convert((StorageInfo) reg)).build();
   }
-  
+
   public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
     return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
         convert(reg.getStorageInfo()), convert(reg.getRole()));
   }
-}
+
+  public static DatanodeID convert(DatanodeIDProto dn) {
+    return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
+        dn.getIpcPort());
+  }
+
+  public static DatanodeIDProto convert(DatanodeID dn) {
+    return DatanodeIDProto.newBuilder().setName(dn.getName())
+        .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
+        .setStorageID(dn.getStorageID()).build();
+  }
+
+  public static BlockProto convert(Block b) {
+    return BlockProto.newBuilder().setBlockId(b.getBlockId())
+        .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
+        .build();
+  }
+
+  public static Block convert(BlockProto b) {
+    return new Block(b.getBlockId(), b.getGenStamp(), b.getNumBytes());
+  }
+
+  public static BlockWithLocationsProto convert(BlockWithLocations blk) {
+    return BlockWithLocationsProto.newBuilder()
+        .setBlock(convert(blk.getBlock()))
+        .addAllDatanodeIDs(Arrays.asList(blk.getDatanodes())).build();
+  }
+
+  public static BlockWithLocations convert(BlockWithLocationsProto b) {
+    return new BlockWithLocations(convert(b.getBlock()), b.getDatanodeIDsList()
+        .toArray(new String[0]));
+  }
+
+  public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
+    BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
+        .newBuilder();
+    for (BlockWithLocations b : blks.getBlocks()) {
+      builder.addBlocks(convert(b));
+    }
+    return builder.build();
+  }
+
+  public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
+    return new BlocksWithLocations(convert(blocks.getBlocksList()));
+  }
+
+  public static BlockKeyProto convert(BlockKey key) {
+    byte[] encodedKey = key.getEncodedKey();
+    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? new byte[0]
+        : encodedKey);
+    return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
+        .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
+  }
+
+  public static BlockKey convert(BlockKeyProto k) {
+    return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes()
+        .toByteArray());
+  }
+
+  public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
+    ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
+        .newBuilder();
+    builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
+        .setKeyUpdateInterval(keys.getKeyUpdateInterval())
+        .setTokenLifeTime(keys.getTokenLifetime())
+        .setCurrentKey(convert(keys.getCurrentKey()));
+    for (BlockKey k : keys.getAllKeys()) {
+      builder.addAllKeys(convert(k));
+    }
+    return builder.build();
+  }
+
+  public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
+    return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
+        keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
+        convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
+  }
+
+  public static CheckpointSignatureProto convert(CheckpointSignature s) {
+    return CheckpointSignatureProto.newBuilder()
+        .setBlockPoolId(s.getBlockpoolID())
+        .setCurSegmentTxId(s.getCurSegmentTxId())
+        .setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
+        .setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
+  }
+
+  public static CheckpointSignature convert(CheckpointSignatureProto s) {
+    return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()),
+        s.getBlockPoolId(), s.getMostRecentCheckpointTxId(),
+        s.getCurSegmentTxId());
+  }
+
+  public static RemoteEditLogProto convert(RemoteEditLog log) {
+    return RemoteEditLogProto.newBuilder().setEndTxId(log.getEndTxId())
+        .setStartTxId(log.getStartTxId()).build();
+  }
+
+  public static RemoteEditLog convert(RemoteEditLogProto l) {
+    return new RemoteEditLog(l.getStartTxId(), l.getEndTxId());
+  }
+
+  public static RemoteEditLogManifestProto convert(
+      RemoteEditLogManifest manifest) {
+    RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto
+        .newBuilder();
+    for (RemoteEditLog log : manifest.getLogs()) {
+      builder.addLogs(convert(log));
+    }
+    return builder.build();
+  }
+
+  public static RemoteEditLogManifest convert(
+      RemoteEditLogManifestProto manifest) {
+    List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest
+        .getLogsList().size());
+    for (RemoteEditLogProto l : manifest.getLogsList()) {
+      logs.add(convert(l));
+    }
+    return new RemoteEditLogManifest(logs);
+  }
+
+  public static CheckpointCommandProto convert(CheckpointCommand cmd) {
+    return CheckpointCommandProto.newBuilder()
+        .setSignature(convert(cmd.getSignature())).build();
+  }
+
+  public static NamenodeCommandProto convert(NamenodeCommand cmd) {
+    if (cmd instanceof CheckpointCommand) {
+      return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
+          .setType(NamenodeCommandProto.Type.CheckPointCommand)
+          .setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
+    }
+    return NamenodeCommandProto.newBuilder().setAction(cmd.getAction()).build();
+  }
+
+  public static BlockWithLocations[] convert(List<BlockWithLocationsProto> b) {
+    BlockWithLocations[] ret = new BlockWithLocations[b.size()];
+    int i = 0;
+    for (BlockWithLocationsProto entry : b) {
+      ret[i++] = convert(entry);
+    }
+    return ret;
+  }
+
+  public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
+    BlockKey[] ret = new BlockKey[list.size()];
+    int i = 0;
+    for (BlockKeyProto k : list) {
+      ret[i++] = convert(k);
+    }
+    return ret;
+  }
+
+  public static NamespaceInfo convert(NamespaceInfoProto info) {
+    StorageInfoProto storage = info.getStorageInfo();
+    return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
+        info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion());
+  }
+
+  public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+    switch (cmd.getType()) {
+    case CheckPointCommand:
+      CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
+      return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()),
+          chkPt.getNeedToReturnImage());
+    default:
+      return new NamenodeCommand(cmd.getAction());
+    }
+  }
+}

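As a quick, hedged illustration of how these conversions compose (not part of the commit), the sketch below round-trips a BlocksWithLocations through its protobuf form and back. The sketch class is invented; it sits in org.apache.hadoop.hdfs.protocolPB only because PBHelper is package-private, and the constructor arguments mirror the ones used in TestPBHelper further down.

package org.apache.hadoop.hdfs.protocolPB; // same package as the package-private PBHelper (sketch assumption)

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class PBHelperRoundTripSketch {
  public static void main(String[] args) {
    // One block with replicas on two datanodes (constructor arguments as in TestPBHelper).
    BlockWithLocations blk = new BlockWithLocations(new Block(1, 100, 3),
        new String[] { "dn1", "dn2" });
    BlocksWithLocations blocks =
        new BlocksWithLocations(new BlockWithLocations[] { blk });

    // Convert to the wire form and back; datanode IDs travel as plain strings.
    BlocksWithLocationsProto proto = PBHelper.convert(blocks);
    BlocksWithLocations back = PBHelper.convert(proto);
    System.out.println(back.getBlocks()[0].getDatanodes().length); // prints 2
  }
}
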
+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockKey.java

@@ -36,4 +36,8 @@ public class BlockKey extends DelegationKey {
   public BlockKey(int keyId, long expiryDate, SecretKey key) {
     super(keyId, expiryDate, key);
   }
+  
+  public BlockKey(int keyId, long expiryDate, byte[] encodedKey) {
+    super(keyId, expiryDate, encodedKey);
+  }
 }

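A brief hedged sketch (not part of the commit) of why the new byte[] constructor matters: PBHelper rebuilds a BlockKey directly from the raw key bytes carried in BlockKeyProto. The class name and package placement are assumptions for illustration only.

package org.apache.hadoop.hdfs.protocolPB; // assumption: PBHelper's package, so the package-private class is visible

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;

public class BlockKeySketch {
  public static void main(String[] args) {
    // The new constructor accepts the serialized key material directly.
    BlockKey key = new BlockKey(1, 10L, "encodedKey".getBytes());

    // The protobuf form carries exactly those bytes in its keyBytes field,
    // so the reverse conversion relies on this constructor.
    BlockKeyProto proto = PBHelper.convert(key);
    BlockKey back = PBHelper.convert(proto);
    System.out.println(back.getKeyId() + " " + back.getExpiryDate()); // prints "1 10"
  }
}
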
+ 21 - 3
hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto

@@ -42,7 +42,7 @@ message GetBlocksRequestProto {
  * blocks - List of returned blocks
  */
 message GetBlocksResponseProto {
-  required BlockWithLocationsProto blocks = 1; // List of blocks
+  required BlocksWithLocationsProto blocks = 1; // List of blocks
 }
 
 /**
@@ -85,12 +85,25 @@ message RollEditLogResponseProto {
 }
 
 /**
- * registartion - Namenode reporting the error
+ * void request
+ */
+message VersionRequestProto {
+}
+
+/**
+ * info - namespace information of the namenode
+ */
+message VersionResponseProto {
+  required NamespaceInfoProto info = 1;
+}
+
+/**
+ * registration - Namenode reporting the error
  * errorCode - error code indicating the error
  * msg - Free text description of the error
  */
 message ErrorReportRequestProto {
-  required NamenodeRegistrationProto registartion = 1; // Registartion info
+  required NamenodeRegistrationProto registration = 1; // Registration info
   required uint32 errorCode = 2;  // Error code
   required string msg = 3;        // Error message
 }
@@ -193,6 +206,11 @@ service NamenodeProtocolService {
    */
   rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
 
+  /**
+   * Request the version and namespace information from the active namenode
+   */
+  rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
+
   /**
    * Report from a sub-ordinate namenode of an error to the active namenode.
    * Active namenode may decide to unregister the reporting namenode 

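A hedged sketch (not part of the commit) of filling in the corrected ErrorReportRequestProto from Java. The generated outer class name NamenodeProtocolProtos and the package placement are assumptions; the setter names follow protobuf's standard generation from the field names above, and the registration values are the made-up ones from TestPBHelper.

package org.apache.hadoop.hdfs.protocolPB; // assumption: PBHelper's package

import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto; // assumed outer classname
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;

public class ErrorReportRequestSketch {
  public static void main(String[] args) {
    NamenodeRegistration reg = new NamenodeRegistration("address:999", "http:1000",
        new StorageInfo(1, 2, "cid", 3), NamenodeRole.NAMENODE);

    // With the field renamed to "registration", protobuf generates setRegistration().
    ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
        .setRegistration(PBHelper.convert(reg))
        .setErrorCode(1)
        .setMsg("free-text description of the error")
        .build();
    System.out.println(req.getMsg());
  }
}
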
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto

@@ -270,8 +270,8 @@ message BlockProto {
  * Block and datanodes where is it located
  */
 message BlockWithLocationsProto {
-  required BlockProto block = 1;            // Block
-  repeated DatanodeIDProto datanodeIDs = 2; // Datanodes with replicas of the block
+  required BlockProto block = 1;   // Block
+  repeated string datanodeIDs = 2; // Datanodes with replicas of the block
 }
 
 /**

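A hedged sketch (not part of the commit) of the effect of this schema change: replica locations are now carried as plain datanode ID strings instead of nested DatanodeIDProto messages. The builder calls mirror the ones PBHelper uses above; the wrapper class is invented for illustration.

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;

public class BlockWithLocationsProtoSketch {
  public static void main(String[] args) {
    BlockWithLocationsProto blk = BlockWithLocationsProto.newBuilder()
        .setBlock(BlockProto.newBuilder()
            .setBlockId(1).setGenStamp(100).setNumBytes(3).build())
        .addAllDatanodeIDs(Arrays.asList("dn1", "dn2", "dn3")) // just strings now
        .build();
    System.out.println(blk.getDatanodeIDsCount()); // prints 3
  }
}
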
+ 169 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -19,12 +19,35 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import static junit.framework.Assert.*;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockKey;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.junit.Test;
 
 /**
@@ -46,10 +69,14 @@ public class TestPBHelper {
     assertEquals(NamenodeRole.NAMENODE,
         PBHelper.convert(NamenodeRoleProto.NAMENODE));
   }
-  
+
+  private static StorageInfo getStorageInfo() {
+    return new StorageInfo(1, 2, "cid", 3);
+  }
+
   @Test
   public void testConvertStoragInfo() {
-    StorageInfo info = new StorageInfo(1, 2, "cid", 3);
+    StorageInfo info = getStorageInfo();
     StorageInfoProto infoProto = PBHelper.convert(info);
     StorageInfo info2 = PBHelper.convert(infoProto);
     assertEquals(info.getClusterID(), info2.getClusterID());
@@ -57,10 +84,10 @@ public class TestPBHelper {
     assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
     assertEquals(info.getNamespaceID(), info2.getNamespaceID());
   }
-  
+
   @Test
   public void testConvertNamenodeRegistration() {
-    StorageInfo info = new StorageInfo(1, 2, "cid", 3);
+    StorageInfo info = getStorageInfo();
     NamenodeRegistration reg = new NamenodeRegistration("address:999",
         "http:1000", info, NamenodeRole.NAMENODE);
     NamenodeRegistrationProto regProto = PBHelper.convert(reg);
@@ -74,6 +101,144 @@ public class TestPBHelper {
     assertEquals(reg.getRegistrationID(), reg2.getRegistrationID());
     assertEquals(reg.getRole(), reg2.getRole());
     assertEquals(reg.getVersion(), reg2.getVersion());
+
+  }
+
+  @Test
+  public void testConvertDatanodeID() {
+    DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
+    DatanodeIDProto dnProto = PBHelper.convert(dn);
+    DatanodeID dn2 = PBHelper.convert(dnProto);
+    assertEquals(dn.getHost(), dn2.getHost());
+    assertEquals(dn.getInfoPort(), dn2.getInfoPort());
+    assertEquals(dn.getIpcPort(), dn2.getIpcPort());
+    assertEquals(dn.getName(), dn2.getName());
+    assertEquals(dn.getPort(), dn2.getPort());
+    assertEquals(dn.getStorageID(), dn2.getStorageID());
+  }
+
+  @Test
+  public void testConvertBlock() {
+    Block b = new Block(1, 100, 3);
+    BlockProto bProto = PBHelper.convert(b);
+    Block b2 = PBHelper.convert(bProto);
+    assertEquals(b, b2);
+  }
+
+  private static BlockWithLocations getBlockWithLocations(int bid) {
+    return new BlockWithLocations(new Block(bid, 0, 1), new String[] { "dn1",
+        "dn2", "dn3" });
+  }
+
+  private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
+    assertEquals(locs1.getBlock(), locs2.getBlock());
+    assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
+  }
+
+  @Test
+  public void testConvertBlockWithLocations() {
+    BlockWithLocations locs = getBlockWithLocations(1);
+    BlockWithLocationsProto locsProto = PBHelper.convert(locs);
+    BlockWithLocations locs2 = PBHelper.convert(locsProto);
+    compare(locs, locs2);
+  }
+
+  @Test
+  public void testConvertBlocksWithLocations() {
+    BlockWithLocations[] list = new BlockWithLocations[] {
+        getBlockWithLocations(1), getBlockWithLocations(2) };
+    BlocksWithLocations locs = new BlocksWithLocations(list);
+    BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
+    BlocksWithLocations locs2 = PBHelper.convert(locsProto);
+    BlockWithLocations[] blocks = locs.getBlocks();
+    BlockWithLocations[] blocks2 = locs2.getBlocks();
+    assertEquals(blocks.length, blocks2.length);
+    for (int i = 0; i < blocks.length; i++) {
+      compare(blocks[i], blocks2[i]);
+    }
+  }
+
+  private static BlockKey getBlockKey(int keyId) {
+    return new BlockKey(keyId, 10, "encodedKey".getBytes());
+  }
+
+  private void compare(BlockKey k1, BlockKey k2) {
+    assertEquals(k1.getExpiryDate(), k2.getExpiryDate());
+    assertEquals(k1.getKeyId(), k2.getKeyId());
+    assertTrue(Arrays.equals(k1.getEncodedKey(), k2.getEncodedKey()));
+
+  }
+
+  @Test
+  public void testConvertBlockKey() {
+    BlockKey key = getBlockKey(1);
+    BlockKeyProto keyProto = PBHelper.convert(key);
+    BlockKey key1 = PBHelper.convert(keyProto);
+    compare(key, key1);
+  }
+
+  @Test
+  public void testConvertExportedBlockKeys() {
+    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
+    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
+        getBlockKey(1), keys);
+    ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
+    ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
+
+    BlockKey[] allKeys = expKeys.getAllKeys();
+    BlockKey[] allKeys1 = expKeys1.getAllKeys();
+    assertEquals(allKeys.length, allKeys1.length);
+    for (int i = 0; i < allKeys.length; i++) {
+      compare(allKeys[i], allKeys1[i]);
+    }
+    compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
+    assertEquals(expKeys.getKeyUpdateInterval(),
+        expKeys1.getKeyUpdateInterval());
+    assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
+  }
+
+  @Test
+  public void testConvertCheckpointSignature() {
+    CheckpointSignature s = new CheckpointSignature(getStorageInfo(), "bpid",
+        100, 1);
+    CheckpointSignatureProto sProto = PBHelper.convert(s);
+    CheckpointSignature s1 = PBHelper.convert(sProto);
+    assertEquals(s.getBlockpoolID(), s1.getBlockpoolID());
+    assertEquals(s.getClusterID(), s1.getClusterID());
+    assertEquals(s.getCTime(), s1.getCTime());
+    assertEquals(s.getCurSegmentTxId(), s1.getCurSegmentTxId());
+    assertEquals(s.getLayoutVersion(), s1.getLayoutVersion());
+    assertEquals(s.getMostRecentCheckpointTxId(),
+        s1.getMostRecentCheckpointTxId());
+    assertEquals(s.getNamespaceID(), s1.getNamespaceID());
+  }
+  
+  private static void compare(RemoteEditLog l1, RemoteEditLog l2) {
+    assertEquals(l1.getEndTxId(), l2.getEndTxId());
+    assertEquals(l1.getStartTxId(), l2.getStartTxId());
+  }
+  
+  @Test
+  public void testConvertRemoteEditLog() {
+    RemoteEditLog l = new RemoteEditLog(1, 100);
+    RemoteEditLogProto lProto = PBHelper.convert(l);
+    RemoteEditLog l1 = PBHelper.convert(lProto);
+    compare(l, l1);
+  }
+  
+  @Test
+  public void testConvertRemoteEditLogManifest() {
+    List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
+    logs.add(new RemoteEditLog(1, 10));
+    logs.add(new RemoteEditLog(11, 20));
+    RemoteEditLogManifest m = new RemoteEditLogManifest(logs);
+    RemoteEditLogManifestProto mProto = PBHelper.convert(m);
+    RemoteEditLogManifest m1 = PBHelper.convert(mProto);
 
+    List<RemoteEditLog> logs1 = m1.getLogs();
+    assertEquals(logs.size(), logs1.size());
+    for (int i = 0; i < logs.size(); i++) {
+      compare(logs.get(i), logs1.get(i));
+    }
   }
 }

Some files were not shown in this diff because too many files changed.