
HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

yliu 10 years ago
parent
commit
260b5e32c4

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -887,6 +887,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
     but not StorageIDs. (Milan Desai via Arpit Agarwal)
 
+    HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+    Yu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

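Why the signature matters: in Java bytecode the return type is part of the method descriptor, so narrowing LocatedBlock#getLocations() from DatanodeInfo[] to DatanodeInfoWithStorage[] breaks binary compatibility for callers built against the published 2.6 API, which is what this restore addresses. A minimal sketch of such a caller, not taken from this patch; the BlockLocationPrinter class and its setup are illustrative, while LocatedBlock, DatanodeInfo, getLocations(), getBlock() and getXferAddr() are existing HDFS API:

```java
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class BlockLocationPrinter {
  /** Print the datanodes hosting a block using the restored DatanodeInfo[] signature. */
  public static void printLocations(LocatedBlock block) {
    // Assigning to DatanodeInfo[] compiles against either return type, but a
    // binary built against the 2.6 descriptor needs DatanodeInfo[] at runtime.
    DatanodeInfo[] locs = block.getLocations();
    for (DatanodeInfo dn : locs) {
      System.out.println(block.getBlock() + " -> " + dn.getXferAddr());
    }
  }
}
```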
+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeInfoWithStorage.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java

@@ -15,11 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.protocol;
+package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DatanodeInfoWithStorage extends DatanodeInfo {
   private final String storageID;
   private final StorageType storageType;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.common.collect.Lists;
@@ -140,7 +140,7 @@ public class LocatedBlock {
    * {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#invalidateCachedStorageInfo}
    * to invalidate the cached Storage ID/Type arrays.
    */
-  public DatanodeInfoWithStorage[] getLocations() {
+  public DatanodeInfo[] getLocations() {
     return locs;
   }
 

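The declared return type goes back to DatanodeInfo[], while the array elements remain DatanodeInfoWithStorage at runtime, which is what the updated test below relies on when it casts. A hedged sketch of recovering per-replica storage details under that same assumption; the StorageIdLookup helper and its fallback logic are illustrative, not part of the patch:

```java
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class StorageIdLookup {
  /** Return the storage ID of the i-th replica, or null if it cannot be determined. */
  public static String storageIdOf(LocatedBlock block, int i) {
    DatanodeInfo dn = block.getLocations()[i];
    if (dn instanceof DatanodeInfoWithStorage) {
      // The runtime type carries the storage ID even though the static type does not.
      return ((DatanodeInfoWithStorage) dn).getStorageID();
    }
    // Fall back to the parallel array kept on the LocatedBlock itself.
    String[] ids = block.getStorageIDs();
    return (ids != null && i < ids.length) ? ids[i] : null;
  }
}
```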
+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.junit.Assert;
@@ -274,15 +274,15 @@ public class TestDatanodeManager {
     dm.sortLocatedBlocks(targetIp, blocks);
 
     // check that storage IDs/types are aligned with datanode locs
-    DatanodeInfoWithStorage[] sortedLocs = block.getLocations();
+    DatanodeInfo[] sortedLocs = block.getLocations();
     storageIDs = block.getStorageIDs();
     storageTypes = block.getStorageTypes();
     assertThat(sortedLocs.length, is(5));
     assertThat(storageIDs.length, is(5));
     assertThat(storageTypes.length, is(5));
     for(int i = 0; i < sortedLocs.length; i++) {
-      assertThat(sortedLocs[i].getStorageID(), is(storageIDs[i]));
-      assertThat(sortedLocs[i].getStorageType(), is(storageTypes[i]));
+      assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
+      assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
     }
 
     // Ensure the local node is first.
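The test's alignment check also describes the contract client code can lean on: after DatanodeManager.sortLocatedBlocks(), storage IDs and types stay index-aligned with the locations array. A hedged sketch of consuming that alignment; ReplicaReport and its null handling are illustrative, while getLocations(), getStorageTypes() and getXferAddr() are existing HDFS API:

```java
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class ReplicaReport {
  /** Print each replica's datanode together with its index-aligned storage type. */
  public static void print(LocatedBlock block) {
    DatanodeInfo[] locs = block.getLocations();
    StorageType[] types = block.getStorageTypes();
    for (int i = 0; i < locs.length; i++) {
      StorageType type = (types != null && i < types.length) ? types[i] : null;
      System.out.println(locs[i].getXferAddr() + " stores a replica on " + type);
    }
  }
}
```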