
HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode. Contributed by surendra singh lilhore

Tsz-Wo Nicholas Sze, 10 years ago
parent
commit
5b4eda7573

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -41,6 +41,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8163. Using monotonicNow for block report scheduling causes
     test failures on recently restarted systems. (Arpit Agarwal)
 
+    HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.
+    (surendra singh lilhore via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

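The rationale, stated briefly: the Dispatcher tracks StorageGroup objects, each describing a (datanode, storage type) pair, in hash-based collections, and without value-based equals/hashCode two objects describing the same pair are treated as distinct entries. The following is a minimal, self-contained sketch of that failure mode; the Group class, its fields, and the sample datanode names are hypothetical stand-ins for Dispatcher's inner StorageGroup, not the real class.

import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in for Dispatcher.StorageGroup: a (datanode, storageType) pair.
class Group {
  final String datanode;
  final String storageType;

  Group(String datanode, String storageType) {
    this.datanode = datanode;
    this.storageType = storageType;
  }

  // Without these two overrides, HashSet falls back to identity comparison and
  // the same (datanode, storageType) pair can be inserted twice.
  @Override
  public int hashCode() {
    return storageType.hashCode() ^ datanode.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    } else if (!(obj instanceof Group)) {
      return false;
    } else {
      final Group that = (Group) obj;
      return this.storageType.equals(that.storageType)
          && this.datanode.equals(that.datanode);
    }
  }

  public static void main(String[] args) {
    final Set<Group> groups = new HashSet<>();
    groups.add(new Group("dn1:50010", "ARCHIVE"));
    groups.add(new Group("dn1:50010", "ARCHIVE")); // same pair, different object
    // Prints 1 with the overrides above; would print 2 with identity semantics.
    System.out.println(groups.size());
  }
}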
+ 29 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java

@@ -471,6 +471,25 @@ public class Dispatcher {
       public String toString() {
         return getDisplayName();
       }
+
+      @Override
+      public int hashCode() {
+        return getStorageType().hashCode() ^ getDatanodeInfo().hashCode();
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (this == obj) {
+          return true;
+        } else if (obj == null || !(obj instanceof StorageGroup)) {
+          return false;
+        } else {
+          final StorageGroup that = (StorageGroup) obj;
+          return this.getStorageType() == that.getStorageType()
+              && this.getDatanodeInfo().equals(that.getDatanodeInfo());
+        }
+      }
+
     }
 
     final DatanodeInfo datanode;
@@ -755,6 +774,16 @@ public class Dispatcher {
         }
       }
     }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return super.equals(obj);
+    }
   }
 
   public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,

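The second Dispatcher.java hunk adds overrides that only delegate to super; judging from the delegation, the enclosing class is a StorageGroup subclass, so the (storage type, datanode) value semantics stay defined in one place while the explicit overrides document that the subclass contributes no extra state to the comparison. A hypothetical sketch of the same pattern, reusing the Group stand-in from the previous sketch (assumed to be in the same package): a subclass instance describing the same pair is found as an existing hash key.

import java.util.HashSet;
import java.util.Set;

// Hypothetical subclass mirroring the delegate-to-super pattern in the second hunk.
// Group is the stand-in class from the previous sketch.
class TrackedGroup extends Group {
  TrackedGroup(String datanode, String storageType) {
    super(datanode, storageType);
  }

  @Override
  public int hashCode() {
    return super.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    return super.equals(obj);
  }

  public static void main(String[] args) {
    final Set<Group> groups = new HashSet<>();
    groups.add(new Group("dn1:50010", "DISK"));
    // Equality is defined purely by (datanode, storageType), so a subclass
    // instance describing the same pair matches the existing entry.
    System.out.println(groups.contains(new TrackedGroup("dn1:50010", "DISK"))); // true
  }
}

Pass-through overrides like this are also a common way to keep static analysis (for example FindBugs' EQ_DOESNT_OVERRIDE_EQUALS pattern) satisfied while keeping the inherited behavior unchanged.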
+ 47 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java

@@ -277,4 +277,51 @@ public class TestMover {
        cluster.shutdown();
     }
   }
+
+  @Test(timeout = 300000)
+  public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
+    // HDFS-8147
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .storageTypes(
+            new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE },
+                { StorageType.DISK, StorageType.DISK },
+                { StorageType.DISK, StorageType.ARCHIVE } }).build();
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
+      // write to DISK
+      final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
+      out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
+      out.close();
+
+      // verify before movement
+      LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      StorageType[] storageTypes = lb.getStorageTypes();
+      for (StorageType storageType : storageTypes) {
+        Assert.assertTrue(StorageType.DISK == storageType);
+      }
+      // move to ARCHIVE
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      int rc = ToolRunner.run(conf, new Mover.Cli(),
+          new String[] { "-p", file.toString() });
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+
+      // Wait till the namenode is notified of the block movement
+      Thread.sleep(3000);
+      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      storageTypes = lb.getStorageTypes();
+      int archiveCount = 0;
+      for (StorageType storageType : storageTypes) {
+        if (StorageType.ARCHIVE == storageType) {
+          archiveCount++;
+        }
+      }
+      Assert.assertEquals(2, archiveCount);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
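
The new TestMover case ties the fix to observable behavior: a file is written with two DISK replicas on a three-datanode cluster, the COLD policy is applied, and after the Mover runs both replicas must end up on ARCHIVE storages. Before the patch, identity-based equality on StorageGroup could make the Mover's bookkeeping treat the same (datanode, storage) target as two different entries, so the expected end state was not reliably reached. The three-second sleep is simply a fixed wait for the NameNode to learn about the completed moves before the block locations are re-read. Assuming the project's standard Maven layout, the test can be run on its own with the Surefire filter, e.g. mvn test -Dtest=TestMover#testTwoReplicaSameStorageTypeShouldNotSelect from hadoop-hdfs-project/hadoop-hdfs.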