
HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1542176 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal, 11 years ago
Parent commit cd768489f3

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -106,4 +106,6 @@ IMPROVEMENTS:
 
     HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
     (Junping Du via Arpit Agarwal)
+ 
+    HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)
 

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -97,8 +97,8 @@ public class DataStorage extends Storage {
   public synchronized String getDatanodeUuid() {
     return datanodeUuid;
   }
-  
-  synchronized void setDatanodeUuid(String newDatanodeUuid) {
+
+  public synchronized void setDatanodeUuid(String newDatanodeUuid) {
     this.datanodeUuid = newDatanodeUuid;
   }
 
@@ -292,8 +292,7 @@ public class DataStorage extends Storage {
     props.setProperty("storageID", sd.getStorageUuid());
 
     String datanodeUuid = getDatanodeUuid();
-    if (LayoutVersion.supports(Feature.ADD_DATANODE_AND_STORAGE_UUIDS,
-          layoutVersion) && datanodeUuid != null) {
+    if (datanodeUuid != null) {
       props.setProperty("datanodeUuid", datanodeUuid);
     }
 
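Taken together, the two DataStorage.java hunks make the datanode UUID setter public and drop the LayoutVersion.supports(...) guard, so the UUID is persisted whenever one has been assigned. Below is a minimal, self-contained sketch of that behavior; the class name StorageSketch and the method writeIdentifiers are hypothetical stand-ins, and only the property keys and the null check come from the diff itself.

import java.util.Properties;

// Hypothetical stand-in, not HDFS code: illustrates the post-change behavior
// where the datanode UUID is written whenever it is non-null, with no
// layout-version check.
class StorageSketch {
  private String datanodeUuid;

  public synchronized String getDatanodeUuid() {
    return datanodeUuid;
  }

  public synchronized void setDatanodeUuid(String newDatanodeUuid) {
    this.datanodeUuid = newDatanodeUuid;
  }

  // Hypothetical property-writing routine mirroring the diffed lines.
  void writeIdentifiers(Properties props, String storageUuid) {
    props.setProperty("storageID", storageUuid);
    String uuid = getDatanodeUuid();
    if (uuid != null) {  // unconditional on layout version after this change
      props.setProperty("datanodeUuid", uuid);
    }
  }
}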

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java

@@ -237,7 +237,7 @@ public class TestDFSStartupVersions {
    *         this iterations version 3-tuple
    * </pre>
    */
-  @Test
+  @Test (timeout=300000)
   public void testVersions() throws Exception {
     UpgradeUtilities.initialize();
     Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
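
The only functional change in this hunk is the five-minute JUnit timeout on @Test. For reference, a minimal self-contained JUnit 4 illustration of the same annotation follows; the class and test method are hypothetical, only the timeout value matches the commit.

import org.junit.Test;

// Hypothetical example, not part of the commit: with JUnit 4, the test fails
// if the method runs longer than the given number of milliseconds.
public class TimeoutExample {
  @Test (timeout=300000)  // 300000 ms = 5 minutes, the value used throughout this commit
  public void finishesWellWithinFiveMinutes() throws Exception {
    Thread.sleep(10);  // stand-in for real test work
  }
}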

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java

@@ -454,6 +454,7 @@ public class UpgradeUtilities {
   public static void createDataNodeVersionFile(File[] parent,
       StorageInfo version, String bpid, String bpidToWrite) throws IOException {
     DataStorage storage = new DataStorage(version);
+    storage.setDatanodeUuid("FixedDatanodeUuid");
 
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
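
This one-line addition uses the setDatanodeUuid method made public in the DataStorage.java hunk above: the test helper stamps a fixed UUID into the freshly constructed DataStorage before the VERSION files are written, so the generated fixtures carry a deterministic value. A rough sketch of the same pattern with a plain java.util.Properties file follows; the class and method names are hypothetical, only the "FixedDatanodeUuid" string and the datanodeUuid key come from the diff.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

// Hypothetical helper, not HDFS code: writes a VERSION-style properties file
// whose datanodeUuid is a fixed, predictable value, mirroring the intent of
// storage.setDatanodeUuid("FixedDatanodeUuid") above.
class VersionFileSketch {
  static void writeVersionFile(File dir, String datanodeUuid) throws IOException {
    Properties props = new Properties();
    props.setProperty("datanodeUuid", datanodeUuid);  // deterministic for tests
    try (FileOutputStream out = new FileOutputStream(new File(dir, "VERSION"))) {
      props.store(out, "test fixture");
    }
  }

  public static void main(String[] args) throws IOException {
    writeVersionFile(new File("."), "FixedDatanodeUuid");
  }
}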

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java

@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks {
   static Log LOG = NameNode.stateChangeLog;
 
   /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -131,7 +131,7 @@ public class TestListCorruptFileBlocks {
   /**
    * Check that listCorruptFileBlocks works while the namenode is still in safemode.
    */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -262,7 +262,7 @@ public class TestListCorruptFileBlocks {
   }
   
   // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -372,7 +372,7 @@ public class TestListCorruptFileBlocks {
   /**
    * test listCorruptFileBlocks in DistributedFileSystem
    */
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocksDFS() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -445,7 +445,7 @@ public class TestListCorruptFileBlocks {
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
-  @Test
+  @Test (timeout=300000)
   public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {