HDFS-6453. Use Time#monotonicNow to avoid system clock reset. Contributed by Liang Xie.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1598144 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang
commit 66c5bcfc6d
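
For context on the change itself: org.apache.hadoop.util.Time#monotonicNow returns a millisecond reading derived from System.nanoTime(), which only moves forward, whereas System.currentTimeMillis() can jump backwards or forwards when the wall clock is reset (by NTP or an operator). A minimal, illustrative sketch of measuring elapsed time this way, not taken from the Hadoop source:

    // Illustrative only; mirrors the idea behind Time#monotonicNow.
    public class MonotonicElapsedTime {
      static long monotonicNow() {
        return System.nanoTime() / 1_000_000L;   // nanoseconds -> milliseconds
      }

      public static void main(String[] args) throws InterruptedException {
        long start = monotonicNow();
        Thread.sleep(100);                       // the work being timed
        long elapsedMs = monotonicNow() - start;
        // elapsedMs stays correct even if the system clock is adjusted
        // while the work is running.
        System.out.println("Took " + elapsedMs + " ms");
      }
    }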

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -604,6 +604,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6448. BlockReaderLocalLegacy should set socket timeout based on
     conf.socketTimeout (liangxie via cmccabe)
 
+    HDFS-6453. Use Time#monotonicNow to avoid system clock reset.
+    (Liang Xie via wang)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -2581,7 +2581,7 @@ public class DataNode extends Configured
                   return;
                 }
                 synchronized(checkDiskErrorMutex) {
-                  lastDiskErrorCheck = System.currentTimeMillis();
+                  lastDiskErrorCheck = Time.monotonicNow();
                 }
               }
               try {

+ 11 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

@@ -23,6 +23,7 @@ import java.util.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 class FsVolumeList {
@@ -97,7 +98,7 @@ class FsVolumeList {
   }
   
   void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
-    long totalStartTime = System.currentTimeMillis();
+    long totalStartTime = Time.monotonicNow();
     final List<IOException> exceptions = Collections.synchronizedList(
         new ArrayList<IOException>());
     List<Thread> replicaAddingThreads = new ArrayList<Thread>();
@@ -107,9 +108,9 @@ class FsVolumeList {
           try {
             FsDatasetImpl.LOG.info("Adding replicas to map for block pool " +
                 bpid + " on volume " + v + "...");
-            long startTime = System.currentTimeMillis();
+            long startTime = Time.monotonicNow();
             v.getVolumeMap(bpid, volumeMap);
-            long timeTaken = System.currentTimeMillis() - startTime;
+            long timeTaken = Time.monotonicNow() - startTime;
             FsDatasetImpl.LOG.info("Time to add replicas to map for block pool"
                 + " " + bpid + " on volume " + v + ": " + timeTaken + "ms");
           } catch (IOException ioe) {
@@ -132,7 +133,7 @@ class FsVolumeList {
     if (!exceptions.isEmpty()) {
       throw exceptions.get(0);
     }
-    long totalTimeTaken = System.currentTimeMillis() - totalStartTime;
+    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
     FsDatasetImpl.LOG.info("Total time to add all replicas to map: "
         + totalTimeTaken + "ms");
   }
@@ -141,9 +142,9 @@ class FsVolumeList {
       throws IOException {
     FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid +
                                " on volume " + volume + "...");
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     volume.getVolumeMap(bpid, volumeMap);
-    long timeTaken = System.currentTimeMillis() - startTime;
+    long timeTaken = Time.monotonicNow() - startTime;
     FsDatasetImpl.LOG.info("Time to add replicas to map for block pool " + bpid +
                                " on volume " + volume + ": " + timeTaken + "ms");
   }
@@ -195,7 +196,7 @@ class FsVolumeList {
 
 
   void addBlockPool(final String bpid, final Configuration conf) throws IOException {
-    long totalStartTime = System.currentTimeMillis();
+    long totalStartTime = Time.monotonicNow();
     
     final List<IOException> exceptions = Collections.synchronizedList(
         new ArrayList<IOException>());
@@ -206,9 +207,9 @@ class FsVolumeList {
           try {
             FsDatasetImpl.LOG.info("Scanning block pool " + bpid +
                 " on volume " + v + "...");
-            long startTime = System.currentTimeMillis();
+            long startTime = Time.monotonicNow();
             v.addBlockPool(bpid, conf);
-            long timeTaken = System.currentTimeMillis() - startTime;
+            long timeTaken = Time.monotonicNow() - startTime;
             FsDatasetImpl.LOG.info("Time taken to scan block pool " + bpid +
                 " on " + v + ": " + timeTaken + "ms");
           } catch (IOException ioe) {
@@ -232,7 +233,7 @@ class FsVolumeList {
       throw exceptions.get(0);
     }
     
-    long totalTimeTaken = System.currentTimeMillis() - totalStartTime;
+    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
     FsDatasetImpl.LOG.info("Total time to scan all replicas for block pool " +
         bpid + ": " + totalTimeTaken + "ms");
   }
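
The FsVolumeList changes keep the existing structure of timing each volume's work in its own thread and also timing the whole pass; only the clock is swapped. A rough, illustrative sketch of that pattern (hypothetical names, not the Hadoop code):

    import java.util.ArrayList;
    import java.util.List;

    class TimedParallelWork {
      static long monotonicNow() { return System.nanoTime() / 1_000_000L; }

      static void runAll(List<Runnable> tasks) throws InterruptedException {
        long totalStart = monotonicNow();
        List<Thread> threads = new ArrayList<>();
        for (Runnable task : tasks) {
          Thread t = new Thread(() -> {
            long start = monotonicNow();
            task.run();                          // e.g. scan one volume
            System.out.println("task took " + (monotonicNow() - start) + "ms");
          });
          threads.add(t);
          t.start();
        }
        for (Thread t : threads) {
          t.join();
        }
        System.out.println("total: " + (monotonicNow() - totalStart) + "ms");
      }
    }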

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressorStream;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -160,13 +161,13 @@ public final class FSImageFormatProtobuf {
     }
 
     void load(File file) throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.monotonicNow();
       imgDigest = MD5FileUtils.computeMd5ForFile(file);
       RandomAccessFile raFile = new RandomAccessFile(file, "r");
       FileInputStream fin = new FileInputStream(file);
       try {
         loadInternal(raFile, fin);
-        long end = System.currentTimeMillis();
+        long end = Time.monotonicNow();
         LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
       } finally {
         fin.close();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -159,7 +159,7 @@ public class TransferFsImage {
       }
     }
 
-    final long milliTime = System.currentTimeMillis();
+    final long milliTime = Time.monotonicNow();
     String tmpFileName = NNStorage.getTemporaryEditsFileName(
         log.getStartTxId(), log.getEndTxId(), milliTime);
     List<File> tmpFiles = dstStorage.getFiles(NameNodeDirType.EDITS,

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -215,6 +216,6 @@ public class TestDiskError {
     dataNode.checkDiskError();
     Thread.sleep(dataNode.checkDiskErrorInterval);
     long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
-    assertTrue("Disk Error check is not performed within  " + dataNode.checkDiskErrorInterval +  "  ms", ((System.currentTimeMillis()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
+    assertTrue("Disk Error check is not performed within  " + dataNode.checkDiskErrorInterval +  "  ms", ((Time.monotonicNow()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
   }
 }
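
One detail the DataNode and TestDiskError changes preserve together: a timestamp written with Time#monotonicNow must also be read back and compared with Time#monotonicNow, because monotonic readings have an arbitrary origin and are only meaningful relative to each other, never to System.currentTimeMillis(). A minimal sketch of that pairing (hypothetical names, not the Hadoop DataNode API):

    // Illustrative only; hypothetical class, not part of this commit.
    class DiskCheckClock {
      private final Object lock = new Object();
      private long lastCheckMs;                          // monotonic milliseconds

      void recordCheck() {
        synchronized (lock) {
          lastCheckMs = System.nanoTime() / 1_000_000L;  // same source as monotonicNow
        }
      }

      boolean checkedWithin(long windowMs) {
        synchronized (lock) {
          // Must read from the same clock that recordCheck() used; comparing a
          // monotonic timestamp against System.currentTimeMillis() would be
          // meaningless.
          long nowMs = System.nanoTime() / 1_000_000L;
          return nowMs - lastCheckMs < windowMs;
        }
      }
    }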