
HDFS-1739. Add available volume size to the error message when datanode throws DiskOutOfSpaceException. Contributed by Uma Maheswara Rao G

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1150067 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 14 years ago
parent
revision 598b85c9e2

+ 3 - 0
hdfs/CHANGES.txt

@@ -593,6 +593,9 @@ Trunk (unreleased changes)
     HDFS-2112.  Move ReplicationMonitor to block management.  (Uma Maheswara
     Rao G via szetszwo)
 
+    HDFS-1739.  Add available volume size to the error message when datanode
+    throws DiskOutOfSpaceException.  (Uma Maheswara Rao G via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

+ 13 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java

@@ -41,13 +41,24 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
     }
     
     int startVolume = curVolume;
+    long maxAvailable = 0;
     
     while (true) {
       FSVolume volume = volumes.get(curVolume);
       curVolume = (curVolume + 1) % volumes.size();
-      if (volume.getAvailable() > blockSize) { return volume; }
+      long availableVolumeSize = volume.getAvailable();
+      if (availableVolumeSize > blockSize) { return volume; }
+      
+      if (availableVolumeSize > maxAvailable) {
+        maxAvailable = availableVolumeSize;
+      }
+      
       if (curVolume == startVolume) {
-        throw new DiskOutOfSpaceException("Insufficient space for an additional block");
+        throw new DiskOutOfSpaceException(
+            "Insufficient space for an additional block. Volume with the most available space has "
+                + maxAvailable
+                + " bytes free, configured block size is "
+                + blockSize);
       }
     }
   }
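
For readers skimming the diff, here is a minimal self-contained sketch of the patched selection loop. The Volume interface, the local DiskOutOfSpaceException, and the empty-list/bounds guards are simplified stand-ins added for illustration (the real policy operates on FSDataset.FSVolume and org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException); only the loop body and the error message are meant to mirror the change above.

import java.util.List;

// Hypothetical, simplified stand-ins, used only to keep the sketch self-contained.
interface Volume {
  long getAvailable();
}

class DiskOutOfSpaceException extends java.io.IOException {
  DiskOutOfSpaceException(String msg) { super(msg); }
}

class RoundRobinSketch {
  private int curVolume = 0;

  // Cycle through the volumes once, starting where the last call left off.
  // If no volume can fit the block, report the largest free space seen,
  // which is exactly what HDFS-1739 adds to the message.
  synchronized Volume chooseVolume(List<Volume> volumes, long blockSize)
      throws DiskOutOfSpaceException {
    if (volumes == null || volumes.isEmpty()) {
      throw new DiskOutOfSpaceException("No available volumes");
    }
    // Guard against a volume list that shrank since the previous call.
    if (curVolume >= volumes.size()) {
      curVolume = 0;
    }
    int startVolume = curVolume;
    long maxAvailable = 0;

    while (true) {
      Volume volume = volumes.get(curVolume);
      curVolume = (curVolume + 1) % volumes.size();
      long availableVolumeSize = volume.getAvailable();
      if (availableVolumeSize > blockSize) {
        return volume;
      }
      if (availableVolumeSize > maxAvailable) {
        maxAvailable = availableVolumeSize;
      }
      if (curVolume == startVolume) {
        throw new DiskOutOfSpaceException(
            "Insufficient space for an additional block. Volume with the most"
                + " available space has " + maxAvailable
                + " bytes free, configured block size is " + blockSize);
      }
    }
  }
}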

+ 29 - 0
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java

@@ -24,6 +24,7 @@ import java.util.List;
 import junit.framework.Assert;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -63,5 +64,33 @@ public class TestRoundRobinVolumesPolicy {
       // Passed.
     }
   }
+  
+  // chooseVolume should throw DiskOutOfSpaceException with the volume and block sizes in the exception message.
+  @Test
+  public void testRRPolicyExceptionMessage()
+      throws Exception {
+    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+
+    // First volume, with 500 bytes of space.
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
+
+    // Second volume, with 600 bytes of space.
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
+
+    RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
+    int blockSize = 700;
+    try {
+      policy.chooseVolume(volumes, blockSize);
+      Assert.fail("expected to throw DiskOutOfSpaceException");
+    } catch (DiskOutOfSpaceException e) {
+      Assert.assertEquals(
+          "Not returning the expected message",
+          "Insufficient space for an additional block. Volume with the most available space has 600 bytes free, configured block size is "
+              + blockSize,
+          e.getMessage());
+    }
+  }
 
 }
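
To see the enriched message end to end, here is a hedged usage sketch reusing the hypothetical Volume and RoundRobinSketch types from the block above; it reproduces the scenario in the new test (volumes with 500 and 600 bytes free, a 700-byte block) without Mockito by implementing Volume with lambdas.

import java.util.Arrays;
import java.util.List;

class ChooseVolumeDemo {
  public static void main(String[] args) {
    // Two volumes, mirroring the test: 500 and 600 bytes of free space.
    Volume small = () -> 500L;
    Volume large = () -> 600L;
    List<Volume> volumes = Arrays.asList(small, large);

    RoundRobinSketch policy = new RoundRobinSketch();
    try {
      policy.chooseVolume(volumes, 700L); // block size exceeds every volume
    } catch (DiskOutOfSpaceException e) {
      // Expected to print the message asserted in the test, including
      // "600 bytes free" and the 700-byte block size.
      System.out.println(e.getMessage());
    }
  }
}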