
HDFS-12797. Add test for NFS mount of unsupported filesystems (e.g. file:///). Contributed by Mukul Kumar Singh.

Jitendra Pandey, 7 years ago · commit 8a1bd9a4f4

+ 87 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
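
Both negative tests added below expect gateway startup to fail with a java.nio.file.FileSystemException whose message names the offending scheme. The check itself lives in the NFS3 gateway's startup path rather than in this diff; the following is a minimal sketch of that behavior, where the class and method names are invented for illustration:

import java.net.URI;
import java.nio.file.FileSystemException;

// Sketch only: the real validation happens inside the gateway's startup
// path; this class and method name are hypothetical.
final class UnderlyingFsCheckSketch {
  static void checkUnderlyingFileSystem(URI defaultFs)
      throws FileSystemException {
    if (!"hdfs".equalsIgnoreCase(defaultFs.getScheme())) {
      // Message shape matches what the tests assert via expectMessage().
      throw new FileSystemException(
          "Only HDFS is supported as underlyingFileSystem, fs scheme:"
              + defaultFs.getScheme());
    }
  }
}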

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.nio.file.FileSystemException;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -33,9 +34,14 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 public class TestExportsTable {
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
  
   @Test
   public void testHdfsExportPoint() throws IOException {
@@ -70,7 +76,7 @@ public class TestExportsTable {
   }
 
   @Test
-  public void testViewFsExportPoint() throws IOException {
+  public void testViewFsMultipleExportPoint() throws IOException {
     NfsConfiguration config = new NfsConfiguration();
     MiniDFSCluster cluster = null;
     String clusterName = RandomStringUtils.randomAlphabetic(10);
@@ -182,6 +188,56 @@ public class TestExportsTable {
     }
   }
 
+  @Test
+  public void testViewFsRootExportPoint() throws IOException {
+    NfsConfiguration config = new NfsConfiguration();
+    MiniDFSCluster cluster = null;
+    String clusterName = RandomStringUtils.randomAlphabetic(10);
+
+    String exportPoint = "/";
+    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
+    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_SCHEME + "://" + clusterName);
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+    config.set("nfs.http.address", "0.0.0.0:0");
+
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(config).nnTopology(
+              MiniDFSNNTopology.simpleFederatedTopology(2))
+              .numDataNodes(2)
+              .build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
+      DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
+      Path base1 = new Path("/user1");
+      Path base2 = new Path("/user2");
+      hdfs1.delete(base1, true);
+      hdfs2.delete(base2, true);
+      hdfs1.mkdirs(base1);
+      hdfs2.mkdirs(base2);
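+      // Link each ViewFS mount point to a directory in a different
+      // federated namespace.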
+      ConfigUtil.addLink(config, clusterName, "/hdfs1",
+          hdfs1.makeQualified(base1).toUri());
+      ConfigUtil.addLink(config, clusterName, "/hdfs2",
+          hdfs2.makeQualified(base2).toUri());
+
+      exception.expect(FileSystemException.class);
+      exception.expectMessage(
+          "Only HDFS is supported as underlyingFileSystem, "
+              + "fs scheme:viewfs");
+      // Start nfs
+      final Nfs3 nfsServer = new Nfs3(config);
+      nfsServer.startServiceInternal(false);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testHdfsInternalExportPoint() throws IOException {
     NfsConfiguration config = new NfsConfiguration();
@@ -219,4 +275,34 @@ public class TestExportsTable {
       }
     }
   }
+
+  @Test
+  public void testInvalidFsExport() throws IOException {
+    NfsConfiguration config = new NfsConfiguration();
+    MiniDFSCluster cluster = null;
+
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+    config.set("nfs.http.address", "0.0.0.0:0");
+
+    try {
+      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+      cluster.waitActive();
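+      // The MiniDFSCluster is started on HDFS first; fs.defaultFS is then
+      // switched to the local filesystem, which the gateway must reject.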
+      config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+          FsConstants.LOCAL_FS_URI.toString());
+
+      exception.expect(FileSystemException.class);
+      exception.expectMessage(
+          "Only HDFS is supported as underlyingFileSystem, "
+              + "fs scheme:file");
+      // Start nfs
+      final Nfs3 nfsServer = new Nfs3(config);
+      nfsServer.startServiceInternal(false);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
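
To run just this class in a typical Hadoop checkout, something like mvn test -Dtest=TestExportsTable -pl hadoop-hdfs-project/hadoop-hdfs-nfs should work (standard surefire conventions; the exact invocation may vary by branch).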