
HADOOP-13272. ViewFileSystem should support storage policy related API. Contributed by Peter Shi

Chris Douglas, 9 years ago
commit 22a9a6b0d4
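
For context: with this change, storage-policy calls made against a viewfs:// path resolve through the mount table and are delegated to the backing file system. A minimal usage sketch follows; the mount point /data, the target hdfs://nn1/data, the file name, and the policy name "COLD" are illustrative assumptions, not part of this patch.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount table entry: /data in the view maps to hdfs://nn1/data.
    conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");

    FileSystem viewFs = FileSystem.get(URI.create("viewfs://default/"), conf);
    Path file = new Path("/data/reports/part-0000");

    // Resolved against the mount target, then handled by the backing HDFS.
    viewFs.setStoragePolicy(file, "COLD");
    BlockStoragePolicySpi policy = viewFs.getStoragePolicy(file);
    System.out.println("policy = " + policy.getName());

    // Clears the explicit policy so the file inherits from its parent again.
    viewFs.unsetStoragePolicy(file);
  }
}

Paths that stay inside the mount table itself keep read-only semantics, as the ViewFileSystem changes below show: setStoragePolicy/unsetStoragePolicy throw AccessControlException and getStoragePolicy throws NotInMountpointException.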

+ 20 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -39,12 +40,12 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
@@ -425,5 +426,21 @@ class ChRootedFileSystem extends FilterFileSystem {
   @Override
   public FsServerDefaults getServerDefaults(Path f) throws IOException {
     return super.getServerDefaults(fullPath(f));
-  }  
+  }
+
+  @Override
+  public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+    return super.getStoragePolicy(fullPath(src));
+  }
+
+  @Override
+  public void setStoragePolicy(Path src, String policyName) throws IOException {
+    super.setStoragePolicy(fullPath(src), policyName);
+  }
+
+  @Override
+  public void unsetStoragePolicy(Path src) throws IOException {
+    super.unsetStoragePolicy(fullPath(src));
+  }
+
 }

+ 74 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
@@ -35,6 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -48,6 +50,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -56,7 +59,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
 import org.apache.hadoop.security.AccessControlException;
@@ -760,6 +762,43 @@ public class ViewFileSystem extends FileSystem {
     res.targetFileSystem.deleteSnapshot(res.remainingPath, snapshotName);
   }
 
+  @Override
+  public void setStoragePolicy(Path src, String policyName) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName);
+  }
+
+  @Override
+  public void unsetStoragePolicy(Path src) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    res.targetFileSystem.unsetStoragePolicy(res.remainingPath);
+  }
+
+  @Override
+  public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    return res.targetFileSystem.getStoragePolicy(res.remainingPath);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    Collection<BlockStoragePolicySpi> allPolicies = new HashSet<>();
+    for (FileSystem fs : getChildFileSystems()) {
+      try {
+        Collection<? extends BlockStoragePolicySpi> policies =
+            fs.getAllStoragePolicies();
+        allPolicies.addAll(policies);
+      } catch (UnsupportedOperationException e) {
+        // ignored
+      }
+    }
+    return allPolicies;
+  }
+
   /*
    * An instance of this class represents an internal dir of the viewFs
    * that is internal dir of the mount table.
@@ -1079,5 +1118,39 @@ public class ViewFileSystem extends FileSystem {
     public QuotaUsage getQuotaUsage(Path f) throws IOException {
       throw new NotInMountpointException(f, "getQuotaUsage");
     }
+
+    @Override
+    public void setStoragePolicy(Path src, String policyName)
+        throws IOException {
+      checkPathIsSlash(src);
+      throw readOnlyMountTable("setStoragePolicy", src);
+    }
+
+    @Override
+    public void unsetStoragePolicy(Path src) throws IOException {
+      checkPathIsSlash(src);
+      throw readOnlyMountTable("unsetStoragePolicy", src);
+    }
+
+    @Override
+    public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+      throw new NotInMountpointException(src, "getStoragePolicy");
+    }
+
+    @Override
+    public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+        throws IOException {
+      Collection<BlockStoragePolicySpi> allPolicies = new HashSet<>();
+      for (FileSystem fs : getChildFileSystems()) {
+        try {
+          Collection<? extends BlockStoragePolicySpi> policies =
+              fs.getAllStoragePolicies();
+          allPolicies.addAll(policies);
+        } catch (UnsupportedOperationException e) {
+          // ignored
+        }
+      }
+      return allPolicies;
+    }
   }
 }
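
Note that getAllStoragePolicies() above returns the union of the policies reported by the child file systems and silently skips children that throw UnsupportedOperationException. A caller that needs a policy to be present on every mount target therefore has to probe each child itself. A minimal sketch of such a probe (the class and method names are hypothetical; getChildFileSystems() is a LimitedPrivate API, so this fits test or admin code rather than general client code):

import java.io.IOException;

import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;

public class StoragePolicyProbe {
  /** Returns true only if every child file system reports the named policy. */
  static boolean policyOnAllChildren(FileSystem viewFs, String policyName)
      throws IOException {
    for (FileSystem child : viewFs.getChildFileSystems()) {
      boolean found = false;
      try {
        for (BlockStoragePolicySpi p : child.getAllStoragePolicies()) {
          if (policyName.equals(p.getName())) {
            found = true;
            break;
          }
        }
      } catch (UnsupportedOperationException e) {
        // This child cannot report policies; treat the policy as absent.
      }
      if (!found) {
        return false;
      }
    }
    return true;
  }
}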

+ 65 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java

@@ -468,4 +468,69 @@ public class TestChRootedFileSystem {
     verify(mockFs).renameSnapshot(chRootedSnapRootPath, "snapOldName",
         "snapNewName");
   }
+
+  @Test(timeout = 30000)
+  public void testSetStoragePolicy() throws Exception {
+    Path storagePolicyPath = new Path("/storagePolicy");
+    Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+
+    URI chrootUri = URI.create("mockfs://foo/a/b");
+    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
+    FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
+        .getRawFileSystem();
+
+    chrootFs.setStoragePolicy(storagePolicyPath, "HOT");
+    verify(mockFs).setStoragePolicy(chRootedStoragePolicyPath, "HOT");
+  }
+
+  @Test(timeout = 30000)
+  public void testUnsetStoragePolicy() throws Exception {
+    Path storagePolicyPath = new Path("/storagePolicy");
+    Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+
+    URI chrootUri = URI.create("mockfs://foo/a/b");
+    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
+    FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
+        .getRawFileSystem();
+
+    chrootFs.unsetStoragePolicy(storagePolicyPath);
+    verify(mockFs).unsetStoragePolicy(chRootedStoragePolicyPath);
+  }
+
+  @Test(timeout = 30000)
+  public void testGetStoragePolicy() throws Exception {
+    Path storagePolicyPath = new Path("/storagePolicy");
+    Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+
+    URI chrootUri = URI.create("mockfs://foo/a/b");
+    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
+    FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
+        .getRawFileSystem();
+
+    chrootFs.getStoragePolicy(storagePolicyPath);
+    verify(mockFs).getStoragePolicy(chRootedStoragePolicyPath);
+  }
+
+  @Test(timeout = 30000)
+  public void testGetAllStoragePolicy() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+
+    URI chrootUri = URI.create("mockfs://foo/a/b");
+    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
+    FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
+        .getRawFileSystem();
+
+    chrootFs.getAllStoragePolicies();
+    verify(mockFs).getAllStoragePolicies();
+  }
 }
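
What these tests pin down is the chroot translation: a call on /storagePolicy against a ChRootedFileSystem rooted at mockfs://foo/a/b must reach the underlying file system as /a/b/storagePolicy. A standalone sketch of that translation, which mirrors (but does not reproduce) the wrapper's private fullPath helper used by the new overrides:

import org.apache.hadoop.fs.Path;

public class ChrootTranslationSketch {
  public static void main(String[] args) {
    // Assumed chroot, taken from a URI such as mockfs://foo/a/b.
    String chRootPathPart = "/a/b";
    Path callerPath = new Path("/storagePolicy");

    // Prepend the chroot to the path the caller sees before delegating
    // to the wrapped file system.
    Path fullPath = new Path(chRootPathPart + callerPath.toUri().getPath());

    System.out.println(fullPath);  // prints /a/b/storagePolicy
  }
}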

+ 42 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -19,42 +19,41 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
-
+import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
-import static org.apache.hadoop.fs.FileSystemTestHelper.*;
-import org.apache.hadoop.fs.permission.AclEntry;
-import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
-import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
 
 /**
@@ -885,4 +884,35 @@ abstract public class ViewFileSystemBaseTest {
   public void testInternalDeleteSnapshot() throws IOException {
     fsView.deleteSnapshot(new Path("/internalDir"), "snap1");
   }
+
+  @Test(expected = AccessControlException.class)
+  public void testInternalSetStoragePolicy() throws IOException {
+    fsView.setStoragePolicy(new Path("/internalDir"), "HOT");
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void testInternalUnsetStoragePolicy() throws IOException {
+    fsView.unsetStoragePolicy(new Path("/internalDir"));
+  }
+
+  @Test(expected = NotInMountpointException.class)
+  public void testInternalgetStoragePolicy() throws IOException {
+    fsView.getStoragePolicy(new Path("/internalDir"));
+  }
+
+  @Test
+  public void testInternalGetAllStoragePolicies() throws IOException {
+    Collection<? extends BlockStoragePolicySpi> policies =
+        fsView.getAllStoragePolicies();
+    for (FileSystem fs : fsView.getChildFileSystems()) {
+      try {
+        for (BlockStoragePolicySpi s : fs.getAllStoragePolicies()) {
+          assertTrue("Missing policy: " + s, policies.contains(s));
+        }
+      } catch (UnsupportedOperationException e) {
+        // ignore
+      }
+    }
+  }
+
 }