HDFS-12190. Enable 'hdfs dfs -stat' to display access time. Contributed by Yongjun Zhang.

Yongjun Zhang, 7 years ago
commit c6330f22a5

+ 14 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -40,8 +40,10 @@ import org.apache.hadoop.fs.FileStatus;
  *   %o: Block size<br>
  *   %r: replication<br>
  *   %u: User name of owner<br>
- *   %y: UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
- *   %Y: Milliseconds since January 1, 1970 UTC<br>
+ *   %x: atime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %X: atime Milliseconds since January 1, 1970 UTC<br>
+ *   %y: mtime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %Y: mtime Milliseconds since January 1, 1970 UTC<br>
  * If the format is not specified, %y is used by default.
  */
 @InterfaceAudience.Private
@@ -62,9 +64,10 @@ class Stat extends FsCommand {
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
     "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
-    "of owner (%u), modification date (%y, %Y)." + NEWLINE +
-    "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
-    "%Y shows milliseconds since January 1, 1970 UTC." + NEWLINE +
+    "of owner (%u), access date (%x, %X)." + NEWLINE +
+    "modification date (%y, %Y)." + NEWLINE +
+    "%x and %y show UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
+    "%X and %Y show milliseconds since January 1, 1970 UTC." + NEWLINE +
     "If the format is not specified, %y is used by default." + NEWLINE;
     "If the format is not specified, %y is used by default." + NEWLINE;
 
 
   protected final SimpleDateFormat timeFmt;
   protected final SimpleDateFormat timeFmt;
@@ -127,6 +130,12 @@ class Stat extends FsCommand {
           case 'u':
             buf.append(stat.getOwner());
             break;
+          case 'x':
+            buf.append(timeFmt.format(new Date(stat.getAccessTime())));
+            break;
+          case 'X':
+            buf.append(stat.getAccessTime());
+            break;
           case 'y':
             buf.append(timeFmt.format(new Date(stat.getModificationTime())));
             break;
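To make the new specifiers concrete, here is a small, self-contained sketch (not part of the patch) that formats an access time the same way Stat's %x does, i.e. with a SimpleDateFormat fixed to UTC; the timestamp value is purely illustrative:

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class AtimeFormatSketch {
  public static void main(String[] args) {
    // %x and %y render "yyyy-MM-dd HH:mm:ss" in UTC.
    SimpleDateFormat timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    timeFmt.setTimeZone(TimeZone.getTimeZone("UTC"));

    long accessTime = 1500000000000L; // illustrative atime, ms since the epoch

    System.out.println("%x -> " + timeFmt.format(new Date(accessTime))); // 2017-07-14 02:40:00
    System.out.println("%X -> " + accessTime);                           // raw milliseconds
  }
}
```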

+ 2 - 2
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -676,11 +676,11 @@ stat
 
 
 Usage: `hadoop fs -stat [format] <path> ...`

-Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
+Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), access date(%x, %X), and modification date (%y, %Y). %x and %y show UTC date as "yyyy-MM-dd HH:mm:ss", and %X and %Y show milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.

 Example:

-* `hadoop fs -stat "%F %a %u:%g %b %y %n" /file`
+* `hadoop fs -stat "type:%F perm:%a %u:%g size:%b mtime:%y atime:%x name:%n" /file`

 Exit Code: Returns 0 on success and -1 on error.
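For illustration only (not part of this change), the same stat call can also be driven from Java through FsShell and ToolRunner; the format string and path below are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class StatUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hadoop fs -stat "type:%F atime:%x mtime:%y name:%n" /file
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] {"-stat", "type:%F atime:%x mtime:%y name:%n", "/file"});
    System.exit(rc);
  }
}
```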
 
 

+ 7 - 3
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -919,15 +919,19 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*of owner \(%u\), modification date \(%y, %Y\).( )*</expected-output>
+          <expected-output>^( |\t)*of owner \(%u\), access date \(%x, %X\).( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*%y shows UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
+          <expected-output>^( |\t)*modification date \(%y, %Y\).( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*%Y shows milliseconds since January 1, 1970 UTC.( )*</expected-output>
+          <expected-output>^( |\t)*%x and %y show UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*%X and %Y show milliseconds since January 1, 1970 UTC.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

+ 10 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -36,12 +36,12 @@ import java.util.zip.GZIPOutputStream;
 
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
+
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.log4j.Level;
 import org.junit.Test;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -65,6 +65,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.rules.Timeout;
 import org.junit.AfterClass;
@@ -115,6 +116,7 @@ public class TestDFSShell {
         GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1000);

     miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     miniCluster.waitActive();
@@ -2002,8 +2004,12 @@ public class TestDFSShell {
     DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0);
     final FileStatus status1 = dfs.getFileStatus(testDir1);
     final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
+    final String atime1 = fmt.format(new Date(status1.getAccessTime()));
+    long now = Time.now();
+    dfs.setTimes(testFile2, now + 3000, now + 6000);
     final FileStatus status2 = dfs.getFileStatus(testFile2);
     final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
+    final String atime2 = fmt.format(new Date(status2.getAccessTime()));

     final ByteArrayOutputStream out = new ByteArrayOutputStream();
     System.setOut(new PrintStream(out));
@@ -2036,17 +2042,19 @@
         out.toString().contains(String.valueOf(octal)));

     out.reset();
-    doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1, testFile2);
+    doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);

     n = status2.getPermission().toShort();
     octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
     assertTrue(out.toString(), out.toString().contains(mtime1));
+    assertTrue(out.toString(), out.toString().contains(atime1));
     assertTrue(out.toString(), out.toString().contains("regular file"));
     assertTrue(out.toString(),
         out.toString().contains(status2.getPermission().toString()));
     assertTrue(out.toString(),
         out.toString().contains(String.valueOf(octal)));
     assertTrue(out.toString(), out.toString().contains(mtime2));
+    assertTrue(out.toString(), out.toString().contains(atime2));
   }

   private static void doFsStat(Configuration conf, String format, Path... files)
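For reference, a minimal sketch (illustrative path, not taken from the test) of the setTimes/getAccessTime round trip the new assertions rely on; note that HDFS only records access times at the granularity configured by dfs.namenode.accesstime.precision, which is why the test sets that key:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AccessTimeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/atime-demo"); // hypothetical existing file

    long now = System.currentTimeMillis();
    // FileSystem#setTimes(path, mtime, atime); passing -1 leaves that field unchanged.
    fs.setTimes(file, -1, now);

    FileStatus status = fs.getFileStatus(file);
    System.out.println("atime (ms since epoch) = " + status.getAccessTime());
  }
}
```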