HDFS-4139. fuse-dfs RO mode still allows file truncation. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1409093 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins, 12 years ago
commit a32639ac33

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -237,6 +237,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-4171. WebHDFS and HttpFs should accept only valid Unix user names. (tucu)
 
+    HDFS-4139. fuse-dfs RO mode still allows file truncation.
+    (Colin Patrick McCabe via eli)    
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h

@@ -31,7 +31,6 @@
 //
 typedef struct dfs_context_struct {
   int debug;
-  int read_only;
   int usetrash;
   int direct_io;
   char **protectedpaths;

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c

@@ -93,6 +93,18 @@ int main(int argc, char *argv[])
   if (!options.no_permissions) {
     fuse_opt_add_arg(&args, "-odefault_permissions");
   }
+  /*
+   * FUSE already has a built-in parameter for mounting the filesystem as
+   * read-only, -r.  We defined our own parameter for doing this called -oro.
+   * We support it by translating it into -r internally.
+   * The kernel intercepts and returns an error message for any "write"
+   * operations that the user attempts to perform on a read-only filesystem.
+   * That means that we don't have to write any code to handle read-only mode.
+   * See HDFS-4139 for more details.
+   */
+  if (options.read_only) {
+    fuse_opt_add_arg(&args, "-r");
+  }
 
   {
     char buf[80];

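For context, the translation the comment above describes can be sketched with libfuse's fuse_opt API. The struct and option-table names below (ro_options, ro_opt_spec) are illustrative only, not fuse-dfs's actual definitions; the project's own option handling lives in its other sources. The point of the sketch is that once "-r" is added to the argument list, the kernel rejects write operations itself, which is why the per-handler read_only checks below can be deleted.

#define FUSE_USE_VERSION 26
#include <fuse.h>      /* pulls in fuse_opt.h: fuse_args, fuse_opt_parse, ... */
#include <stddef.h>    /* offsetof */

struct ro_options {
  int read_only;                 /* set to 1 when the user passes -o ro */
};

#define RO_OPT(templ, field) { templ, offsetof(struct ro_options, field), 1 }

static struct fuse_opt ro_opt_spec[] = {
  RO_OPT("ro", read_only),       /* matches "-o ro" / "-oro" on the command line */
  FUSE_OPT_END
};

int main(int argc, char *argv[])
{
  struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
  struct ro_options opts = { 0 };

  if (fuse_opt_parse(&args, &opts, ro_opt_spec, NULL) == -1)
    return 1;

  if (opts.read_only) {
    /* Hand read-only enforcement to the kernel instead of checking a
     * flag in every write-path handler (mkdir, rename, rmdir, unlink). */
    fuse_opt_add_arg(&args, "-r");
  }

  /* ... fuse_main(args.argc, args.argv, &ops, NULL) would follow here ... */
  fuse_opt_free_args(&args);
  return 0;
}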
+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c

@@ -39,11 +39,6 @@ int dfs_mkdir(const char *path, mode_t mode)
     return -EACCES;
   }
 
-  if (dfs->read_only) {
-    ERROR("HDFS is configured read-only, cannot create directory %s", path);
-    return -EACCES;
-  }
-  
   ret = fuseConnectAsThreadUid(&conn);
   if (ret) {
     fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c

@@ -43,11 +43,6 @@ int dfs_rename(const char *from, const char *to)
     return -EACCES;
   }
 
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot rename directory %s", from);
-    return -EACCES;
-  }
-
   ret = fuseConnectAsThreadUid(&conn);
   if (ret) {
     fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "

+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c

@@ -44,12 +44,6 @@ int dfs_rmdir(const char *path)
     goto cleanup;
   }
 
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot delete directory %s", path);
-    ret = -EACCES;
-    goto cleanup;
-  }
-
   ret = fuseConnectAsThreadUid(&conn);
   if (ret) {
     fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "

+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c

@@ -40,12 +40,6 @@ int dfs_unlink(const char *path)
     goto cleanup;
   }
 
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot create directory %s", path);
-    ret = -EACCES;
-    goto cleanup;
-  }
-
   ret = fuseConnectAsThreadUid(&conn);
   if (ret) {
     fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c

@@ -114,7 +114,6 @@ void *dfs_init(void)
 
   // initialize the context
   dfs->debug                 = options.debug;
-  dfs->read_only             = options.read_only;
   dfs->usetrash              = options.usetrash;
   dfs->protectedpaths        = NULL;
   dfs->rdbuffer_size         = options.rdbuffer_size;
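As a follow-up note, the kernel-level enforcement referred to in the fuse_dfs.c comment means a write attempt on the read-only mount fails with EROFS before any fuse-dfs handler runs. A minimal sketch of what a caller observes; the mount path below is hypothetical.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
  /* Opening for write on a filesystem mounted read-only (-r) is rejected
   * by the kernel with EROFS; the FUSE daemon never sees the request. */
  int fd = open("/mnt/hdfs/some-file", O_WRONLY | O_CREAT, 0644);
  if (fd < 0) {
    printf("open: %s\n", strerror(errno));   /* expected: "Read-only file system" */
  } else {
    close(fd);
  }

  /* The same applies to mkdir, rename, rmdir, and unlink. */
  if (mkdir("/mnt/hdfs/new-dir", 0755) < 0) {
    printf("mkdir: %s\n", strerror(errno));
  }
  return 0;
}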