
HDFS-1249. With fuse-dfs, chown which only has owner (or only group) argument fails with Input/output error. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1363466 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 12 years ago
parent
commit
5d5a83877a

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -349,6 +349,9 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3675. libhdfs: follow documented return codes. (Colin Patrick McCabe via eli)
 
+    HDFS-1249. With fuse-dfs, chown which only has owner (or only group)
+    argument fails with Input/output error. (Colin Patrick McCabe via eli)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

+ 21 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c

@@ -40,19 +40,26 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
   assert(dfs);
   assert('/' == *path);
 
-  user = getUsername(uid);
-  if (NULL == user) {
-    ERROR("Could not lookup the user id string %d",(int)uid); 
-    ret = -EIO;
+  if ((uid == -1) && (gid == -1)) {
+    ret = 0;
     goto cleanup;
   }
-
-  group = getGroup(gid);
-  if (group == NULL) {
-    ERROR("Could not lookup the group id string %d",(int)gid);
-    ret = -EIO;
-    goto cleanup;
-  } 
+  if (uid != -1) {
+    user = getUsername(uid);
+    if (NULL == user) {
+      ERROR("Could not lookup the user id string %d",(int)uid);
+      ret = -EIO;
+      goto cleanup;
+    }
+  }
+  if (gid != -1) {
+    group = getGroup(gid);
+    if (group == NULL) {
+      ERROR("Could not lookup the group id string %d",(int)gid);
+      ret = -EIO;
+      goto cleanup;
+    }
+  }
 
   userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
   if (userFS == NULL) {
@@ -62,8 +69,9 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
   }
 
   if (hdfsChown(userFS, path, user, group)) {
-    ERROR("Could not chown %s to %d:%d", path, (int)uid, gid);
-    ret = (errno > 0) ? -errno : -EIO;
+    ret = errno;
+    ERROR("Could not chown %s to %d:%d: error %d", path, (int)uid, gid, ret);
+    ret = (ret > 0) ? -ret : -EIO;
     goto cleanup;
   }
 

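For context: chown(2) uses (uid_t)-1 / (gid_t)-1 to mean "leave this ID unchanged", and FUSE passes those values through to dfs_chown(). A group-only change therefore arrives with uid == -1, which previously failed the getUsername() lookup and surfaced as an Input/output error. A minimal reproduction sketch against a hypothetical fuse-dfs mount (the path and group name are placeholders):

    /*
     * Reproduction sketch (hypothetical mount path and group name): a
     * group-only change is expressed to chown(2) with an owner of
     * (uid_t)-1, which FUSE passes straight through to dfs_chown().
     */
    #include <errno.h>
    #include <grp.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/hdfs/some-file"; /* hypothetical fuse-dfs mount */
        struct group *grp = getgrnam("hadoop");   /* hypothetical group */

        if (grp == NULL) {
            fprintf(stderr, "getgrnam failed: %s\n", strerror(errno));
            return 1;
        }
        /* Owner stays unchanged ((uid_t)-1); before this fix the uid -1
         * lookup failed in dfs_chown() and the call returned EIO. */
        if (chown(path, (uid_t)-1, grp->gr_gid) != 0) {
            fprintf(stderr, "chown failed: %s\n", strerror(errno));
            return 1;
        }
        return 0;
    }
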
+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c

@@ -1811,9 +1811,7 @@ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
     }
 
     if (owner == NULL && group == NULL) {
-      fprintf(stderr, "Both owner and group cannot be null in chown");
-      errno = EINVAL;
-      return -1;
+      return 0;
     }
 
     jobject jFS = (jobject)fs;
@@ -1823,8 +1821,8 @@ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
         return -1;
     }
 
-    jstring jOwnerString = (*env)->NewStringUTF(env, owner); 
-    jstring jGroupString = (*env)->NewStringUTF(env, group); 
+    jstring jOwnerString = owner ? (*env)->NewStringUTF(env, owner) : NULL;
+    jstring jGroupString = group ? (*env)->NewStringUTF(env, group) : NULL;
 
     //Create the directory
     int ret = 0;

+ 10 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h

@@ -556,14 +556,16 @@ extern  "C" {
     tOffset hdfsGetUsed(hdfsFS fs);
 
     /** 
-     * hdfsChown 
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param owner this is a string in Hadoop land. Set to null or "" if only setting group
-     * @param group  this is a string in Hadoop land. Set to null or "" if only setting user
-     * @return 0 on success else -1
-     */
-    int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group);
+     * Change the user and/or group of a file or directory.
+     *
+     * @param fs            The configured filesystem handle.
+     * @param path          the path to the file or directory
+     * @param owner         User string.  Set to NULL for 'no change'
+     * @param group         Group string.  Set to NULL for 'no change'
+     * @return              0 on success else -1
+     */
+    int hdfsChown(hdfsFS fs, const char* path, const char *owner,
+                  const char *group);
 
     /** 
      * hdfsChmod

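Under the revised contract a caller passes NULL for whichever of owner or group should stay unchanged, mirroring the -1 convention on the FUSE side. A minimal usage sketch, with an illustrative connection, path, and group name:

    /*
     * Minimal libhdfs sketch: NULL owner means "no change", so only the
     * group is updated.  The connection arguments and path are
     * illustrative placeholders.
     */
    #include <stdio.h>
    #include "hdfs.h"

    int main(void)
    {
        hdfsFS fs = hdfsConnect("default", 0); /* use the configured default FS */
        if (fs == NULL) {
            fprintf(stderr, "hdfsConnect failed\n");
            return 1;
        }
        if (hdfsChown(fs, "/user/example/file", NULL, "hadoop") != 0) {
            fprintf(stderr, "hdfsChown failed\n");
            hdfsDisconnect(fs);
            return 1;
        }
        hdfsDisconnect(fs);
        return 0;
    }
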
+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c

@@ -67,6 +67,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     char prefix[256], tmp[256];
     hdfsFile file;
     int ret, expected;
+    hdfsFileInfo *fileInfo;
 
     snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
 
@@ -120,6 +121,28 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     // TODO: Non-recursive delete should fail?
     //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
 
+    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
     EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
     return 0;
 }