فهرست منبع

HADOOP-3485. Allow writing to files over fuse.
(Pete Wyckoff via dhruba)



git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@678080 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 17 سال پیش
والد
کامیت
c3a9755bc5
3 فایل‌های تغییر یافته به همراه 16 افزوده شده و 6 حذف شده
  1. 3 0
      CHANGES.txt
  2. 12 5
      src/c++/libhdfs/hdfs.c
  3. 1 1
      src/c++/libhdfs/hdfs_test.c

+ 3 - 0
CHANGES.txt

@@ -134,6 +134,9 @@ Trunk (unreleased changes)
     HADOOP-3485. Allow writing to files over fuse.
     (Pete Wyckoff via dhruba)
 
+    HADOOP-3723. The flags to the libhdfs.create call can be treated as
+    a bitmask. (Pete Wyckoff via dhruba)
+
 Release 0.18.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 12 - 5
src/c++/libhdfs/hdfs.c

@@ -253,9 +253,16 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
 
     jobject jFS = (jobject)fs;
 
+    if(flags & O_RDWR) {
+      fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
+      errno = ENOTSUP;
+      return NULL;
+    }
+
+
     /* The hadoop java api/signature */
-    const char* method = (flags == O_RDONLY) ? "open" : "create";
-    const char* signature = (flags == O_RDONLY) ?
+    const char* method = ((flags & O_WRONLY) == 0) ? "open" : "create";
+    const char* signature = ((flags & O_WRONLY) == 0) ?
         JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
         JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
 
@@ -302,7 +309,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
         jBufferSize = jVal.i;
     }
 
-    if (flags == O_WRONLY) {
+    if (flags & O_WRONLY) {
         //replication
 
         if (!replication) {
@@ -334,7 +341,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
     /* Create and return either the FSDataInputStream or
        FSDataOutputStream references jobject jStream */
 
-    if (flags == O_RDONLY) {
+    if ((flags & O_WRONLY) == 0) {
         if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
                          method, signature, jPath, jBufferSize)) {
             fprintf(stderr, "Call to org.apache.hadoop.fs."
@@ -361,7 +368,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
         return NULL;
     }
     file->file = (*env)->NewGlobalRef(env, jVal.l);
-    file->type = ((flags == O_RDONLY) ? INPUT : OUTPUT);
+    file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
 
     destroyLocalReference(env, jVal.l);
 

+ 1 - 1
src/c++/libhdfs/hdfs_test.c

@@ -37,7 +37,7 @@ int main(int argc, char **argv) {
         
         const char* writePath = "/tmp/testfile.txt";
         
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
+        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);