Bladeren bron

HADOOP-5333. libhdfs supports appending to files. (dhruba)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.20@752568 13f79535-47bb-0310-9956-ffa450edef68
Dhruba Borthakur 16 jaren geleden
bovenliggende
commit
4fb8db82b2

+ 2 - 0
CHANGES.txt

@@ -722,6 +722,8 @@ Release 0.19.2 - Unreleased
 
     HADOOP-5332. Appending to files is not allowed (by default) unless
     dfs.support.append is set to true. (dhruba)
+
+    HADOOP-5333. libhdfs supports appending to files. (dhruba)
  
 Release 0.19.1 - Unreleased
 

+ 9 - 13
src/c++/libhdfs/hdfs.c

@@ -393,7 +393,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
        FSData{Input|Output}Stream f{is|os} = fs.create(f);
        return f{is|os};
     */
-
     /* Get the JNIEnv* corresponding to current thread */
     JNIEnv* env = getJNIEnv();
 
@@ -504,20 +503,17 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                                    signature);
         goto done;
       }
+    }  else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
       // WRITE/APPEND?
-      else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
-        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
-                         method, signature, jPath)) {
-          errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
-                                     "FileSystem::%s(%s)", method,
-                                     signature);
-          goto done;
-        }
+       if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+                       method, signature, jPath)) {
+        errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+                                   "FileSystem::%s(%s)", method,
+                                   signature);
+        goto done;
       }
-
-    }
-    // WRITE/CREATE
-    else {
+    } else {
+        // WRITE/CREATE
         jboolean jOverWrite = 1;
         if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                          method, signature, jPath, jOverWrite,

+ 12 - 0
src/c++/libhdfs/hdfsJniHelper.c

@@ -19,10 +19,13 @@
 #include "hdfsJniHelper.h"
 
 static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
 static volatile int hashTableInited = 0;
 
 #define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
 #define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
+#define LOCK_JVM_MUTEX() pthread_mutex_lock(&jvmMutex)
+#define UNLOCK_JVM_MUTEX() pthread_mutex_unlock(&jvmMutex)
 
 
 /** The Native return types that methods could return */
@@ -391,9 +394,14 @@ JNIEnv* getJNIEnv(void)
     jint rv = 0; 
     jint noVMs = 0;
 
+    // Only the first thread should create the JVM. The other threads should
+    // just use the JVM created by the first thread.
+    LOCK_JVM_MUTEX();
+
     rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
     if (rv != 0) {
         fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
+        UNLOCK_JVM_MUTEX();
         return NULL;
     }
 
@@ -402,6 +410,7 @@ JNIEnv* getJNIEnv(void)
         char *hadoopClassPath = getenv("CLASSPATH");
         if (hadoopClassPath == NULL) {
             fprintf(stderr, "Environment variable CLASSPATH not set!\n");
+            UNLOCK_JVM_MUTEX();
             return NULL;
         } 
         char *hadoopClassPathVMArg = "-Djava.class.path=";
@@ -447,6 +456,7 @@ JNIEnv* getJNIEnv(void)
         if (rv != 0) {
             fprintf(stderr, "Call to JNI_CreateJavaVM failed "
                     "with error: %d\n", rv);
+            UNLOCK_JVM_MUTEX();
             return NULL;
         }
 
@@ -459,9 +469,11 @@ JNIEnv* getJNIEnv(void)
         if (rv != 0) {
             fprintf(stderr, "Call to AttachCurrentThread "
                     "failed with error: %d\n", rv);
+            UNLOCK_JVM_MUTEX();
             return NULL;
         }
     }
+    UNLOCK_JVM_MUTEX();
 
     return env;
 }

+ 36 - 0
src/c++/libhdfs/tests/conf/hdfs-site.xml

@@ -21,4 +21,40 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.address</name>
+  <value>0.0.0.0:50012</value>
+  <description>
+    The address where the datanode server will listen to.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.http.address</name>
+  <value>0.0.0.0:50079</value>
+  <description>
+    The datanode http server address and port.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.ipc.address</name>
+  <value>0.0.0.0:50022</value>
+  <description>
+    The datanode ipc server address and port.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.http.address</name>
+  <value>0.0.0.0:50072</value>
+  <description>
+    The address and the base port where the dfs namenode web ui will listen on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
 </configuration>

+ 1 - 0
src/c++/libhdfs/tests/test-libhdfs.sh

@@ -117,6 +117,7 @@ cd $HADOOP_HOME
 echo Y | $HADOOP_BIN_DIR/hadoop namenode -format &&
 $HADOOP_BIN_DIR/hadoop-daemon.sh start namenode && sleep 2 && 
 $HADOOP_BIN_DIR/hadoop-daemon.sh start datanode && sleep 2 && 
+sleep 20 &&
 echo CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH LD_PRELOAD="$LIBHDFS_INSTALL_DIR/libhdfs.so:$LIB_JVM_DIR/libjvm.so" $LIBHDFS_BUILD_DIR/$HDFS_TEST && 
 CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH LD_PRELOAD="$LIB_JVM_DIR/libjvm.so:$LIBHDFS_INSTALL_DIR/libhdfs.so:" $LIBHDFS_BUILD_DIR/$HDFS_TEST
 BUILD_STATUS=$?