
HADOOP-1536. Remove file locks from libhdfs tests. Contributed by Dhruba.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@551947 13f79535-47bb-0310-9956-ffa450edef68
Nigel Daley 18 years ago
commit 45c3bc4746
4 changed files with 3 additions and 99 deletions
  1. CHANGES.txt (+ 3 - 0)
  2. src/c++/libhdfs/hdfs.c (+ 0 - 75)
  3. src/c++/libhdfs/hdfs.h (+ 0 - 19)
  4. src/c++/libhdfs/hdfs_test.c (+ 0 - 5)

+ 3 - 0
CHANGES.txt

@@ -265,6 +265,9 @@ Trunk (unreleased changes)
  81. HADOOP-1485.  Add metrics for monitoring shuffle.
      (Devaraj Das via cutting)
 
+ 82. HADOOP-1536.  Remove file locks from libhdfs tests.
+     (Dhruba Borthakur via nigel)
+
 
 Release 0.13.0 - 2007-06-08
 

+ 0 - 75
src/c++/libhdfs/hdfs.c

@@ -948,81 +948,6 @@ int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
 
 
 
-int hdfsLock(hdfsFS fs, const char* path, int shared)
-{
-    // JAVA EQUIVALENT:
-    //  Path p = new Path(path);
-    //  fs.lock(p);
-
-    //Get the JNIEnv* corresponding to current thread
-    JNIEnv* env = getJNIEnv();
-
-    //Parameters
-    jobject jFS = (jobject)fs;
-    jboolean jb_shared = shared;
-
-    //Create an object of org.apache.hadoop.fs.Path
-    jobject jPath = constructNewObjectOfPath(env, path);
-    if (jPath == NULL) {
-        return -1;
-    }
-
-    //Lock the file
-    int retval = 0;
-    if (invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-                     "lock", "(Lorg/apache/hadoop/fs/Path;Z)V",
-                     jPath, jb_shared) != 0) {
-        fprintf(stderr, "Call to org.apache.fs.FileSystem::lock failed!\n");
-        errno = EINTERNAL;
-        retval = -1;
-    }
-
-    //Delete unnecessary local references
-    destroyLocalReference(env, jPath);
-
-    return retval;
-}
-
-
-
-int hdfsReleaseLock(hdfsFS fs, const char* path)
-{
-    // JAVA EQUIVALENT:
-    //  Path f = new Path(path);
-    //  fs.release(f);
-
-    //Get the JNIEnv* corresponding to current thread
-    JNIEnv* env = getJNIEnv();
-
-    jobject jFS = (jobject)fs;
-
-    //Create an object of java.io.File
-    jobject jPath = constructNewObjectOfPath(env, path);
-    if (jPath == NULL) {
-        return -1;
-    }
-
-    //Release the lock on the file
-    int retval = 0;
-    if (invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS, "release",
-                     "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
-        fprintf(stderr, "Call to org.apache.hadoop.fs.FileSystem::"
-                "release failed!\n");
-        errno = EINTERNAL;
-        retval = -1;
-        goto done;
-    }
-
-    done:
-
-    //Delete unnecessary local references
-    destroyLocalReference(env, jPath);
-
-    return retval;
-}
-
-
-
 char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
 {
     // JAVA EQUIVALENT:

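For context, callers of the two functions deleted above used them roughly as in the following sketch. The helper function, path handling, and error messages here are illustrative only and are not part of this change; the filesystem handle is assumed to have been obtained elsewhere via the usual libhdfs connect call. Per the documentation removed from hdfs.h below, both calls returned 0 on success and -1 on error, and the implementation set errno to EINTERNAL when the underlying JNI invocation failed.

    #include <stdio.h>
    #include "hdfs.h"

    /* Illustrative pre-commit usage of the removed locking API
     * (hypothetical helper, not code from this tree). */
    static int lockAndRelease(hdfsFS fs, const char* path)
    {
        /* shared != 0 requested a shared lock (hdfs_test.c passed 1);
         * 0 requested an exclusive lock. */
        if (hdfsLock(fs, path, 1) != 0) {
            fprintf(stderr, "hdfsLock failed on %s\n", path);
            return -1;
        }
        /* ... work on the locked path ... */
        if (hdfsReleaseLock(fs, path) != 0) {
            fprintf(stderr, "hdfsReleaseLock failed on %s\n", path);
            return -1;
        }
        return 0;
    }

After this commit, client code containing such calls no longer builds against libhdfs, and the corresponding checks can simply be dropped, as done for hdfs_test.c below.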
+ 0 - 19
src/c++/libhdfs/hdfs.h

@@ -267,25 +267,6 @@ extern  "C" {
     int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
 
 
-    /**
-     * hdfsLock - Obtain a lock on the file.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @param shared Shared/exclusive lock-type. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    int hdfsLock(hdfsFS fs, const char* path, int shared);
-
-
-    /**
-     * hdfsReleaseLock - Release the lock.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    int hdfsReleaseLock(hdfsFS fs, const char* path);
-
-
     /** 
      * hdfsGetWorkingDirectory - Get the current working directory for
      * the given filesystem.

+ 0 - 5
src/c++/libhdfs/hdfs_test.c

@@ -138,11 +138,6 @@ int main(int argc, char **argv) {
         fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsLock: %s\n", ((result = hdfsLock(fs, srcPath, 1)) ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsReleaseLock: %s\n", ((result = hdfsReleaseLock(fs, srcPath)) ? "Failed!" : "Success!"));
-        totalResult += result;
-
         const char* slashTmp = "/tmp";
         const char* newDirectory = "/tmp/newdir";
         fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));