Browse source

Merging changes r1081580:r1083021 from trunk to federation

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1085475 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago
Parent commit: 7c7fca2948
40 changed files with 570 additions and 764 deletions
  1. +10 -0   CHANGES.txt
  2. +1 -0    src/contrib/build-contrib.xml
  3. +2 -1    src/contrib/build.xml
  4. +0 -22   src/contrib/fuse-dfs/bootstrap.sh
  5. +57 -62  src/contrib/fuse-dfs/build.xml
  6. +3 -0    src/contrib/fuse-dfs/configure.ac
  7. +16 -5   src/contrib/fuse-dfs/ivy.xml
  8. +2 -1    src/contrib/fuse-dfs/src/Makefile.am
  9. +5 -9    src/contrib/fuse-dfs/src/fuse_dfs.c
  10. +29 -6  src/contrib/fuse-dfs/src/fuse_dfs.h
  11. +1 -1   src/contrib/fuse-dfs/src/fuse_impls_access.c
  12. +2 -2   src/contrib/fuse-dfs/src/fuse_impls_chmod.c
  13. +4 -7   src/contrib/fuse-dfs/src/fuse_impls_chown.c
  14. +1 -1   src/contrib/fuse-dfs/src/fuse_impls_flush.c
  15. +3 -2   src/contrib/fuse-dfs/src/fuse_impls_getattr.c
  16. +5 -6   src/contrib/fuse-dfs/src/fuse_impls_mkdir.c
  17. +4 -3   src/contrib/fuse-dfs/src/fuse_impls_mknod.c
  18. +4 -5   src/contrib/fuse-dfs/src/fuse_impls_open.c
  19. +1 -1   src/contrib/fuse-dfs/src/fuse_impls_read.c
  20. +5 -6   src/contrib/fuse-dfs/src/fuse_impls_readdir.c
  21. +1 -2   src/contrib/fuse-dfs/src/fuse_impls_release.c
  22. +4 -5   src/contrib/fuse-dfs/src/fuse_impls_rename.c
  23. +4 -4   src/contrib/fuse-dfs/src/fuse_impls_rmdir.c
  24. +1 -1   src/contrib/fuse-dfs/src/fuse_impls_statfs.c
  25. +3 -3   src/contrib/fuse-dfs/src/fuse_impls_truncate.c
  26. +4 -6   src/contrib/fuse-dfs/src/fuse_impls_unlink.c
  27. +1 -2   src/contrib/fuse-dfs/src/fuse_impls_utimens.c
  28. +6 -5   src/contrib/fuse-dfs/src/fuse_impls_write.c
  29. +3 -3   src/contrib/fuse-dfs/src/fuse_init.c
  30. +28 -19 src/contrib/fuse-dfs/src/fuse_options.c
  31. +12 -11 src/contrib/fuse-dfs/src/fuse_trash.c
  32. +1 -1   src/contrib/fuse-dfs/src/fuse_users.c
  33. +276 -533 src/contrib/fuse-dfs/src/test/TestFuseDFS.java
  34. +7 -7   src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
  35. +2 -2   src/java/hdfs-default.xml
  36. +3 -1   src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  37. +56 -16 src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  38. +1 -1   src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
  39. +1 -1   src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  40. +1 -1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

+ 10 - 0
CHANGES.txt

@@ -296,6 +296,11 @@ Trunk (unreleased changes)
     HDFS-1731. Amend previous commit for this JIRA to fix build on Cygwin.
     (todd)
 
+    HDFS-780. Revive TestFuseDFS. (eli)
+
+    HDFS-1445. Batch the calls in DataStorage to FileUtil.createHardLink().
+    (Matt Foley via jghoman)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -354,6 +359,8 @@ Trunk (unreleased changes)
     HDFS-1738. change hdfs jmxget to return an empty string instead of 
     null when an attribute value is not available (tanping vi boryas)
 
+    HDFS-1757. Don't compile fuse-dfs by default. (eli)
+
 Release 0.22.0 - Unreleased
 
   NEW FEATURES
@@ -854,6 +861,9 @@ Release 0.21.1 - Unreleased
     via szetszwo)
 
 
+    HDFS-1596. Replace fs.checkpoint.* with dfs.namenode.checkpoint.*
+    in documentations.  (Harsh J Chouraria via szetszwo)
+
 Release 0.21.0 - 2010-08-13
 
   INCOMPATIBLE CHANGES

+ 1 - 0
src/contrib/build-contrib.xml

@@ -46,6 +46,7 @@
   <!-- NB: sun.arch.data.model is not supported on all platforms -->
   <property name="build.platform"
             value="${os.name}-${os.arch}-${sun.arch.data.model}"/>
+  <property name="build.c++.libhdfs" value="${build.dir}/../../c++/${build.platform}/lib"/>
   <property name="build.test" location="${build.dir}/test"/>
   <property name="build.examples" location="${build.dir}/examples"/>
   <property name="hadoop.log.dir" location="${build.dir}/test/logs"/>

+ 2 - 1
src/contrib/build.xml

@@ -28,7 +28,8 @@
   <!-- ====================================================== -->
   <target name="compile">
     <subant target="compile">
-      <fileset dir="." includes="*/build.xml"/>
+      <fileset dir="." includes="thriftfs/build.xml"/>
+      <fileset dir="." includes="hdfsproxy/build.xml"/>
     </subant>
   </target>
   

+ 0 - 22
src/contrib/fuse-dfs/bootstrap.sh

@@ -1,22 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#!/bin/sh
-
-aclocal
-automake -a
-autoconf
-./configure

+ 57 - 62
src/contrib/fuse-dfs/build.xml

@@ -17,43 +17,32 @@
    limitations under the License.
 -->
 
-<project name="fuse-dfs" default="compile">
+<project name="fuse-dfs" default="jar" xmlns:ivy="antlib:org.apache.ivy.ant">
 
   <import file="../build-contrib.xml"/>
 
-  <target name="check-libhdfs-fuse">
-    <condition property="libhdfs-fuse">
-      <and>
-        <isset property="fusedfs"/>
-        <isset property="libhdfs"/>
-      </and>
-    </condition>
-  </target>
-
-
   <target name="check-libhdfs-exists" if="fusedfs">
-  <property name="libhdfs.lib" value="${hadoop.root}/build/c++/${build.platform}/lib/libhdfs.so"/>
-        <available file="${libhdfs.lib}" property="libhdfs-exists"/>
+    <property name="libhdfs.lib" value="${build.c++.libhdfs}/libhdfs.so"/>
+    <available file="${libhdfs.lib}" property="libhdfs-exists"/>
     <fail message="libhdfs.so does not exist: ${libhdfs.lib}. Please check flags -Dlibhdfs=1 -Dfusedfs=1 are set or first try ant compile -Dcompile.c++=true -Dlibhdfs=true">
-         <condition>
-            <not><isset property="libhdfs-exists"/></not>
-          </condition>
-   </fail>
-   </target>
-
-  <!-- override compile target !-->
-  <target name="compile" depends="check-libhdfs-fuse,check-libhdfs-exists" if="libhdfs-fuse">
-    <echo message="contrib: ${name}"/>
+      <condition><not><isset property="libhdfs-exists"/></not></condition>
+    </fail>
+  </target>
 
+  <target name="compile">
     <condition property="perms" value="1" else="0">
-    <not>
-      <isset property="libhdfs.noperms"/>
-    </not>
+      <not> <isset property="libhdfs.noperms"/> </not>
     </condition>
 
-    <exec executable="/bin/sh" failonerror="true">
-      <arg value="${root}/bootstrap.sh"/>
+    <exec executable="autoreconf" dir="${basedir}" 
+          searchpath="yes" failonerror="yes">
+       <arg value="-if"/>
     </exec>
+
+    <exec executable="${basedir}/configure" dir="${basedir}"
+          failonerror="yes">
+    </exec>
+
     <exec executable="make" failonerror="true">
       <env key="OS_NAME" value="${os.name}"/>
       <env key="OS_ARCH" value="${os.arch}"/>
@@ -62,48 +51,54 @@
       <env key="BUILD_PLATFORM" value="${build.platform}" />
       <env key="PERMS" value="${perms}"/>
     </exec>
-    <mkdir dir="${build.dir}"/>
-    <mkdir dir="${build.dir}/test"/>
-    <exec executable="cp" failonerror="true">
-    <arg line="${root}/src/fuse_dfs ${build.dir}"/>
-    </exec>
-    <mkdir dir="${build.dir}/test"/>
-    <exec executable="cp" failonerror="true">
-    <arg line="${root}/src/fuse_dfs_wrapper.sh ${build.dir}"/>
-    </exec>
-
   </target>
   </target>
 
 
-  <target name="jar"/>
-
-  <!-- override package target !-->
-  <target name="package" depends="check-libhdfs-fuse" if="libhdfs-fuse">
-    <echo message="contrib: ${name}"/>
+  <target name="jar" />
+  <target name="package" />
 
-    <mkdir dir="${dist.dir}/contrib/${name}"/>
-    <exec executable="cp">
-      <arg value="-p"/>
-      <arg value="README"/>
-      <arg value="src/fuse_dfs"/>
-      <arg value="src/fuse_dfs_wrapper.sh"/>
-      <arg value="${dist.dir}/contrib/${name}"/>
+  <target name="compile-test" depends="ivy-retrieve-common, check-libhdfs-exists" if="fusedfs">
+    <mkdir dir="${build.dir}"/>
+    <mkdir dir="${build.dir}/test"/>
+    <javac encoding="${build.encoding}"
+	   srcdir="${src.test}"
+	   includes="**/*.java"
+	   destdir="${build.test}"
+	   debug="${javac.debug}">
+      <classpath refid="test.classpath"/>
+    </javac>
+
+    <!-- Use exec since the copy task doesn't preserve attrs -->
+    <exec executable="cp" failonerror="true">
+      <arg line="${hadoop.root}/src/contrib/fuse-dfs/src/fuse_dfs ${build.dir}"/>
     </exec>
     </exec>
-  </target>
 
-  <target name="test" if="fusedfs">
-    <echo message="testing FuseDFS ..."/>
-   <antcall target="hadoopbuildcontrib.test"> 
-   </antcall>
-  </target>  
-
-  <!-- override clean target !-->
-  <target name="clean" depends="check-libhdfs-fuse" if="libhdfs-fuse">
-    <echo message="contrib: ${name}"/>
+    <mkdir dir="${build.dir}/test"/>
 
-    <exec executable="make">
-      <arg value="clean"/>
+    <exec executable="cp" failonerror="true">
+      <arg line="${hadoop.root}/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh ${build.dir}"/>
     </exec>
   </target>
 
+  <target name="test" depends="compile-test,check-libhdfs-exists" if="fusedfs">
+    <junit showoutput="${test.output}" fork="yes" printsummary="yes" errorProperty="tests.failed" haltonfailure="no" failureProperty="tests.failed">
+      <classpath refid="test.classpath"/>
+      <sysproperty key="test.build.data" value="${build.test}/data"/>
+      <sysproperty key="build.test" value="${build.test}"/>
+      <sysproperty key="user.dir" value="${build.test}/data"/>
+      <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/>
+      <sysproperty key="test.src.dir" value="${test.src.dir}"/>
+      <formatter type="plain" />
+      <batchtest todir="${build.test}" unless="testcase">
+        <fileset dir="${src.test}">
+          <include name="**/Test*.java"/>
+        </fileset>
+      </batchtest>
+      <batchtest todir="${build.test}" if="testcase">
+        <fileset dir="${src.test}">
+          <include name="**/${testcase}.java"/>
+        </fileset>
+      </batchtest>
+    </junit>
+    <fail if="tests.failed">Tests failed!</fail>
+ </target>
 </project>

+ 3 - 0
src/contrib/fuse-dfs/configure.ac

@@ -45,6 +45,9 @@ AC_SUBST([DEFS])
 AC_FUNC_GETGROUPS
 AC_TYPE_GETGROUPS
 
+AC_PROG_CC
+AC_SYS_LARGEFILE
+
 ############################################################################
 # Section 2:
 # User Configurable system defaults. Change With CAUTION!

+ 16 - 5
src/contrib/fuse-dfs/ivy.xml

@@ -40,14 +40,25 @@
     <artifact conf="master"/>
   </publications>
   <dependencies>
-    <dependency org="commons-logging"
-      name="commons-logging"
-      rev="${commons-logging.version}"
+    <dependency org="org.apache.hadoop"
+      name="hadoop-common"
+      rev="${hadoop-common.version}"
+      conf="common->default"/>
+    <dependency org="org.apache.hadoop"
+      name="hadoop-common-test"
+      rev="${hadoop-common.version}"
       conf="common->default"/>
     <dependency org="log4j"
       name="log4j"
       rev="${log4j.version}"
       conf="common->master"/>
-    </dependencies>
-  
+    <dependency org="commons-logging"
+      name="commons-logging"
+      rev="${commons-logging.version}"
+      conf="common->master"/>
+    <dependency org="junit"
+      name="junit"
+      rev="${junit.version}"
+      conf="common->master"/>
+  </dependencies>
 </ivy-module>

+ 2 - 1
src/contrib/fuse-dfs/src/Makefile.am

@@ -16,5 +16,6 @@
 #
 bin_PROGRAMS = fuse_dfs
 fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c  fuse_impls_chown.c  fuse_impls_create.c  fuse_impls_flush.c fuse_impls_getattr.c  fuse_impls_mkdir.c  fuse_impls_mknod.c  fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c  fuse_impls_unlink.c fuse_impls_write.c
-AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_HOME)/src/c++/libhdfs/ -I$(JAVA_HOME)/include/linux/ -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
+AM_CFLAGS= -Wall -g
+AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_HOME)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
 AM_LDFLAGS= -L$(HADOOP_HOME)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm

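[Editor's note] The configure.ac hunk above adds AC_PROG_CC and AC_SYS_LARGEFILE, and Makefile.am keeps -D_FILE_OFFSET_BITS=64 in AM_CPPFLAGS; both exist so that off_t is 64 bits even in a 32-bit build and FUSE offsets past 2 GB are not truncated. A minimal standalone sketch of what that buys (not part of the commit; the file name in the build line is illustrative):

/* largefile_check.c - illustrative only.
 * With -D_FILE_OFFSET_BITS=64 (what AC_SYS_LARGEFILE arranges) off_t is
 * 8 bytes even on 32-bit Linux, so byte offsets beyond 2 GB are representable.
 * Build: gcc -D_FILE_OFFSET_BITS=64 largefile_check.c -o largefile_check */
#include <stdio.h>
#include <sys/types.h>

int main(void) {
  printf("sizeof(off_t) = %zu bytes\n", sizeof(off_t));  /* expect 8 */
  return 0;
}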
+ 5 - 9
src/contrib/fuse-dfs/src/fuse_dfs.c

@@ -56,7 +56,7 @@ static struct fuse_operations dfs_oper = {
   .write	= dfs_write,
   .flush        = dfs_flush,
   .mknod        = dfs_mknod,
-	.utimens	= dfs_utimens,
+  .utimens      = dfs_utimens,
   .chmod	= dfs_chmod,
   .chown	= dfs_chown,
   .truncate	= dfs_truncate,
@@ -118,15 +118,11 @@ int main(int argc, char *argv[])
     if ((temp = hdfsConnect(options.server, options.port)) == NULL) {
       const char *cp = getenv("CLASSPATH");
       const char *ld = getenv("LD_LIBRARY_PATH");
-      fprintf(stderr, "FATAL: misconfiguration problem, cannot connect to hdfs - here's your environment\n");
-      fprintf(stderr, "LD_LIBRARY_PATH=%s\n",ld == NULL ? "NULL" : ld);
-      fprintf(stderr, "CLASSPATH=%s\n",cp == NULL ? "NULL" : cp);
-      syslog(LOG_ERR, "FATAL: misconfiguration problem, cannot connect to hdfs - here's your environment\n");
-      syslog(LOG_ERR, "LD_LIBRARY_PATH=%s\n",ld == NULL ? "NULL" : ld);
-      syslog(LOG_ERR, "CLASSPATH=%s\n",cp == NULL ? "NULL" : cp);
+      ERROR("FATAL: misconfiguration - cannot connect to HDFS");
+      ERROR("LD_LIBRARY_PATH=%s",ld == NULL ? "NULL" : ld);
+      ERROR("CLASSPATH=%s",cp == NULL ? "NULL" : cp);
       exit(0);
-    }  
-    temp = NULL;
+    }
   }
 
   int ret = fuse_main(args.argc, args.argv, &dfs_oper, NULL);

+ 29 - 6
src/contrib/fuse-dfs/src/fuse_dfs.h

@@ -49,16 +49,39 @@
 //
 int is_protected(const char *path);
 
+#undef INFO
+#define INFO(_fmt, ...) {                       \
+  fprintf(stdout, "INFO %s:%d " _fmt "\n",      \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_INFO, "INFO %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
+
+#undef DEBUG
+#define DEBUG(_fmt, ...) {                      \
+  fprintf(stdout, "DEBUG %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_DEBUG, "DEBUG %s:%d " _fmt "\n",   \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
+
+#undef ERROR
+#define ERROR(_fmt, ...) {                      \
+  fprintf(stderr, "ERROR %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_ERR, "ERROR %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
 
 //#define DOTRACE
 #ifdef DOTRACE
-#define TRACE(x) \
-  syslog(LOG_ERR, "fuse_dfs TRACE - %s\n", x);  \
-  fprintf(stderr, "fuse_dfs TRACE - %s\n", x);
+#define TRACE(x) {        \
+    DEBUG("TRACE %s", x); \
+}
 
-#define TRACE1(x,y)                              \
-  syslog(LOG_ERR, "fuse_dfs TRACE - %s %s\n", x,y);  \
-  fprintf(stderr, "fuse_dfs TRACE - %s %s\n", x,y);
+#define TRACE1(x,y) {             \
+    DEBUG("TRACE %s %s\n", x, y); \
+}
 #else
 #define TRACE(x) ; 
 #define TRACE1(x,y) ; 

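[Editor's note] The new INFO/DEBUG/ERROR macros above replace the paired fprintf + syslog calls scattered through the fuse_impls_*.c files. A minimal standalone sketch of how a caller uses them (the ERROR body is copied from the hunk above; the main() scenario and the host/port values are illustrative, not from the commit):

#include <stdio.h>
#include <syslog.h>

#undef ERROR
#define ERROR(_fmt, ...) {                      \
  fprintf(stderr, "ERROR %s:%d " _fmt "\n",     \
          __FILE__, __LINE__, ## __VA_ARGS__);  \
  syslog(LOG_ERR, "ERROR %s:%d " _fmt "\n",     \
          __FILE__, __LINE__, ## __VA_ARGS__);  \
}

int main(void) {
  const char *host = "namenode.example.com";  /* illustrative host */
  int port = 8020;                            /* illustrative port */
  /* One call replaces the old fprintf(stderr, ...) + syslog(LOG_ERR, ...) pair
   * and stamps the message with the file and line automatically. */
  ERROR("Could not connect to %s:%d", host, port);
  return 0;
}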
+ 1 - 1
src/contrib/fuse-dfs/src/fuse_impls_access.c

@@ -33,7 +33,7 @@ int dfs_access(const char *path, int mask)
 
   hdfsFS userFS;
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port)) == NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect to HDFS");
     return -EIO;
   }
   //  return hdfsAccess(userFS, path, mask);

+ 2 - 2
src/contrib/fuse-dfs/src/fuse_impls_chmod.c

@@ -37,12 +37,12 @@ int dfs_chmod(const char *path, mode_t mode)
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect to HDFS");
     return -EIO;
   }
 
   if (hdfsChmod(userFS, path, (short)mode)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to chmod %s to %d",path, (int)mode);
+    ERROR("Could not chmod %s to %d", path, (int)mode);
     return -EIO;
   }
 #endif

+ 4 - 7
src/contrib/fuse-dfs/src/fuse_impls_chown.c

@@ -41,16 +41,14 @@
 
   user = getUsername(uid);
   if (NULL == user) {
-    syslog(LOG_ERR,"Could not lookup the user id string %d\n",(int)uid); 
-    fprintf(stderr, "could not lookup userid %d\n", (int)uid); 
+    ERROR("Could not lookup the user id string %d",(int)uid); 
     ret = -EIO;
   }
 
   if (0 == ret) {
     group = getGroup(gid);
     if (group == NULL) {
-      syslog(LOG_ERR,"Could not lookup the group id string %d\n",(int)gid); 
-      fprintf(stderr, "could not lookup group %d\n", (int)gid); 
+      ERROR("Could not lookup the group id string %d",(int)gid);
       ret = -EIO;
     } 
   }
@@ -59,15 +57,14 @@
   if (0 == ret) {
     // if not connected, try to connect and fail out if we can't.
     if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-      syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+      ERROR("Could not connect to HDFS");
       ret = -EIO;
     }
   }
 
   if (0 == ret) {
-    //  fprintf(stderr, "DEBUG: chown %s %d->%s %d->%s\n", path, (int)uid, user, (int)gid, group);
     if (hdfsChown(userFS, path, user, group)) {
-      syslog(LOG_ERR,"ERROR: hdfs trying to chown %s to %d/%d",path, (int)uid, gid);
+      ERROR("Could not chown %s to %d:%d", path, (int)uid, gid);
       ret = -EIO;
     }
   }

+ 1 - 1
src/contrib/fuse-dfs/src/fuse_impls_flush.c

@@ -46,7 +46,7 @@ int dfs_flush(const char *path, struct fuse_file_info *fi) {
 
     assert(fh->fs);
     if (hdfsFlush(fh->fs, file_handle) != 0) {
-      syslog(LOG_ERR, "ERROR: dfs problem - could not flush file_handle(%lx) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
+      ERROR("Could not flush %lx for %s\n",(long)file_handle, path);
       return -EIO;
     }
   }

+ 3 - 2
src/contrib/fuse-dfs/src/fuse_impls_getattr.c

@@ -33,8 +33,9 @@ int dfs_getattr(const char *path, struct stat *st)
   assert(st);
 
   // if not connected, try to connect and fail out if we can't.
-  if (NULL == dfs->fs && NULL == (dfs->fs = hdfsConnect(dfs->nn_hostname,dfs->nn_port))) {
-    syslog(LOG_ERR, "ERROR: could not connect to %s:%d %s:%d\n", dfs->nn_hostname, dfs->nn_port,__FILE__, __LINE__);
+  if (NULL == dfs->fs && 
+      NULL == (dfs->fs = hdfsConnect(dfs->nn_hostname,dfs->nn_port))) {
+    ERROR("Could not connect to %s:%d", dfs->nn_hostname, dfs->nn_port);
     return -EIO;
   }
 

+ 5 - 6
src/contrib/fuse-dfs/src/fuse_impls_mkdir.c

@@ -34,35 +34,34 @@ int dfs_mkdir(const char *path, mode_t mode)
   assert('/' == *path);
 
   if (is_protected(path)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to create the directory: %s", path);
+    ERROR("HDFS trying to create directory %s", path);
     return -EACCES;
   }
 
   if (dfs->read_only) {
-    syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot create the directory %s\n",path);
+    ERROR("HDFS is configured read-only, cannot create directory %s", path);
     return -EACCES;
   }
   
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
   // In theory the create and chmod should be atomic.
 
   if (hdfsCreateDirectory(userFS, path)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to create directory %s",path);
+    ERROR("HDFS could not create directory %s", path);
     return -EIO;
   }
 
 #if PERMS
   if (hdfsChmod(userFS, path, (short)mode)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to chmod %s to %d",path, (int)mode);
+    ERROR("Could not chmod %s to %d", path, (int)mode);
     return -EIO;
   }
 #endif
   return 0;
-
 }

+ 4 - 3
src/contrib/fuse-dfs/src/fuse_impls_mknod.c

@@ -19,8 +19,9 @@
 #include "fuse_dfs.h"
 #include "fuse_impls.h"
 
- int dfs_mknod(const char *path, mode_t mode, dev_t rdev) {
-  TRACE1("mknod", path)
-  syslog(LOG_DEBUG,"in dfs_mknod");
+int dfs_mknod(const char *path, mode_t mode, dev_t rdev)
+{
+  TRACE1("mknod", path);
+  DEBUG("dfs_mknod");
   return 0;
 }

+ 4 - 5
src/contrib/fuse-dfs/src/fuse_impls_open.c

@@ -41,12 +41,12 @@ int dfs_open(const char *path, struct fuse_file_info *fi)
   // retrieve dfs specific data
   dfs_fh *fh = (dfs_fh*)malloc(sizeof (dfs_fh));
   if (fh == NULL) {
-    syslog(LOG_ERR, "ERROR: malloc of new file handle failed %s:%d\n", __FILE__, __LINE__);
+    ERROR("Malloc of new file handle failed");
     return -EIO;
   }
 
   if ((fh->fs = doConnectAsUser(dfs->nn_hostname,dfs->nn_port)) == NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect to dfs");
     return -EIO;
   }
 
@@ -66,8 +66,7 @@ int dfs_open(const char *path, struct fuse_file_info *fi)
   }
 
   if ((fh->hdfsFH = hdfsOpenFile(fh->fs, path, flags,  0, 0, 0)) == NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect open file %s:%d\n", __FILE__, __LINE__);
-    syslog(LOG_ERR, "ERROR: errno %d\n", errno);
+    ERROR("Could not open file %s (errno=%d)", path, errno);
     if (errno == 0 || errno == EINTERNAL) {
       return -EIO;
     }
@@ -88,7 +87,7 @@ int dfs_open(const char *path, struct fuse_file_info *fi)
     assert(dfs->rdbuffer_size > 0);
 
     if (NULL == (fh->buf = (char*)malloc(dfs->rdbuffer_size*sizeof (char)))) {
-      syslog(LOG_ERR, "ERROR: could not allocate memory for file buffer for a read for file %s dfs %s:%d\n", path,__FILE__, __LINE__);
+      ERROR("Could not allocate memory for a read for file %s\n", path);
       ret = -EIO;
     }
 

+ 1 - 1
src/contrib/fuse-dfs/src/fuse_impls_read.c

@@ -108,7 +108,7 @@ int dfs_read(const char *path, char *buf, size_t size, off_t offset,
       if (total_read < size && num_read < 0) {
         // invalidate the buffer 
         fh->bufferSize = 0; 
-        syslog(LOG_ERR, "Read error - pread failed for %s with return code %d %s:%d", path, (int)num_read, __FILE__, __LINE__);
+        ERROR("pread failed for %s with return code %d", path, (int)num_read);
         ret = -EIO;
       } else {
         // Either EOF, all read or read beyond size, but then there was an error

+ 5 - 6
src/contrib/fuse-dfs/src/fuse_impls_readdir.c

@@ -42,7 +42,7 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
@@ -61,7 +61,7 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
   for (i = 0; i < numEntries; i++) {
 
     if (NULL == info[i].mName) {
-      syslog(LOG_ERR,"ERROR: for <%s> info[%d].mName==NULL %s:%d", path, i, __FILE__,__LINE__);
+      ERROR("Path %s info[%d].mName is NULL", path, i);
       continue;
     }
 
@@ -71,8 +71,7 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
     // Find the final path component
     const char *str = strrchr(info[i].mName, '/');
     if (NULL == str) {
-      syslog(LOG_ERR, "ERROR: invalid URI %s %s:%d",
-             info[i].mName, __FILE__,__LINE__);
+      ERROR("Invalid URI %s", info[i].mName);
       continue;
     }
     str++;
@@ -80,7 +79,7 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
     // pack this entry into the fuse buffer
     int res = 0;
     if ((res = filler(buf,str,&st,0)) != 0) {
-      syslog(LOG_ERR, "ERROR: readdir filling the buffer %d %s:%d\n",res, __FILE__, __LINE__);
+      ERROR("Readdir filler failed: %d\n",res);
     }
   }
 
@@ -111,7 +110,7 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
       // flatten the info using fuse's function into a buffer
       int res = 0;
       if ((res = filler(buf,str,&st,0)) != 0) {
-        syslog(LOG_ERR, "ERROR: readdir filling the buffer %d %s:%d", res, __FILE__, __LINE__);
+	ERROR("Readdir filler failed: %d\n",res);
       }
     }
   // free the info pointers

+ 1 - 2
src/contrib/fuse-dfs/src/fuse_impls_release.c

@@ -55,8 +55,7 @@ int dfs_release (const char *path, struct fuse_file_info *fi) {
 
     if (NULL != file_handle) {
       if (hdfsCloseFile(fh->fs, file_handle) != 0) {
-        syslog(LOG_ERR, "ERROR: dfs problem - could not close file_handle(%ld) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
-        fprintf(stderr, "ERROR: dfs problem - could not close file_handle(%ld) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
+        ERROR("Could not close handle %ld for %s\n",(long)file_handle, path);
         ret = -EIO;
       }
     }

+ 4 - 5
src/contrib/fuse-dfs/src/fuse_impls_rename.c

@@ -37,27 +37,26 @@ int dfs_rename(const char *from, const char *to)
   assert('/' == *to);
 
   if (is_protected(from) || is_protected(to)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to rename: %s %s", from, to);
+    ERROR("Could not rename %s to %s", from, to);
     return -EACCES;
   }
 
   if (dfs->read_only) {
-    syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot rename the directory %s\n",from);
+    ERROR("HDFS configured read-only, cannot rename directory %s", from);
     return -EACCES;
   }
 
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
   if (hdfsRename(userFS, from, to)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to rename %s to %s",from, to);
+    ERROR("Rename %s to %s failed", from, to);
     return -EIO;
   }
 
   return 0;
-
 }

+ 4 - 4
src/contrib/fuse-dfs/src/fuse_impls_rmdir.c

@@ -36,19 +36,19 @@ int dfs_rmdir(const char *path)
   assert('/' == *path);
 
   if (is_protected(path)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to delete a protected directory: %s ",path);
+    ERROR("Trying to delete protected directory %s", path);
     return -EACCES;
   }
 
   if (dfs->read_only) {
-    syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot delete the directory %s\n",path);
+    ERROR("HDFS configured read-only, cannot delete directory %s", path);
     return -EACCES;
   }
 
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
@@ -63,7 +63,7 @@ int dfs_rmdir(const char *path)
   }
 
   if (hdfsDeleteWithTrash(userFS, path, dfs->usetrash)) {
-    syslog(LOG_ERR,"ERROR: hdfs error trying to delete the directory %s\n",path);
+    ERROR("Error trying to delete directory %s", path);
     return -EIO;
   }
 

+ 1 - 1
src/contrib/fuse-dfs/src/fuse_impls_statfs.c

@@ -39,7 +39,7 @@ int dfs_statfs(const char *path, struct statvfs *st)
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 

+ 3 - 3
src/contrib/fuse-dfs/src/fuse_impls_truncate.c

@@ -47,7 +47,7 @@ int dfs_truncate(const char *path, off_t size)
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port)) == NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
@@ -55,12 +55,12 @@ int dfs_truncate(const char *path, off_t size)
 
   hdfsFile file;
   if ((file = (hdfsFile)hdfsOpenFile(userFS, path, flags,  0, 0, 0)) == NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect open file %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect open file %s", path);
     return -EIO;
   }
 
   if (hdfsCloseFile(userFS, file) != 0) {
-    syslog(LOG_ERR, "ERROR: could not connect close file %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not close file %s", path);
     return -EIO;
   }
   return 0;

+ 4 - 6
src/contrib/fuse-dfs/src/fuse_impls_unlink.c

@@ -35,28 +35,26 @@ int dfs_unlink(const char *path)
   assert('/' == *path);
 
   if (is_protected(path)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to delete a protected directory: %s ",path);
+    ERROR("Trying to delete protected directory %s ", path);
     return -EACCES;
   }
 
   if (dfs->read_only) {
-    syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot create the directory %s\n",path);
+    ERROR("HDFS configured read-only, cannot create directory %s", path);
     return -EACCES;
   }
 
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 
-
   if (hdfsDeleteWithTrash(userFS, path, dfs->usetrash)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to delete the file %s",path);
+    ERROR("Could not delete file %s", path);
     return -EIO;
   }
 
   return 0;
-
 }

+ 1 - 2
src/contrib/fuse-dfs/src/fuse_impls_utimens.c

@@ -38,8 +38,7 @@ int dfs_utimens(const char *path, const struct timespec ts[2])
   hdfsFS userFS;
   // if not connected, try to connect and fail out if we can't.
   if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
-    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n",
-           __FILE__, __LINE__);
+    ERROR("Could not connect");
     return -EIO;
   }
 

+ 6 - 5
src/contrib/fuse-dfs/src/fuse_impls_write.c

@@ -52,13 +52,14 @@ int dfs_write(const char *path, const char *buf, size_t size,
 
   tOffset cur_offset = hdfsTell(fh->fs, file_handle);
   if (cur_offset != offset) {
-    syslog(LOG_ERR, "ERROR: user trying to random access write to a file %d!=%d for %s %s:%d\n",(int)cur_offset, (int)offset,path, __FILE__, __LINE__);
+    ERROR("User trying to random access write to a file %d != %d for %s",
+	  (int)cur_offset, (int)offset, path);
     ret =  -ENOTSUP;
   } else {
     length = hdfsWrite(fh->fs, file_handle, buf, size);
     if (length <= 0) {
-      syslog(LOG_ERR, "ERROR: could not write all the bytes for %s %d!=%d%s:%d\n", path, length, (int)size, __FILE__, __LINE__);
-      syslog(LOG_ERR, "ERROR: errno %d\n", errno);
+      ERROR("Could not write all bytes for %s %d != %d (errno=%d)", 
+	    path, length, (int)size, errno);
       if (errno == 0 || errno == EINTERNAL) {
         ret = -EIO;
       } else {
@@ -66,8 +67,8 @@ int dfs_write(const char *path, const char *buf, size_t size,
       }
     } 
     if (length != size) {
-      syslog(LOG_ERR, "ERROR: could not write all the bytes for %s %d!=%d%s:%d\n", path, length, (int)size, __FILE__, __LINE__);
-      syslog(LOG_ERR, "ERROR: errno - %d\n", errno);
+      ERROR("Could not write all bytes for %s %d != %d (errno=%d)", 
+	    path, length, (int)size, errno);
     }
   }
 

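[Editor's note] For context on the fuse_impls_write.c hunk above: dfs_write() refuses a write whose offset is not the current position reported by hdfsTell(), since writes through libhdfs are append-only, and answers with -ENOTSUP. A condensed standalone sketch of that check under stated assumptions (current_position() is an illustrative stand-in for hdfsTell(), not a real libhdfs call):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static off_t current_position(void) {
  return 4096;                     /* pretend the writer is at byte 4096 */
}

static int check_sequential_write(off_t requested_offset) {
  off_t cur = current_position();
  if (cur != requested_offset) {
    fprintf(stderr, "random-access write rejected: %ld != %ld\n",
            (long)cur, (long)requested_offset);
    return -ENOTSUP;               /* same errno the real dfs_write() uses */
  }
  return 0;                        /* offsets line up, the write may proceed */
}

int main(void) {
  printf("append at 4096 -> %d\n", check_sequential_write(4096));
  printf("seek-write at 0 -> %d\n", check_sequential_write(0));
  return 0;
}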
+ 3 - 3
src/contrib/fuse-dfs/src/fuse_init.c

@@ -97,7 +97,7 @@ void *dfs_init()
   dfs_context *dfs = (dfs_context*)malloc(sizeof (dfs_context));
 
   if (NULL == dfs) {
-    syslog(LOG_ERR, "FATAL: could not malloc fuse dfs context struct - out of memory %s:%d", __FILE__, __LINE__);
+    ERROR("FATAL: could not malloc dfs_context");
     exit(1);
   }
 
@@ -112,13 +112,13 @@ void *dfs_init()
   dfs->rdbuffer_size         = options.rdbuffer_size;
   dfs->direct_io             = options.direct_io;
 
-  syslog(LOG_INFO, "mounting %s:%d", dfs->nn_hostname, dfs->nn_port);
+  INFO("Mounting %s:%d", dfs->nn_hostname, dfs->nn_port);
 
   init_protectedpaths(dfs);
   assert(dfs->protectedpaths != NULL);
 
   if (dfs->rdbuffer_size <= 0) {
-    syslog(LOG_DEBUG, "WARN: dfs->rdbuffersize <= 0 = %ld %s:%d", dfs->rdbuffer_size, __FILE__, __LINE__);
+    DEBUG("dfs->rdbuffersize <= 0 = %ld", dfs->rdbuffer_size);
     dfs->rdbuffer_size = 32768;
   }
   return (void*)dfs;

+ 28 - 19
src/contrib/fuse-dfs/src/fuse_options.c

@@ -23,29 +23,38 @@
 #include "fuse_context_handle.h"
 
 void print_options() {
-  fprintf(stderr,"options:\n");
-  fprintf(stderr, "\tprotected=%s\n",options.protected);
-  fprintf(stderr, "\tserver=%s\n",options.server);
-  fprintf(stderr, "\tport=%d\n",options.port);
-  fprintf(stderr, "\tdebug=%d\n",options.debug);
-  fprintf(stderr, "\tread_only=%d\n",options.read_only);
-  fprintf(stderr, "\tusetrash=%d\n",options.usetrash);
-  fprintf(stderr, "\tentry_timeout=%d\n",options.entry_timeout);
-  fprintf(stderr, "\tattribute_timeout=%d\n",options.attribute_timeout);
-  fprintf(stderr, "\tprivate=%d\n",options.private);
-  fprintf(stderr, "\trdbuffer_size=%d (KBs)\n",(int)options.rdbuffer_size/1024);
+  printf("options:\n"
+	 "\tprotected=%s\n"
+	 "\tserver=%s\n"
+	 "\tport=%d\n"
+	 "\tdebug=%d\n"
+	 "\tread_only=%d\n"
+	 "\tusetrash=%d\n"
+	 "\tentry_timeout=%d\n"
+	 "\tattribute_timeout=%d\n"
+	 "\tprivate=%d\n"
+	 "\trdbuffer_size=%d (KBs)\n", 
+	 options.protected, options.server, options.port, options.debug,
+	 options.read_only, options.usetrash, options.entry_timeout, 
+	 options.attribute_timeout, options.private, 
+	 (int)options.rdbuffer_size / 1024);
 }
 
-const char *program;  
-
+const char *program;
 
 /** macro to define options */
 #define DFSFS_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v }
 
 void print_usage(const char *pname)
 {
-  fprintf(stdout,"USAGE: %s [debug] [--help] [--version] [-oprotected=<colon_seped_list_of_paths] [rw] [-onotrash] [-ousetrash] [-obig_writes] [-oprivate (single user)] [ro] [-oserver=<hadoop_servername>] [-oport=<hadoop_port>] [-oentry_timeout=<secs>] [-oattribute_timeout=<secs>] [-odirect_io] [-onopoermissions] [-o<other fuse option>] <mntpoint> [fuse options]\n",pname);
-  fprintf(stdout,"NOTE: debugging option for fuse is -debug\n");
+  printf("USAGE: %s [debug] [--help] [--version] "
+	 "[-oprotected=<colon_seped_list_of_paths] [rw] [-onotrash] "
+	 "[-ousetrash] [-obig_writes] [-oprivate (single user)] [ro] "
+	 "[-oserver=<hadoop_servername>] [-oport=<hadoop_port>] "
+	 "[-oentry_timeout=<secs>] [-oattribute_timeout=<secs>] "
+	 "[-odirect_io] [-onopoermissions] [-o<other fuse option>] "
+	 "<mntpoint> [fuse options]\n", pname);
+  printf("NOTE: debugging option for fuse is -debug\n");
 }
 
 
@@ -98,10 +107,10 @@ int dfs_options(void *data, const char *arg, int key,  struct fuse_args *outargs
 
   switch (key) {
   case FUSE_OPT_KEY_OPT:
-    fprintf(stderr,"fuse-dfs ignoring option %s\n",arg);
+    fprintf(stderr, "fuse-dfs ignoring option %s\n", arg);
     return 1;
   case  KEY_VERSION:
-    fprintf(stdout,"%s %s\n",program,_FUSE_DFS_VERSION);
+    fprintf(stdout, "%s %s\n", program, _FUSE_DFS_VERSION);
     exit(0);
   case KEY_HELP:
     print_usage(program);
@@ -150,14 +159,14 @@ int dfs_options(void *data, const char *arg, int key,  struct fuse_args *outargs
       } else if (strcmp(arg,"rw") == 0) {
         options.read_only = 0;
       } else {
-        fprintf(stderr,"fuse-dfs didn't recognize %s,%d\n",arg,key);
+        ERROR("fuse-dfs didn't recognize %s,%d\n",arg,key);
         fuse_opt_add_arg(outargs,arg);
         return 0;
       }
     } else {
       options.port = tmp_port;
       options.server = strdup(tmp_server);
-      fprintf(stderr, "port=%d,server=%s\n", options.port, options.server);
+      ERROR("port=%d,server=%s\n", options.port, options.server);
     }
   }
   }

+ 12 - 11
src/contrib/fuse-dfs/src/fuse_trash.c

@@ -47,10 +47,10 @@ int move_to_trash(const char *item, hdfsFS userFS) {
 
 
   char fname[4096]; // or last element of the directory path
-  char parent_directory[4096]; // the directory the fname resides in
+  char parent_dir[4096]; // the directory the fname resides in
 
   if (strlen(item) > sizeof(fname) - strlen(TrashDir)) {
-    syslog(LOG_ERR, "ERROR: internal buffer too small to accomodate path of length %d %s:%d\n", (int)strlen(item), __FILE__, __LINE__);
+    ERROR("Buffer too small to accomodate path of len %d", (int)strlen(item));
     return -EIO;
   }
 
@@ -60,16 +60,17 @@ int move_to_trash(const char *item, hdfsFS userFS) {
     int length_of_fname = strlen(item) - length_of_parent_dir - 1; // the '/'
 
     // note - the below strncpys should be safe from overflow because of the check on item's string length above.
-    strncpy(parent_directory, item, length_of_parent_dir);
-    parent_directory[length_of_parent_dir ] = 0;
+    strncpy(parent_dir, item, length_of_parent_dir);
+    parent_dir[length_of_parent_dir ] = 0;
     strncpy(fname, item + length_of_parent_dir + 1, strlen(item));
     fname[length_of_fname + 1] = 0;
   }
 
   // create the target trash directory
   char trash_dir[4096];
-  if (snprintf(trash_dir, sizeof(trash_dir), "%s%s",TrashDir,parent_directory) >= sizeof trash_dir) {
-    syslog(LOG_ERR, "move_to_trash error target is not big enough to hold new name for %s %s:%d\n",item, __FILE__, __LINE__);
+  if (snprintf(trash_dir, sizeof(trash_dir), "%s%s", TrashDir, parent_dir) 
+      >= sizeof trash_dir) {
+    ERROR("Move to trash error target not big enough for %s", item);
     return -EIO;
   }
 
@@ -89,19 +90,19 @@ int move_to_trash(const char *item, hdfsFS userFS) {
   char target[4096];
   int j ;
   if ( snprintf(target, sizeof target,"%s/%s",trash_dir, fname) >= sizeof target) {
-    syslog(LOG_ERR, "move_to_trash error target is not big enough to hold new name for %s %s:%d\n",item, __FILE__, __LINE__);
+    ERROR("Move to trash error target not big enough for %s", item);
     return -EIO;
   }
 
   // NOTE: this loop differs from the java version by capping the #of tries
   for (j = 1; ! hdfsExists(userFS, target) && j < TRASH_RENAME_TRIES ; j++) {
     if (snprintf(target, sizeof target,"%s/%s.%d",trash_dir, fname, j) >= sizeof target) {
-      syslog(LOG_ERR, "move_to_trash error target is not big enough to hold new name for %s %s:%d\n",item, __FILE__, __LINE__);
+      ERROR("Move to trash error target not big enough for %s", item);
       return -EIO;
     }
   }
   if (hdfsRename(userFS, item, target)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to rename %s to %s",item, target);
+    ERROR("Trying to rename %s to %s", item, target);
     return -EIO;
   }
   return 0;
@@ -117,9 +118,9 @@ int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash) {
   }
 
   if (hdfsDelete(userFS, path, 1)) {
-    syslog(LOG_ERR,"ERROR: hdfs trying to delete the file %s",path);
+    ERROR("Trying to delete the file %s", path);
     return -EIO;
   }
-  return 0;
 
+  return 0;
 }

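[Editor's note] The fuse_trash.c hunks keep the existing scheme for building a trash destination: the file's parent directory is recreated under the trash root and, if the target name is already taken, a numeric ".N" suffix is tried up to TRASH_RENAME_TRIES, with every snprintf checked against the buffer size. A standalone sketch of that naming logic under stated assumptions (the trash root value, the retry cap, and the target_exists() helper are illustrative stand-ins for the commit's TrashDir and hdfsExists(), not its actual code):

#include <stdio.h>
#include <string.h>

#define TRASH_DIR "/user/foo/.Trash/Current"   /* assumed trash root */
#define TRASH_RENAME_TRIES 8                   /* assumed retry cap  */

static int target_exists(const char *path) {
  /* Pretend the first candidate name is already taken. */
  return strcmp(path, TRASH_DIR "/tmp/data.txt") == 0;
}

static int build_trash_target(const char *parent_dir, const char *fname,
                              char *target, size_t target_len) {
  if ((size_t)snprintf(target, target_len, "%s%s/%s",
                       TRASH_DIR, parent_dir, fname) >= target_len)
    return -1;                                  /* would overflow, give up */
  for (int j = 1; target_exists(target) && j < TRASH_RENAME_TRIES; j++) {
    if ((size_t)snprintf(target, target_len, "%s%s/%s.%d",
                         TRASH_DIR, parent_dir, fname, j) >= target_len)
      return -1;
  }
  return 0;
}

int main(void) {
  char target[4096];
  if (build_trash_target("/tmp", "data.txt", target, sizeof target) == 0)
    printf("rename to: %s\n", target);          /* e.g. .../tmp/data.txt.1 */
  return 0;
}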
+ 1 - 1
src/contrib/fuse-dfs/src/fuse_users.c

@@ -194,7 +194,7 @@ char ** getGroups(uid_t uid, int *num_groups)
   for (i=0; i < *num_groups; i++)  {
     groupnames[i] = getGroup(grouplist[i]);
     if (groupnames[i] == NULL) {
-      fprintf(stderr, "error could not lookup group %d\n",(int)grouplist[i]);
+      ERROR("Could not lookup group %d\n", (int)grouplist[i]);
     }
   } 
   free(grouplist);

+ 276 - 533
src/contrib/fuse-dfs/src/test/TestFuseDFS.java

@@ -16,611 +16,354 @@
  * limitations under the License.
  */
 
-import org.apache.hadoop.hdfs.*;
-import junit.framework.TestCase;
 import java.io.*;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.concurrent.atomic.*;
+
+import org.apache.log4j.Level;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
-import java.net.*;
+import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.StringUtils;
 
-/**
- * This class tests that the Fuse module for DFS can mount properly
- * and does a few simple commands:
- * mkdir
- * rmdir
- * ls
- * cat
- *
- * cp and touch are purposely not tested because they won't work with the current module
+import org.junit.Test;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import static org.junit.Assert.*;
 
- *
+/**
+ * Basic functional tests on a fuse-dfs mount.
  */
-public class TestFuseDFS extends TestCase {
-
-  /**
-   * mount the fuse file system using assumed fuse module library installed in /usr/local/lib or somewhere else on your
-   * pre-existing LD_LIBRARY_PATH
-   *
-   */
-
-  static Process fuse_process;
-  static String fuse_cmd;
-  static private void mount(String mountpoint, URI dfs) throws IOException, InterruptedException  {
-
-    String cp = System.getProperty("java.class.path");
-    Runtime r = Runtime.getRuntime();
-    fuse_cmd = System.getProperty("build.test") + "/../fuse_dfs";
-    String libhdfs = System.getProperty("build.test") + "/../../../libhdfs/";
-    String arch = System.getProperty("os.arch");
-    String jvm = System.getProperty("java.home") + "/lib/" + arch + "/server";
-    String lp = System.getProperty("LD_LIBRARY_PATH") + ":" + "/usr/local/lib:" + libhdfs + ":" + jvm;
-    System.err.println("LD_LIBRARY_PATH=" + lp);
-    String cmd[] =  {  fuse_cmd, "dfs://" + dfs.getHost() + ":" + String.valueOf(dfs.getPort()), 
-                       mountpoint, "-obig_writes", "-odebug", "-oentry_timeout=0.1",  "-oattribute_timeout=0.1", "-ousetrash", "rw", "-oinitchecks",
-                       "-ordbuffer=32768"};
-    final String [] envp = {
-      "CLASSPATH="+  cp,
-      "LD_LIBRARY_PATH=" + lp,
-      "PATH=" + "/usr/bin:/bin"
-
-    };
-
-    // ensure the mount point is not currently mounted
-    Process p = r.exec("fusermount -u " + mountpoint);
-    p.waitFor();
+public class TestFuseDFS {
 
-    // clean up the mount point
-    p = r.exec("rm -rf " + mountpoint);
-    assertTrue(p.waitFor() == 0);
+  private static MiniDFSCluster cluster;
+  private static FileSystem fs;
+  private static Runtime r;
+  private static String mountPoint;
 
-    // make the mount point if needed
-    p = r.exec("mkdir -p " + mountpoint);
-    assertTrue(p.waitFor() == 0);
-
-    // mount fuse to the mount point
-    fuse_process = r.exec(cmd, envp);
-
-    // give DFS a chance to come up
-    try { Thread.sleep(3000); } catch(Exception e) { }
+  private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);
+  {
+    ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
   }
 
-  /**
-   * unmounts fuse for before shutting down.
-   */
-  static private void umount(String mpoint) throws IOException, InterruptedException {
-    Runtime r= Runtime.getRuntime();
-    Process p = r.exec("fusermount -u " + mpoint);
-    p.waitFor();
+  /** Dump the given intput stream to stderr */
+  private static void dumpInputStream(InputStream is) throws IOException {
+    int len;
+    do {
+      byte b[] = new byte[is.available()];
+      len = is.read(b);
+      System.out.println("Read "+len+" bytes");
+      System.out.write(b, 0, b.length);
+    } while (len > 0);
   }
 
-  /**
-   * Set things up - create mini dfs cluster and mount the fuse filesystem.
+  /** 
+   * Wait for the given process to return and check that it exited
+   * as required. Log if the process failed.
    */
-  public TestFuseDFS() throws IOException,InterruptedException  {
-  }
-
-  static private MiniDFSCluster cluster;
-  static private DistributedFileSystem fileSys;
-  final static private String mpoint;
-
-  static {
-    mpoint = System.getProperty("build.test") + "/mnt";
-    System.runFinalizersOnExit(true);
-    startStuff();
-  }
-
-
-  static public void startStuff() {
+  private static void checkProcessRet(Process p, boolean expectPass) 
+      throws IOException {
     try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      fileSys = (DistributedFileSystem)cluster.getFileSystem();
-      assertTrue(fileSys.getFileStatus(new Path("/")).isDir());
-      mount(mpoint, fileSys.getUri());
-    } catch(Exception e) {
-      e.printStackTrace();
+      int ret = p.waitFor();
+      if (ret != 0) {
+	dumpInputStream(p.getErrorStream());
+      }
+      if (expectPass) {
+	assertEquals(0, ret);
+      } else {
+	assertTrue(ret != 0);
+      }
+    } catch (InterruptedException ie) {
+      fail("Process interrupted: "+ie.getMessage());
     }
     }
   }
   }
 
 
-  public void setUp() {
-  }
-
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testMkdir() throws IOException,InterruptedException, Exception  {
+  /** Exec the given command and wait for it to complete */
+  private static void execWaitRet(String cmd) throws IOException {
+    LOG.debug("EXEC "+cmd);
+    Process p = r.exec(cmd);
     try {
     try {
-      // First create a new directory with mkdirs
-      Path path = new Path("/foo");
-      Runtime r = Runtime.getRuntime();
-      String cmd = "mkdir -p " + mpoint + path.toString();
-      Process p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-
-      // check it is there
-      assertTrue(fileSys.getFileStatus(path).isDir());
-
-      // check again through the shell
-      String lsCmd = "ls " + mpoint + path.toString();
-      p = r.exec(lsCmd);
-      assertTrue(p.waitFor() == 0);
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
+      p.waitFor();
+    } catch (InterruptedException ie) {
+      fail("Process interrupted: "+ie.getMessage());
     }
     }
   }
   }
 
 
+  /** Exec the given command, ignoring its exit status */
+  private static void execIgnoreRet(String cmd) throws IOException {
+    LOG.debug("EXEC "+cmd);
+    r.exec(cmd);
+  }
 
 
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testWrites() throws IOException,InterruptedException  {
-    try {
-
-      // write a hello file
-      File file = new File(mpoint, "hello.txt");
-      FileOutputStream f = new FileOutputStream(file);
-      String s = "hello ";
-      f.write(s.getBytes());
-      s = "world";
-      f.write(s.getBytes());
-      f.flush();
-      f.close();
-
+  /** Exec the given command and assert it executed successfully */
+  private static void execAssertSucceeds(String cmd) throws IOException {
+    LOG.debug("EXEC "+cmd);
+    checkProcessRet(r.exec(cmd), true);
+  }
 
 
-      try {
-        Thread.sleep(1000);
-      } catch(Exception e) {
-      }
+  /** Exec the given command, assert it returned an error code */
+  private static void execAssertFails(String cmd) throws IOException {
+    LOG.debug("EXEC "+cmd);
+    checkProcessRet(r.exec(cmd), false);
+  }
 
 
-      // check the file exists.
-      Path myPath = new Path("/hello.txt");
-      assertTrue(fileSys.exists(myPath));
-
-      // check the data is ok
-      FileInputStream fi = new FileInputStream(new File(mpoint, "hello.txt"));
-      byte b[] = new byte[12];
-      int length = fi.read(b,0,12);
-      assertTrue(length > 0);
-      String s2 = new String( b, 0, length);
-      assertEquals("hello world", s2);
-    } catch(Exception e) {
-      e.printStackTrace();
-    } finally {
-    }
+  /** Create and write the given file */
+  private static void createFile(File f, String s) throws IOException {
+    InputStream is = new ByteArrayInputStream(s.getBytes());
+    FileOutputStream fos = new FileOutputStream(f);
+    IOUtils.copyBytes(is, fos, s.length(), true);
   }
   }
 
 
-  /**
-   * Test ls for dir already created in testMkdDir also tests bad ls
-   */
-  public void testLs() throws IOException,InterruptedException  {
+  /** Check that the given file exists with the given contents */
+  private static void checkFile(File f, String expectedContents) 
+      throws IOException {
+    FileInputStream fi = new FileInputStream(f);
+    int len = expectedContents.length();
+    byte[] b = new byte[len];
     try {
     try {
-      // First create a new directory with mkdirs
-      Runtime r = Runtime.getRuntime();
-
-      // mkdir
-      Process p = r.exec("mkdir -p " + mpoint + "/test/mkdirs");
-      assertTrue(p.waitFor() == 0);
-
-      // ls
-      p = r.exec("ls " + mpoint + "/test/mkdirs");
-      assertTrue(p.waitFor() == 0);
-
-      // ls non-existant directory
-      p = r.exec("ls " + mpoint + "/test/mkdirsNotThere");
-      int res = p.waitFor();
-      assertFalse(res == 0);
-    } catch(Exception e) {
-      e.printStackTrace();
+      IOUtils.readFully(fi, b, 0, len);
+    } catch (IOException ie) {
+      fail("Reading "+f.getName()+" failed with "+ie.getMessage());
+    } finally {
+      fi.close(); // NB: leaving f unclosed prevents unmount
     }
     }
-
+    String s = new String(b, 0, len);
+    assertEquals("File content differs", expectedContents, s);
   }
   }
 
 
-  /**
-   * Remove a dir using the shell and use filesys to see it no longer exists.
-   */
-  public void testRmdir() throws IOException,InterruptedException  {
-    try {
-      // First create a new directory with mkdirs
-
-      Runtime r = Runtime.getRuntime();
-      Process p = r.exec("mkdir -p " + mpoint + "/test/rmdir");
-      assertTrue(p.waitFor() == 0);
-
-      Path myPath = new Path("/test/rmdir");
-      assertTrue(fileSys.exists(myPath));
-
-      // remove it
-      p = r.exec("rmdir " + mpoint + "/test/rmdir");
-      assertTrue(p.waitFor() == 0);
-
-      // check it is not there
-      assertFalse(fileSys.exists(myPath));
-
-      Path trashPath = new Path("/user/root/.Trash/Current/test/rmdir");
-      assertTrue(fileSys.exists(trashPath));
-
-      // make it again to test trashing same thing twice
-      p = r.exec("mkdir -p " + mpoint + "/test/rmdir");
-      assertTrue(p.waitFor() == 0);
+  /** Run a fuse-dfs process to mount the given DFS */
+  private static void establishMount(URI uri) throws IOException  {
+    Runtime r = Runtime.getRuntime();
+    String cp = System.getProperty("java.class.path");
 
 
-      assertTrue(fileSys.exists(myPath));
+    String buildTestDir = System.getProperty("build.test");
+    String fuseCmd = buildTestDir + "/../fuse_dfs";
+    String libHdfs = buildTestDir + "/../../../c++/lib";
 
 
-      // remove it
-      p = r.exec("rmdir " + mpoint + "/test/rmdir");
-      assertTrue(p.waitFor() == 0);
+    String arch = System.getProperty("os.arch");
+    String jvm = System.getProperty("java.home") + "/lib/" + arch + "/server";
+    String lp = System.getProperty("LD_LIBRARY_PATH")+":"+libHdfs+":"+jvm;
+    LOG.debug("LD_LIBRARY_PATH=" + lp);
+
+    String nameNode = 
+      "dfs://" + uri.getHost() + ":" + String.valueOf(uri.getPort());
+
+    // NB: We're mounting via an unprivileged user, therefore
+    // user_allow_other needs to be set in /etc/fuse.conf, which also
+    // needs to be world readable.
+    String mountCmd[] = {
+      fuseCmd, nameNode, mountPoint,
+      // "-odebug",              // Don't daemonize
+      "-obig_writes",            // Allow >4kb writes
+      "-oentry_timeout=0.1",     // Don't cache dents long
+      "-oattribute_timeout=0.1", // Don't cache attributes long
+      "-ordbuffer=32768",        // Read buffer size in kb
+      "rw"
+    };
 
 
-      // check it is not there
-      assertFalse(fileSys.exists(myPath));
+    String [] env = {
+      "CLASSPATH="+cp,
+      "LD_LIBRARY_PATH="+lp,
+      "PATH=/usr/bin:/bin"
+    };
 
 
-      trashPath = new Path("/user/root/.Trash/Current/test/rmdir.1");
-      assertTrue(fileSys.exists(trashPath));
+    execWaitRet("fusermount -u " + mountPoint);
+    execAssertSucceeds("rm -rf " + mountPoint);
+    execAssertSucceeds("mkdir -p " + mountPoint);
 
 
-    } catch(Exception e) {
-      e.printStackTrace();
+    // Mount the mini cluster
+    try {
+      Process fuseProcess = r.exec(mountCmd, env);
+      assertEquals(0, fuseProcess.waitFor());
+    } catch (InterruptedException ie) {
+      fail("Failed to mount");
     }
     }
   }
   }
 
 
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testDF() throws IOException,InterruptedException, Exception  {
-    try {
-      // First create a new directory with mkdirs
-      Path path = new Path("/foo");
-      Runtime r = Runtime.getRuntime();
-      String cmd = "mkdir -p " + mpoint + path.toString();
-      Process p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-      File f = new File(mpoint + "/foo");
-
-      DistributedFileSystem.DiskStatus d = fileSys.getDiskStatus();
-
-      long fileUsedBlocks =  (f.getTotalSpace() - f.getFreeSpace())/(64 * 1024 * 1024);
-      long dfsUsedBlocks = (long)Math.ceil((double)d.getDfsUsed()/(64 * 1024 * 1024));
+  /** Tear down the fuse-dfs process and mount */
+  private static void teardownMount() throws IOException {
+    execWaitRet("fusermount -u " + mountPoint);
+  }
 
 
-      assertTrue(fileUsedBlocks == dfsUsedBlocks);
-      assertTrue(d.getCapacity() == f.getTotalSpace());
+  @BeforeClass
+  public static void startUp() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    r = Runtime.getRuntime();
+    mountPoint = System.getProperty("build.test") + "/mnt";
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitClusterUp();
+    fs = cluster.getFileSystem();
+    establishMount(fs.getUri());
+  }
 
 
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
+  @AfterClass
+  public static void tearDown() throws IOException {
+    // Unmount before taking down the mini cluster
+    // so no outstanding operations hang.
+    teardownMount();
+    if (fs != null) {
+      fs.close();
+    }
+    if (cluster != null) {
+      cluster.shutdown();
     }
     }
   }
   }
 
 
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testChown() throws IOException,InterruptedException, Exception  {
-    try {
-      // First create a new directory with mkdirs
-      Path path = new Path("/foo");
-      Runtime r = Runtime.getRuntime();
-      String cmd = "mkdir -p " + mpoint + path.toString();
-      Process p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
+  /** Test basic directory creation, access, removal */
+  @Test
+  public void testBasicDir() throws IOException {
+    File d = new File(mountPoint, "dir1");
 
 
-      // check it is there
-      assertTrue(fileSys.getFileStatus(path).isDir());
+    // Mkdir, access and rm via the mount
+    execAssertSucceeds("mkdir " + d.getAbsolutePath());
+    execAssertSucceeds("ls " + d.getAbsolutePath());
+    execAssertSucceeds("rmdir " + d.getAbsolutePath());
 
 
-      FileStatus foo = fileSys.getFileStatus(path);
-      System.err.println("DEBUG:owner=" + foo.getOwner());
+    // The dir should no longer exist
+    execAssertFails("ls " + d.getAbsolutePath());
+  }
 
 
-      cmd = "chown nobody " + mpoint + path.toString();
-      p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
+  /** Test basic file creation and writing */
+  @Test
+  public void testCreate() throws IOException {
+    final String contents = "hello world";
+    File f = new File(mountPoint, "file1");
 
 
-      //      cmd = "chgrp nobody " + mpoint + path.toString();
-      //      p = r.exec(cmd);
-      //      assertTrue(p.waitFor() == 0);
+    // Create and access via the mount
+    createFile(f, contents);
 
 
-      foo = fileSys.getFileStatus(path);
+    // XXX: brief sleep avoids reading back a premature EOF
+    try {
+      Thread.sleep(1000);
+    } catch (InterruptedException ie) { }
 
 
-      System.err.println("DEBUG:owner=" + foo.getOwner());
+    checkFile(f, contents);
 
 
-      assertTrue(foo.getOwner().equals("nobody"));
-      assertTrue(foo.getGroup().equals("nobody"));
+    // Cat, stat and delete via the mount
+    execAssertSucceeds("cat " + f.getAbsolutePath());
+    execAssertSucceeds("stat " + f.getAbsolutePath());
+    execAssertSucceeds("rm " + f.getAbsolutePath());
 
 
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
-    }
+    // The file should no longer exist
+    execAssertFails("ls " + f.getAbsolutePath());
   }
   }
 
 
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testChmod() throws IOException,InterruptedException, Exception  {
-    try {
-      // First create a new directory with mkdirs
-      Path path = new Path("/foo");
-      Runtime r = Runtime.getRuntime();
-      String cmd = "mkdir -p " + mpoint + path.toString();
-      Process p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-
-      // check it is there
-      assertTrue(fileSys.getFileStatus(path).isDir());
-
-      cmd = "chmod 777 " + mpoint + path.toString();
-      p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-
-      FileStatus foo = fileSys.getFileStatus(path);
-      FsPermission perm = foo.getPermission();
-      assertTrue(perm.toShort() == 0777);
-
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
-    }
+  /** Test creating a file via touch */
+  @Test
+  public void testTouch() throws IOException {
+    File f = new File(mountPoint, "file1");
+    execAssertSucceeds("touch " + f.getAbsolutePath());
+    execAssertSucceeds("rm " + f.getAbsolutePath());
   }
   }
 
 
-  /**
-   * use shell to create a dir and then use filesys to see it exists.
-   */
-  public void testUtimes() throws IOException,InterruptedException, Exception  {
-    try {
-      // First create a new directory with mkdirs
-      Path path = new Path("/utimetest");
-      Runtime r = Runtime.getRuntime();
-      String cmd = "touch " + mpoint + path.toString();
-      Process p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-
-      // check it is there
-      assertTrue(fileSys.exists(path));
+  /** Test random access to a file */
+  @Test
+  public void testRandomAccess() throws IOException {
+    final String contents = "hello world";
+    File f = new File(mountPoint, "file1");
 
 
-      FileStatus foo = fileSys.getFileStatus(path);
-      long oldTime = foo.getModificationTime();
-      try { Thread.sleep(1000); } catch(Exception e) {}
+    createFile(f, contents);
 
 
-      cmd = "touch " + mpoint + path.toString();
-      p = r.exec(cmd);
-      assertTrue(p.waitFor() == 0);
-
-      try { Thread.sleep(1000); } catch(Exception e) {}
-      foo = fileSys.getFileStatus(path);
-      long newTime = foo.getModificationTime();
-
-      assertTrue(newTime > oldTime);
-
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
+    RandomAccessFile raf = new RandomAccessFile(f, "rw");
+    raf.seek(f.length());
+    try {
+      raf.write('b');
+    } catch (IOException e) {
+      // Expected: fuse-dfs does not yet support append
+      assertEquals("Operation not supported", e.getMessage());
     } finally {
     } finally {
+      raf.close();
     }
     }
-  }
 
 
-  /**
-   *
-   * Test dfs_read on a file size that will trigger multiple internal reads. 
-   * First, just check raw size reading is ok and then check with smaller reads
-   * including checking the validity of the data read.
-   *
-   */
-  public void testReads() throws IOException,InterruptedException  {
+    raf = new RandomAccessFile(f, "rw");
+    raf.seek(0);
     try {
     try {
-      // First create a new directory with mkdirs
-      Runtime r = Runtime.getRuntime();
-      Process p;
-
-      // create the file
-      Path myPath = new Path("/test/hello.reads");
-      FSDataOutputStream s = fileSys.create(myPath);
-      String hello = "hello world!";
-      int written = 0;
-      int mycount = 0;
-      while(written < 1024 * 9) {
-        s.writeUTF(hello);
-        s.writeInt(mycount++);
-        written += hello.length() + 4;
-      }
-      s.close();
-
-      // check it exists
-      assertTrue(fileSys.exists(myPath));
-      FileStatus foo = fileSys.getFileStatus(myPath);
-      assertTrue(foo.getLen() >= 9 * 1024);
-
-
-      {
-        // cat the file
-        DataInputStream is = new DataInputStream(new FileInputStream(mpoint + "/test/hello.reads"));
-        byte buf [] = new byte[4096];
-        // test reading 0 length
-        assertTrue(is.read(buf, 0, 0) == 0);
-
-        // test real reads
-        assertTrue(is.read(buf, 0, 1024) == 1024);
-        assertTrue(is.read(buf, 0, 4096) == 4096);
-        assertTrue(is.read(buf, 0, 4096) == 4096);
-        is.close();
-      }
-
-      {
-        DataInputStream is = new DataInputStream(new FileInputStream(mpoint + "/test/hello.reads"));
-        int read = 0;
-        int counter = 0;
-        try {
-          while(true) {
-            String s2 = DataInputStream.readUTF(is);
-            int s3 = is.readInt();
-            assertTrue(s2.equals(hello));
-            assertTrue(s3 == counter++);
-            read += hello.length() + 4;
-          }
-        } catch(EOFException e) {
-          assertTrue(read >= 9 * 1024);
-        }
-      }
-
-      // check reading an empty file for EOF
-      {
-        // create the file
-        myPath = new Path("/test/hello.reads2");
-        s = fileSys.create(myPath);
-        s.close();
-
-        FSDataInputStream fs = fileSys.open(myPath);
-        assertEquals(-1,  fs.read());
-
-        FileInputStream f = new FileInputStream(mpoint + "/test/hello.reads2");
-        assertEquals(-1, f.read());
-      }
-
-    } catch(Exception e) {
-      e.printStackTrace();
+      raf.write('b');
+      fail("Over-wrote existing bytes");
+    } catch (IOException e) {
+      // Expected: cannot overwrite a file
+      assertEquals("Invalid argument", e.getMessage());
     } finally {
     } finally {
+      raf.close();
     }
     }
+    execAssertSucceeds("rm " + f.getAbsolutePath());
   }
   }
 
 
-
-  /**
-   * Use filesys to create the hello world! file and then cat it and see its contents are correct.
-   */
-  public void testCat() throws IOException,InterruptedException  {
-    try {
-      // First create a new directory with mkdirs
-      Runtime r = Runtime.getRuntime();
-      Process p = r.exec("rm -rf " + mpoint + "/test/hello");
-      assertTrue(p.waitFor() == 0);
-
-      // create the file
-      Path myPath = new Path("/test/hello");
-      FSDataOutputStream s = fileSys.create(myPath);
-      String hello = "hello world!";
-      s.writeUTF(hello);
-      s.writeInt(1033);
-      s.close();
-
-      // check it exists
-      assertTrue(fileSys.exists(myPath));
-
-      // cat the file
-      DataInputStream is = new DataInputStream(new FileInputStream(mpoint + "/test/hello"));
-      String s2 = DataInputStream.readUTF(is);
-      int s3 = is.readInt();
-      assertTrue(s2.equals(hello));
-      assertTrue(s3 == 1033);
-
-    } catch(Exception e) {
-      e.printStackTrace();
-    } finally {
+  /** Test copying a set of files from the mount to itself */
+  @Test
+  public void testCopyFiles() throws IOException {
+    final String contents = "hello world";
+    File d1 = new File(mountPoint, "dir1");
+    File d2 = new File(mountPoint, "dir2");
+
+    // Create and populate dir1 via the mount
+    execAssertSucceeds("mkdir " + d1.getAbsolutePath());
+    for (int i = 0; i < 5; i++) {
+      createFile(new File(d1, "file"+i), contents);
     }
     }
+    assertEquals(5, d1.listFiles().length);
+
+    // Copy dir from the mount to the mount
+    execAssertSucceeds("cp -r " + d1.getAbsolutePath() +
+                       " " + d2.getAbsolutePath());
+    assertEquals(5, d2.listFiles().length);
+
+    // Access all the files in the dirs and remove them
+    execAssertSucceeds("find " + d1.getAbsolutePath());
+    execAssertSucceeds("find " + d2.getAbsolutePath());
+    execAssertSucceeds("rm -r " + d1.getAbsolutePath());
+    execAssertSucceeds("rm -r " + d2.getAbsolutePath());
   }
   }
 
 
+  /** Test concurrent creation and access of the mount */
+  @Test
+  public void testMultipleThreads() throws IOException {
+    ArrayList<Thread> threads = new ArrayList<Thread>();
+    final AtomicReference<String> errorMessage = new AtomicReference<String>();
+
+    for (int i = 0; i < 10; i++) {
+      Thread t = new Thread() {
+	  public void run() {
+	    try {
+	      File d = new File(mountPoint, "dir"+getId());
+	      execWaitRet("mkdir " + d.getAbsolutePath());
+	      for (int j = 0; j < 10; j++) {
+		File f = new File(d, "file"+j);
+		final String contents = "thread "+getId()+" "+j;
+		createFile(f, contents);
+	      }
+	      for (int j = 0; j < 10; j++) {
+		File f = new File(d, "file"+j);
+		execWaitRet("cat " + f.getAbsolutePath());
+		execWaitRet("rm " + f.getAbsolutePath());
+	      }
+	      execWaitRet("rmdir " + d.getAbsolutePath());
+	    } catch (IOException ie) {
+	      errorMessage.set(
+		String.format("Exception %s", 
+			      StringUtils.stringifyException(ie)));
+	    }
+          }
+	};
+      t.start();
+      threads.add(t);
+    }
 
 
-
-  /**
-   * Use filesys to create the hello world! file and then cat it and see its contents are correct.
-   */
-  public void testAppends() throws IOException,InterruptedException  {
-    try {
-      // First create a new directory with mkdirs
-      Runtime r = Runtime.getRuntime();
-
-      {
-        FileOutputStream os = new FileOutputStream(mpoint + "/appends");
-        String hello = "hello";
-        os.write(hello.getBytes());
-        os.flush();
-        os.close();
-      }
-
-      // check it exists
-      Path myPath = new Path("/appends");
-      assertTrue(fileSys.exists(myPath));
-
+    for (Thread t : threads) {
       try {
       try {
-        Thread.sleep(1000);
-      } catch(Exception e) {
+	t.join();
+      } catch (InterruptedException ie) {
+	fail("Thread interrupted: "+ie.getMessage());
       }
       }
-
-      FileStatus foo = fileSys.getFileStatus(myPath);
-
-      File f = new File(mpoint + "/appends");
-      assertTrue(f.length() > 0);
-
-      {
-        FileOutputStream os = new FileOutputStream(mpoint + "/appends", true);
-        String hello = " world!";
-        os.write(hello.getBytes());
-        os.flush();
-        os.close();
-      }
-
-      // cat the file
-      FileInputStream is = new FileInputStream(mpoint + "/appends");
-      byte b[] = new byte[1024];
-      int len = is.read(b);
-      assertTrue(len > 0);
-      String s2 = new String(b,0,len);
-      assertTrue(s2.equals("hello world!"));
-
-    } catch(Exception e) {
-      e.printStackTrace();
-    } finally {
     }
     }
-  }
 
 
-
-
-
-  public void testDone() throws IOException {
-      close();
-  }
-
-  /**
-   * Unmount and close
-   */
-  protected void tearDown() throws Exception {
-  }
-
-  /**
-   * Unmount and close
-   */
-  protected void finalize() throws Throwable {
-    close();
-  }
-
-  public void close() {
-    try {
-      int length;
-
-      // print out the fuse debug output
-      {
-      do {
-      InputStream i = fuse_process.getInputStream();
-      byte b[] = new byte[i.available()];
-      length = i.read(b);
-      System.err.println("read x bytes: " + length);
-      System.err.write(b,0,b.length);
-      } while(length > 0) ;
-      }
-
-      do {
-      InputStream i = fuse_process.getErrorStream();
-      byte b[] = new byte[i.available()];
-      length = i.read(b);
-      System.err.println("read x bytes: " + length);
-      System.err.write(b,0,b.length);
-      } while(length > 0) ;
-
-      umount(mpoint);
-
-      fuse_process.destroy();
-      fuse_process = null;
-        if(fileSys != null) {
-        fileSys.close();
-        fileSys = null;
-      }
-      if(cluster != null) {
-        cluster.shutdown();
-        cluster = null;
-      }
-    } catch(Exception e) { }
+    assertNull(errorMessage.get(), errorMessage.get());
   }
   }
-};
+}
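
The establishMount() hunk above notes that mounting through an unprivileged user requires user_allow_other in a world-readable /etc/fuse.conf. The commit leaves that requirement as a comment; purely as an illustrative sketch (the class and method names below are not part of this change), a host pre-check could look like this:

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

/** Illustrative only: check whether the host allows unprivileged FUSE mounts. */
class FuseConfCheck {
  static boolean userAllowOtherEnabled() throws IOException {
    File fuseConf = new File("/etc/fuse.conf");
    if (!fuseConf.canRead()) {
      return false;                       // must be world readable
    }
    BufferedReader in = new BufferedReader(new FileReader(fuseConf));
    try {
      String line;
      while ((line = in.readLine()) != null) {
        if (line.trim().equals("user_allow_other")) {
          return true;                    // unprivileged mounts permitted
        }
      }
      return false;
    } finally {
      in.close();
    }
  }
}

A @BeforeClass hook could call this and skip the suite via org.junit.Assume.assumeTrue(...) rather than failing on hosts that are not set up for FUSE.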

+ 7 - 7
src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml

@@ -267,11 +267,11 @@
    </p>
    </p>
    <ul>
    <ul>
       <li>
       <li>
-        <code>fs.checkpoint.period</code>, set to 1 hour by default, specifies
+        <code>dfs.namenode.checkpoint.period</code>, set to 1 hour by default, specifies
         the maximum delay between two consecutive checkpoints, and 
         the maximum delay between two consecutive checkpoints, and 
       </li>
       </li>
       <li>
       <li>
-        <code>fs.checkpoint.size</code>, set to 64MB by default, defines the
+        <code>dfs.namenode.checkpoint.size</code>, set to 64MB by default, defines the
         size of the edits log file that forces an urgent checkpoint even if 
         size of the edits log file that forces an urgent checkpoint even if 
         the maximum checkpoint delay is not reached.
         the maximum checkpoint delay is not reached.
       </li>
       </li>
@@ -318,11 +318,11 @@
    </p>
    </p>
    <ul>
    <ul>
       <li>
       <li>
-        <code>fs.checkpoint.period</code>, set to 1 hour by default, specifies
+        <code>dfs.namenode.checkpoint.period</code>, set to 1 hour by default, specifies
         the maximum delay between two consecutive checkpoints 
         the maximum delay between two consecutive checkpoints 
       </li>
       </li>
       <li>
       <li>
-        <code>fs.checkpoint.size</code>, set to 64MB by default, defines the
+        <code>dfs.namenode.checkpoint.size</code>, set to 64MB by default, defines the
         size of the edits log file that forces an urgent checkpoint even if 
         size of the edits log file that forces an urgent checkpoint even if 
         the maximum checkpoint delay is not reached.
         the maximum checkpoint delay is not reached.
       </li>
       </li>
@@ -409,7 +409,7 @@
       </li>
       </li>
       <li>
       <li>
         Specify the location of the checkpoint directory in the 
         Specify the location of the checkpoint directory in the 
-        configuration variable <code>fs.checkpoint.dir</code>;
+        configuration variable <code>dfs.namenode.checkpoint.dir</code>;
       </li>
       </li>
       <li>
       <li>
         and start the NameNode with <code>-importCheckpoint</code> option.
         and start the NameNode with <code>-importCheckpoint</code> option.
@@ -417,11 +417,11 @@
    </ul>
    </ul>
    <p>
    <p>
      The NameNode will upload the checkpoint from the 
      The NameNode will upload the checkpoint from the 
-     <code>fs.checkpoint.dir</code> directory and then save it to the NameNode
+     <code>dfs.namenode.checkpoint.dir</code> directory and then save it to the NameNode
      directory(s) set in <code>dfs.name.dir</code>.
      directory(s) set in <code>dfs.name.dir</code>.
      The NameNode will fail if a legal image is contained in 
      The NameNode will fail if a legal image is contained in 
      <code>dfs.name.dir</code>.
      <code>dfs.name.dir</code>.
-     The NameNode verifies that the image in <code>fs.checkpoint.dir</code> is
+     The NameNode verifies that the image in <code>dfs.namenode.checkpoint.dir</code> is
      consistent, but does not modify it in any way.
      consistent, but does not modify it in any way.
    </p>
    </p>
    <p>
    <p>
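
The hunks above rename the checkpoint settings to dfs.namenode.checkpoint.period, dfs.namenode.checkpoint.size and dfs.namenode.checkpoint.dir. These are normally set in hdfs-site.xml; as a sketch only, the same keys can also be set programmatically on a Configuration (the values mirror the defaults quoted in the guide, while the checkpoint directory path and class name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/** Illustrative only: checkpoint settings using the renamed keys. */
class CheckpointConfSketch {
  static Configuration checkpointConf() {
    Configuration conf = new HdfsConfiguration();
    // Checkpoint at least once per hour (value in seconds).
    conf.setLong("dfs.namenode.checkpoint.period", 3600);
    // Force a checkpoint once the edit log reaches 64MB (value in bytes).
    conf.setLong("dfs.namenode.checkpoint.size", 64 * 1024 * 1024);
    // Where the checkpointing node keeps the downloaded image and edits.
    conf.set("dfs.namenode.checkpoint.dir", "/tmp/hadoop/dfs/namesecondary");
    return conf;
  }
}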

+ 2 - 2
src/java/hdfs-default.xml

@@ -511,7 +511,7 @@ creations/deletions), or "all".</description>
       name node should store the temporary edits to merge.
       name node should store the temporary edits to merge.
       If this is a comma-delimited list of directories then the edits are
       If this is a comma-delimited list of directories then the edits are
       replicated in all of the directories for redundancy.
       replicated in all of the directories for redundancy.
-      Default value is same as fs.checkpoint.dir
+      Default value is same as dfs.namenode.checkpoint.dir
   </description>
   </description>
 </property>
 </property>
 
 
@@ -526,7 +526,7 @@ creations/deletions), or "all".</description>
   <name>dfs.namenode.checkpoint.size</name>
   <name>dfs.namenode.checkpoint.size</name>
   <value>67108864</value>
   <value>67108864</value>
   <description>The size of the current edit log (in bytes) that triggers
   <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+       a periodic checkpoint even if the dfs.namenode.checkpoint.period hasn't expired.
   </description>
   </description>
 </property>
 </property>
 
 

+ 3 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -28,6 +28,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.Pattern;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -463,8 +464,9 @@ public class BlockPoolSliceStorage extends Storage {
     // do the link
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
     int diskLayoutVersion = this.getLayoutVersion();
     // hardlink finalized blocks in tmpDir
     // hardlink finalized blocks in tmpDir
+    HardLink hardLink = new HardLink();
     DataStorage.linkBlocks(fromDir, new File(toDir,
     DataStorage.linkBlocks(fromDir, new File(toDir,
-        DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion);
+        DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
   }
   }
 
 
   private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)
   private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)

+ 56 - 16
src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -618,24 +618,26 @@ public class DataStorage extends Storage {
    * @throws IOException if error occurs during hardlink
    * @throws IOException if error occurs during hardlink
    */
    */
   private void linkAllBlocks(File fromDir, File toDir) throws IOException {
   private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+    HardLink hardLink = new HardLink();
     // do the link
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
     int diskLayoutVersion = this.getLayoutVersion();
     if (diskLayoutVersion < PRE_RBW_LAYOUT_VERSION) { // RBW version
     if (diskLayoutVersion < PRE_RBW_LAYOUT_VERSION) { // RBW version
       // hardlink finalized blocks in tmpDir/finalized
       // hardlink finalized blocks in tmpDir/finalized
       linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
       linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
-          new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion);
+          new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
       // hardlink rbw blocks in tmpDir/finalized
       // hardlink rbw blocks in tmpDir/finalized
       linkBlocks(new File(fromDir, STORAGE_DIR_RBW), 
       linkBlocks(new File(fromDir, STORAGE_DIR_RBW), 
-          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion);
+          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
     } else { // pre-RBW version
     } else { // pre-RBW version
       // hardlink finalized blocks in tmpDir
       // hardlink finalized blocks in tmpDir
-      linkBlocks(fromDir, 
-          new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion);      
-    }    
+      linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), 
+          diskLayoutVersion, hardLink);      
+    } 
+    LOG.info( hardLink.linkStats.report() );
   }
   }
   
   
-  static void linkBlocks(File from, File to, int oldLV)
-      throws IOException {
+  static void linkBlocks(File from, File to, int oldLV, HardLink hl) 
+  throws IOException {
     if (!from.exists()) {
     if (!from.exists()) {
       return;
       return;
     }
     }
@@ -646,6 +648,7 @@ public class DataStorage extends Storage {
           FileOutputStream out = new FileOutputStream(to);
           FileOutputStream out = new FileOutputStream(to);
           try {
           try {
             IOUtils.copyBytes(in, out, 16*1024);
             IOUtils.copyBytes(in, out, 16*1024);
+            hl.linkStats.countPhysicalFileCopies++;
           } finally {
           } finally {
             out.close();
             out.close();
           }
           }
@@ -661,23 +664,60 @@ public class DataStorage extends Storage {
         }
         }
         
         
         HardLink.createHardLink(from, to);
         HardLink.createHardLink(from, to);
+        hl.linkStats.countSingleLinks++;
       }
       }
       return;
       return;
     }
     }
     // from is a directory
     // from is a directory
+    hl.linkStats.countDirs++;
+    
     if (!to.mkdirs())
     if (!to.mkdirs())
       throw new IOException("Cannot create directory " + to);
       throw new IOException("Cannot create directory " + to);
-    String[] blockNames = from.list(new java.io.FilenameFilter() {
+    
+    //If upgrading from old stuff, need to munge the filenames.  That has to
+    //be done one file at a time, so hardlink them one at a time (slow).
+    if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
+      String[] blockNames = from.list(new java.io.FilenameFilter() {
+          public boolean accept(File dir, String name) {
+            return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+              || name.startsWith(BLOCK_FILE_PREFIX)
+              || name.startsWith(COPY_FILE_PREFIX);
+          }
+        });
+      if (blockNames.length == 0) {
+        hl.linkStats.countEmptyDirs++;
+      }
+      else for(int i = 0; i < blockNames.length; i++)
+        linkBlocks(new File(from, blockNames[i]), 
+            new File(to, blockNames[i]), oldLV, hl);
+    } 
+    else {
+      //If upgrading from a relatively new version, we only need to create
+      //links with the same filename.  This can be done in bulk (much faster).
+      String[] blockNames = from.list(new java.io.FilenameFilter() {
         public boolean accept(File dir, String name) {
         public boolean accept(File dir, String name) {
-          return name.startsWith(BLOCK_SUBDIR_PREFIX) 
-            || name.startsWith(BLOCK_FILE_PREFIX)
-            || name.startsWith(COPY_FILE_PREFIX);
+          return name.startsWith(BLOCK_FILE_PREFIX);
         }
         }
       });
       });
-    
-    for(int i = 0; i < blockNames.length; i++)
-      linkBlocks(new File(from, blockNames[i]), 
-                 new File(to, blockNames[i]), oldLV);
+      if (blockNames.length > 0) {
+        HardLink.createHardLinkMult(from, blockNames, to);
+        hl.linkStats.countMultLinks++;
+        hl.linkStats.countFilesMultLinks += blockNames.length;
+      } else {
+        hl.linkStats.countEmptyDirs++;
+      }
+      
+      //now take care of the rest of the files and subdirectories
+      String[] otherNames = from.list(new java.io.FilenameFilter() {
+          public boolean accept(File dir, String name) {
+            return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+              || name.startsWith(COPY_FILE_PREFIX);
+          }
+        });
+      for(int i = 0; i < otherNames.length; i++)
+        linkBlocks(new File(from, otherNames[i]), 
+            new File(to, otherNames[i]), oldLV, hl);
+    }
   }
   }
 
 
   private void verifyDistributedUpgradeProgress(
   private void verifyDistributedUpgradeProgress(
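
The new comments in linkBlocks() spell out the HDFS-1445 change: for pre-generation-stamp layouts the file names must be rewritten, so blocks are still linked one at a time, but for newer layouts every block file in a directory keeps its name and can be linked with a single HardLink.createHardLinkMult() call. A minimal sketch of that bulk path, limited to the HardLink API and linkStats counters visible in this diff (the sketch's class and method names are illustrative):

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;

import org.apache.hadoop.fs.HardLink;

/** Illustrative only: the batched hard-link path added by HDFS-1445. */
class BulkLinkSketch {
  static void linkBlocksInDir(File from, File to, HardLink hl)
      throws IOException {
    // Block files keep their names across the upgrade, so they can be
    // hard-linked in one batch rather than one exec per file.
    String[] blockNames = from.list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.startsWith("blk_");   // block file prefix
      }
    });
    if (blockNames == null || blockNames.length == 0) {
      hl.linkStats.countEmptyDirs++;
      return;
    }
    HardLink.createHardLinkMult(from, blockNames, to);
    hl.linkStats.countMultLinks++;
    hl.linkStats.countFilesMultLinks += blockNames.length;
  }
}

Subdirectories and copy-in-progress files still go through the recursive per-entry path, which is what the second FilenameFilter in the committed code handles.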

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java

@@ -24,7 +24,7 @@ import java.io.IOException;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -1156,7 +1156,7 @@ public class FSImage implements NNStorageListener, Closeable {
    * @param conf the Configuration
    * @param conf the Configuration
    * @param defaultValue a default value for the attribute, if null
    * @param defaultValue a default value for the attribute, if null
    * @return a Collection of URIs representing the values in 
    * @return a Collection of URIs representing the values in 
-   * fs.checkpoint.dir configuration property
+   * dfs.namenode.checkpoint.dir configuration property
    */
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
       String defaultValue) {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;