Просмотр исходного кода

HDFS-939. libhdfs test is broken. Contributed by Eli Collins.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@925495 13f79535-47bb-0310-9956-ffa450edef68
Thomas White 15 лет назад
Родитель
Commit
eb92b86585

+ 2 - 0
CHANGES.txt

@@ -205,6 +205,8 @@ Trunk (unreleased changes)
 
     HDFS-1015. Fix intermittent failure in TestSecurityTokenEditLog.
     (Jitendra Nath Pandey via suresh)
+
+    HDFS-939. libhdfs test is broken. (Eli Collins via tomwhite)
     
 Release 0.21.0 - Unreleased
 

+ 3 - 2
build.xml

@@ -97,7 +97,6 @@
   <property name="test.hdfs.commit.tests.file" value="${test.src.dir}/commit-tests" />
   <property name="test.hdfs.all.tests.file" value="${test.src.dir}/all-tests" />
 
-  <property name="test.libhdfs.conf.dir" value="${c++.libhdfs.src}/tests/conf"/>
   <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/>
 
   <property name="web.src.dir" value="${basedir}/src/web"/>
@@ -1022,6 +1021,7 @@
  <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="islibhdfs" unless="clover.enabled">
     <delete dir="${test.libhdfs.dir}"/>
     <mkdir dir="${test.libhdfs.dir}"/>
+    <mkdir dir="${test.libhdfs.dir}/conf"/>
     <mkdir dir="${test.libhdfs.dir}/logs"/>
     <mkdir dir="${test.libhdfs.dir}/hdfs/name"/>
 
@@ -1031,8 +1031,9 @@
         <env key="JVM_ARCH" value="${jvm.arch}"/>
         <env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/>
         <env key="HADOOP_HOME" value="${basedir}"/>
-        <env key="HADOOP_CONF_DIR" value="${test.libhdfs.conf.dir}"/>
+        <env key="HADOOP_CONF_DIR" value="${test.libhdfs.dir}/conf"/>
         <env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/>
+        <env key="LIBHDFS_TEST_DIR" value="${test.libhdfs.dir}"/>
         <env key="LIBHDFS_SRC_DIR" value="${c++.libhdfs.src}"/>
         <env key="LIBHDFS_INSTALL_DIR" value="${install.c++}/lib"/>  
         <env key="LIB_DIR" value="${common.ivy.lib.dir}"/>

+ 0 - 27
src/c++/libhdfs/tests/conf/core-site.xml

@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Values used when running libhdfs unit tests. -->
-<!-- This is mostly empty, to use the default values, overriding the -->
-<!-- potentially user-editted core-site.xml in the conf/ directory.  -->
-
-<configuration>
-
-<property>
-  <name>hadoop.tmp.dir</name>
-  <value>build/test/libhdfs</value>
-  <description>A base for other temporary directories.</description>
-</property>
-
-
-<property>
-  <name>fs.default.name</name>
-  <value>hdfs://localhost:23000/</value>
-  <description>The name of the default file system.  A URI whose
-  scheme and authority determine the FileSystem implementation.  The
-  uri's scheme determines the config property (fs.SCHEME.impl) naming
-  the FileSystem implementation class.  The uri's authority is used to
-  determine the host, port, etc. for a filesystem.</description>
-</property>
-
-</configuration>

+ 0 - 13
src/c++/libhdfs/tests/conf/hadoop-site.xml

@@ -1,13 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="nutch-conf.xsl"?>
-
-<!-- DO NOT PUT ANY PROPERTY IN THIS FILE. INSTEAD USE -->
-<!-- core-site.xml, mapred-site.xml OR hdfs-site.xml -->
-<!-- This empty script is to avoid picking properties from  -->
-<!-- conf/hadoop-site.xml This would be removed once support  -->
-<!-- for hadoop-site.xml is removed.  -->
-
-<configuration>
-
-
-</configuration>

+ 0 - 24
src/c++/libhdfs/tests/conf/hdfs-site.xml

@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<property>
-  <name>dfs.replication</name>
-  <value>1</value>
-  <description>Default block replication.
-  The actual number of replications can be specified when the file is created.
-  The default is used if replication is not specified in create time.
-  </description>
-</property>
-
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>Allow appends to files.
-  </description>
-</property>
-
-</configuration>

+ 0 - 8
src/c++/libhdfs/tests/conf/mapred-site.xml

@@ -1,8 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

+ 0 - 1
src/c++/libhdfs/tests/conf/slaves

@@ -1 +0,0 @@
-localhost

+ 40 - 1
src/c++/libhdfs/tests/test-libhdfs.sh

@@ -36,6 +36,44 @@ HADOOP_BIN_DIR=$HADOOP_HOME/bin
 COMMON_BUILD_DIR=$HADOOP_HOME/build/ivy/lib/Hadoop-Hdfs/common
 COMMON_JAR=$COMMON_BUILD_DIR/hadoop-core-0.22.0-SNAPSHOT.jar
 
+cat > $HADOOP_CONF_DIR/core-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>hadoop.tmp.dir</name>
+  <value>file:///$LIBHDFS_TEST_DIR</value>
+</property>
+<property>
+  <name>fs.default.name</name>
+  <value>hdfs://localhost:23000/</value>
+</property>
+</configuration>
+EOF
+
+cat > $HADOOP_CONF_DIR/hdfs-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>dfs.replication</name>
+  <value>1</value>
+</property>
+<property>
+  <name>dfs.support.append</name>
+  <value>true</value>
+</property>
+<property>
+  <name>dfs.namenode.logging.level</name>
+  <value>DEBUG</value>
+</property>
+</configuration>
+EOF
+
+cat > $HADOOP_CONF_DIR/slaves <<EOF
+localhost
+EOF
+
 # If we are running from the hdfs repo we need to create HADOOP_BIN_DIR.  
 # If the bin directory does not and we've got a core jar extract it's
 # bin directory to HADOOP_HOME/bin. The bin scripts hdfs-config.sh and
@@ -142,7 +180,8 @@ cd $HADOOP_HOME
 echo Y | $HADOOP_BIN_DIR/hdfs namenode -format &&
 $HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start namenode && sleep 2
 $HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start datanode && sleep 2
-sleep 20
+echo "Wait 30s for the datanode to start up..."
+sleep 30
 CLASSPATH=$CLASSPATH LD_PRELOAD="$LIB_JVM_DIR/libjvm.so:$LIBHDFS_INSTALL_DIR/libhdfs.so:" $LIBHDFS_BUILD_DIR/$HDFS_TEST
 BUILD_STATUS=$?
 sleep 3