Browse code

HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)

Allen Wittenauer, 10 years ago
parent
current commit
cb74f39697

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -935,6 +935,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7130. TestDataTransferKeepalive fails intermittently on Windows.
     (cnauroth)
 
+    HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

+ 12 - 5
hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt

@@ -211,11 +211,18 @@ if (NOT WIN32)
     add_executable(test_libhdfs_vecsum
         main/native/libhdfs/test/vecsum.c
     )
-    target_link_libraries(test_libhdfs_vecsum
-        hdfs
-        pthread
-        rt
-    )
+    if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+        )
+    else (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+            rt
+        )
+    endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 endif(NOT WIN32)
 
 IF(REQUIRE_LIBWEBHDFS)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c

@@ -3215,7 +3215,7 @@ static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo)
     free(hdfsFileInfo->mName);
     free(hdfsFileInfo->mOwner);
     free(hdfsFileInfo->mGroup);
-    memset(hdfsFileInfo, 0, sizeof(hdfsFileInfo));
+    memset(hdfsFileInfo, 0, sizeof(*hdfsFileInfo));
 }
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c

@@ -122,11 +122,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %ld!\n",
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (hdfsFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
@@ -177,11 +177,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %ld!\n", 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (!hdfsFileUsesDirectRead(readFile)) {
           fprintf(stderr, "Direct read support incorrectly not detected "
@@ -283,9 +283,9 @@ int main(int argc, char **argv) {
         fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
 
-        fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
-        fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
-        fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
+        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
+        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
+        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
 
         fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
@@ -293,8 +293,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
             fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
             fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
-            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
-            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
+            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
@@ -312,8 +312,8 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                 fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
-                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
-                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
+                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);

+ 31 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c

@@ -29,6 +29,12 @@
 #include <time.h>
 #include <unistd.h>
 
+#ifdef __MACH__ // OS X does not have clock_gettime
+#include <mach/clock.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
 #include "config.h"
 #include "hdfs.h"
 
@@ -49,6 +55,29 @@ struct stopwatch {
     struct timespec stop;
 };
 
+
+#ifdef __MACH__
+static int clock_gettime_mono(struct timespec * ts) {
+    static mach_timebase_info_data_t tb;
+    static uint64_t timestart = 0;
+    uint64_t t = 0;
+    if (timestart == 0) {
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
 static struct stopwatch *stopwatch_create(void)
 {
     struct stopwatch *watch;
@@ -58,7 +87,7 @@ static struct stopwatch *stopwatch_create(void)
         fprintf(stderr, "failed to allocate memory for stopwatch\n");
         goto error;
     }
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->start)) {
+    if (clock_gettime_mono(&watch->start)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
@@ -76,7 +105,7 @@ static void stopwatch_stop(struct stopwatch *watch,
 {
     double elapsed, rate;
 
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->stop)) {
+    if (clock_gettime_mono(&watch->stop)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c

@@ -84,7 +84,7 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
 
 static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
 {
-    uint64_t blockSize;
+    int64_t blockSize;
     int ret;
 
     blockSize = hdfsGetDefaultBlockSize(fs);