
HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P. McCabe)

Colin Patrick Mccabe 10 years ago
parent
commit
f0ea98f0c8
34 changed files with 1167 additions and 321 deletions
  1. +19 -12   hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
  2. +344 -0   hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc
  3. +0 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc
  4. +0 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc
  5. +303 -0   hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc
  6. +62 -0    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc
  7. +41 -0    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc
  8. +35 -0    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h
  9. +204 -0   hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c
 10. +61 -1    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h
 11. +21 -1    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h
 12. +7 -2     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h
 13. +15 -1    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
 14. +1 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h
 15. +2 -75    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc
 16. +2 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
 17. +0 -68    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
 18. +1 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h
 19. +0 -34    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc
 20. +6 -6     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc
 21. +1 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
 22. +2 -2     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
 23. +0 -1     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
 24. +2 -2     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
 25. +1 -1     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
 26. +16 -1    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
 27. +3 -2     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
 28. +1 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
 29. +1 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
 30. +7 -1     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/network/Syscall.h
 31. +4 -20    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/network/TcpSocket.cc
 32. +1 -1     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/rpc/RpcClient.h
 33. +0 -90    hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/server/NamenodeProxy.cc
 34. +4 -0     hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/server/NamenodeProxy.h

+ 19 - 12
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake

@@ -63,6 +63,7 @@ IF(ENABLE_SSE STREQUAL ON)
        # In Visual Studio 2013, this option will use SSE4.2 instructions
         # if available. Not sure about the behaviour in Visual Studio 2010.
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
+        ADD_DEFINITIONS(-D__SSE4_2__)
     ELSE(MSVC)
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
     ENDIF(MSVC)
@@ -80,25 +81,29 @@ IF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-bind_at_load")
 ENDIF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
 
-
 IF(OS_LINUX)
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,--export-dynamic")
 ENDIF(OS_LINUX)
 
+IF(MSVC)
+  # Always enable boost for Windows, as VC does not support some C++11
+  # features, such as nested exceptions.
+  IF(ENABLE_BOOST STREQUAL OFF)
+    ADD_DEFINITIONS(-DNEED_BOOST)
+  ENDIF(ENABLE_BOOST STREQUAL OFF)
+  # Find boost libraries with flavor: mt-sgd (multi-thread, static, and debug)
+  SET(Boost_USE_STATIC_LIBS ON)
+  SET(Boost_USE_MULTITHREADED ON)
+  SET(Boost_USE_STATIC_RUNTIME ON)
+  FIND_PACKAGE(Boost 1.53 COMPONENTS thread chrono system atomic iostreams REQUIRED)
+  INCLUDE_DIRECTORIES("${Boost_INCLUDE_DIRS}")
+  LINK_DIRECTORIES("${Boost_LIBRARY_DIRS}")
+ENDIF(MSVC)
+
 SET(BOOST_ROOT ${CMAKE_PREFIX_PATH})
 IF(ENABLE_BOOST STREQUAL ON)
     MESSAGE(STATUS "using boost instead of native compiler c++0x support.")
-    IF(MSVC)
-        # Find boost libraries with flavor: mt-sgd (multi-thread, static, and debug)
-        SET(Boost_USE_STATIC_LIBS ON)
-        SET(Boost_USE_MULTITHREADED ON)
-        SET(Boost_USE_STATIC_RUNTIME ON)
-        FIND_PACKAGE(Boost 1.53 COMPONENTS thread chrono system atomic iostreams REQUIRED)			
-        INCLUDE_DIRECTORIES("${Boost_INCLUDE_DIRS}")
-        LINK_DIRECTORIES("${Boost_LIBRARY_DIRS}")
-    ELSE(MSVC)
-        FIND_PACKAGE(Boost 1.53 REQUIRED)
-    ENDIF(MSVC)
+    FIND_PACKAGE(Boost 1.53 REQUIRED)
     SET(NEED_BOOST true CACHE INTERNAL "boost is required")
 ELSE(ENABLE_BOOST STREQUAL ON)
     SET(NEED_BOOST false CACHE INTERNAL "boost is required")
@@ -157,6 +162,8 @@ ELSEIF(CMAKE_COMPILER_IS_CLANG)
     ENDIF(ENABLE_LIBCPP STREQUAL ON)
 ELSEIF(MSVC)
     SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
+    ADD_DEFINITIONS(-D_CRT_SECURE_NO_WARNINGS)
+    ADD_DEFINITIONS(-D_SCL_SECURE_NO_WARNINGS)
 ENDIF(CMAKE_COMPILER_IS_GNUCXX)
 
 TRY_COMPILE(STRERROR_R_RETURN_INT

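Note on the -D__SSE4_2__ definition above: GCC and Clang define __SSE4_2__
automatically when -msse4.2 is passed, but MSVC has no equivalent, so the
build defines it by hand to keep SSE4.2-gated code paths enabled. A minimal
sketch of how such a guard is typically consumed (crc32c_byte is illustrative,
not part of this patch):

    #include <stdint.h>
    #ifdef __SSE4_2__
    #include <nmmintrin.h>  // SSE4.2 intrinsics, including _mm_crc32_u8
    #endif

    // Hypothetical helper: CRC32C one byte at a time, using the SSE4.2
    // instruction when the guard is set and a portable fallback otherwise.
    static inline uint32_t crc32c_byte(uint32_t crc, uint8_t b) {
    #ifdef __SSE4_2__
        return _mm_crc32_u8(crc, b);
    #else
        crc ^= b;
        for (int i = 0; i < 8; ++i)
            crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
        return crc;
    #endif
    }
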
+ 344 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc

@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Logger.h"
+#include "UnorderedMap.h"
+#include "client/KerberosName.h"
+#include "client/UserInfo.h"
+#include "network/Syscall.h"
+#include "network/TcpSocket.h"
+#include "server/NamenodeProxy.h"
+
+#include <algorithm>
+#include <arpa/inet.h>
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <errno.h>
+#include <fcntl.h>
+#include <ifaddrs.h>
+#include <inttypes.h>
+#include <iostream>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <poll.h>
+#include <pwd.h>
+#include <regex.h>
+#include <stdint.h>
+#include <string>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <vector>
+
+#include <sstream>
+
+namespace hdfs {
+namespace internal {
+
+/* InputStreamImpl.cc */
+unordered_set<std::string> BuildLocalAddrSet() {
+    unordered_set<std::string> set;
+    struct ifaddrs *ifAddr = NULL;
+    struct ifaddrs *pifAddr = NULL;
+    struct sockaddr *addr;
+
+    if (getifaddrs(&ifAddr)) {
+        THROW(HdfsNetworkException,
+              "InputStreamImpl: cannot get local network interface: %s",
+              GetSystemErrorInfo(errno));
+    }
+
+    try {
+        std::vector<char> host;
+        const char *pHost;
+        host.resize(INET6_ADDRSTRLEN + 1);
+
+        for (pifAddr = ifAddr; pifAddr != NULL; pifAddr = pifAddr->ifa_next) {
+            addr = pifAddr->ifa_addr;
+            memset(&host[0], 0, INET6_ADDRSTRLEN + 1);
+
+            if (addr->sa_family == AF_INET) {
+                pHost = inet_ntop(
+                    addr->sa_family,
+                    &(reinterpret_cast<struct sockaddr_in *>(addr))->sin_addr,
+                    &host[0], INET6_ADDRSTRLEN);
+            } else if (addr->sa_family == AF_INET6) {
+                pHost = inet_ntop(
+                    addr->sa_family,
+                    &(reinterpret_cast<struct sockaddr_in6 *>(addr))->sin6_addr,
+                    &host[0], INET6_ADDRSTRLEN);
+            } else {
+                continue;
+            }
+
+            if (NULL == pHost) {
+                THROW(HdfsNetworkException,
+                      "InputStreamImpl: cannot convert network address "
+                      "to textual form: %s",
+                      GetSystemErrorInfo(errno));
+            }
+
+            set.insert(pHost);
+        }
+
+        /*
+         * add hostname.
+         */
+        long hostlen = sysconf(_SC_HOST_NAME_MAX);
+        host.resize(hostlen + 1);
+
+        if (gethostname(&host[0], host.size())) {
+            THROW(HdfsNetworkException,
+                  "InputStreamImpl: cannot get hostname: %s",
+                  GetSystemErrorInfo(errno));
+        }
+
+        set.insert(&host[0]);
+    } catch (...) {
+        if (ifAddr != NULL) {
+            freeifaddrs(ifAddr);
+        }
+
+        throw;
+    }
+
+    if (ifAddr != NULL) {
+        freeifaddrs(ifAddr);
+    }
+
+    return set;
+}
+
+/* TcpSocket.cc */
+void TcpSocketImpl::setBlockMode(bool enable) {
+    int flag;
+    flag = syscalls::fcntl(sock, F_GETFL, 0);
+
+    if (-1 == flag) {
+        THROW(HdfsNetworkException, "Get socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    flag = enable ? (flag & ~O_NONBLOCK) : (flag | O_NONBLOCK);
+
+    if (-1 == syscalls::fcntl(sock, F_SETFL, flag)) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote "
+              "node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+/* NamenodeProxy.cc */
+uint32_t GetInitNamenodeIndex(const std::string &id) {
+    std::string path = "/tmp/";
+    path += id;
+    int fd;
+    uint32_t index = 0;
+    /*
+     * try create the file
+     */
+    fd = open(path.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0666);
+
+    if (fd < 0) {
+        if (errno == EEXIST) {
+            /*
+             * the file already exists, try to open it
+             */
+            fd = open(path.c_str(), O_RDONLY);
+        } else {
+            /*
+             * failed to create, do not care why
+             */
+            return 0;
+        }
+    } else {
+        if (0 != flock(fd, LOCK_EX)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return index;
+        }
+
+        /*
+         * created file, initialize it with 0
+         */
+        write(fd, &index, sizeof(index));
+        flock(fd, LOCK_UN);
+        close(fd);
+        return index;
+    }
+
+    /*
+     * the file exists, read it.
+     */
+    if (fd >= 0) {
+        if (0 != flock(fd, LOCK_SH)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return index;
+        }
+
+        if (sizeof(index) != read(fd, &index, sizeof(index))) {
+            /*
+             * failed to read, do not care why
+             */
+            index = 0;
+        }
+
+        flock(fd, LOCK_UN);
+        close(fd);
+    }
+
+    return index;
+}
+
+void SetInitNamenodeIndex(const std::string &id, uint32_t index) {
+    std::string path = "/tmp/";
+    path += id;
+    int fd;
+    /*
+     * try open the file for write
+     */
+    fd = open(path.c_str(), O_WRONLY);
+
+    if (fd > 0) {
+        if (0 != flock(fd, LOCK_EX)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return;
+        }
+
+        write(fd, &index, sizeof(index));
+        flock(fd, LOCK_UN);
+        close(fd);
+    }
+}
+
+/* KerberosName.cc */
+static void HandleRegError(int rc, regex_t *comp) {
+    std::vector<char> buffer;
+    size_t size = regerror(rc, comp, NULL, 0);
+    buffer.resize(size + 1);
+    regerror(rc, comp, &buffer[0], buffer.size());
+    THROW(HdfsIOException,
+        "KerberosName: Failed to parse Kerberos principal.");
+}
+
+void KerberosName::parse(const std::string &principal) {
+    int rc;
+    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
+    regex_t comp;
+    regmatch_t pmatch[5];
+
+    if (principal.empty()) {
+        return;
+    }
+
+    memset(&comp, 0, sizeof(regex_t));
+    rc = regcomp(&comp, pattern, REG_EXTENDED);
+
+    if (rc) {
+        HandleRegError(rc, &comp);
+    }
+
+    try {
+        memset(pmatch, 0, sizeof(pmatch));
+        rc = regexec(&comp, principal.c_str(),
+                     sizeof(pmatch) / sizeof(pmatch[1]), pmatch, 0);
+
+        if (rc && rc != REG_NOMATCH) {
+            HandleRegError(rc, &comp);
+        }
+
+        if (rc == REG_NOMATCH) {
+            if (principal.find('@') != principal.npos) {
+                THROW(HdfsIOException,
+                      "KerberosName: Malformed Kerberos name: %s",
+                      principal.c_str());
+            } else {
+                name = principal;
+            }
+        } else {
+            if (pmatch[1].rm_so != -1) {
+                name = principal.substr(pmatch[1].rm_so,
+                                        pmatch[1].rm_eo - pmatch[1].rm_so);
+            }
+
+            if (pmatch[3].rm_so != -1) {
+                host = principal.substr(pmatch[3].rm_so,
+                                        pmatch[3].rm_eo - pmatch[3].rm_so);
+            }
+
+            if (pmatch[4].rm_so != -1) {
+                realm = principal.substr(pmatch[4].rm_so,
+                                         pmatch[4].rm_eo - pmatch[4].rm_so);
+            }
+        }
+    } catch (...) {
+        regfree(&comp);
+        throw;
+    }
+
+    regfree(&comp);
+}
+
+/* UserInfo.cc */
+UserInfo UserInfo::LocalUser() {
+    UserInfo retval;
+    uid_t uid, euid;
+    int bufsize;
+    struct passwd pwd, epwd, *result = NULL;
+    euid = geteuid();
+    uid = getuid();
+
+    if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) {
+        THROW(InvalidParameter,
+              "Invalid input: \"sysconf\" function failed to get the "
+              "configure with key \"_SC_GETPW_R_SIZE_MAX\".");
+    }
+
+    std::vector<char> buffer(bufsize);
+
+    if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: effective user name cannot be found with UID %u.",
+              euid);
+    }
+
+    retval.setEffectiveUser(epwd.pw_name);
+
+    if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: real user name cannot be found with UID %u.",
+              uid);
+    }
+
+    retval.setRealUser(pwd.pw_name);
+    return retval;
+}
+
+}
+}
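
The setBlockMode above is the classic POSIX pattern: read the flag word with
F_GETFL, toggle O_NONBLOCK, and write it back with F_SETFL. A standalone
sketch of the same idea (setBlocking is illustrative; the patch's version
additionally wraps failures in HdfsNetworkException):

    #include <fcntl.h>
    #include <stdexcept>

    // Minimal sketch: switch a POSIX socket between blocking and
    // non-blocking mode by toggling O_NONBLOCK.
    void setBlocking(int fd, bool enable) {
        int flags = fcntl(fd, F_GETFL, 0);        // read current flags
        if (flags == -1)
            throw std::runtime_error("F_GETFL failed");
        flags = enable ? (flags & ~O_NONBLOCK)    // clear: blocking
                       : (flags | O_NONBLOCK);    // set: non-blocking
        if (fcntl(fd, F_SETFL, flags) == -1)
            throw std::runtime_error("F_SETFL failed");
    }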

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc → hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc → hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc


+ 303 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc

@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "platform.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Logger.h"
+#include "UnorderedMap.h"
+#include "client/KerberosName.h"
+#include "client/UserInfo.h"
+#include "network/Syscall.h"
+#include "network/TcpSocket.h"
+#include "server/NamenodeProxy.h"
+
+#include <regex>
+#include <vector>
+
+int poll(struct pollfd *fds, unsigned long nfds, int timeout) {
+    return WSAPoll(fds, nfds, timeout);
+}
+
+namespace hdfs {
+namespace internal {
+
+/* InputStreamImpl.cc */
+unordered_set<std::string> BuildLocalAddrSet() {
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+    DWORD dwSize = 0;
+    DWORD dwRetVal = 0;
+    unsigned int i = 0;
+
+    // Set the flags to pass to GetAdaptersAddresses
+    ULONG flags = GAA_FLAG_INCLUDE_PREFIX;
+
+    // default to unspecified address family (both)
+    ULONG family = AF_UNSPEC;
+    PIP_ADAPTER_ADDRESSES pAddresses = NULL;
+    ULONG outBufLen = 0;
+
+    PIP_ADAPTER_ADDRESSES pCurrAddresses = NULL;
+    PIP_ADAPTER_UNICAST_ADDRESS pUnicast = NULL;
+    PIP_ADAPTER_ANYCAST_ADDRESS pAnycast = NULL;
+    PIP_ADAPTER_MULTICAST_ADDRESS pMulticast = NULL;
+
+    outBufLen = sizeof (IP_ADAPTER_ADDRESSES);
+    pAddresses = (IP_ADAPTER_ADDRESSES *) MALLOC(outBufLen);
+
+    // Make an initial call to GetAdaptersAddresses to get the
+    // size needed into the outBufLen variable
+    if (GetAdaptersAddresses(
+        family,
+        flags,
+        NULL,
+        pAddresses,
+        &outBufLen) == ERROR_BUFFER_OVERFLOW) {
+        FREE(pAddresses);
+        pAddresses = (IP_ADAPTER_ADDRESSES *) MALLOC(outBufLen);
+    }
+
+    if (pAddresses == NULL) {
+        THROW(HdfsNetworkException,
+            "InputStreamImpl: malloc failed, "
+            "cannot get local network interface: %s",
+            GetSystemErrorInfo(errno));
+    }
+
+    // Make a second call to GetAdaptersAddresses to get the
+    // actual data we want
+    dwRetVal =
+        GetAdaptersAddresses(family, flags, NULL, pAddresses, &outBufLen);
+
+    if (dwRetVal == NO_ERROR) {
+        // If successful, construct the address set
+        unordered_set<std::string> set; // to be returned
+        std::vector<char> host;
+        const char *pHost;
+        host.resize(INET6_ADDRSTRLEN + 1);
+
+        pCurrAddresses = pAddresses;
+        while (pCurrAddresses) {
+            // TODO: scan Anycast, Multicast as well.
+            // scan unicast address list
+            pUnicast = pCurrAddresses->FirstUnicastAddress;
+            while (pUnicast != NULL) {
+                memset(&host[0], 0, INET6_ADDRSTRLEN);
+                ULONG _family = pUnicast->Address.lpSockaddr->sa_family;
+                if (_family == AF_INET) {
+                    SOCKADDR_IN *sa_in =
+                        (SOCKADDR_IN *)pUnicast->Address.lpSockaddr;
+                    pHost = InetNtop(
+                        AF_INET,
+                        &(sa_in->sin_addr),
+                        &host[0],
+                        INET6_ADDRSTRLEN);
+                }
+                else {
+                    SOCKADDR_IN6 *sa_in6 =
+                        (SOCKADDR_IN6 *)pUnicast->Address.lpSockaddr;
+                    pHost = InetNtop(
+                        AF_INET6,
+                        &(sa_in6->sin6_addr),
+                        &host[0],
+                        INET6_ADDRSTRLEN);
+                }
+                if (pHost == NULL) {
+                    THROW(HdfsNetworkException,
+                        "InputStreamImpl: cannot convert network address to textual form: %s",
+                        GetSystemErrorInfo(errno));
+                }
+                set.insert(pHost);
+                pUnicast = pUnicast->Next;
+            } // inner while
+            pCurrAddresses = pCurrAddresses->Next;
+        } // while
+
+        // TODO: replace hardcoded HOST_NAME_MAX
+        int _HOST_NAME_MAX = 128;
+        host.resize(_HOST_NAME_MAX + 1);
+        if (gethostname(&host[0], host.size())) {
+            THROW(HdfsNetworkException,
+                "InputStreamImpl: cannot get hostname: %s",
+                GetSystemErrorInfo(errno));
+        }
+        set.insert(&host[0]);
+        if (pAddresses != NULL) {
+            FREE(pAddresses);
+        }
+        return set;
+    }
+    else {
+        // GetAdaptersAddresses failed; clean up and report the failure.
+        if (pAddresses != NULL) {
+            FREE(pAddresses);
+        }
+        THROW(HdfsNetworkException,
+            "InputStreamImpl: cannot get local network interface: %s",
+            GetSystemErrorInfo(errno));
+    }
+}
+
+/* TcpSocket.cc */
+void TcpSocketImpl::setBlockMode(bool enable) {
+    u_long blocking_mode = (enable) ? 0 : 1;
+    int rc = syscalls::ioctlsocket(sock, FIONBIO, &blocking_mode);
+    if (rc == SOCKET_ERROR) {
+        THROW(HdfsNetworkException, "Set socket blocking mode failed for "
+            "remote node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+/* NamenodeProxy.cc */
+static std::string GetTmpPath() {
+    char lpTempPathBuffer[MAX_PATH];
+    //  Gets the temp path env string (no guarantee it's a valid path).
+    DWORD dwRetVal = GetTempPath(
+        MAX_PATH, // length of the buffer
+        lpTempPathBuffer); // buffer for path
+    if (dwRetVal > MAX_PATH || (dwRetVal == 0))
+        THROW(HdfsException, "GetTmpPath failed");
+    return std::string(lpTempPathBuffer);
+}
+
+uint32_t GetInitNamenodeIndex(const std::string &id) {
+    std::string path = GetTmpPath();
+    path += id;
+    HANDLE fd = INVALID_HANDLE_VALUE;
+    uint32_t index = 0;
+
+    fd = CreateFile(
+        path.c_str(),
+        GENERIC_WRITE, // write only
+        0, // do not share, is this right shared mode?
+        NULL, // default security
+        CREATE_NEW, // call fails if file exists, ERROR_FILE_EXISTS
+        FILE_ATTRIBUTE_NORMAL, // normal file
+        NULL); // no template
+
+    if (fd == INVALID_HANDLE_VALUE) {
+        // File already exists, try to open it
+        if (GetLastError() == ERROR_FILE_EXISTS) {
+            fd = CreateFile(path.c_str(),
+                GENERIC_READ, // open for reading
+                0, // do not share
+                NULL, // default security
+                OPEN_EXISTING, // existing file only
+                FILE_ATTRIBUTE_NORMAL, // normal file
+                NULL // no template
+                );
+        }
+        else {
+            // TODO: log, or throw exception when a file is failed to open
+            return 0;
+        }
+    }
+    else {
+        DWORD dwBytesToWrite = (DWORD)sizeof(index);
+        DWORD dwBytesWritten = 0;
+        BOOL bErrorFlag = WriteFile(
+            fd,
+            &index,
+            dwBytesToWrite,
+            &dwBytesWritten,
+            NULL);
+        // TODO: check error code and number of bytes written
+        CloseHandle(fd);
+        return index;
+    }
+
+    // the file exists, read it
+    DWORD dwBytesToRead = 0;
+    if (FALSE == ReadFile(fd, &index, sizeof(index), &dwBytesToRead, NULL)) {
+        index = 0; // fail to read, don't care
+    }
+    CloseHandle(fd);
+    return index;
+}
+
+void SetInitNamenodeIndex(const std::string &id, uint32_t index) {
+    std::string path = GetTmpPath();
+    path += id;
+    HANDLE fd = INVALID_HANDLE_VALUE;
+    fd = CreateFile(
+        path.c_str(),
+        GENERIC_WRITE, // write only
+        0, // do not share, is this right shared mode?
+        NULL, // default security
+        OPEN_ALWAYS, // open the file, creating it if it does not exist
+        FILE_ATTRIBUTE_NORMAL, // normal file
+        NULL);                 // no template
+    if (fd != INVALID_HANDLE_VALUE) {
+        DWORD dwBytesToWrite = (DWORD)sizeof(index);
+        DWORD dwBytesWritten = 0;
+        BOOL bErrorFlag = WriteFile(
+            fd,
+            &index,
+            dwBytesToWrite,
+            &dwBytesWritten,
+            NULL);
+        CloseHandle(fd);
+    }
+}
+
+/* KerberosName.cc */
+void KerberosName::parse(const std::string & principal) {
+    int rc;
+    // primary/instance@REALM
+    // [^/@]* = anything but / and @
+    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
+    std::tr1::cmatch res;
+    std::tr1::regex rx(pattern);
+    if (!std::tr1::regex_search(principal.c_str(), res, rx)) {
+        // Check if principal is just simply a username without the @thing
+        if (principal.find('@') != principal.npos) {
+            THROW(HdfsIOException,
+                "KerberosName: Malformed Kerberos name: %s",
+                principal.c_str());
+        }
+        else {
+            name = principal;
+            return;
+        }
+    }
+    if (res[1].length() > 0) {
+        name = res[1];
+    }
+    if (res[3].length() > 0) {
+        host = res[3];
+    }
+    if (res[4].length() > 0) {
+        realm = res[4];
+    }
+}
+
+/* UserInfo.cc */
+UserInfo UserInfo::LocalUser() {
+    UserInfo retval;
+    char username[UNLEN + 1];
+    DWORD username_len = UNLEN + 1;
+    GetUserName(username, &username_len);
+    std::string str(username);
+
+    // Assume for now that on Windows the real and effective users are the same.
+    retval.setRealUser(&str[0]);
+    retval.setEffectiveUser(&str[0]);
+    return retval;
+}
+
+}
+}
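
The Windows KerberosName::parse above uses std::tr1::regex in place of POSIX
regcomp/regexec; the tr1 names map directly onto C++11 <regex>. A
self-contained sketch of the same primary(/instance)?@REALM split, assuming a
C++11 compiler (the sample principal is illustrative):

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
        std::regex rx("([^/@]*)(/([^/@]*))?@([^/@]*)");
        std::cmatch res;
        const char *principal = "hdfs/nn1.example.com@EXAMPLE.COM";
        if (std::regex_search(principal, res, rx)) {
            std::cout << "name:  " << res[1] << "\n"    // hdfs
                      << "host:  " << res[3] << "\n"    // nn1.example.com
                      << "realm: " << res[4] << "\n";   // EXAMPLE.COM
        }
        return 0;
    }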

+ 62 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc

@@ -0,0 +1,62 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "StackPrinter.h"
+
+#include <boost/format.hpp>
+#include <DbgHelp.h>
+#pragma comment(lib, "dbghelp.lib")
+#include <sstream>
+#include <string>
+#include <vector>
+namespace hdfs {
+namespace internal {
+
+const std::string PrintStack(int skip, int maxDepth) {
+    std::ostringstream ss;
+    unsigned int i;
+    std::vector<void *> stack;
+    stack.resize(maxDepth);
+    unsigned short frames;
+    SYMBOL_INFO *symbol;
+    HANDLE process;
+    process = GetCurrentProcess();
+
+    SymInitialize(process, NULL, TRUE);
+
+    frames = CaptureStackBackTrace(skip, maxDepth, &stack[0], NULL);
+    symbol = (SYMBOL_INFO *)
+        calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1);
+    symbol->MaxNameLen = 255;
+    symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+
+    for (i = 0; i < frames; i++) {
+        SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol);
+        // We use boost here, this may not be optimized for performance.
+        // TODO: fix this when we decide not to use boost for VS 2010
+        ss << boost::format("%i: %s - 0x%0X\n")
+            % (frames - i - 1) % symbol->Name % symbol->Address;
+    }
+    free(symbol);
+    return ss.str();
+}
+
+}
+}
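
A sketch of a typical call site for the printer above, assuming the
declaration in StackPrinter.h (the skip and depth arguments shown are
illustrative):

    #include <iostream>
    #include <string>

    // Capture the current stack so it can be attached to an exception
    // message or written to a log; skip one frame to hide the helper.
    void logCurrentStack() {
        std::string trace = hdfs::internal::PrintStack(1, 64);
        std::cerr << "unexpected state, stack:\n" << trace;
    }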

+ 41 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thread.h"
+
+// Only <signal.h> is needed here; <pthread.h> and <unistd.h> are not
+// available under MSVC.
+#include <signal.h>
+
+namespace hdfs {
+namespace internal {
+
+// Signal support on Windows is limited.
+sigset_t ThreadBlockSignal() {
+    signal(SIGINT, SIG_IGN);
+    signal(SIGTERM, SIG_IGN);
+    return 0;
+}
+
+void ThreadUnBlockSignal(sigset_t sigs) {
+    signal(SIGINT, SIG_DFL);
+    signal(SIGTERM, SIG_DFL);
+}
+
+}
+}

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h

@@ -76,6 +76,16 @@
 #define access _access
 #define lseek _lseek
 
+/*
+ * Constants used for socket api.
+ */
+#define SHUT_RDWR SD_BOTH
+
+/*
+ * Account for lack of poll syscall.
+ */
+int poll(struct pollfd *fds, unsigned long nfds, int timeout);
+
 /*
  * String related.
  */
@@ -104,9 +114,34 @@
  */
 #define PATH_SEPRATOR '\\'
 
+/*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
 /*
  * Support for signals in Windows is limited.
  */
 typedef unsigned long sigset_t;
 
+/*
+ * Account for the lack of dprintf in Windows by using the
+ * write syscall to write the formatted message to a file descriptor.
+ */
+#include <stdarg.h>
+#include <stdio.h>
+#include <vector>
+inline int dprintf(int fd, const char *fmt, ...) {
+    va_list ap;
+    // Determine the required buffer size first (MSVC's vsnprintf does
+    // not report it, so use _vscprintf).
+    va_start(ap, fmt);
+    int size = _vscprintf(fmt, ap);
+    va_end(ap);
+    if (size < 0) {
+        return -1;
+    }
+    std::vector<char> buffer(size + 1);
+    va_start(ap, fmt);
+    _vsnprintf(&buffer[0], buffer.size(), fmt, ap);
+    va_end(ap);
+    // Write the message, excluding the terminating NUL.
+    return _write(fd, &buffer[0], size);
+}
+
 #endif // LIBHDFS3_WINDOWS_PLATFORM_H
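
A quick sketch of exercising the dprintf shim above with the CRT file APIs
(logNamenodeIndex and the file name are illustrative):

    #include <fcntl.h>
    #include <io.h>
    #include <sys/stat.h>

    // Hypothetical smoke test: format a message straight to a file
    // descriptor, as POSIX dprintf would.
    void logNamenodeIndex(unsigned index) {
        int fd = _open("hdfs3.log", _O_WRONLY | _O_CREAT | _O_APPEND,
                       _S_IREAD | _S_IWRITE);
        if (fd != -1) {
            dprintf(fd, "namenode index = %u\n", index);
            _close(fd);
        }
    }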

+ 204 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c

@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The MIT License (MIT)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <windows.h>
+#include <errno.h>
+#include <io.h>
+
+#include "mman.h"
+
+static int __map_mman_error(const DWORD err, const int deferr) {
+    return err;
+}
+
+static DWORD __map_mmap_prot_page(const int prot) {
+    DWORD protect = 0;
+
+    if (prot == PROT_NONE)
+        return protect;
+
+    if ((prot & PROT_EXEC) != 0) {
+        protect = ((prot & PROT_WRITE) != 0) ?
+            PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
+    }
+    else {
+        protect = ((prot & PROT_WRITE) != 0) ?
+            PAGE_READWRITE : PAGE_READONLY;
+    }
+
+    return protect;
+}
+
+static DWORD __map_mmap_prot_file(const int prot) {
+    DWORD desiredAccess = 0;
+
+    if (prot == PROT_NONE)
+        return desiredAccess;
+
+    if ((prot & PROT_READ) != 0)
+        desiredAccess |= FILE_MAP_READ;
+    if ((prot & PROT_WRITE) != 0)
+        desiredAccess |= FILE_MAP_WRITE;
+    if ((prot & PROT_EXEC) != 0)
+        desiredAccess |= FILE_MAP_EXECUTE;
+
+    return desiredAccess;
+}
+
+int posix_madvise(void *addr, size_t len, int advice) {
+    // Ignore this function in Windows.
+    // TODO: this can be implemented using PrefetchVirtualMemory.
+    return 0;
+}
+
+void* mmap(void *addr, size_t len, int prot,
+           int flags, int fildes, off_t off) {
+    HANDLE fm, h;
+
+    void * map = MAP_FAILED;
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4293)
+#endif
+
+    const DWORD dwFileOffsetLow = (sizeof(off_t) <= sizeof(DWORD)) ?
+        (DWORD)off : (DWORD)(off & 0xFFFFFFFFL);
+    const DWORD dwFileOffsetHigh = (sizeof(off_t) <= sizeof(DWORD)) ?
+        (DWORD)0 : (DWORD)((off >> 32) & 0xFFFFFFFFL);
+    const DWORD protect = __map_mmap_prot_page(prot);
+    const DWORD desiredAccess = __map_mmap_prot_file(prot);
+    const off_t maxSize = off + (off_t)len;
+    const DWORD dwMaxSizeLow = (sizeof(off_t) <= sizeof(DWORD)) ?
+        (DWORD)maxSize : (DWORD)(maxSize & 0xFFFFFFFFL);
+    const DWORD dwMaxSizeHigh = (sizeof(off_t) <= sizeof(DWORD)) ?
+        (DWORD)0 : (DWORD)((maxSize >> 32) & 0xFFFFFFFFL);
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+    errno = 0;
+
+    if (len == 0
+        /* Unsupported flag combinations */
+        || (flags & MAP_FIXED) != 0
+        /* Unsupported protection combinations */
+        || prot == PROT_EXEC) {
+        errno = EINVAL;
+        return MAP_FAILED;
+    }
+
+    h = ((flags & MAP_ANONYMOUS) == 0) ?
+        (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;
+
+    if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) {
+        errno = EBADF;
+        return MAP_FAILED;
+    }
+
+    fm =
+        CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);
+
+    if (fm == NULL) {
+        errno = __map_mman_error(GetLastError(), EPERM);
+        return MAP_FAILED;
+    }
+    map = MapViewOfFile(
+        fm, desiredAccess,
+        dwFileOffsetHigh, dwFileOffsetLow, len);
+
+    CloseHandle(fm);
+
+    if (map == NULL) {
+        errno = __map_mman_error(GetLastError(), EPERM);
+        return MAP_FAILED;
+    }
+
+    return map;
+}
+
+int munmap(void *addr, size_t len) {
+    if (UnmapViewOfFile(addr))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int _mprotect(void *addr, size_t len, int prot) {
+    DWORD newProtect = __map_mmap_prot_page(prot);
+    DWORD oldProtect = 0;
+
+    if (VirtualProtect(addr, len, newProtect, &oldProtect))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int msync(void *addr, size_t len, int flags) {
+    if (FlushViewOfFile(addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int mlock(const void *addr, size_t len) {
+    if (VirtualLock((LPVOID)addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int munlock(const void *addr, size_t len) {
+    if (VirtualUnlock((LPVOID)addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
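
A minimal round trip through the shim above, assuming "mman.h" is on the
include path (dumpFirstByte is illustrative):

    #include <fcntl.h>
    #include <io.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include "mman.h"

    // Map a file read-only, inspect the first byte, unmap, and close.
    int dumpFirstByte(const char *path) {
        int fd = _open(path, _O_RDONLY);
        if (fd == -1) return -1;
        struct _stat st;
        if (_fstat(fd, &st) != 0 || st.st_size == 0) { _close(fd); return -1; }
        void *p = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { _close(fd); return -1; }
        printf("first byte of %s: 0x%02x\n", path, ((unsigned char *)p)[0]);
        munmap(p, (size_t)st.st_size);
        _close(fd);
        return 0;
    }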

+ 61 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h

@@ -16,4 +16,64 @@
  * limitations under the License.
  */
 
-/* Dummy file for Windows build */
+/**
+ * The MIT License (MIT)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef _SYS_MMAN_H_
+#define _SYS_MMAN_H_
+
+#include <sys/types.h>
+#include <stdlib.h>
+
+#define PROT_NONE       0
+#define PROT_READ       1
+#define PROT_WRITE      2
+#define PROT_EXEC       4
+
+#define MAP_FILE        0
+#define MAP_SHARED      1
+#define MAP_PRIVATE     2
+#define MAP_TYPE        0xf
+#define MAP_FIXED       0x10
+#define MAP_ANONYMOUS   0x20
+#define MAP_ANON        MAP_ANONYMOUS
+
+#define MAP_FAILED      ((void *)-1)
+
+/* Flags for msync. */
+#define MS_ASYNC        1
+#define MS_SYNC         2
+#define MS_INVALIDATE   4
+
+/* For the posix_madvise family */
+#define POSIX_MADV_SEQUENTIAL 2
+
+void*   mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
+int     munmap(void *addr, size_t len);
+int     _mprotect(void *addr, size_t len, int prot);
+int     msync(void *addr, size_t len, int flags);
+int     mlock(const void *addr, size_t len);
+int     munlock(const void *addr, size_t len);
+
+int posix_madvise(void *addr, size_t len, int advice);
+
+#endif /*  _SYS_MMAN_H_ */

+ 21 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h

@@ -16,4 +16,24 @@
  * limitations under the License.
  */
 
-/* Dummy file for Windows build */
+#include "platform.h"
+
+inline int gettimeofday(struct timeval *tv, struct timezone* tz) {
+    SYSTEMTIME localtime;
+    GetLocalTime(&localtime);
+    // GetLocalTime only provides millisecond resolution, so scale
+    // the milliseconds up to microseconds for tv_usec.
+    tv->tv_usec = localtime.wMilliseconds * 1000;
+    time_t timeval;
+    tv->tv_sec = time(&timeval);
+    return 0;
+}
+
+inline struct tm *localtime_r(long *tv_sec, struct tm *result) {
+    // time_t is 64-bit on Windows while long is 32-bit, so widen
+    // through a local time_t instead of reinterpret_casting the pointer.
+    time_t t = *tv_sec;
+    if (localtime_s(result, &t) == 0) {
+        return result;
+    }
+    return NULL;
+}
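
A small timing sketch against the gettimeofday shim above (elapsedMs and
timeSomething are illustrative):

    #include <stdio.h>

    // Elapsed wall-clock time in milliseconds between two samples.
    long long elapsedMs(const struct timeval &a, const struct timeval &b) {
        return (long long)(b.tv_sec - a.tv_sec) * 1000
             + (b.tv_usec - a.tv_usec) / 1000;
    }

    void timeSomething() {
        struct timeval start, stop;
        gettimeofday(&start, NULL);
        // ... work under measurement ...
        gettimeofday(&stop, NULL);
        printf("took %lld ms\n", elapsedMs(start, stop));
    }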

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h

@@ -20,13 +20,18 @@
 #define _UUID_HEADER_FOR_WIN_
 
 /*
- * This file an Windows equivalent of libuuid.
+ * This file is a Windows equivalent of libuuid.
  */
 
 #include <Rpc.h>
 #include <RpcDce.h>
 #pragma comment(lib, "rpcrt4.lib")
 
-#define uuid_generate(id) UuidCreate(&(id))
+#undef uuid_t
+typedef unsigned char uuid_t[16];
+
+// It is OK to reinterpret cast, as UUID is a struct with 16 bytes.
+// TODO: write our own uuid generator to get rid of libuuid dependency.
+#define uuid_generate(id) UuidCreate(reinterpret_cast<UUID *>(id))
 
 #endif
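
A sketch of the shim in use (printNewUuid is illustrative; uuid_t is the
16-byte array defined above):

    #include <stdio.h>
    #include "uuid/uuid.h"

    // Generate a UUID through the shim and print its raw bytes in hex.
    void printNewUuid() {
        uuid_t id;
        uuid_generate(id);
        for (int i = 0; i < 16; ++i)
            printf("%02x", id[i]);
        printf("\n");
    }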

+ 15 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt

@@ -47,6 +47,17 @@ LIST(APPEND LIBHDFS3_SOURCES ${files})
 AUTO_SOURCES(files "*.h" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
 LIST(APPEND LIBHDFS3_SOURCES ${files})
 
+SET(LIBHDFS3_SOURCES
+  ${LIBHDFS3_SOURCES}
+  ${libhdfs3_OS_PLATFORM_DIR}/Thread.cc
+  ${libhdfs3_OS_PLATFORM_DIR}/StackPrinter.cc
+  ${libhdfs3_OS_PLATFORM_DIR}/Platform.cc
+)
+IF(MSVC)
+  SET(LIBHDFS3_SOURCES
+    ${LIBHDFS3_SOURCES}
+    ${libhdfs3_OS_PLATFORM_DIR}/sys/mman.c)
+ENDIF(MSVC)
 AUTO_SOURCES(libhdfs3_PROTO_FILES "proto/*.proto" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
 SET(libhdfs3_PROTO_FILES ${libhdfs3_PROTO_FILES} PARENT_SCOPE)
 
@@ -54,7 +65,10 @@ INCLUDE(GenerateProtobufs.cmake)
 INCLUDE_DIRECTORIES("${CMAKE_BINARY_DIR}")
 
 SET(LIBHDFS_SRC_DIR ${HADOOP_TOP_DIR}/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/)
-INCLUDE_DIRECTORIES(${LIBHDFS_SRC_DIR})
+IF(NOT MSVC)
+  # On Windows, this would conflict with Exception.h in native/libhdfs.
+  INCLUDE_DIRECTORIES(${LIBHDFS_SRC_DIR})
+ENDIF(NOT MSVC)
 
 SET(HEADER 
     ${LIBHDFS_SRC_DIR}/hdfs.h

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h

@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
 #define _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
 
+#include <stdint.h>
 #include <string>
 #include <vector>
 

+ 2 - 75
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc

@@ -42,79 +42,6 @@ mutex InputStreamImpl::MutLocalBlockInforCache;
 unordered_map<uint32_t, shared_ptr<LocalBlockInforCacheType>>
     InputStreamImpl::LocalBlockInforCache;
 
-unordered_set<std::string> BuildLocalAddrSet() {
-    unordered_set<std::string> set;
-    struct ifaddrs *ifAddr = NULL;
-    struct ifaddrs *pifAddr = NULL;
-    struct sockaddr *addr;
-
-    if (getifaddrs(&ifAddr)) {
-        THROW(HdfsNetworkException,
-              "InputStreamImpl: cannot get local network interface: %s",
-              GetSystemErrorInfo(errno));
-    }
-
-    try {
-        std::vector<char> host;
-        const char *pHost;
-        host.resize(INET6_ADDRSTRLEN + 1);
-
-        for (pifAddr = ifAddr; pifAddr != NULL; pifAddr = pifAddr->ifa_next) {
-            addr = pifAddr->ifa_addr;
-            memset(&host[0], 0, INET6_ADDRSTRLEN + 1);
-
-            if (addr->sa_family == AF_INET) {
-                pHost = inet_ntop(
-                    addr->sa_family,
-                    &(reinterpret_cast<struct sockaddr_in *>(addr))->sin_addr,
-                    &host[0], INET6_ADDRSTRLEN);
-            } else if (addr->sa_family == AF_INET6) {
-                pHost = inet_ntop(
-                    addr->sa_family,
-                    &(reinterpret_cast<struct sockaddr_in6 *>(addr))->sin6_addr,
-                    &host[0], INET6_ADDRSTRLEN);
-            } else {
-                continue;
-            }
-
-            if (NULL == pHost) {
-                THROW(HdfsNetworkException,
-                      "InputStreamImpl: cannot get convert network address "
-                      "to textual form: %s",
-                      GetSystemErrorInfo(errno));
-            }
-
-            set.insert(pHost);
-        }
-
-        /*
-         * add hostname.
-         */
-        long hostlen = sysconf(_SC_HOST_NAME_MAX);
-        host.resize(hostlen + 1);
-
-        if (gethostname(&host[0], host.size())) {
-            THROW(HdfsNetworkException,
-                  "InputStreamImpl: cannot get hostname: %s",
-                  GetSystemErrorInfo(errno));
-        }
-
-        set.insert(&host[0]);
-    } catch (...) {
-        if (ifAddr != NULL) {
-            freeifaddrs(ifAddr);
-        }
-
-        throw;
-    }
-
-    if (ifAddr != NULL) {
-        freeifaddrs(ifAddr);
-    }
-
-    return set;
-}
-
 InputStreamImpl::InputStreamImpl()
     : closed(true),
       localRead(true),
@@ -772,9 +699,9 @@ void InputStreamImpl::readFullyInternal(char *buf, int64_t size) {
 
     try {
         while (todo > 0) {
-            done = todo < std::numeric_limits<int32_t>::max()
+            done = todo < (std::numeric_limits<int32_t>::max)()
                        ? static_cast<int32_t>(todo)
-                       : std::numeric_limits<int32_t>::max();
+                       : (std::numeric_limits<int32_t>::max)();
             done = readInternal(buf + (size - todo), done);
             todo -= done;
         }
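
The extra parentheses in (std::numeric_limits<int32_t>::max)(), here and in
the Atoi.cc, CFileWrapper.cc, MappedFileWrapper.cc, WritableUtils.cc, and
RpcClient.h hunks below, work around <windows.h> defining function-style
min() and max() macros by default. Wrapping the name in parentheses prevents
the macro from expanding, so the member function is called instead:

    #include <windows.h>  // defines min()/max() macros unless NOMINMAX is set
    #include <limits>

    // int bad = std::numeric_limits<int>::max();  // macro expansion breaks this
    int hi = (std::numeric_limits<int>::max)();    // parentheses suppress the macro

Defining NOMINMAX before including <windows.h> is the usual alternative; the
parenthesized form has the advantage of working regardless of include order.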

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h

@@ -45,6 +45,8 @@ typedef std::pair<int64_t, std::string> LocalBlockInforCacheKey;
 typedef LruMap<LocalBlockInforCacheKey, BlockLocalPathInfo>
     LocalBlockInforCacheType;
 
+unordered_set<std::string> BuildLocalAddrSet();
+
 /**
  * A input stream used read data from hdfs.
  */

+ 0 - 68
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc

@@ -21,22 +21,12 @@
 #include "Exception.h"
 #include "ExceptionInternal.h"
 
-#include <regex.h>
 #include <string.h>
 #include <vector>
 
 namespace hdfs {
 namespace internal {
 
-static void HandleRegError(int rc, regex_t *comp) {
-    std::vector<char> buffer;
-    size_t size = regerror(rc, comp, NULL, 0);
-    buffer.resize(size + 1);
-    regerror(rc, comp, &buffer[0], buffer.size());
-    THROW(HdfsIOException,
-        "KerberosName: Failed to parse Kerberos principal.");
-}
-
 KerberosName::KerberosName() {
 }
 
@@ -44,64 +34,6 @@ KerberosName::KerberosName(const std::string &principal) {
     parse(principal);
 }
 
-void KerberosName::parse(const std::string &principal) {
-    int rc;
-    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
-    regex_t comp;
-    regmatch_t pmatch[5];
-
-    if (principal.empty()) {
-        return;
-    }
-
-    memset(&comp, 0, sizeof(regex_t));
-    rc = regcomp(&comp, pattern, REG_EXTENDED);
-
-    if (rc) {
-        HandleRegError(rc, &comp);
-    }
-
-    try {
-        memset(pmatch, 0, sizeof(pmatch));
-        rc = regexec(&comp, principal.c_str(),
-                     sizeof(pmatch) / sizeof(pmatch[1]), pmatch, 0);
-
-        if (rc && rc != REG_NOMATCH) {
-            HandleRegError(rc, &comp);
-        }
-
-        if (rc == REG_NOMATCH) {
-            if (principal.find('@') != principal.npos) {
-                THROW(HdfsIOException,
-                      "KerberosName: Malformed Kerberos name: %s",
-                      principal.c_str());
-            } else {
-                name = principal;
-            }
-        } else {
-            if (pmatch[1].rm_so != -1) {
-                name = principal.substr(pmatch[1].rm_so,
-                                        pmatch[1].rm_eo - pmatch[1].rm_so);
-            }
-
-            if (pmatch[3].rm_so != -1) {
-                host = principal.substr(pmatch[3].rm_so,
-                                        pmatch[3].rm_eo - pmatch[3].rm_so);
-            }
-
-            if (pmatch[4].rm_so != -1) {
-                realm = principal.substr(pmatch[4].rm_so,
-                                         pmatch[4].rm_eo - pmatch[4].rm_so);
-            }
-        }
-    } catch (...) {
-        regfree(&comp);
-        throw;
-    }
-
-    regfree(&comp);
-}
-
 size_t KerberosName::hash_value() const {
     size_t values[] = { StringHasher(name), StringHasher(host), StringHasher(
                             realm)

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h

@@ -18,6 +18,7 @@
 #ifndef _HDFS_LIBHDFS3_CLIENT_PERMISSION_H_
 #define _HDFS_LIBHDFS3_CLIENT_PERMISSION_H_
 
+#include <stdint.h>
 #include <string>
 
 namespace hdfs {

+ 0 - 34
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc

@@ -29,40 +29,6 @@
 namespace hdfs {
 namespace internal {
 
-UserInfo UserInfo::LocalUser() {
-    UserInfo retval;
-    uid_t uid, euid;
-    int bufsize;
-    struct passwd pwd, epwd, *result = NULL;
-    euid = geteuid();
-    uid = getuid();
-
-    if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) {
-        THROW(InvalidParameter,
-              "Invalid input: \"sysconf\" function failed to get the "
-              "configure with key \"_SC_GETPW_R_SIZE_MAX\".");
-    }
-
-    std::vector<char> buffer(bufsize);
-
-    if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) {
-        THROW(InvalidParameter,
-              "Invalid input: effective user name cannot be found with UID %u.",
-              euid);
-    }
-
-    retval.setEffectiveUser(epwd.pw_name);
-
-    if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) {
-        THROW(InvalidParameter,
-              "Invalid input: real user name cannot be found with UID %u.",
-              uid);
-    }
-
-    retval.setRealUser(pwd.pw_name);
-    return retval;
-}
-
 size_t UserInfo::hash_value() const {
     size_t values[] = { StringHasher(realUser), effectiveUser.hash_value() };
     return CombineHasher(values, sizeof(values) / sizeof(values[0]));

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc

@@ -42,8 +42,8 @@ Status StrToInt32(const char *str, int32_t *ret) {
         oss << "Invalid int32_t type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<int32_t>::max() ||
-        retval < std::numeric_limits<int32_t>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<int32_t>::max)() ||
+        retval < (std::numeric_limits<int32_t>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in int32_t type: " << str;
         return Status(EINVAL, oss.str());
@@ -63,8 +63,8 @@ Status StrToInt64(const char *str, int64_t *ret) {
         oss << "Invalid int64_t type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<int64_t>::max() ||
-        retval < std::numeric_limits<int64_t>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<int64_t>::max)() ||
+        retval < (std::numeric_limits<int64_t>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in int64_t type: " << str;
         return Status(EINVAL, oss.str());
@@ -100,8 +100,8 @@ Status StrToDouble(const char *str, double *ret) {
         oss << "Invalid double type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<double>::max() ||
-        retval < std::numeric_limits<double>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<double>::max)() ||
+        retval < (std::numeric_limits<double>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in double type: " << str;
         return Status(EINVAL, oss.str());

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h

@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
 #define _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
 
+#include <platform.h>
 #include <arpa/inet.h>
 #include <stdint.h>
 #include <string.h>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc

@@ -84,8 +84,8 @@ void CFileWrapper::seek(int64_t offset) {
     bool seek_set = true;
 
     while (todo > 0) {
-        batch = todo < std::numeric_limits<long>::max() ?
-                todo : std::numeric_limits<long>::max();
+        batch = todo < (std::numeric_limits<long>::max)() ?
+                todo : (std::numeric_limits<long>::max)();
         off_t rc = fseek(file, static_cast<long>(batch),
                          seek_set ? SEEK_SET : SEEK_CUR);
         seek_set = false;

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h

@@ -34,7 +34,6 @@
 
 #define STACK_DEPTH 64
 
-#define PATH_SEPRATOR '/'
 inline static const char *SkipPathPrefix(const char *path) {
     int i, len = strlen(path);
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc

@@ -67,7 +67,7 @@ bool MappedFileWrapper::open(int fd, bool delegate) {
     path = ss.str();
 
     if (static_cast<uint64_t>(size) >
-        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        static_cast<uint64_t>((std::numeric_limits<size_t>::max)())) {
         THROW(HdfsIOException,
               "Cannot create memory mapped file for \"%s\", file is too large.",
               path.c_str());
@@ -86,7 +86,7 @@ bool MappedFileWrapper::open(const std::string &path) {
     size = st.st_size;
 
     if (static_cast<uint64_t>(size) >
-        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        static_cast<uint64_t>((std::numeric_limits<size_t>::max)())) {
         THROW(HdfsIOException,
               "Cannot create memory mapped file for \"%s\", file is too large.",
               path.c_str());

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h

@@ -19,7 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
 #define _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
 
-#ifdef _LIBCPP_VERSION
+#if (defined _LIBCPP_VERSION || defined _WIN32)
 #include <memory>
 
 namespace hdfs {

+ 16 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h

@@ -19,7 +19,22 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
 #define _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
 
-#ifdef _LIBCPP_VERSION
+// Use boost for Windows, to avoid the xutility type-cast complaints.
+#if (defined NEED_BOOST && defined _WIN32)
+
+#include <boost/unordered_map.hpp>
+#include <boost/unordered_set.hpp>
+
+namespace hdfs {
+namespace internal {
+
+using boost::unordered_map;
+using boost::unordered_set;
+
+}
+}
+
+#elif (defined _LIBCPP_VERSION || defined _WIN32)
 
 #include <unordered_map>
 #include <unordered_set>

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc

@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "platform.h"
 #include "WritableUtils.h"
 
 #include <arpa/inet.h>
@@ -35,8 +36,8 @@ int32_t WritableUtils::ReadInt32() {
     int64_t val;
     val = ReadInt64();
 
-    if (val < std::numeric_limits<int32_t>::min()
-            || val > std::numeric_limits<int32_t>::max()) {
+    if (val < (std::numeric_limits<int32_t>::min)()
+            || val > (std::numeric_limits<int32_t>::max)()) {
         throw std::range_error("overflow");
     }
 

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h

@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
 #define _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
 
+#include <stdint.h>
 #include <string>
 
 namespace hdfs {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h

@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
 #define _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
 
+#include "platform.h"
 #include <cassert>
 #include <cstddef>
 #include <cstring>

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/network/Syscall.h

@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_
 #define _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_
 
+#include "platform.h"
 #include <fcntl.h>
 #include <netdb.h>
 #include <poll.h>
@@ -34,11 +35,16 @@ using ::freeaddrinfo;
 using ::socket;
 using ::connect;
 using ::getpeername;
-using ::fcntl;
 using ::setsockopt;
 using ::poll;
 using ::shutdown;
 using ::close;
+#ifdef _WIN32
+using ::ioctlsocket;
+using ::closesocket;
+#else
+using ::fcntl;
+#endif
 
 }
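
Routing the platform calls through the syscalls namespace keeps the call
sites identical on both platforms; only the using-declarations differ. A
sketch of a call site (waitReadable is illustrative), which compiles
unchanged on POSIX and, via the poll() shim in os/windows/platform.h, on
Windows where it resolves to WSAPoll:

    #ifdef _WIN32
    #include <winsock2.h>   // pollfd, WSAPoll
    #else
    #include <poll.h>
    #endif

    // Wait until the socket is readable or the timeout expires.
    bool waitReadable(int sock, int timeoutMs) {
        struct pollfd pfd;
        pfd.fd = sock;
        pfd.events = POLLIN;
        pfd.revents = 0;
        int rc = syscalls::poll(&pfd, 1, timeoutMs);
        return rc > 0 && (pfd.revents & POLLIN) != 0;
    }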
 

+ 4 - 20
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/network/TcpSocket.cc

@@ -280,7 +280,7 @@ void TcpSocketImpl::connect(struct addrinfo * paddr, const char * host,
 
             struct sockaddr peer;
 
-            unsigned int len = sizeof(peer);
+            socklen_t len = sizeof(peer);
 
             memset(&peer, 0, sizeof(peer));
 
@@ -309,23 +309,6 @@ void TcpSocketImpl::connect(struct addrinfo * paddr, const char * host,
     }
 }
 
-void TcpSocketImpl::setBlockMode(bool enable) {
-    int flag;
-    flag = syscalls::fcntl(sock, F_GETFL, 0);
-
-    if (-1 == flag) {
-        THROW(HdfsNetworkException, "Get socket flag failed for remote node %s: %s",
-              remoteAddr.c_str(), GetSystemErrorInfo(errno));
-    }
-
-    flag = enable ? (flag & ~O_NONBLOCK) : (flag | O_NONBLOCK);
-
-    if (-1 == syscalls::fcntl(sock, F_SETFL, flag)) {
-        THROW(HdfsNetworkException, "Set socket flag failed for remote "
-              "node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
-    }
-}
-
 bool TcpSocketImpl::poll(bool read, bool write, int timeout) {
     assert(-1 != sock);
     int rc;
@@ -375,7 +358,8 @@ void TcpSocketImpl::setLingerTimeoutInternal(int timeout) {
     l.l_onoff = timeout > 0 ? true : false;
     l.l_linger = timeout > 0 ? timeout : 0;
 
-    if (syscalls::setsockopt(sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) {
+    if (syscalls::setsockopt(sock, SOL_SOCKET, SO_LINGER,
+            reinterpret_cast<char *>(&l), sizeof(l))) {
         THROW(HdfsNetworkException, "Set socket flag failed for remote "
               "node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
     }
@@ -388,7 +372,7 @@ void TcpSocketImpl::setSendTimeout(int timeout) {
     timeo.tv_usec = (timeout % 1000) * 1000;
 
     if (syscalls::setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
-                                &timeo, sizeof(timeo))) {
+                                reinterpret_cast<char *>(&timeo), sizeof(timeo))) {
         THROW(HdfsNetworkException, "Set socket flag failed for remote "
               "node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
     }
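
The reinterpret_cast<char *> added above is what makes these setsockopt calls
compile on both platforms: POSIX declares the option value as const void *,
while Winsock declares it as const char *, and a char * converts cleanly to
either. A minimal sketch of the same idiom (setIntOption is illustrative):

    #ifdef _WIN32
    #include <winsock2.h>
    #else
    #include <sys/socket.h>
    #endif

    // Set an int-valued socket option portably.
    int setIntOption(int sock, int level, int name, int value) {
        return setsockopt(sock, level, name,
                          reinterpret_cast<char *>(&value),
                          sizeof(value));
    }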

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/rpc/RpcClient.h

@@ -123,7 +123,7 @@ public:
         static mutex mutid;
         lock_guard<mutex> lock(mutid);
         ++count;
-        count = count < std::numeric_limits<int32_t>::max() ? count : 0;
+        count = count < (std::numeric_limits<int32_t>::max)() ? count : 0;
         return count;
     }
 

+ 0 - 90
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/server/NamenodeProxy.cc

@@ -33,96 +33,6 @@
 namespace hdfs {
 namespace internal {
 
-static uint32_t GetInitNamenodeIndex(const std::string &id) {
-    std::string path = "/tmp/";
-    path += id;
-    int fd;
-    uint32_t index = 0;
-    /*
-     * try create the file
-     */
-    fd = open(path.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0666);
-
-    if (fd < 0) {
-        if (errno == EEXIST) {
-            /*
-             * the file already exist, try to open it
-             */
-            fd = open(path.c_str(), O_RDONLY);
-        } else {
-            /*
-             * failed to create, do not care why
-             */
-            return 0;
-        }
-    } else {
-        if (0 != flock(fd, LOCK_EX)) {
-            /*
-             * failed to lock
-             */
-            close(fd);
-            return index;
-        }
-
-        /*
-         * created file, initialize it with 0
-         */
-        write(fd, &index, sizeof(index));
-        flock(fd, LOCK_UN);
-        close(fd);
-        return index;
-    }
-
-    /*
-     * the file exist, read it.
-     */
-    if (fd >= 0) {
-        if (0 != flock(fd, LOCK_SH)) {
-            /*
-             * failed to lock
-             */
-            close(fd);
-            return index;
-        }
-
-        if (sizeof(index) != read(fd, &index, sizeof(index))) {
-            /*
-             * failed to read, do not care why
-             */
-            index = 0;
-        }
-
-        flock(fd, LOCK_UN);
-        close(fd);
-    }
-
-    return index;
-}
-
-static void SetInitNamenodeIndex(const std::string &id, uint32_t index) {
-    std::string path = "/tmp/";
-    path += id;
-    int fd;
-    /*
-     * try open the file for write
-     */
-    fd = open(path.c_str(), O_WRONLY);
-
-    if (fd > 0) {
-        if (0 != flock(fd, LOCK_EX)) {
-            /*
-             * failed to lock
-             */
-            close(fd);
-            return;
-        }
-
-        write(fd, &index, sizeof(index));
-        flock(fd, LOCK_UN);
-        close(fd);
-    }
-}
-
 NamenodeProxy::NamenodeProxy(const std::vector<NamenodeInfo> &namenodeInfos,
                              const std::string &tokenService,
                              const SessionConfig &c, const RpcAuth &a) :

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/server/NamenodeProxy.h

@@ -30,6 +30,10 @@
 namespace hdfs {
 namespace internal {
 
+void SetInitNamenodeIndex(const std::string & id, uint32_t index);
+uint32_t GetInitNamenodeIndex(const std::string &id);
+
+
 class NamenodeProxy : public Namenode {
 public:
     NamenodeProxy(const std::vector<NamenodeInfo> &namenodeInfos,