
HDFS-10785: libhdfs++: Implement the rest of the tools. Contributed by Anatoli Schein

James Clampffer 8 years ago
parent
commit
869317be0a
55 changed files with 2556 additions and 179 deletions
  1. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
  2. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c
  3. + 0 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt
  4. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt
  5. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc
  6. + 0 - 2
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt
  7. + 0 - 4
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc
  8. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt
  9. + 0 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc
  10. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt
  11. + 0 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc
  12. + 48 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h
  13. + 15 - 19
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h
  14. + 11 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
  15. + 24 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
  16. + 21 - 25
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/statinfo.h
  17. + 31 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
  18. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
  19. + 55 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc
  20. + 61 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc
  21. + 74 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc
  22. + 42 - 4
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
  23. + 18 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
  24. + 52 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
  25. + 89 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
  26. + 9 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
  27. + 10 - 1
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
  28. + 4 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
  29. + 1 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
  30. + 1 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
  31. + 58 - 7
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
  32. + 90 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
  33. + 3 - 36
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc
  34. + 2 - 9
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc
  35. + 11 - 18
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc
  36. + 2 - 9
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc
  37. + 92 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc
  38. + 97 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc
  39. + 99 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc
  40. + 91 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc
  41. + 93 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc
  42. + 90 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc
  43. + 180 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc
  44. + 9 - 15
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc
  45. + 92 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
  46. + 134 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc
  47. + 102 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc
  48. + 94 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc
  49. + 92 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc
  50. + 94 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc
  51. + 176 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc
  52. + 91 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc
  53. + 128 - 0
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc
  54. + 52 - 7
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/tools_common.cc
  55. + 3 - 5
      hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/tools_common.h

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt

@@ -17,4 +17,4 @@
 #
 
 add_subdirectory(c)
-add_subdirectory(cpp)
+add_subdirectory(cc)

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c

@@ -30,6 +30,8 @@
 #include "common/util_c.h"
 
 #define SCHEME "hdfs"
+#define BUF_SIZE 1048576 //1 MB
+static char input_buffer[BUF_SIZE];
 
 int main(int argc, char** argv) {
 
@@ -84,8 +86,6 @@ int main(int argc, char** argv) {
     return 1;
   }
 
-  char input_buffer[4096];
-
   ssize_t read_bytes_count = 0;
   ssize_t last_read_bytes = 0;
 

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/CMakeLists.txt → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt


+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/cat/CMakeLists.txt → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt

@@ -23,13 +23,13 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
 include_directories( ${LIBHDFSPP_DIR}/include )
 link_directories( ${LIBHDFSPP_DIR}/lib )
 
-add_executable(cat_cpp cat.cpp)
-target_link_libraries(cat_cpp hdfspp)
+add_executable(cat_cc cat.cc)
+target_link_libraries(cat_cc hdfspp)
 
 # Several examples in different languages need to produce executables with
 # same names. To allow executables with same names we keep their CMake
 # names different, but specify their executable names as follows:
-set_target_properties( cat_cpp
+set_target_properties( cat_cc
     PROPERTIES
     OUTPUT_NAME "cat"
 )

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/cat/cat.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc

@@ -36,7 +36,8 @@
 
 #include <google/protobuf/stubs/common.h>
 
-#define BUF_SIZE 4096
+const std::size_t BUF_SIZE = 1048576; //1 MB
+static char input_buffer[BUF_SIZE];
 
 int main(int argc, char *argv[]) {
   if (argc != 2) {
@@ -81,7 +82,6 @@ int main(int argc, char *argv[]) {
   //wrapping file_raw into a unique pointer to guarantee deletion
   std::unique_ptr<hdfs::FileHandle> file(file_raw);
 
-  char input_buffer[BUF_SIZE];
   ssize_t total_bytes_read = 0;
   size_t last_bytes_read = 0;
 

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/connect_cancel/CMakeLists.txt → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt

@@ -25,5 +25,3 @@ link_directories( ${LIBHDFSPP_DIR}/lib )
 
 add_executable(connect_cancel connect_cancel.cc)
 target_link_libraries(connect_cancel hdfspp)
-
-

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/connect_cancel/connect_cancel.cc → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc

@@ -152,7 +152,3 @@ int main(int arg_token_count, const char **args) {
 
   return 0;
 }
-
-
-
-

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/find/CMakeLists.txt → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt

@@ -23,13 +23,13 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
 include_directories( ${LIBHDFSPP_DIR}/include )
 link_directories( ${LIBHDFSPP_DIR}/lib )
 
-add_executable(find_cpp find.cpp)
-target_link_libraries(find_cpp hdfspp)
+add_executable(find_cc find.cc)
+target_link_libraries(find_cc hdfspp)
 
 # Several examples in different languages need to produce executables with
 # same names. To allow executables with same names we keep their CMake
 # names different, but specify their executable names as follows:
-set_target_properties( find_cpp
+set_target_properties( find_cc
     PROPERTIES
     OUTPUT_NAME "find"
 )

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/find/find.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc


+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/gendirs/CMakeLists.txt → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt

@@ -23,13 +23,13 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
 include_directories( ${LIBHDFSPP_DIR}/include )
 link_directories( ${LIBHDFSPP_DIR}/lib )
 
-add_executable(gendirs_cpp gendirs.cpp)
-target_link_libraries(gendirs_cpp hdfspp)
+add_executable(gendirs_cc gendirs.cc)
+target_link_libraries(gendirs_cc hdfspp)
 
 # Several examples in different languages need to produce executables with
 # same names. To allow executables with same names we keep their CMake
 # names different, but specify their executable names as follows:
-set_target_properties( gendirs_cpp
+set_target_properties( gendirs_cc
     PROPERTIES
     OUTPUT_NAME "gendirs"
 )

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cpp/gendirs/gendirs.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc


+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HDFSPP_CONTENT_SUMMARY_H_
+#define HDFSPP_CONTENT_SUMMARY_H_
+
+#include <string>
+
+namespace hdfs {
+
+/**
+ * Content summary is assumed to be unchanging for the duration of the operation
+ */
+struct ContentSummary {
+  uint64_t length;
+  uint64_t filecount;
+  uint64_t directorycount;
+  uint64_t quota;
+  uint64_t spaceconsumed;
+  uint64_t spacequota;
+  std::string path;
+
+  ContentSummary();
+
+  //Converts ContentSummary object to std::string (hdfs_count format)
+  std::string str(bool include_quota) const;
+
+  //Converts ContentSummary object to std::string (hdfs_du format)
+  std::string str_du() const;
+};
+
+}
+
+#endif

+ 15 - 19
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h

@@ -18,6 +18,8 @@
 #ifndef HDFSPP_FSINFO_H_
 #define HDFSPP_FSINFO_H_
 
+#include <string>
+
 namespace hdfs {
 
 /**
@@ -26,25 +28,19 @@ namespace hdfs {
  */
 struct FsInfo {
 
-  unsigned long int     capacity;
-  unsigned long int     used;
-  unsigned long int     remaining;
-  unsigned long int     under_replicated;
-  unsigned long int     corrupt_blocks;
-  unsigned long int     missing_blocks;
-  unsigned long int     missing_repl_one_blocks;
-  unsigned long int     blocks_in_future;
-
-  FsInfo()
-      : capacity(0),
-        used(0),
-        remaining(0),
-        under_replicated(0),
-        corrupt_blocks(0),
-        missing_blocks(0),
-        missing_repl_one_blocks(0),
-        blocks_in_future(0) {
-  }
+  uint64_t capacity;
+  uint64_t used;
+  uint64_t remaining;
+  uint64_t under_replicated;
+  uint64_t corrupt_blocks;
+  uint64_t missing_blocks;
+  uint64_t missing_repl_one_blocks;
+  uint64_t blocks_in_future;
+
+  FsInfo();
+
+  //Converts FsInfo object to std::string (hdfs_df format)
+  std::string str(const std::string fs_name) const;
 };
 
 }

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h

@@ -329,6 +329,17 @@ int hdfsCreateSnapshot(hdfsFS fs, const char* path, const char* name);
 LIBHDFS_EXTERNAL
 int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name);
 
+/**
+ * Renames the directory snapshot specified by path from old_name to new_name
+ *
+ *  @param fs         The filesystem (required)
+ *  @param path       Path to the snapshotted directory (must be non-blank)
+ *  @param old_name   Current name of the snapshot (must be non-blank)
+ *  @param new_name   New name of the snapshot (must be non-blank)
+ *  @return           0 on success, corresponding errno on failure
+ **/
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name);
+
 /**
  * Allows snapshots to be made on the specified directory
  *
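
For orientation, here is roughly how the new binding is driven from client code. A minimal sketch: the builder calls are the standard libhdfs-style C connection API, while the host, port, directory, and snapshot names are placeholders, not values from this commit.

    #include "hdfspp/hdfs_ext.h"
    #include <stdio.h>

    /* Sketch: rename the snapshot "daily" of /data to "daily-old".
       All literals are placeholders. */
    int rename_snapshot_example(void) {
      struct hdfsBuilder *builder = hdfsNewBuilder();
      hdfsBuilderSetNameNode(builder, "localhost");
      hdfsBuilderSetNameNodePort(builder, 8020);
      hdfsFS fs = hdfsBuilderConnect(builder);
      if (!fs)
        return 1;
      /* Returns 0 on success; -1 with errno set on failure, matching
         the convention of the other snapshot functions here. */
      int rc = hdfsRenameSnapshot(fs, "/data", "daily", "daily-old");
      if (rc != 0)
        perror("hdfsRenameSnapshot");
      hdfsDisconnect(fs);
      return rc;
    }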

+ 24 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h

@@ -24,6 +24,7 @@
 #include "hdfspp/block_location.h"
 #include "hdfspp/statinfo.h"
 #include "hdfspp/fsinfo.h"
+#include "hdfspp/content_summary.h"
 
 #include <functional>
 #include <memory>
@@ -275,6 +276,14 @@ class FileSystem {
                   const std::function<void(const Status &, const StatInfo &)> &handler) = 0;
   virtual Status GetFileInfo(const std::string &path, StatInfo & stat_info) = 0;
 
+  /**
+   * Returns the number of directories, files and bytes under the given path
+   **/
+  virtual void
+  GetContentSummary(const std::string &path,
+                  const std::function<void(const Status &, const ContentSummary &)> &handler) = 0;
+  virtual Status GetContentSummary(const std::string &path, ContentSummary & stat_info) = 0;
+
   /**
    * Retrieves the file system information as a whole, such as the total raw size of all files in the filesystem
    * and the raw capacity of the filesystem
@@ -305,7 +314,7 @@ class FileSystem {
 
   /**
    * Returns the locations of all known blocks for the indicated file (or part of it), or an error
-   * if the information clould not be found
+   * if the information could not be found
    */
   virtual void GetBlockLocations(const std::string & path, uint64_t offset, uint64_t length,
     const std::function<void(const Status &, std::shared_ptr<FileBlockLocation> locations)> ) = 0;
@@ -417,6 +426,18 @@ class FileSystem {
   virtual Status DeleteSnapshot(const std::string &path,
       const std::string &name) = 0;
 
+  /**
+   * Renames the directory snapshot specified by path from old_name to new_name
+   *
+   *  @param path       Path to the snapshotted directory (must be non-blank)
+   *  @param old_name   Current name of the snapshot (must be non-blank)
+   *  @param new_name   New name of the snapshot (must be non-blank)
+   **/
+  virtual void RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name, const std::function<void(const Status &)> &handler) = 0;
+  virtual Status RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name) = 0;
+
   /**
    * Allows snapshots to be made on the specified directory
    *
@@ -454,6 +475,8 @@ class FileSystem {
   virtual void SetFsEventCallback(fs_event_callback callback) = 0;
 
   virtual Options get_options() = 0;
+
+  virtual std::string get_cluster_name() = 0;
 };
 }
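
A usage sketch for the two synchronous overloads added above, assuming fs is an already-connected hdfs::FileSystem pointer (connection setup as in the cat/gendirs examples) and that "/data", "s0", and "s1" are placeholder names:

    #include "hdfspp/hdfspp.h"
    #include <iostream>

    void summary_and_rename(hdfs::FileSystem *fs) {
      hdfs::ContentSummary summary;
      hdfs::Status status = fs->GetContentSummary("/data", summary);
      if (!status.ok()) {
        std::cerr << "GetContentSummary failed: " << status.ToString() << std::endl;
        return;
      }
      // str(true) includes the quota columns (hdfs_count -q style).
      std::cout << summary.str(true) << std::endl;

      // Synchronous snapshot rename; blocks until the NameNode responds
      // (see the promise-based wrappers in filesystem_sync.cc below).
      status = fs->RenameSnapshot("/data", "s0", "s1");
      if (!status.ok())
        std::cerr << "RenameSnapshot failed: " << status.ToString() << std::endl;
    }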
 

+ 21 - 25
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/statinfo.h

@@ -18,6 +18,8 @@
 #ifndef HDFSPP_STATINFO_H_
 #define HDFSPP_STATINFO_H_
 
+#include <string>
+
 namespace hdfs {
 
 /**
@@ -31,31 +33,25 @@ struct StatInfo {
     IS_SYMLINK = 3
   };
 
-  int                   file_type;
-  ::std::string         path;
-  ::std::string         full_path;
-  unsigned long int     length;
-  unsigned long int     permissions;  //Octal number as in POSIX permissions; e.g. 0777
-  ::std::string         owner;
-  ::std::string         group;
-  unsigned long int     modification_time;
-  unsigned long int     access_time;
-  ::std::string         symlink;
-  unsigned int          block_replication;
-  unsigned long int     blocksize;
-  unsigned long int     fileid;
-  unsigned long int     children_num;
-  StatInfo()
-      : file_type(0),
-        length(0),
-        permissions(0),
-        modification_time(0),
-        access_time(0),
-        block_replication(0),
-        blocksize(0),
-        fileid(0),
-        children_num(0) {
-  }
+  int          file_type;
+  std::string  path;
+  std::string  full_path;
+  uint64_t     length;
+  uint64_t     permissions;  //Octal number as in POSIX permissions; e.g. 0777
+  std::string  owner;
+  std::string  group;
+  uint64_t     modification_time;
+  uint64_t     access_time;
+  std::string  symlink;
+  uint32_t     block_replication;
+  uint64_t     blocksize;
+  uint64_t     fileid;
+  uint64_t     children_num;
+
+  StatInfo();
+
+  //Converts StatInfo object to std::string (hdfs_ls format)
+  std::string str() const;
 };
 
 }

+ 31 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc

@@ -1094,6 +1094,37 @@ int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
   }
 }
 
+
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name) {
+  try {
+    errno = 0;
+    if (!CheckSystem(fs)) {
+      return -1;
+    }
+    const optional<std::string> abs_path = getAbsolutePath(fs, path);
+    if(!abs_path) {
+      return -1;
+    }
+    if (!old_name) {
+      return Error(Status::InvalidArgument("hdfsRenameSnapshot: argument 'old_name' cannot be NULL"));
+    }
+    if (!new_name) {
+      return Error(Status::InvalidArgument("hdfsRenameSnapshot: argument 'new_name' cannot be NULL"));
+    }
+    Status stat;
+    stat = fs->get_impl()->RenameSnapshot(*abs_path, old_name, new_name);
+    if (!stat.ok()) {
+      return Error(stat);
+    }
+    return 0;
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
+
+}
+
 int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
   try {
     errno = 0;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt

@@ -19,6 +19,6 @@ if(NEED_LINK_DL)
    set(LIB_DL dl)
 endif()
 
-add_library(common_obj OBJECT status.cc sasl_digest_md5.cc hdfs_ioservice.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc)
+add_library(common_obj OBJECT status.cc sasl_digest_md5.cc hdfs_ioservice.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc statinfo.cc fsinfo.cc content_summary.cc)
 add_library(common $<TARGET_OBJECTS:common_obj> $<TARGET_OBJECTS:uriparser2_obj>)
 target_link_libraries(common ${LIB_DL})

+ 55 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/content_summary.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+ContentSummary::ContentSummary()
+: length(0),
+  filecount(0),
+  directorycount(0),
+  quota(0),
+  spaceconsumed(0),
+  spacequota(0) {
+}
+
+std::string ContentSummary::str(bool include_quota) const {
+  std::stringstream ss;
+  if(include_quota){
+    ss  << this->quota << " "
+        << spacequota << " "
+        << spaceconsumed << " ";
+  }
+  ss  << directorycount << " "
+      << filecount << " "
+      << length << " "
+      << path;
+  return ss.str();
+}
+
+std::string ContentSummary::str_du() const {
+  std::stringstream ss;
+  ss  << std::left << std::setw(10) << length
+      << path;
+  return ss.str();
+}
+
+}
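
Given the implementation above, the three formats render as follows for illustrative values (quota=100, spacequota=1024, spaceconsumed=512, directorycount=3, filecount=7, length=4096, path="/dir"; none of these numbers come from the commit):

    str(true)   ->  "100 1024 512 3 7 4096 /dir"   (hdfs_count -q layout)
    str(false)  ->  "3 7 4096 /dir"                (hdfs_count layout)
    str_du()    ->  "4096      /dir"               (length left-justified, width 10)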

+ 61 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc

@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/fsinfo.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+FsInfo::FsInfo()
+  : capacity(0),
+    used(0),
+    remaining(0),
+    under_replicated(0),
+    corrupt_blocks(0),
+    missing_blocks(0),
+    missing_repl_one_blocks(0),
+    blocks_in_future(0) {
+}
+
+std::string FsInfo::str(const std::string fs_name) const {
+  std::string fs_name_label = "Filesystem";
+  std::string size = std::to_string(capacity);
+  std::string size_label = "Size";
+  std::string used = std::to_string(this->used);
+  std::string used_label = "Used";
+  std::string available = std::to_string(remaining);
+  std::string available_label = "Available";
+  std::string use_percentage = std::to_string(this->used * 100 / capacity) + "%";
+  std::string use_percentage_label = "Use%";
+  std::stringstream ss;
+  ss  << std::left << std::setw(std::max(fs_name.size(), fs_name_label.size())) << fs_name_label
+      << std::right << std::setw(std::max(size.size(), size_label.size()) + 2) << size_label
+      << std::right << std::setw(std::max(used.size(), used_label.size()) + 2) << used_label
+      << std::right << std::setw(std::max(available.size(), available_label.size()) + 2) << available_label
+      << std::right << std::setw(std::max(use_percentage.size(), use_percentage_label.size()) + 2) << use_percentage_label
+      << std::endl
+      << std::left << std::setw(std::max(fs_name.size(), fs_name_label.size())) << fs_name
+      << std::right << std::setw(std::max(size.size(), size_label.size()) + 2) << size
+      << std::right << std::setw(std::max(used.size(), used_label.size()) + 2) << used
+      << std::right << std::setw(std::max(available.size(), available_label.size()) + 2) << available
+      << std::right << std::setw(std::max(use_percentage.size(), use_percentage_label.size()) + 2) << use_percentage;
+  return ss.str();
+}
+
+}
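
With illustrative numbers (capacity=1000000, used=300000, remaining=700000, fs_name="hdfs://localhost:8020"; not values from the commit), the column logic above, which pads each field to max(value width, label width) plus two, yields df-style output such as:

    Filesystem                Size    Used  Available  Use%
    hdfs://localhost:8020  1000000  300000     700000   30%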

+ 74 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc

@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/statinfo.h>
+#include <sys/stat.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+StatInfo::StatInfo()
+  : file_type(0),
+    length(0),
+    permissions(0),
+    modification_time(0),
+    access_time(0),
+    block_replication(0),
+    blocksize(0),
+    fileid(0),
+    children_num(0) {
+}
+
+std::string StatInfo::str() const {
+  char perms[11];
+  perms[0] = file_type == StatInfo::IS_DIR ? 'd' : '-';
+  perms[1] = permissions & S_IRUSR? 'r' : '-';
+  perms[2] = permissions & S_IWUSR? 'w': '-';
+  perms[3] = permissions & S_IXUSR? 'x': '-';
+  perms[4] = permissions & S_IRGRP? 'r' : '-';
+  perms[5] = permissions & S_IWGRP? 'w': '-';
+  perms[6] = permissions & S_IXGRP? 'x': '-';
+  perms[7] = permissions & S_IROTH? 'r' : '-';
+  perms[8] = permissions & S_IWOTH? 'w': '-';
+  perms[9] = permissions & S_IXOTH? 'x': '-';
+  perms[10] = 0;
+
+  //Convert to seconds from milliseconds
+  const int time_field_length = 17;
+  time_t rawtime = modification_time/1000;
+  struct tm * timeinfo;
+  char buffer[time_field_length];
+  timeinfo = localtime(&rawtime);
+
+  strftime(buffer,time_field_length,"%Y-%m-%d %H:%M",timeinfo);
+  buffer[time_field_length-1] = 0;  //null terminator
+  std::string time(buffer);
+
+  std::stringstream ss;
+  ss  << std::left << std::setw(12) << perms
+      << std::left << std::setw(3) << (!block_replication ? "-" : std::to_string(block_replication))
+      << std::left << std::setw(15) << owner
+      << std::left << std::setw(15) << group
+      << std::right << std::setw(5) << length
+      << std::right << std::setw(time_field_length + 2) << time//modification_time
+      << "  " << full_path;
+  return ss.str();
+}
+
+}
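
For a concrete sense of the layout, illustrative values (a directory with replication 0, owner "hdfs", group "supergroup", length 0, full_path "/dir1/"; not taken from the commit) render as an ls-style row:

    drwxr-xr-x  -  hdfs           supergroup         0   2016-08-25 15:27  /dir1/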

+ 42 - 4
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc

@@ -479,6 +479,15 @@ void FileSystemImpl::GetFileInfo(
   nn_.GetFileInfo(path, handler);
 }
 
+void FileSystemImpl::GetContentSummary(
+    const std::string &path,
+    const std::function<void(const Status &, const ContentSummary &)> &handler) {
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::GetContentSummary("
+                                 << FMT_THIS_ADDR << ", path="
+                                 << path << ") called");
+
+  nn_.GetContentSummary(path, handler);
+}
 
 void FileSystemImpl::GetFsStats(
     const std::function<void(const Status &, const FsInfo &)> &handler) {
@@ -515,13 +524,16 @@ void FileSystemImpl::GetListing(
   LOG_DEBUG(kFileSystem, << "FileSystemImpl::GetListing("
                                  << FMT_THIS_ADDR << ", path="
                                  << path << ") called");
-
+  std::string path_fixed = path;
+  if(path.back() != '/'){
+    path_fixed += "/";
+  }
  // Capture the state and push it into the shim
-  auto callback = [this, path, handler](const Status &stat, const std::vector<StatInfo> & stat_infos, bool has_more) {
-    GetListingShim(stat, stat_infos, has_more, path, handler);
+  auto callback = [this, path_fixed, handler](const Status &stat, const std::vector<StatInfo> & stat_infos, bool has_more) {
+    GetListingShim(stat, stat_infos, has_more, path_fixed, handler);
   };
 
-  nn_.GetListing(path, callback);
+  nn_.GetListing(path_fixed, callback);
 }
 
 
@@ -772,6 +784,28 @@ void FileSystemImpl::DeleteSnapshot(const std::string &path,
   nn_.DeleteSnapshot(path, name, handler);
 }
 
+void FileSystemImpl::RenameSnapshot(const std::string &path,
+    const std::string &old_name, const std::string &new_name,
+    const std::function<void(const Status &)> &handler) {
+  LOG_DEBUG(kFileSystem,
+    << "FileSystemImpl::RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+    ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'path' cannot be empty"));
+    return;
+  }
+  if (old_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'old_name' cannot be empty"));
+    return;
+  }
+  if (new_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'new_name' cannot be empty"));
+    return;
+  }
+
+  nn_.RenameSnapshot(path, old_name, new_name, handler);
+}
 
 void FileSystemImpl::AllowSnapshot(const std::string &path,
     const std::function<void(const Status &)> &handler) {
@@ -817,4 +851,8 @@ Options FileSystemImpl::get_options() {
   return options_;
 }
 
+std::string FileSystemImpl::get_cluster_name() {
+  return cluster_name_;
+}
+
 }

+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h

@@ -87,6 +87,10 @@ public:
 
   Status GetFileInfo(const std::string &path, StatInfo & stat_info) override;
 
+  void GetContentSummary(const std::string &path,
+        const std::function<void(const Status &, const ContentSummary &)> &handler) override;
+  Status GetContentSummary(const std::string &path, ContentSummary & stat_info) override;
+
   /**
    * Retrieves the file system information such as the total raw size of all files in the filesystem
    * and the raw capacity of the filesystem
@@ -159,6 +163,18 @@ public:
         const std::function<void(const Status &)> &handler) override;
   Status DeleteSnapshot(const std::string &path, const std::string &name) override;
 
+  /**
+   * Renames the directory snapshot specified by path from old_name to new_name
+   *
+   *  @param path       Path to the snapshotted directory (must be non-blank)
+   *  @param old_name   Current name of the snapshot (must be non-blank)
+   *  @param new_name   New name of the snapshot (must be non-blank)
+   **/
+  void RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name, const std::function<void(const Status &)> &handler) override;
+  Status RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name) override;
+
   /**
    * Allows snapshots to be made on the specified directory
    *
@@ -189,6 +205,8 @@ public:
 
   Options get_options();
 
+  std::string get_cluster_name();
+
 private:
   /**
    *  The IoService must be the first member variable to ensure that it gets

+ 52 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc

@@ -252,6 +252,35 @@ Status FileSystemImpl::GetFileInfo(const std::string &path,
   return stat;
 }
 
+Status FileSystemImpl::GetContentSummary(const std::string &path,
+                                         ContentSummary & content_summary) {
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::[sync]GetContentSummary("
+                                 << FMT_THIS_ADDR << ", path="
+                                 << path << ") called");
+
+  auto callstate = std::make_shared<std::promise<std::tuple<Status, ContentSummary>>>();
+  std::future<std::tuple<Status, ContentSummary>> future(callstate->get_future());
+
+  /* wrap async FileSystem::GetContentSummary with promise to make it a blocking call */
+  auto h = [callstate](const Status &s, const ContentSummary &si) {
+    callstate->set_value(std::make_tuple(s, si));
+  };
+
+  GetContentSummary(path, h);
+
+  /* block until promise is set */
+  auto returnstate = future.get();
+  Status stat = std::get<0>(returnstate);
+  ContentSummary cs = std::get<1>(returnstate);
+
+  if (!stat.ok()) {
+    return stat;
+  }
+
+  content_summary = cs;
+  return stat;
+}
+
 Status FileSystemImpl::GetFsStats(FsInfo & fs_info) {
   LOG_DEBUG(kFileSystem,
       << "FileSystemImpl::[sync]GetFsStats(" << FMT_THIS_ADDR << ") called");
@@ -510,6 +539,29 @@ Status FileSystemImpl::DeleteSnapshot(const std::string &path,
   return stat;
 }
 
+Status FileSystemImpl::RenameSnapshot(const std::string &path,
+    const std::string &old_name, const std::string &new_name) {
+  LOG_DEBUG(kFileSystem,
+    << "FileSystemImpl::[sync]RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+    ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  auto callstate = std::make_shared<std::promise<Status>>();
+  std::future<Status> future(callstate->get_future());
+
+  /* wrap async FileSystem::RenameSnapshot with promise to make it a blocking call */
+  auto h = [callstate](const Status &s) {
+    callstate->set_value(s);
+  };
+
+  RenameSnapshot(path, old_name, new_name, h);
+
+  /* block until promise is set */
+  auto returnstate = future.get();
+  Status stat = returnstate;
+
+  return stat;
+}
+
 Status FileSystemImpl::AllowSnapshot(const std::string &path) {
   LOG_DEBUG(kFileSystem,
       << "FileSystemImpl::[sync]AllowSnapshot(" << FMT_THIS_ADDR << ", path=" << path << ") called");
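
Every [sync] wrapper in this file repeats the same shape: allocate a shared promise, hand the async overload a handler that fulfills it, then block on the future. Stripped of the HDFS specifics, a minimal generic sketch of the technique (names are illustrative only):

    #include <functional>
    #include <future>
    #include <memory>

    // Blocks until async_op invokes its completion callback, then
    // returns whatever the callback was given.
    template <typename Result>
    Result run_sync(const std::function<void(std::function<void(Result)>)> &async_op) {
      // Shared so the handler can outlive this stack frame if the
      // operation completes on another thread.
      auto callstate = std::make_shared<std::promise<Result>>();
      std::future<Result> future(callstate->get_future());

      async_op([callstate](Result result) {
        callstate->set_value(std::move(result));
      });

      return future.get();  // block until the handler fires
    }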

+ 89 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc

@@ -247,6 +247,45 @@ void NameNodeOperations::GetFileInfo(const std::string & path,
   });
 }
 
+void NameNodeOperations::GetContentSummary(const std::string & path,
+  std::function<void(const Status &, const ContentSummary &)> handler)
+{
+  using ::hadoop::hdfs::GetContentSummaryRequestProto;
+  using ::hadoop::hdfs::GetContentSummaryResponseProto;
+
+  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetContentSummary("
+                           << FMT_THIS_ADDR << ", path=" << path << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("GetContentSummary: argument 'path' cannot be empty"), ContentSummary());
+    return;
+  }
+
+  GetContentSummaryRequestProto req;
+  req.set_path(path);
+
+  auto resp = std::make_shared<GetContentSummaryResponseProto>();
+
+  namenode_.GetContentSummary(&req, resp, [resp, handler, path](const Status &stat) {
+    if (stat.ok()) {
+      // For non-existent files, the server will respond with an OK message but
+      //   no summary in the protobuf.
+      if(resp -> has_summary()){
+          struct ContentSummary content_summary;
+          content_summary.path = path;
+          ContentSummaryProtoToContentSummary(content_summary, resp->summary());
+          handler(stat, content_summary);
+        } else {
+          std::string errormsg = "No such file or directory: " + path;
+          Status statNew = Status::PathNotFound(errormsg.c_str());
+          handler(statNew, ContentSummary());
+        }
+    } else {
+      handler(stat, ContentSummary());
+    }
+  });
+}
+
 void NameNodeOperations::GetFsStats(
     std::function<void(const Status &, const FsInfo &)> handler) {
   using ::hadoop::hdfs::GetFsStatusRequestProto;
@@ -300,7 +339,10 @@ void NameNodeOperations::GetListing(
         for (::hadoop::hdfs::HdfsFileStatusProto const& fs : resp->dirlist().partiallisting()) {
           StatInfo si;
           si.path = fs.path();
-          si.full_path = path + fs.path() + "/";
+          si.full_path = path + fs.path();
+          if(si.full_path.back() != '/'){
+            si.full_path += "/";
+          }
           HdfsFileStatusProtoToStatInfo(si, fs);
           stat_infos.push_back(si);
         }
@@ -554,6 +596,41 @@ void NameNodeOperations::DeleteSnapshot(const std::string & path,
       });
 }
 
+void NameNodeOperations::RenameSnapshot(const std::string & path, const std::string & old_name,
+    const std::string & new_name, std::function<void(const Status &)> handler) {
+  using ::hadoop::hdfs::RenameSnapshotRequestProto;
+  using ::hadoop::hdfs::RenameSnapshotResponseProto;
+
+  LOG_TRACE(kFileSystem,
+      << "NameNodeOperations::RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+      ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'path' cannot be empty"));
+    return;
+  }
+  if (old_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'old_name' cannot be empty"));
+    return;
+  }
+  if (new_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'new_name' cannot be empty"));
+    return;
+  }
+
+  RenameSnapshotRequestProto req;
+  req.set_snapshotroot(path);
+  req.set_snapshotoldname(old_name);
+  req.set_snapshotnewname(new_name);
+
+  auto resp = std::make_shared<RenameSnapshotResponseProto>();
+
+  namenode_.RenameSnapshot(&req, resp,
+      [handler](const Status &stat) {
+        handler(stat);
+      });
+}
+
 void NameNodeOperations::AllowSnapshot(const std::string & path, std::function<void(const Status &)> handler) {
   using ::hadoop::hdfs::AllowSnapshotRequestProto;
   using ::hadoop::hdfs::AllowSnapshotResponseProto;
@@ -621,6 +698,17 @@ void NameNodeOperations::HdfsFileStatusProtoToStatInfo(
   stat_info.children_num = fs.childrennum();
 }
 
+void NameNodeOperations::ContentSummaryProtoToContentSummary(
+    hdfs::ContentSummary & content_summary,
+    const ::hadoop::hdfs::ContentSummaryProto & csp) {
+  content_summary.length = csp.length();
+  content_summary.filecount = csp.filecount();
+  content_summary.directorycount = csp.directorycount();
+  content_summary.quota = csp.quota();
+  content_summary.spaceconsumed = csp.spaceconsumed();
+  content_summary.spacequota = csp.spacequota();
+}
+
 void NameNodeOperations::GetFsStatsResponseProtoToFsInfo(
     hdfs::FsInfo & fs_info,
     const std::shared_ptr<::hadoop::hdfs::GetFsStatsResponseProto> & fs) {

+ 9 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h

@@ -21,6 +21,7 @@
 #include "rpc/rpc_engine.h"
 #include "hdfspp/statinfo.h"
 #include "hdfspp/fsinfo.h"
+#include "hdfspp/content_summary.h"
 #include "common/namenode_info.h"
 #include "ClientNamenodeProtocol.pb.h"
 #include "ClientNamenodeProtocol.hrpc.inl"
@@ -67,7 +68,10 @@ public:
     std::function<void(const Status &)> handler);
 
   void GetFileInfo(const std::string & path,
-      std::function<void(const Status &, const StatInfo &)> handler);
+    std::function<void(const Status &, const StatInfo &)> handler);
+
+  void GetContentSummary(const std::string & path,
+    std::function<void(const Status &, const ContentSummary &)> handler);
 
   void GetFsStats(std::function<void(const Status &, const FsInfo &)> handler);
 
@@ -97,6 +101,9 @@ public:
   void DeleteSnapshot(const std::string & path, const std::string & name,
       std::function<void(const Status &)> handler);
 
+  void RenameSnapshot(const std::string & path, const std::string & old_name, const std::string & new_name,
+      std::function<void(const Status &)> handler);
+
   void AllowSnapshot(const std::string & path,
       std::function<void(const Status &)> handler);
 
@@ -107,6 +114,7 @@ public:
 
 private:
   static void HdfsFileStatusProtoToStatInfo(hdfs::StatInfo & si, const ::hadoop::hdfs::HdfsFileStatusProto & fs);
+  static void ContentSummaryProtoToContentSummary(hdfs::ContentSummary & content_summary, const ::hadoop::hdfs::ContentSummaryProto & csp);
   static void DirectoryListingProtoToStatInfo(std::shared_ptr<std::vector<StatInfo>> stat_infos, const ::hadoop::hdfs::DirectoryListingProto & dl);
   static void GetFsStatsResponseProtoToFsInfo(hdfs::FsInfo & fs_info, const std::shared_ptr<::hadoop::hdfs::GetFsStatsResponseProto> & fs);
 

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc

@@ -121,6 +121,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, nullptr, "Bad"));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, nullptr, "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, nullptr));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
 
@@ -136,6 +138,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
 
@@ -147,6 +151,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
 
@@ -167,8 +173,11 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_STREQ("Good", file_infos[0].mName);
   hdfsFreeFileInfo(file_infos, 1);
 
+  //Verify snapshot renamed
+  EXPECT_EQ(0, hdfsRenameSnapshot(fs, dirName.c_str(), "Good", "Best"));
+
   //Verify snapshot deleted
-  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, dirName.c_str(), "Good"));
+  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, dirName.c_str(), "Best"));
   EXPECT_EQ(nullptr, file_infos = hdfsListDirectory(fs, snapDir.c_str(), &size));
   EXPECT_EQ(0, size);
   hdfsFreeFileInfo(file_infos, 0);

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c

@@ -504,6 +504,10 @@ int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
   return libhdfspp_hdfsDeleteSnapshot(fs->libhdfsppRep, path, name);
 }
 
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name) {
+  return libhdfspp_hdfsRenameSnapshot(fs->libhdfsppRep, path, old_name, new_name);
+}
+
 int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
   return libhdfspp_hdfsAllowSnapshot(fs->libhdfsppRep, path);
 }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h

@@ -97,5 +97,6 @@
 #undef hdfsFind
 #undef hdfsCreateSnapshot
 #undef hdfsDeleteSnapshot
+#undef hdfsRenameSnapshot
 #undef hdfsAllowSnapshot
 #undef hdfsDisallowSnapshot

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h

@@ -97,5 +97,6 @@
 #define hdfsFind libhdfspp_hdfsFind
 #define hdfsCreateSnapshot libhdfspp_hdfsCreateSnapshot
 #define hdfsDeleteSnapshot libhdfspp_hdfsDeleteSnapshot
+#define hdfsRenameSnapshot libhdfspp_hdfsRenameSnapshot
 #define hdfsAllowSnapshot libhdfspp_hdfsAllowSnapshot
 #define hdfsDisallowSnapshot libhdfspp_hdfsDisallowSnapshot

+ 58 - 7
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt

@@ -23,20 +23,71 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
 include_directories( ${LIBHDFSPP_DIR}/include )
 link_directories( ${LIBHDFSPP_DIR}/lib )
 
-add_library(tools_common_obj OBJECT tools_common.cpp)
+add_library(tools_common_obj OBJECT tools_common.cc)
 add_library(tools_common $<TARGET_OBJECTS:tools_common_obj>)
 
-add_executable(hdfs_cat hdfs_cat.cpp)
+add_executable(hdfs_cat hdfs_cat.cc)
 target_link_libraries(hdfs_cat tools_common hdfspp)
 
-add_executable(hdfs_chgrp hdfs_chgrp.cpp)
+add_executable(hdfs_chgrp hdfs_chgrp.cc)
 target_link_libraries(hdfs_chgrp tools_common hdfspp)
 
-add_executable(hdfs_chown hdfs_chown.cpp)
+add_executable(hdfs_chown hdfs_chown.cc)
 target_link_libraries(hdfs_chown tools_common hdfspp)
 
-add_executable(hdfs_chmod hdfs_chmod.cpp)
+add_executable(hdfs_chmod hdfs_chmod.cc)
 target_link_libraries(hdfs_chmod tools_common hdfspp)
 
-add_executable(hdfs_find hdfs_find.cpp)
-target_link_libraries(hdfs_find tools_common hdfspp)
+add_executable(hdfs_find hdfs_find.cc)
+target_link_libraries(hdfs_find tools_common hdfspp)
+
+add_executable(hdfs_mkdir hdfs_mkdir.cc)
+target_link_libraries(hdfs_mkdir tools_common hdfspp)
+
+add_executable(hdfs_rm hdfs_rm.cc)
+target_link_libraries(hdfs_rm tools_common hdfspp)
+
+add_executable(hdfs_ls hdfs_ls.cc)
+target_link_libraries(hdfs_ls tools_common hdfspp)
+
+add_executable(hdfs_stat hdfs_stat.cc)
+target_link_libraries(hdfs_stat tools_common hdfspp)
+
+add_executable(hdfs_count hdfs_count.cc)
+target_link_libraries(hdfs_count tools_common hdfspp)
+
+add_executable(hdfs_df hdfs_df.cc)
+target_link_libraries(hdfs_df tools_common hdfspp)
+
+add_executable(hdfs_du hdfs_du.cc)
+target_link_libraries(hdfs_du tools_common hdfspp)
+
+add_executable(hdfs_get hdfs_get.cc)
+target_link_libraries(hdfs_get tools_common hdfspp)
+
+add_executable(hdfs_copyToLocal hdfs_copyToLocal.cc)
+target_link_libraries(hdfs_copyToLocal tools_common hdfspp)
+
+add_executable(hdfs_moveToLocal hdfs_moveToLocal.cc)
+target_link_libraries(hdfs_moveToLocal tools_common hdfspp)
+
+add_executable(hdfs_setrep hdfs_setrep.cc)
+target_link_libraries(hdfs_setrep tools_common hdfspp)
+
+add_executable(hdfs_allowSnapshot hdfs_allowSnapshot.cc)
+target_link_libraries(hdfs_allowSnapshot tools_common hdfspp)
+
+add_executable(hdfs_disallowSnapshot hdfs_disallowSnapshot.cc)
+target_link_libraries(hdfs_disallowSnapshot tools_common hdfspp)
+
+add_executable(hdfs_createSnapshot hdfs_createSnapshot.cc)
+target_link_libraries(hdfs_createSnapshot tools_common hdfspp)
+
+add_executable(hdfs_renameSnapshot hdfs_renameSnapshot.cc)
+target_link_libraries(hdfs_renameSnapshot tools_common hdfspp)
+
+add_executable(hdfs_deleteSnapshot hdfs_deleteSnapshot.cc)
+target_link_libraries(hdfs_deleteSnapshot tools_common hdfspp)
+
+add_executable(hdfs_tail hdfs_tail.cc)
+target_link_libraries(hdfs_tail tools_common hdfspp)

+ 90 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc

@@ -0,0 +1,90 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_allowSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Allowing snapshots of a directory at PATH to be created."
+      << std::endl << "If the operation completes successfully, the directory becomes snapshottable."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_allowSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_allowSnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->AllowSnapshot(uri->get_path());
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 3 - 36
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc

@@ -29,7 +29,7 @@ void usage(){
       << std::endl << "  -h  display this help and exit"
       << std::endl
       << std::endl << "Examples:"
-      << std::endl << "hdfs_cat hdfs://localhost.localdomain:9433/dir/file"
+      << std::endl << "hdfs_cat hdfs://localhost.localdomain:8020/dir/file"
       << std::endl << "hdfs_cat /dir/file"
       << std::endl;
 }
@@ -52,7 +52,6 @@ int main(int argc, char *argv[]) {
     case 'h':
       usage();
       exit(EXIT_SUCCESS);
-      break;
     case '?':
       if (isprint(optopt))
         std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
@@ -74,45 +73,13 @@ int main(int argc, char *argv[]) {
     exit(EXIT_FAILURE);
   }
 
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
   if (!fs) {
     std::cerr << "Could not connect the file system. " << std::endl;
     exit(EXIT_FAILURE);
   }
 
-  hdfs::FileHandle *file_raw = nullptr;
-  hdfs::Status status = fs->Open(uri->get_path(), &file_raw);
-  if (!status.ok()) {
-    std::cerr << "Could not open file " << uri->get_path() << ". " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-  //wrapping file_raw into a unique pointer to guarantee deletion
-  std::unique_ptr<hdfs::FileHandle> file(file_raw);
-
-  char input_buffer[BUF_SIZE];
-  ssize_t total_bytes_read = 0;
-  size_t last_bytes_read = 0;
-
-  do{
-    //Reading file chunks
-    status = file->Read(input_buffer, sizeof(input_buffer), &last_bytes_read);
-    if(status.ok()) {
-      //Writing file chunks to stdout
-      fwrite(input_buffer, last_bytes_read, 1, stdout);
-      total_bytes_read += last_bytes_read;
-    } else {
-      if(status.is_invalid_offset()){
-        //Reached the end of the file
-        break;
-      } else {
-        std::cerr << "Error reading the file: " << status.ToString() << std::endl;
-        exit(EXIT_FAILURE);
-      }
-    }
-  } while (last_bytes_read > 0);
+  readFile(fs, uri->get_path(), 0, stdout, false);
 
   // Clean up static data and prevent valgrind memory leaks
   google::protobuf::ShutdownProtobufLibrary();
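
The read loop deleted here moves into a shared readFile helper in tools_common.cc (whose diff is not shown in this excerpt). Judging only from call sites such as readFile(fs, uri->get_path(), 0, stdout, false), a plausible declaration is sketched below; the parameter names and the meaning of the final flag are inferred, not taken from the commit:

    // Hypothetical declaration, reconstructed from the call sites:
    void readFile(std::shared_ptr<hdfs::FileSystem> fs, std::string path,
                  off_t offset,           // where to start reading
                  std::FILE *dst_file,    // e.g. stdout, or a local file
                  bool delete_after_read  /* inferred: used by moveToLocal */);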

+ 2 - 9
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc

@@ -33,7 +33,7 @@ void usage(){
       << std::endl << "  -h  display this help and exit"
       << std::endl
       << std::endl << "Examples:"
-      << std::endl << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:9433/dir/file"
+      << std::endl << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:8020/dir/file"
       << std::endl << "hdfs_chgrp new_group /dir/file"
       << std::endl;
 }
@@ -84,7 +84,6 @@ int main(int argc, char *argv[]) {
     case 'h':
       usage();
       exit(EXIT_SUCCESS);
-      break;
     case '?':
       if (isprint(optopt))
         std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
@@ -108,13 +107,7 @@ int main(int argc, char *argv[]) {
     exit(EXIT_FAILURE);
   }
 
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
   if (!fs) {
     std::cerr << "Could not connect the file system. " << std::endl;
     exit(EXIT_FAILURE);
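
The deleted HDFS-9539 block explains the new boolean argument to doConnect: recursive tools (chgrp, chmod, chown, setrep) previously raised rpc_timeout to the maximum inline, while non-recursive tools such as hdfs_cat pass false. A plausible reconstruction of the helper follows; the real implementation is in tools_common.cc (diff not shown), and connectWithOptions is a hypothetical stand-in for the shared connection code:

    #include <limits>
    #include <memory>

    // Inferred sketch, not the commit's exact code.
    std::shared_ptr<hdfs::FileSystem> doConnect(hdfs::URI &uri, bool max_timeout) {
      //TODO: HDFS-9539 Currently options can be returned empty
      hdfs::Options options = *hdfs::getOptions();
      if (max_timeout) {
        // Allow deeply recursive async call chains to finish.
        options.rpc_timeout = std::numeric_limits<int>::max();
      }
      return connectWithOptions(uri, options);  // hypothetical helper
    }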

+ 11 - 18
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc

@@ -34,7 +34,7 @@ void usage(){
       << std::endl << "  -h  display this help and exit"
       << std::endl
       << std::endl << "Examples:"
-      << std::endl << "hdfs_chmod -R 755 hdfs://localhost.localdomain:9433/dir/file"
+      << std::endl << "hdfs_chmod -R 755 hdfs://localhost.localdomain:8020/dir/file"
       << std::endl << "hdfs_chmod 777 /dir/file"
       << std::endl;
 }
@@ -82,7 +82,6 @@ int main(int argc, char *argv[]) {
     case 'h':
       usage();
       exit(EXIT_SUCCESS);
-      break;
     case '?':
       if (isprint(optopt))
         std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
@@ -104,13 +103,7 @@ int main(int argc, char *argv[]) {
     exit(EXIT_FAILURE);
   }
 
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
   if (!fs) {
     std::cerr << "Could not connect the file system. " << std::endl;
     exit(EXIT_FAILURE);
@@ -130,22 +123,22 @@ int main(int argc, char *argv[]) {
   }
   else {
     //Allocating shared state, which includes:
-    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
+    //permissions to be set, handler to be called, request counter, and a boolean to keep track if find is done
     std::shared_ptr<SetPermissionState> state = std::make_shared<SetPermissionState>(perm, handler, 0, false);
 
    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and request counter is 0.
     // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
     auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
 
-      //For each result returned by Find we call async SetOwner with the handler below.
-      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
-      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
+      //For each result returned by Find we call async SetPermission with the handler below.
+      //SetPermission DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetPermission.
+      auto handlerSetPermission = [state](const hdfs::Status &status_set_permission) {
         std::lock_guard<std::mutex> guard(state->lock);
 
         //Decrement the counter once since we are done with this async call
-        if (!status_set_owner.ok() && state->status.ok()){
+        if (!status_set_permission.ok() && state->status.ok()){
           //We make sure we set state->status only on the first error.
-          state->status = status_set_owner;
+          state->status = status_set_permission;
         }
         state->request_counter--;
         if(state->request_counter == 0 && state->find_is_done){
@@ -154,13 +147,13 @@ int main(int argc, char *argv[]) {
       };
       if(!stat_infos.empty() && state->status.ok()) {
         for (hdfs::StatInfo const& s : stat_infos) {
-          //Launch an asynchronous call to SetOwner for every returned result
+          //Launch an asynchronous call to SetPermission for every returned result
           state->request_counter++;
-          fs->SetPermission(s.full_path, state->permissions, handlerSetOwner);
+          fs->SetPermission(s.full_path, state->permissions, handlerSetPermission);
         }
       }
 
-      //Lock this section because handlerSetOwner might be accessing the same
+      //Lock this section because handlerSetPermission might be accessing the same
       //shared variables simultaneously
       std::lock_guard<std::mutex> guard(state->lock);
       if (!status_find.ok() && state->status.ok()){

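
The same shared-state bookkeeping recurs in hdfs_chown, hdfs_du and hdfs_setrep below: Find delivers result batches serially, each batch fans out one async call per entry, and the final handler must fire exactly once, after Find has finished and every in-flight call has completed. Distilled to its core, as a sketch of the idea rather than code from this patch:

#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include "hdfspp/hdfspp.h"

struct BatchState {
  std::function<void(const hdfs::Status &)> on_done; //fired exactly once
  uint64_t in_flight = 0;    //incremented for every async call issued
  bool find_is_done = false; //set when has_more_results == false
  hdfs::Status status;       //first error wins
  std::mutex lock;           //per-entry callbacks may run concurrently
};

//Called from each per-entry completion handler (SetPermission, SetOwner, ...)
void finishOne(std::shared_ptr<BatchState> state, const hdfs::Status &s) {
  std::lock_guard<std::mutex> guard(state->lock);
  if (!s.ok() && state->status.ok())
    state->status = s; //record only the first error
  if (--state->in_flight == 0 && state->find_is_done)
    state->on_done(state->status);
}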
+ 2 - 9
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc

@@ -36,7 +36,7 @@ void usage(){
       << std::endl << "OWNER and GROUP may be numeric as well as symbolic."
       << std::endl
       << std::endl << "Examples:"
-      << std::endl << "hdfs_chown -R new_owner:new_group hdfs://localhost.localdomain:9433/dir/file"
+      << std::endl << "hdfs_chown -R new_owner:new_group hdfs://localhost.localdomain:8020/dir/file"
       << std::endl << "hdfs_chown new_owner /dir/file"
       << std::endl;
 }
@@ -87,7 +87,6 @@ int main(int argc, char *argv[]) {
     case 'h':
       usage();
       exit(EXIT_SUCCESS);
-      break;
     case '?':
       if (isprint(optopt))
         std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
@@ -118,13 +117,7 @@ int main(int argc, char *argv[]) {
     exit(EXIT_FAILURE);
   }
 
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
   if (!fs) {
     std::cerr << "Could not connect the file system. " << std::endl;
     exit(EXIT_FAILURE);

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc

@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_copyToLocal [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_copyToLocal hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_copyToLocal /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc > 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  //Guard against missing positional arguments before touching argv
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, false);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 97 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc

@@ -0,0 +1,97 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_count [OPTION] FILE"
+      << std::endl
+      << std::endl << "Count the number of directories, files and bytes under the paths that match the specified FILE pattern."
+      << std::endl << "The output columns with -count are: DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME"
+      << std::endl
+      << std::endl << "  -q    output additional columns before the rest: QUOTA, SPACE_QUOTA, SPACE_CONSUMED"
+      << std::endl << "  -h    display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_count hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_count -q /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool quota = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "qh")) != -1) {
+    switch (input)
+    {
+    case 'q':
+      quota = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::ContentSummary content_summary;
+  hdfs::Status status = fs->GetContentSummary(uri->get_path(), content_summary);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << content_summary.str(quota) << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
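
hdfs_count uses the blocking GetContentSummary overload, while hdfs_du below uses the asynchronous one, whose handler receives a (Status, ContentSummary) pair. A sketch of how the blocking form can be derived from the async form, assuming only that handler signature (filesystem_sync.cc wraps the async calls in this general style, though its exact body is not shown here):

#include <future>
#include <memory>
#include <string>
#include "hdfspp/hdfspp.h"

hdfs::Status GetContentSummarySync(std::shared_ptr<hdfs::FileSystem> fs,
                                   const std::string &path,
                                   hdfs::ContentSummary &out) {
  auto promise = std::make_shared<std::promise<hdfs::Status>>();
  std::future<hdfs::Status> future = promise->get_future();
  //Capturing 'out' by reference is safe: we block below until the handler runs
  fs->GetContentSummary(path, [promise, &out](const hdfs::Status &s,
                                              const hdfs::ContentSummary &cs) {
    out = cs; //copy the summary out before unblocking the caller
    promise->set_value(s);
  });
  return future.get(); //block until the namenode replies
}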

+ 99 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc

@@ -0,0 +1,99 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_createSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Create a snapshot of a snapshottable directory."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -n NAME   The snapshot name. When it is omitted, a default name is generated"
+      << std::endl << "             using a timestamp with the format:"
+      << std::endl << "             \"'s'yyyyMMdd-HHmmss.SSS\", e.g. s20130412-151029.033"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_createSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_createSnapshot -n MySnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+  std::string name;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "hn:")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'n':
+      name = optarg;
+      break;
+    case '?':
+      if (optopt == 'n')
+        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
+      else if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->CreateSnapshot(uri->get_path(), name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 91 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc

@@ -0,0 +1,91 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_deleteSnapshot [OPTION] PATH NAME"
+      << std::endl
+      << std::endl << "Delete a snapshot NAME from a snapshottable directory."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_deleteSnapshot hdfs://localhost.localdomain:8020/dir mySnapshot"
+      << std::endl << "hdfs_deleteSnapshot /dir1/dir2 mySnapshot"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  //Guard against a missing PATH or NAME before touching argv
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string uri_path = argv[optind];
+  std::string name = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->DeleteSnapshot(uri->get_path(), name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 93 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc

@@ -0,0 +1,93 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_df [OPTION] PATH"
+      << std::endl
+      << std::endl << "Displays size, used space, and available space of"
+      << std::endl << "the entire filesystem where PATH is located."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_df hdfs://localhost.localdomain:8020/"
+      << std::endl << "hdfs_df /"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::FsInfo fs_info;
+
+  hdfs::Status status = fs->GetFsStats(fs_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << fs_info.str("hdfs://" + fs->get_cluster_name()) << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 90 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc

@@ -0,0 +1,90 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_disallowSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Disallow snapshots of the directory at PATH from being created."
+      << std::endl << "All snapshots of the directory must be deleted before disallowing snapshots."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_disallowSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_disallowSnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->DisallowSnapshot(uri->get_path());
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 180 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc

@@ -0,0 +1,180 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_du [OPTION] PATH"
+      << std::endl
+      << std::endl << "Displays the sizes of files and directories contained in the given PATH,"
+      << std::endl << "or the length of PATH itself when it is a file."
+      << std::endl
+      << std::endl << "  -R        operate on files and directories recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_du hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_du -R /dir1/dir2"
+      << std::endl;
+}
+
+struct GetContentSummaryState {
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time GetContentSummary async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  GetContentSummaryState(const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::GetContentSummary with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  //Allocating shared state, which includes:
+  //handler to be called, request counter, and a boolean to keep track if find is done
+  std::shared_ptr<GetContentSummaryState> state = std::make_shared<GetContentSummaryState>(handler, 0, false);
+
+  // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and request counter is 0.
+  // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+  auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+    //For each result returned by Find we call async GetContentSummary with the handler below.
+    //GetContentSummary DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerGetContentSummary.
+    auto handlerGetContentSummary = [state](const hdfs::Status &status_get_summary, const hdfs::ContentSummary &si) {
+      std::lock_guard<std::mutex> guard(state->lock);
+      //Print the summary only if this call succeeded
+      if (status_get_summary.ok()) {
+        std::cout << si.str_du() << std::endl;
+      } else if (state->status.ok()) {
+        //We make sure we set state->status only on the first error.
+        state->status = status_get_summary;
+      }
+      //Decrement the counter once since we are done with this async call
+      state->request_counter--;
+      if(state->request_counter == 0 && state->find_is_done){
+        state->handler(state->status); //exit
+      }
+    };
+    if(!stat_infos.empty() && state->status.ok()) {
+      for (hdfs::StatInfo const& s : stat_infos) {
+        //Launch an asynchronous call to GetContentSummary for every returned result
+        state->request_counter++;
+        fs->GetContentSummary(s.full_path, handlerGetContentSummary);
+      }
+    }
+
+    //Lock this section because handlerGetContentSummary might be accessing the same
+    //shared variables simultaneously
+    std::lock_guard<std::mutex> guard(state->lock);
+    if (!status_find.ok() && state->status.ok()){
+      //We make sure we set state->status only on the first error.
+      state->status = status_find;
+    }
+    if(!has_more_results){
+      state->find_is_done = true;
+      if(state->request_counter == 0){
+        state->handler(state->status); //exit
+      }
+      return false;
+    }
+    return true;
+  };
+
+  if(!recursive){
+    //Asynchronous call to Find
+    fs->GetListing(uri->get_path(), handlerFind);
+  } else {
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+  }
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 9 - 15
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc

@@ -44,7 +44,7 @@ void usage(){
       << std::endl << "  -h            display this help and exit"
       << std::endl
       << std::endl << "Examples:"
-      << std::endl << "hdfs_find hdfs://localhost.localdomain:9433/dir?/tree* -n some?file*name"
+      << std::endl << "hdfs_find hdfs://localhost.localdomain:8020/dir?/tree* -n some?file*name"
       << std::endl << "hdfs_find / -n file_name -m 3"
       << std::endl;
 }
@@ -70,7 +70,6 @@ int main(int argc, char *argv[]) {
     case 'h':
       usage();
       exit(EXIT_SUCCESS);
-      break;
     case 'n':
       name = optarg;
       break;
@@ -99,20 +98,14 @@ int main(int argc, char *argv[]) {
     exit(EXIT_FAILURE);
   }
 
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
   if (!fs) {
     std::cerr << "Could not connect the file system. " << std::endl;
     exit(EXIT_FAILURE);
   }
 
-  std::promise<void> promise;
-  std::future<void> future(promise.get_future());
+  std::shared_ptr<std::promise<void>> promise = std::make_shared<std::promise<void>>();
+  std::future<void> future(promise->get_future());
   hdfs::Status status = hdfs::Status::OK();
 
   /**
@@ -120,14 +113,15 @@ int main(int argc, char *argv[]) {
     * when we have the entire listing to stop.
     *
     * Find guarantees that the handler will only be called once at a time,
-    * so we do not need any locking here
+    * so we do not need any locking here. It also guarantees that the handler will be
+    * only called once with has_more_results set to false.
     */
-  auto handler = [&promise, &status]
+  auto handler = [promise, &status]
                   (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
     //Print result chunks as they arrive
     if(!si.empty()) {
       for (hdfs::StatInfo const& s : si) {
-        std::cout << s.full_path << std::endl;
+        std::cout << s.str() << std::endl;
       }
     }
     if(!s.ok() && status.ok()){
@@ -135,7 +129,7 @@ int main(int argc, char *argv[]) {
       status = s;
     }
     if (!has_more_results) {
-      promise.set_value();  //set promise
+      promise->set_value();  //set promise
       return false;         //request stop sending results
     }
     return true;  //request more results
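
Note the change above from a stack-allocated std::promise captured by reference to a std::shared_ptr<std::promise> captured by value. The handler is copied into the library's async machinery, so a reference capture would dangle if any copy of the handler outlived this stack frame; the shared_ptr keeps the promise alive for as long as any copy of the handler exists. The idiom in isolation:

#include <future>
#include <memory>

//Generic sketch: turn a callback-style async op into a blocking call
template <typename AsyncOp>
void blockOn(AsyncOp op) {
  auto promise = std::make_shared<std::promise<void>>();
  std::future<void> future = promise->get_future();
  //The lambda (and its shared_ptr member) may be stored and copied by the
  //async machinery; the promise lives until the last copy is destroyed
  op([promise]() { promise->set_value(); });
  future.get(); //block until the callback fires
}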

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc

@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_get [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_get hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_get /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc > 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  //Guard against missing positional arguments before touching argv
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, false);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 134 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc

@@ -0,0 +1,134 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_ls [OPTION] FILE"
+      << std::endl
+      << std::endl << "List information about the FILEs."
+      << std::endl
+      << std::endl << "  -R        list subdirectories recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_ls hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_ls -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<std::promise<void>> promise = std::make_shared<std::promise<void>>();
+  std::future<void> future(promise->get_future());
+  hdfs::Status status = hdfs::Status::OK();
+
+  /**
+    * Keep requesting more until we get the entire listing. Set the promise
+    * when we have the entire listing to stop.
+    *
+    * Find and GetListing guarantee that the handler will only be called once at a time,
+    * so we do not need any locking here. They also guarantee that the handler will be
+    * only called once with has_more_results set to false.
+    */
+  auto handler = [promise, &status]
+                  (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
+    //Print result chunks as they arrive
+    if(!si.empty()) {
+      for (hdfs::StatInfo const& s : si) {
+        std::cout << s.str() << std::endl;
+      }
+    }
+    if(!s.ok() && status.ok()){
+      //We make sure we set 'status' only on the first error.
+      status = s;
+    }
+    if (!has_more_results) {
+      promise->set_value();  //set promise
+      return false;         //request stop sending results
+    }
+    return true;  //request more results
+  };
+
+  if(!recursive){
+    //Asynchronous call to GetListing
+    fs->GetListing(uri->get_path(), handler);
+  } else {
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handler);
+  }
+
+  //block until promise is set
+  future.get();
+  if(!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 102 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc

@@ -0,0 +1,102 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_mkdir [OPTION] DIRECTORY"
+      << std::endl
+      << std::endl << "Create the DIRECTORY(ies), if they do not already exist."
+      << std::endl
+      << std::endl << "  -p        make parent directories as needed"
+      << std::endl << "  -m  MODE  set file mode (octal permissions) for the new DIRECTORY(ies)"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_mkdir hdfs://localhost.localdomain:8020/dir1/dir2"
+      << std::endl << "hdfs_mkdir -p /extant_dir/non_extant_dir/non_extant_dir/new_dir"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool create_parents = false;
+  uint16_t permissions = hdfs::FileSystem::GetDefaultPermissionMask();
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "pm:h")) != -1) {
+    switch (input)
+    {
+    case 'p':
+      create_parents = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'm':
+      //Get octal permissions for the new DIRECTORY(ies)
+      permissions = strtol(optarg, NULL, 8);
+      break;
+    case '?':
+      if (optopt == 'm')
+        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
+      else if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->Mkdirs(uri->get_path(), permissions, create_parents);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
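
The -m argument is parsed with strtol in base 8, so chmod-style digits map directly onto permission bits. A standalone check of what that parse yields (a hypothetical driver, not part of the patch):

#include <cstdio>
#include <cstdlib>

int main() {
  const char *mode = "755"; //as passed to -m
  long permissions = std::strtol(mode, nullptr, 8);
  //prints: 755 (octal) == 493 (decimal) == 0755
  std::printf("%s (octal) == %ld (decimal) == %#o\n",
              mode, permissions, (unsigned int) permissions);
  return 0;
}

Base 8 is right here because the input is a mode; contrast hdfs_setrep below, where the replica count is an ordinary decimal number.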

+ 94 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc

@@ -0,0 +1,94 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_moveToLocal [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Move SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl << "Moving is done by copying SRC_FILE to DST_FILE, and then"
+      << std::endl << "deleting SRC_FILE if the copy succeeded."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_moveToLocal hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_moveToLocal /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc > 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  //Guard against missing positional arguments before touching argv
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, true);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc

@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_renameSnapshot [OPTION] PATH OLD_NAME NEW_NAME"
+      << std::endl
+      << std::endl << "Rename a snapshot from OLD_NAME to NEW_NAME."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_renameSnapshot hdfs://localhost.localdomain:8020/dir oldDir newDir"
+      << std::endl << "hdfs_renameSnapshot /dir1/dir2 oldSnap newSnap"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  //Guard against missing PATH, OLD_NAME or NEW_NAME before touching argv
+  if (optind + 2 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string uri_path = argv[optind];
+  std::string old_name = argv[optind+1];
+  std::string new_name = argv[optind+2];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->RenameSnapshot(uri->get_path(), old_name, new_name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 94 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc

@@ -0,0 +1,94 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_rm [OPTION] FILE"
+      << std::endl
+      << std::endl << "Remove (unlink) the FILE(s) or directory(ies)."
+      << std::endl
+      << std::endl << "  -R        remove directories and their contents recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_rm -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->Delete(uri->get_path(), recursive);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 176 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc

@@ -0,0 +1,176 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_setrep [OPTION] NUM_REPLICAS PATH"
+      << std::endl
+      << std::endl << "Changes the replication factor of a file at PATH. If PATH is a directory then the command"
+      << std::endl << "recursively changes the replication factor of all files under the directory tree rooted at PATH."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_setrep 5 hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_setrep 3 /dir1/dir2"
+      << std::endl;
+}
+
+struct SetReplicationState {
+  const uint16_t replication;
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time SetReplication async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  SetReplicationState(const uint16_t replication_, const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : replication(replication_),
+        handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
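+//Completion protocol: the final handler fires only once Find has delivered its last
+//batch (find_is_done) and every outstanding SetReplication callback has finished
+//(request_counter back to 0); both fields are read and written under `lock`.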
+
+int main(int argc, char *argv[]) {
+  //We need at least the NUM_REPLICAS and PATH arguments
+  if (argc < 3) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
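+  //Both NUM_REPLICAS and PATH operands must remain after option processing
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }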
+  std::string repl = argv[optind];
+  std::string uri_path = argv[optind + 1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::SetReplication with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  uint16_t replication = std::stoi(repl, nullptr, 10); //NUM_REPLICAS is a decimal value
+  //Allocating shared state, which includes:
+  //replication to be set, handler to be called, request counter, and a boolean to keep track if find is done
+  std::shared_ptr<SetReplicationState> state = std::make_shared<SetReplicationState>(replication, handler, 0, false);
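+  //Each handler below captures this shared_ptr, keeping the state alive until the
+  //last outstanding callback has run.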
+
+  // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and request counter is 0.
+  // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+  auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+    //For each result returned by Find we call async SetReplication with the handler below.
+    //SetReplication DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetReplication.
+    auto handlerSetReplication = [state](const hdfs::Status &status_set_replication) {
+      std::lock_guard<std::mutex> guard(state->lock);
+
+      if (!status_set_replication.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_set_replication;
+      }
+      //Decrement the counter once since we are done with this async call
+      state->request_counter--;
+      if(state->request_counter == 0 && state->find_is_done){
+        state->handler(state->status); //exit
+      }
+    };
+    if(!stat_infos.empty() && state->status.ok()) {
+      for (hdfs::StatInfo const& s : stat_infos) {
+        //Launch an asynchronous call to SetReplication for every returned file
+        if(s.file_type == hdfs::StatInfo::IS_FILE){
+          state->request_counter++;
+          fs->SetReplication(s.full_path, state->replication, handlerSetReplication);
+        }
+      }
+    }
+
+    //Lock this section because handlerSetReplication might be accessing the same
+    //shared variables simultaneously
+    std::lock_guard<std::mutex> guard(state->lock);
+    if (!status_find.ok() && state->status.ok()){
+      //We make sure we set state->status only on the first error.
+      state->status = status_find;
+    }
+    if(!has_more_results){
+      state->find_is_done = true;
+      if(state->request_counter == 0){
+        state->handler(state->status); //exit
+      }
+      return false;
+    }
+    return true;
+  };
+
+  //Asynchronous call to Find
+  fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
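+  //Find runs handlerFind (and, through it, handlerSetReplication) on the FileSystem's
+  //worker threads while the main thread blocks on the future below.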
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 91 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc

@@ -0,0 +1,91 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_rm [OPTION] FILE"
+      << std::endl
+      << std::endl << "Display FILE status."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_rm -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
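+  //Make sure a FILE operand remains before indexing argv
+  if (optind >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }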
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::StatInfo stat_info;
+  hdfs::Status status = fs->GetFileInfo(uri->get_path(), stat_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << stat_info.str() << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 128 - 0
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc

@@ -0,0 +1,128 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_tail [OPTION] FILE"
+      << std::endl
+      << std::endl << "Displays last kilobyte of the file to stdout."
+      << std::endl
+      << std::endl << "  -f  output appended data as the file grows, as in Unix"
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_tail hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_tail /dir/file"
+      << std::endl;
+}
+
+#define TAIL_SIZE 1024
+#define REFRESH_RATE 1 //seconds
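+//The 1 KB window mirrors what the Java "hdfs dfs -tail" command prints; REFRESH_RATE
+//is how long -f sleeps between file size checks.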
+
+int main(int argc, char *argv[]) {
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool follow = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "hf")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'f':
+      follow = true;
+      break;
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
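+  //Bail out with usage if no FILE operand remains
+  if (optind >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }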
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  //We need to get the size of the file using stat
+  hdfs::StatInfo stat_info;
+  hdfs::Status status = fs->GetFileInfo(uri->get_path(), stat_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  //Determine where to start reading
+  off_t offset = 0;
+  if(stat_info.length > TAIL_SIZE){
+    offset = stat_info.length - TAIL_SIZE;
+  }
+
+  do {
+    off_t current_length = (off_t) stat_info.length;
+    readFile(fs, uri->get_path(), offset, stdout, false);
+
+    //Exit if -f flag was not set
+    if(!follow){
+      break;
+    }
+
+    do{
+      //Sleep for the REFRESH_RATE
+      sleep(REFRESH_RATE);
+      //Use stat to check the new filesize.
+      status = fs->GetFileInfo(uri->get_path(), stat_info);
+      if (!status.ok()) {
+        std::cerr << "Error: " << status.ToString() << std::endl;
+        exit(EXIT_FAILURE);
+      }
+      //If file became longer, loop back and print the difference
+    }
+    while((off_t) stat_info.length <= current_length);
+
+    //The file grew; advance the offset so the next pass prints only the appended data
+    offset = current_length;
+  } while (true);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

+ 52 - 7
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/tools_common.cpp → hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/tools_common.cc

@@ -21,8 +21,8 @@
 
 namespace hdfs {
 
-  std::shared_ptr<hdfs::Options> getOptions() {
-    std::shared_ptr<hdfs::Options> options = std::make_shared<hdfs::Options>();
+  std::shared_ptr<hdfs::FileSystem> doConnect(hdfs::URI & uri, bool max_timeout) {
+    hdfs::Options options;
     //Setting the config path to the default: "$HADOOP_CONF_DIR" or "/etc/hadoop/conf"
     hdfs::ConfigurationLoader loader;
     //Loading default config files core-site.xml and hdfs-site.xml from the config path
@@ -30,12 +30,12 @@ namespace hdfs {
     //TODO: HDFS-9539 - after this is resolved, valid config will always be returned.
     if(config){
       //Loading options from the config
-      *options = config->GetOptions();
+      options = config->GetOptions();
+    }
+    if(max_timeout){
+      //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
+      options.rpc_timeout = std::numeric_limits<int>::max();
     }
-    return options;
-  }
-
-  std::shared_ptr<hdfs::FileSystem> doConnect(hdfs::URI & uri, hdfs::Options & options) {
     IoService * io_service = IoService::New();
     //Wrapping fs into a shared pointer to guarantee deletion
     std::shared_ptr<hdfs::FileSystem> fs(hdfs::FileSystem::New(io_service, "", options));
@@ -67,4 +67,49 @@ namespace hdfs {
     return fs;
   }
 
+  #define BUF_SIZE 1048576 //1 MB
+  static char input_buffer[BUF_SIZE];
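+  //A single static buffer keeps readFile allocation-free, at the cost of making it
+  //non-reentrant; that is fine here because each tool reads one file at a time.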
+
+  void readFile(std::shared_ptr<hdfs::FileSystem> fs, std::string path, off_t offset, std::FILE* dst_file, bool to_delete) {
+    ssize_t total_bytes_read = 0;
+    size_t last_bytes_read = 0;
+
+    hdfs::FileHandle *file_raw = nullptr;
+    hdfs::Status status = fs->Open(path, &file_raw);
+    if (!status.ok()) {
+      std::cerr << "Could not open file " << path << ". " << status.ToString() << std::endl;
+      exit(EXIT_FAILURE);
+    }
+    //wrapping file_raw into a unique pointer to guarantee deletion
+    std::unique_ptr<hdfs::FileHandle> file(file_raw);
+
+    do{
+      //Reading file chunks
+      status = file->PositionRead(input_buffer, sizeof(input_buffer), offset, &last_bytes_read);
+      if(status.ok()) {
+        //Writing file chunks to stdout
+        fwrite(input_buffer, last_bytes_read, 1, dst_file);
+        total_bytes_read += last_bytes_read;
+        offset += last_bytes_read;
+      } else {
+        if(status.is_invalid_offset()){
+          //Reached the end of the file
+          if(to_delete) {
+            //Deleting the file (recursive set to false)
+            hdfs::Status status = fs->Delete(path, false);
+            if (!status.ok()) {
+              std::cerr << "Error deleting the source file: " << path
+                << " " << status.ToString() << std::endl;
+              exit(EXIT_FAILURE);
+            }
+          }
+          break;
+        } else {
+          std::cerr << "Error reading the file: " << status.ToString() << std::endl;
+          exit(EXIT_FAILURE);
+        }
+      }
+    } while (last_bytes_read > 0);
+    return;
+  }
 }

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/tools_common.h

@@ -23,17 +23,15 @@
 #include "hdfspp/hdfspp.h"
 #include "common/hdfs_configuration.h"
 #include "common/configuration_loader.h"
-
 #include <mutex>
 
 namespace hdfs {
 
-  //Pull configurations and get the Options object
-  std::shared_ptr<hdfs::Options> getOptions();
-
   //Build all necessary objects and perform the connection
-  std::shared_ptr<hdfs::FileSystem> doConnect(hdfs::URI & uri, hdfs::Options & options);
+  std::shared_ptr<hdfs::FileSystem> doConnect(hdfs::URI & uri, bool max_timeout);
 
+  //Open HDFS file at offset, read it to destination file, optionally delete source file
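+  //(e.g. readFile(fs, "/dir/file", 0, stdout, false) streams the whole file to
+  //stdout without deleting it; hdfs_tail passes a non-zero offset to skip ahead)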
+  void readFile(std::shared_ptr<hdfs::FileSystem> fs, std::string path, off_t offset, std::FILE* dst_file, bool to_delete);
 }
 
 #endif