Browse Files

AMBARI-11391. Files View Should support NameNode HA (Erik Bergenholtz via rlevas)

Erik Bergenholtz, 10 years ago
parent
commit
e28a9c073c
24 changed files, with 1674 additions and 234 deletions
  1. contrib/views/files/pom.xml (+113, -125)
  2. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java (+1, -0)
  3. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/FileOperationService.java (+6, -2)
  4. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/HdfsService.java (+3, -17)
  5. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/HelpService.java (+1, -0)
  6. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/PropertyValidator.java (+6, -4)
  7. contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/UploadService.java (+1, -0)
  8. contrib/views/files/src/main/resources/view.xml (+60, -16)
  9. contrib/views/pom.xml (+2, -0)
  10. contrib/views/utils/pom.xml (+122, -0)
  11. contrib/views/utils/readme.md (+55, -0)
  12. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/AmbariApi.java (+202, -0)
  13. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/AmbariApiException.java (+32, -0)
  14. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/NoClusterAssociatedException.java (+25, -0)
  15. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/RemoteCluster.java (+104, -0)
  16. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/URLStreamProviderBasicAuth.java (+89, -0)
  17. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/AuthConfigurationBuilder.java (+98, -0)
  18. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java (+197, -0)
  19. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java (+69, -59)
  20. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApiException.java (+29, -0)
  21. contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java (+150, -0)
  22. contrib/views/utils/src/test/java/org/apache/ambari/view/utils/ambari/RemoteClusterTest.java (+137, -0)
  23. contrib/views/utils/src/test/java/org/apache/ambari/view/utils/ambari/URLStreamProviderBasicAuthTest.java (+159, -0)
  24. contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java (+13, -11)

+ 113 - 125
contrib/views/files/pom.xml

@@ -15,107 +15,95 @@
    limitations under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-     xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.apache.ambari.contrib.views</groupId>
-    <artifactId>files</artifactId>
-    <version>0.1.0-SNAPSHOT</version>
-    <name>Files</name>
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari.contrib.views</groupId>
+  <artifactId>files</artifactId>
+  <version>0.2.0-SNAPSHOT</version>
+  <name>Files</name>
 
-    <parent>
-        <groupId>org.apache.ambari.contrib.views</groupId>
-        <artifactId>ambari-contrib-views</artifactId>
-        <version>2.0.0-SNAPSHOT</version>
-    </parent>
+  <parent>
+    <groupId>org.apache.ambari.contrib.views</groupId>
+    <artifactId>ambari-contrib-views</artifactId>
+    <version>2.0.0-SNAPSHOT</version>
+  </parent>
 
-    <dependencies>
+  <dependencies>
     <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop-version}</version>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop-version}</version>
     </dependency>
     <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common</artifactId>
-        <version>${hadoop-version}</version>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop-version}</version>
     </dependency>
     <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
-        <scope>test</scope>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
-        <groupId>org.easymock</groupId>
-        <artifactId>easymock</artifactId>
-        <scope>test</scope>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
-        <groupId>com.google.inject</groupId>
-        <artifactId>guice</artifactId>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
     </dependency>
+
     <dependency>
-        <groupId>org.glassfish.jersey.containers</groupId>
-        <artifactId>jersey-container-servlet</artifactId>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-multipart</artifactId>
     </dependency>
     <dependency>
-        <groupId>com.sun.jersey.contribs</groupId>
-        <artifactId>jersey-multipart</artifactId>
-        <version>1.18</version>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
     </dependency>
     <dependency>
-        <groupId>com.googlecode.json-simple</groupId>
-        <artifactId>json-simple</artifactId>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <version>${hadoop-version}</version>
+      <scope>test</scope>
     </dependency>
+
     <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-minicluster</artifactId>
-        <version>${hadoop-version}</version>
-        <scope>test</scope>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
     </dependency>
-
     <dependency>
-        <groupId>org.glassfish.jersey.test-framework</groupId>
-        <artifactId>jersey-test-framework-core</artifactId>
-        <version>2.6</version>
-        <scope>test</scope>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-views</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
-        <groupId>org.glassfish.jersey.test-framework.providers</groupId>
-        <artifactId>jersey-test-framework-provider-grizzly2</artifactId>
-        <version>2.6</version>
-        <scope>test</scope>
+      <groupId>org.apache.ambari.contrib.views</groupId>
+      <artifactId>ambari-views-utils</artifactId>
+      <version>0.0.1-SNAPSHOT</version>
     </dependency>
     <dependency>
-        <groupId>
-        org.glassfish.jersey.test-framework.providers
-        </groupId>
-        <artifactId>
-        jersey-test-framework-provider-bundle
-        </artifactId>
-        <version>2.6</version>
-        <scope>test</scope>
-        <type>pom</type>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>2.2.2</version>
     </dependency>
     <dependency>
-        <groupId>org.apache.ambari</groupId>
-        <artifactId>ambari-views</artifactId>
-        <scope>provided</scope>
+      <groupId>org.glassfish.jersey.containers</groupId>
+      <artifactId>jersey-container-servlet</artifactId>
+      <scope>provided</scope>
     </dependency>
-        <dependency>
-            <groupId>com.google.code.gson</groupId>
-            <artifactId>gson</artifactId>
-            <version>2.2.2</version>
-        </dependency>
-    </dependencies>
+  </dependencies>
 
-    <properties>
-      <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
-      <hadoop-version>2.2.0</hadoop-version>
-      <nodejs.directory>${basedir}/target/nodejs</nodejs.directory>
-      <npm.version>1.4.3</npm.version>
-      <ui.directory>${basedir}/src/main/resources/ui</ui.directory>
-    </properties>
-    <build>
+  <properties>
+    <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
+    <hadoop-version>2.6.0</hadoop-version>
+    <nodejs.directory>${basedir}/target/nodejs</nodejs.directory>
+    <npm.version>1.4.3</npm.version>
+    <ui.directory>${basedir}/src/main/resources/ui</ui.directory>
+  </properties>
+  <build>
 
     <plugins>
       <plugin>
@@ -227,99 +215,99 @@
         </executions>
       </plugin>
       <plugin>
-         <groupId>org.vafer</groupId>
-         <artifactId>jdeb</artifactId>
-         <version>1.0.1</version>
-         <executions>
-             <execution>
-                 <phase>none</phase>
-                 <goals>
-                     <goal>jdeb</goal>
-                 </goals>
-             </execution>
-         </executions>
-         <configuration>
-             <skip>true</skip>
-             <submodules>false</submodules>
-         </configuration>
-     </plugin>
+        <groupId>org.vafer</groupId>
+        <artifactId>jdeb</artifactId>
+        <version>1.0.1</version>
+        <executions>
+          <execution>
+            <phase>none</phase>
+            <goals>
+              <goal>jdeb</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <skip>true</skip>
+          <submodules>false</submodules>
+        </configuration>
+      </plugin>
     </plugins>
     <resources>
-        <resource>
+      <resource>
         <directory>src/main/resources/ui/public</directory>
         <filtering>false</filtering>
-        </resource>
+      </resource>
 
-        <resource>
+      <resource>
         <directory>src/main/resources/</directory>
         <filtering>false</filtering>
         <includes>
-            <include>view.xml</include>
+          <include>view.xml</include>
         </includes>
-        </resource>
+      </resource>
 
-        <resource>
-          <targetPath>WEB-INF/lib</targetPath>
-          <filtering>false</filtering>
-          <directory>target/lib</directory>
-        </resource>
+      <resource>
+        <targetPath>WEB-INF/lib</targetPath>
+        <filtering>false</filtering>
+        <directory>target/lib</directory>
+      </resource>
     </resources>
     <pluginManagement>
-        <plugins>
+      <plugins>
         <!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
         <plugin>
-            <groupId>org.eclipse.m2e</groupId>
-            <artifactId>lifecycle-mapping</artifactId>
-            <version>1.0.0</version>
-            <configuration>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <version>1.0.0</version>
+          <configuration>
             <lifecycleMappingMetadata>
-                <pluginExecutions>
+              <pluginExecutions>
                 <pluginExecution>
-                    <pluginExecutionFilter>
+                  <pluginExecutionFilter>
                     <groupId>
-                        org.codehaus.mojo
+                      org.codehaus.mojo
                     </groupId>
                     <artifactId>
-                        exec-maven-plugin
+                      exec-maven-plugin
                     </artifactId>
                     <versionRange>
-                        [1.2.1,)
+                      [1.2.1,)
                     </versionRange>
                     <goals>
-                        <goal>exec</goal>
+                      <goal>exec</goal>
                     </goals>
-                    </pluginExecutionFilter>
-                    <action>
+                  </pluginExecutionFilter>
+                  <action>
                     <ignore></ignore>
-                    </action>
+                  </action>
                 </pluginExecution>
                 <pluginExecution>
-                    <pluginExecutionFilter>
+                  <pluginExecutionFilter>
                     <groupId>
-                        com.github.eirslett
+                      com.github.eirslett
                     </groupId>
                     <artifactId>
-                        frontend-maven-plugin
+                      frontend-maven-plugin
                     </artifactId>
                     <versionRange>
-                        [0.0.14,)
+                      [0.0.14,)
                     </versionRange>
                     <goals>
-                        <goal>
+                      <goal>
                         install-node-and-npm
-                        </goal>
-                        <goal>npm</goal>
+                      </goal>
+                      <goal>npm</goal>
                     </goals>
-                    </pluginExecutionFilter>
-                    <action>
+                  </pluginExecutionFilter>
+                  <action>
                     <ignore></ignore>
-                    </action>
+                  </action>
                 </pluginExecution>
-                </pluginExecutions>
+              </pluginExecutions>
             </lifecycleMappingMetadata>
-            </configuration>
+          </configuration>
         </plugin>
-        </plugins>
+      </plugins>
     </pluginManagement>
   </build>
   <profiles>

+ 1 - 0
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java

@@ -48,6 +48,7 @@ import javax.xml.bind.annotation.XmlElement;
 import com.google.gson.Gson;
 import org.apache.ambari.view.filebrowser.utils.NotFoundFormattedException;
 import org.apache.ambari.view.filebrowser.utils.ServiceFormattedException;
+import org.apache.ambari.view.utils.hdfs.HdfsApi;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.ambari.view.ViewContext;

+ 6 - 2
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/FileOperationService.java

@@ -33,6 +33,8 @@ import javax.xml.bind.annotation.XmlRootElement;
 import org.apache.ambari.view.ViewContext;
 import org.apache.ambari.view.filebrowser.utils.NotFoundFormattedException;
 import org.apache.ambari.view.filebrowser.utils.ServiceFormattedException;
+import org.apache.ambari.view.utils.hdfs.HdfsApi;
+import org.apache.ambari.view.utils.hdfs.HdfsApiException;
 import org.json.simple.JSONObject;
 
 /**
@@ -139,10 +141,12 @@ public class FileOperationService extends HdfsService {
     try {
       HdfsApi api = getApi(context);
       ResponseBuilder result;
-      if (api.copy(request.src, request.dst)) {
+      try {
+        api.copy(request.src, request.dst);
+
         result = Response.ok(getApi(context).fileStatusToJSON(api
             .getFileStatus(request.dst)));
-      } else {
+      } catch (HdfsApiException e) {
         result = Response.ok(new BoolResult(false)).status(422);
       }
       return result.build();

+ 3 - 17
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/HdfsService.java

@@ -22,6 +22,8 @@ import javax.xml.bind.annotation.XmlRootElement;
 
 import org.apache.ambari.view.ViewContext;
 import org.apache.ambari.view.filebrowser.utils.ServiceFormattedException;
+import org.apache.ambari.view.utils.hdfs.HdfsApi;
+import org.apache.ambari.view.utils.hdfs.HdfsUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,13 +69,8 @@ public abstract class HdfsService {
    */
   public HdfsApi getApi(ViewContext context) {
     if (_api == null) {
-//      Thread.currentThread().setContextClassLoader(null);
-      String defaultFs = context.getProperties().get("webhdfs.url");
-
-      defaultFs = normalizeFsUrl(defaultFs);
-
       try {
-        _api = new HdfsApi(defaultFs, getDoAsUsername(context), getHdfsAuthParams(context));
+        _api = HdfsUtil.connectToHDFSApi(context);
       } catch (Exception ex) {
         throw new ServiceFormattedException("HdfsApi connection failed. Check \"webhdfs.url\" property", ex);
       }
@@ -81,17 +78,6 @@ public abstract class HdfsService {
     return _api;
   }
 
-  protected static String normalizeFsUrl(String defaultFs) {
-    //TODO: Don't add port if HA is enabled
-    if (!defaultFs.matches("^[^:]+://.*$"))
-      defaultFs = "webhdfs://" + defaultFs;
-
-    if (!defaultFs.matches("^.*:\\d+$"))
-      defaultFs = defaultFs + ":50070";
-
-    return defaultFs;
-  }
-
   private static Map<String, String> getHdfsAuthParams(ViewContext context) {
     String auth = context.getProperties().get("webhdfs.auth");
     Map<String, String> params = new HashMap<String, String>();

+ 1 - 0
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/HelpService.java

@@ -30,6 +30,7 @@ import javax.ws.rs.core.Response;
 import org.apache.ambari.view.ViewContext;
 import org.apache.ambari.view.filebrowser.utils.NotFoundFormattedException;
 import org.apache.ambari.view.filebrowser.utils.ServiceFormattedException;
+import org.apache.ambari.view.utils.hdfs.HdfsApi;
 
 /**
  * Help service

+ 6 - 4
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/PropertyValidator.java

@@ -40,10 +40,12 @@ public class PropertyValidator implements Validator {
   public ValidationResult validateProperty(String property, ViewInstanceDefinition viewInstanceDefinition, ValidationContext validationContext) {
     if (property.equals(WEBHDFS_URL)) {
       String webhdfsUrl = viewInstanceDefinition.getPropertyMap().get(WEBHDFS_URL);
-      try {
-        new URI(webhdfsUrl);
-      } catch (URISyntaxException e) {
-        return new InvalidPropertyValidationResult(false, "Must be valid URL");
+      if (webhdfsUrl != null) {
+        try {
+          new URI(webhdfsUrl);
+        } catch (URISyntaxException e) {
+          return new InvalidPropertyValidationResult(false, "Must be valid URL");
+        }
       }
     }
     return ValidationResult.SUCCESS;

+ 1 - 0
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/UploadService.java

@@ -29,6 +29,7 @@ import javax.ws.rs.core.Response;
 
 import org.apache.ambari.view.ViewContext;
 import org.apache.ambari.view.filebrowser.utils.ServiceFormattedException;
+import org.apache.ambari.view.utils.hdfs.HdfsApi;
 import org.apache.hadoop.fs.FSDataOutputStream;
 
 import com.sun.jersey.core.header.FormDataContentDisposition;

+ 60 - 16
contrib/views/files/src/main/resources/view.xml

@@ -17,7 +17,7 @@
 <view>
     <name>FILES</name>
     <label>Files</label>
-    <version>0.1.0</version>
+    <version>0.2.0</version>
 
     <min-ambari-version>2.0.*</min-ambari-version>
 
@@ -25,13 +25,68 @@
 
     <parameter>
         <name>webhdfs.url</name>
-        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
+        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address
+            property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
         <label>WebHDFS FileSystem URI</label>
-        <placeholder>webhdfs://namenode:50070</placeholder>
-        <default-value>webhdfs://localhost:50070</default-value>
         <required>true</required>
-        <cluster-config>hdfs-site/dfs.namenode.http-address</cluster-config>
+        <cluster-config>core-site/fs.defaultFS</cluster-config>
     </parameter>
+    <parameter>
+        <name>webhdfs.nameservices</name>
+        <description>Comma-separated list of nameservices. Value of hdfs-site/dfs.nameservices property</description>
+        <label>Logical name of the NameNode cluster</label>
+        <required>false</required>
+        <cluster-config>hdfs-site/dfs.nameservices</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.ha.namenodes.list</name>
+        <description>Comma-separated list of namenodes for a given nameservice.
+            Value of hdfs-site/dfs.ha.namenodes.[nameservice] property</description>
+        <label>List of NameNodes</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.ha.namenode.rpc-address.nn1</name>
+        <description>RPC address for first name node.
+            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode1] property</description>
+        <label>First NameNode RPC Address</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.ha.namenode.rpc-address.nn2</name>
+        <description>RPC address for second name node.
+            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode2] property</description>
+        <label>Second NameNode RPC Address</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.ha.namenode.http-address.nn1</name>
+        <description>WebHDFS address for first name node.
+            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode1] property</description>
+        <label>First NameNode HTTP (WebHDFS) Address</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.ha.namenode.http-address.nn2</name>
+        <description>WebHDFS address for second name node.
+            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode2] property</description>
+        <label>Second NameNode HTTP (WebHDFS) Address</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+    <parameter>
+        <name>webhdfs.client.failover.proxy.provider</name>
+        <description>The Java class that HDFS clients use to contact the Active NameNode
+            Value of hdfs-site/dfs.client.failover.proxy.provider.[nameservice] property</description>
+        <label>Failover Proxy Provider</label>
+        <required>false</required>
+        <cluster-config>fake</cluster-config>
+    </parameter>
+
     <parameter>
         <name>webhdfs.username</name>
         <description>doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
@@ -43,7 +98,6 @@
         <name>webhdfs.auth</name>
         <description>Semicolon-separated authentication configs.</description>
         <placeholder>auth=SIMPLE</placeholder>
-        <default-value>auth=SIMPLE</default-value>
         <label>WebHDFS Authorization</label>
         <required>false</required>
     </parameter>
@@ -52,14 +106,4 @@
         <name>files</name>
         <service-class>org.apache.ambari.view.filebrowser.FileBrowserService</service-class>
     </resource>
-
-    <auto-instance>
-        <name>AUTO_INSTANCE</name>
-        <label>Auto Create instance for the Files view</label>
-        <description>This view instance is auto created when the HDFS service is added to a cluster.</description>
-        <stack-id>HDP-2.*</stack-id>
-        <services>
-            <service>HDFS</service>
-        </services>
-    </auto-instance>
 </view>

+ 2 - 0
contrib/views/pom.xml

@@ -34,11 +34,13 @@
   </properties>
   <modules>
     <module>files</module>
+    <module>jobs</module>
     <module>pig</module>
     <module>slider</module>
     <module>capacity-scheduler</module>
     <module>hive</module>
     <module>tez</module>
+    <module>utils</module>
   </modules>
   <build>
     <pluginManagement>

+ 122 - 0
contrib/views/utils/pom.xml

@@ -0,0 +1,122 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari.contrib.views</groupId>
+  <artifactId>ambari-views-utils</artifactId>
+  <version>0.0.1-SNAPSHOT</version>
+  <name>Ambari View Utils</name>
+
+  <parent>
+    <groupId>org.apache.ambari.contrib.views</groupId>
+    <artifactId>ambari-contrib-views</artifactId>
+    <version>2.0.0-SNAPSHOT</version>
+  </parent>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.containers</groupId>
+      <artifactId>jersey-container-servlet</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-multipart</artifactId>
+      <version>1.18</version>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <version>${hadoop-version}</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.glassfish.jersey.test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <version>2.6</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.test-framework.providers</groupId>
+      <artifactId>jersey-test-framework-provider-grizzly2</artifactId>
+      <version>2.6</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-collections4</artifactId>
+      <version>4.0</version>
+    </dependency>
+    <dependency>
+      <groupId>
+        org.glassfish.jersey.test-framework.providers
+      </groupId>
+      <artifactId>
+        jersey-test-framework-provider-bundle
+      </artifactId>
+      <version>2.6</version>
+      <scope>test</scope>
+      <type>pom</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-views</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>2.2.2</version>
+    </dependency>
+  </dependencies>
+
+  <properties>
+    <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
+    <hadoop-version>2.2.0</hadoop-version>
+  </properties>
+  <build>
+  </build>
+</project>

+ 55 - 0
contrib/views/utils/readme.md

@@ -0,0 +1,55 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Utils
+============
+
+Description
+-----
+This module provides common utils for views
+
+Requirements
+-----
+
+- Ambari 2.1.0 or later
+
+HDFS Utility
+-----
+
+HdfsApi class provides business delegate for HDFS client that provides proxyuser configuration.
+You can create the HdfsApi based on your ViewContext:
+
+    HdfsApi api = HdfsUtil.connectToHDFSApi(viewContext);
+
+It will read instance properties and create HdfsApi configured to specific cluster. NameNodes HA is supported.
+
+AmbariApi
+-----
+
+AmbariApi provides methods to get Ambari configurations and cluster topology.
+
+Cluster association functionality:
+
+    AmbariApi api = new AmbariApi(viewContext);
+    Cluster cluster = api.getCluster();
+
+It can work with local cluster or with remote cluster based on your instance properties of Ambari URL,
+username and password in the ViewContext. To determine if you have associated cluster, either local or remote:
+
+    boolean isAssociated = api.isClusterAssociated();
+
+Also provides the API to get cluster topology:
+
+    List<String> nnHosts = api.getHostsWithComponent("NAMENODE");
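
For illustration only (not part of this commit), a minimal sketch of a view service method combining the two utilities described in the readme above; the class name and the printed output are hypothetical, while the HdfsUtil, HdfsApi and AmbariApi calls are the ones introduced by this patch:

    import java.util.List;

    import org.apache.ambari.view.ViewContext;
    import org.apache.ambari.view.utils.ambari.AmbariApi;
    import org.apache.ambari.view.utils.hdfs.HdfsApi;
    import org.apache.ambari.view.utils.hdfs.HdfsUtil;

    // Hypothetical example class, not part of this patch.
    public class UtilsUsageExample {

      // Connects to HDFS using the instance/cluster properties (HA-aware) and,
      // if the view is associated with a cluster, lists the NAMENODE hosts.
      public static void demo(ViewContext context) throws Exception {
        HdfsApi hdfs = HdfsUtil.connectToHDFSApi(context);

        AmbariApi ambari = new AmbariApi(context);
        if (ambari.isClusterAssociated()) {
          List<String> nameNodes = ambari.getHostsWithComponent("NAMENODE");
          System.out.println("NameNode hosts: " + nameNodes);
        }
      }
    }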

+ 202 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/AmbariApi.java

@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+import org.apache.ambari.view.AmbariStreamProvider;
+import org.apache.ambari.view.URLStreamProvider;
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.cluster.Cluster;
+import org.apache.commons.io.IOUtils;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.JSONValue;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Provides API to Ambari. Supports both Local and Remote cluster association.
+ * Also provides API to get cluster topology (determine what node contains specific service)
+ * on both local and remote cluster.
+ */
+public class AmbariApi {
+  public static final String AMBARI_SERVER_URL_INSTANCE_PROPERTY = "ambari.server.url";
+  public static final String AMBARI_SERVER_USERNAME_INSTANCE_PROPERTY = "ambari.server.username";
+  public static final String AMBARI_SERVER_PASSWORD_INSTANCE_PROPERTY = "ambari.server.password";
+
+  private Cluster cluster;
+  private ViewContext context;
+  private String remoteUrl;
+  private String remoteUsername;
+  private String remotePassword;
+
+  /**
+   * Constructor for Ambari API based on ViewContext
+   * @param context View Context
+   */
+  public AmbariApi(ViewContext context) {
+    this.context = context;
+
+    remoteUrl = context.getProperties().get(AMBARI_SERVER_URL_INSTANCE_PROPERTY);
+    remoteUsername = context.getProperties().get(AMBARI_SERVER_USERNAME_INSTANCE_PROPERTY);
+    remotePassword = context.getProperties().get(AMBARI_SERVER_PASSWORD_INSTANCE_PROPERTY);
+  }
+
+  /**
+   * Provides ability to get cluster topology
+   * @param requestComponent name of component
+   * @return list of hostnames with component
+   * @throws AmbariApiException
+   */
+  public List<String> getHostsWithComponent(String requestComponent) throws AmbariApiException {
+    String method = "hosts?fields=Hosts/public_host_name,host_components/HostRoles/component_name";
+    String response = readFromAmbari(method);
+
+    List<String> foundHosts = new ArrayList<String>();
+
+    JSONObject jsonObject = (JSONObject) JSONValue.parse(response);
+    JSONArray hosts = (JSONArray) jsonObject.get("items");
+    for (Object host : hosts) {
+      JSONObject hostJson = (JSONObject) host;
+      JSONArray hostComponents = (JSONArray) hostJson.get("host_components");
+      for (Object component : hostComponents) {
+        JSONObject componentJson = (JSONObject) component;
+        JSONObject hostRoles = (JSONObject) componentJson.get("HostRoles");
+        String componentName = (String) hostRoles.get("component_name");
+        if (componentName.equals(requestComponent)) {
+          foundHosts.add((String) hostRoles.get("host_name"));
+        }
+      }
+    }
+    return foundHosts;
+  }
+
+  /**
+   * Request to Ambari REST API. Supports both local and remote cluster
+   * @param method REST API path, e.g. /api/v1/clusters/mycluster?...
+   * @return response
+   * @throws AmbariApiException IO error or not associated with cluster
+   */
+  public String readFromAmbari(String method) throws AmbariApiException {
+    String response;
+
+    try {
+      InputStream inputStream;
+
+      if (isLocalCluster()) {
+        AmbariStreamProvider ambariStreamProvider = context.getAmbariStreamProvider();
+        String url = String.format("/api/v1/clusters/%s/%s", getCluster().getName(), method);
+        inputStream = ambariStreamProvider.readFrom(url, "GET", (String) null, null, true);
+
+      } else if (isRemoteCluster()) {
+        URLStreamProvider urlStreamProvider = getUrlStreamProviderBasicAuth();
+        String url = String.format("%s/%s", remoteUrl, method);
+        inputStream = urlStreamProvider.readFrom(url, "GET", (String) null, null);
+
+      } else {
+        throw new NoClusterAssociatedException(
+            "RA030 View is not associated with any cluster. No way to request Ambari.");
+      }
+
+      response = IOUtils.toString(inputStream);
+    } catch (IOException e) {
+      throw new AmbariApiException("RA040 I/O error while requesting Ambari", e);
+    }
+    return response;
+  }
+
+  /**
+   * Check if associated with local or remote cluster
+   * @return true if associated
+   */
+  public boolean isClusterAssociated() {
+    try {
+      getCluster();
+      return true;
+    } catch (NoClusterAssociatedException e) {
+      return false;
+    }
+  }
+
+  /**
+   * Cluster object that provides access for Ambari configuration
+   * @return cluster if locally associated or RemoteCluster
+   * @throws NoClusterAssociatedException
+   */
+  public Cluster getCluster() throws NoClusterAssociatedException {
+    if (cluster == null) {
+      if (isLocalCluster()) {
+        cluster = context.getCluster();
+
+      } else if (isRemoteCluster()) {
+        cluster = getRemoteCluster();
+
+      } else {
+        throw new NoClusterAssociatedException(
+            "RA050 View is not associated with any cluster. No way to request Ambari.");
+      }
+    }
+    return cluster;
+  }
+
+  /**
+   * Is associated with local cluster
+   * @return true if associated
+   */
+  public boolean isLocalCluster() {
+    return context.getCluster() != null;
+  }
+
+  /**
+   * Is associated with remote cluster
+   * @return true if associated
+   */
+  public boolean isRemoteCluster() {
+    return remoteUrl != null && !remoteUrl.isEmpty();
+  }
+
+  /**
+   * Build RemoteCluster instance based on viewContext properties
+   * @return RemoteCluster instance
+   */
+  public RemoteCluster getRemoteCluster() {
+    if (!isRemoteCluster())
+      return null;
+
+    URLStreamProvider urlStreamProviderBasicAuth = getUrlStreamProviderBasicAuth();
+    return new RemoteCluster(remoteUrl, urlStreamProviderBasicAuth);
+  }
+
+  /**
+   * Build URLStreamProvider with Basic Authentication for Remote Cluster
+   * @return URLStreamProvider
+   */
+  public URLStreamProvider getUrlStreamProviderBasicAuth() {
+    if (remoteUsername == null || remoteUsername.isEmpty() ||
+        remotePassword == null || remotePassword.isEmpty()) {
+      throw new AmbariApiException("RA020 Remote Ambari username and password are not filled");
+    }
+
+    URLStreamProvider urlStreamProvider = context.getURLStreamProvider();
+
+    return new URLStreamProviderBasicAuth(urlStreamProvider, remoteUsername, remotePassword);
+  }
+}

+ 32 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/AmbariApiException.java

@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+/**
+ * Exception during work with Ambari API
+ */
+public class AmbariApiException extends RuntimeException {
+  public AmbariApiException(String message) {
+    super(message);
+  }
+
+  public AmbariApiException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}

+ 25 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/NoClusterAssociatedException.java

@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+public class NoClusterAssociatedException extends AmbariApiException {
+  public NoClusterAssociatedException(String message) {
+    super(message);
+  }
+}

+ 104 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/RemoteCluster.java

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+import org.apache.ambari.view.URLStreamProvider;
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.cluster.Cluster;
+import org.apache.commons.collections4.map.PassiveExpiringMap;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.input.NullInputStream;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.JSONValue;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Class that provides same interface as local Cluster, but
+ * is able to retrieve configuration values by REST API
+ */
+public class RemoteCluster implements Cluster {
+  protected String name;
+  protected String baseUrl;
+  protected URLStreamProvider urlStreamProvider;
+  protected Map<String, JSONObject> configurationCache;
+
+  /**
+   * Constructor for RemoteCluster
+   * @param ambariClusterUrl Ambari Server Cluster REST API URL (for example: http://ambari.server:8080/api/v1/clusters/c1)
+   * @param urlStreamProvider stream provider with authorization support
+   */
+  public RemoteCluster(String ambariClusterUrl, URLStreamProvider urlStreamProvider) {
+    this.baseUrl = ambariClusterUrl;
+    this.urlStreamProvider = urlStreamProvider;
+
+    String[] parts = ambariClusterUrl.split("/");
+    this.name = parts[parts.length-1];
+    PassiveExpiringMap<String, JSONObject> configurations = new PassiveExpiringMap<String, JSONObject>(10000L);  // keep cache for 10 seconds
+    configurationCache = Collections.synchronizedMap(configurations);
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public String getConfigurationValue(String type, String key) {
+    JSONObject config;
+    try {
+      String desiredTag = getDesiredConfig(type);
+      config = readFromUrlJSON(String.format("%s/configurations?(type=%s&tag=%s)", baseUrl, type, desiredTag));
+    } catch (IOException e) {
+      throw new AmbariApiException("RA010 Can't retrieve configuration from Remote Ambari", e);
+    }
+
+    JSONObject items = (JSONObject) ((JSONArray) config.get("items")).get(0);
+    JSONObject properties = (JSONObject) items.get("properties");
+    return (String) properties.get(key);
+  }
+
+  private String getDesiredConfig(String type) throws IOException {
+    JSONObject desiredConfigResponse = readFromUrlJSON(
+        String.format("%s?fields=services/ServiceInfo,hosts,Clusters", baseUrl));
+    JSONObject clusters = (JSONObject) (desiredConfigResponse.get("Clusters"));
+    JSONObject desiredConfig = (JSONObject) (clusters.get("desired_configs"));
+    JSONObject desiredConfigForType = (JSONObject) desiredConfig.get(type);
+
+    return (String) desiredConfigForType.get("tag");
+  }
+
+  private JSONObject readFromUrlJSON(String url) throws IOException {
+    JSONObject jsonObject = configurationCache.get(url);
+    if (jsonObject == null) {
+      InputStream inputStream = urlStreamProvider.readFrom(url, "GET", (String)null, null);
+      String response = IOUtils.toString(inputStream);
+      jsonObject = (JSONObject) JSONValue.parse(response);
+
+      configurationCache.put(url, jsonObject);
+    }
+    return jsonObject;
+  }
+}
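
An illustrative sketch (not part of this commit) of wiring RemoteCluster to a remote Ambari server through the URLStreamProviderBasicAuth wrapper added later in this patch; the URL and credentials are placeholder values:

    import org.apache.ambari.view.URLStreamProvider;
    import org.apache.ambari.view.ViewContext;
    import org.apache.ambari.view.utils.ambari.RemoteCluster;
    import org.apache.ambari.view.utils.ambari.URLStreamProviderBasicAuth;

    // Hypothetical example class; URL and credentials are placeholders.
    public class RemoteClusterExample {

      public static String readDefaultFs(ViewContext context) {
        // REST resource of one cluster on a remote Ambari server.
        String clusterUrl = "http://ambari.server:8080/api/v1/clusters/c1";

        // Wrap the view's stream provider so every request carries a Basic auth header.
        URLStreamProvider withAuth = new URLStreamProviderBasicAuth(
            context.getURLStreamProvider(), "admin", "admin");

        RemoteCluster cluster = new RemoteCluster(clusterUrl, withAuth);
        // Looks up the desired tag for core-site, then returns the property value
        // (responses are cached for about 10 seconds).
        return cluster.getConfigurationValue("core-site", "fs.defaultFS");
      }
    }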

+ 89 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/ambari/URLStreamProviderBasicAuth.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+import org.apache.ambari.view.URLStreamProvider;
+import org.apache.commons.codec.binary.Base64;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Wrapper for URLStreamProvider that adds authentication header
+ */
+public class URLStreamProviderBasicAuth implements URLStreamProvider {
+  private URLStreamProvider urlStreamProvider;
+  private String username;
+  private String password;
+
+  public URLStreamProviderBasicAuth(URLStreamProvider urlStreamProvider, String username, String password) {
+    this.urlStreamProvider = urlStreamProvider;
+    this.username = username;
+    this.password = password;
+  }
+
+  @Override
+  public InputStream readFrom(String url, String method, String data, Map<String, String> headers) throws IOException {
+    return urlStreamProvider.readFrom(url, method, data, addAuthHeaders(headers));
+  }
+
+  @Override
+  public InputStream readFrom(String url, String method, InputStream data, Map<String, String> headers) throws IOException {
+    return urlStreamProvider.readFrom(url, method, data, addAuthHeaders(headers));
+  }
+
+  @Override
+  public InputStream readAs(String url, String method, String data, Map<String, String> headers, String doAs) throws IOException {
+    return urlStreamProvider.readAs(url, method, data, addAuthHeaders(headers), doAs);
+  }
+
+  @Override
+  public InputStream readAs(String url, String method, InputStream data, Map<String, String> headers, String doAs) throws IOException {
+    return urlStreamProvider.readAs(url, method, data, addAuthHeaders(headers), doAs);
+  }
+
+  @Override
+  public InputStream readAsCurrent(String url, String method, String data, Map<String, String> headers) throws IOException {
+    return urlStreamProvider.readAsCurrent(url, method, data, addAuthHeaders(headers));
+  }
+
+  @Override
+  public InputStream readAsCurrent(String url, String method, InputStream data, Map<String, String> headers) throws IOException {
+    return urlStreamProvider.readAsCurrent(url, method, data, addAuthHeaders(headers));
+  }
+
+  private HashMap<String, String> addAuthHeaders(Map<String, String> customHeaders) {
+    HashMap<String, String> newHeaders = new HashMap<String, String>();
+    if (customHeaders != null)
+      newHeaders.putAll(customHeaders);
+
+    String authString = username + ":" + password;
+    byte[] authEncBytes = Base64.encodeBase64(authString.getBytes());
+    String authStringEnc = new String(authEncBytes);
+
+    newHeaders.put("Authorization", "Basic " + authStringEnc);
+    newHeaders.put("X-Requested-By", "views");
+    return newHeaders;
+  }
+}

+ 98 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/AuthConfigurationBuilder.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.hdfs;
+
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.utils.ambari.AmbariApi;
+import org.apache.ambari.view.utils.ambari.NoClusterAssociatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Builds the Authentication parameters of HDFS based on ViewContext.
+ * Currently supports only SIMPLE authorization. KERBEROS is not supported
+ * because proxyuser can be arbitrary, so can't be determined from configuration.
+ */
+public class AuthConfigurationBuilder {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(AuthConfigurationBuilder.class);
+  private Map<String, String> params = new HashMap<String, String>();
+
+  private ViewContext context;
+  private AmbariApi ambariApi;
+
+  public AuthConfigurationBuilder(ViewContext context) {
+    this.context = context;
+    this.ambariApi = new AmbariApi(context);
+  }
+
+  /**
+   * Converts auth params as semicolon separated string to Map.
+   * If auth params are not provided, tries to determine them
+   * from Ambari configuration.
+   */
+  private void parseProperties() throws HdfsApiException {
+    String auth;
+    auth = context.getProperties().get("webhdfs.auth");
+
+    if (auth == null || auth.isEmpty()) {
+      try {
+        auth = getConfigurationFromAmbari();
+      } catch (NoClusterAssociatedException e) {
+        auth = "auth=SIMPLE";
+        LOG.warn(String.format("HDFS090 Authentication parameters could not be determined. %s assumed.", auth));
+      }
+    }
+
+    parseAuthString(auth);
+  }
+
+  private void parseAuthString(String auth) {
+    for (String param : auth.split(";")) {
+      String[] keyvalue = param.split("=");
+      if (keyvalue.length != 2) {
+        LOG.error("HDFS050 Can not parse authentication param " + param + " in " + auth);
+        continue;
+      }
+      params.put(keyvalue[0], keyvalue[1]);
+    }
+  }
+
+  /**
+   * Determine configuration from Ambari.
+   */
+  private String getConfigurationFromAmbari() throws NoClusterAssociatedException {
+    String authMethod = ambariApi.getCluster().getConfigurationValue(
+        "core-site", "hadoop.security.authentication");
+    return String.format("auth=%s", authMethod);
+  }
+
+  /**
+   * Build the auth configuration
+   * @return Map of auth properties
+   * @throws HdfsApiException
+   */
+  public Map<String, String> build() throws HdfsApiException {
+    parseProperties();
+    return params;
+  }
+}
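
Illustrative usage sketch (not part of this commit): when neither the webhdfs.auth instance property nor an associated cluster is available, the builder falls back to auth=SIMPLE:

    import java.util.Map;

    import org.apache.ambari.view.ViewContext;
    import org.apache.ambari.view.utils.hdfs.AuthConfigurationBuilder;
    import org.apache.ambari.view.utils.hdfs.HdfsApiException;

    // Hypothetical helper class.
    public class AuthParamsExample {

      public static Map<String, String> authParams(ViewContext context) throws HdfsApiException {
        // e.g. {auth=SIMPLE} when nothing is configured and no cluster is associated.
        return new AuthConfigurationBuilder(context).build();
      }
    }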

+ 197 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java

@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.hdfs;
+
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.utils.ambari.AmbariApi;
+import org.apache.ambari.view.utils.ambari.NoClusterAssociatedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Builds the Configuration of HDFS based on ViewContext.
+ * Supports both directly specified properties and cluster associated
+ * properties loading.
+ */
+public class ConfigurationBuilder {
+  protected static final Logger LOG = LoggerFactory.getLogger(ConfigurationBuilder.class);
+  public static final String CORE_SITE = "core-site";
+  public static final String HDFS_SITE = "hdfs-site";
+
+  public static final String DEFAULT_FS_INSTANCE_PROPERTY = "webhdfs.url";
+  public static final String DEFAULT_FS_CLUSTER_PROPERTY  = "fs.defaultFS";
+
+  public static final String NAMESERVICES_INSTANCE_PROPERTY = "webhdfs.nameservices";
+  public static final String NAMESERVICES_CLUSTER_PROPERTY  = "dfs.nameservices";
+  public static final String HA_NAMENODES_INSTANCE_PROPERTY = "webhdfs.ha.namenodes.list";
+
+  public static final String HA_NAMENODES_CLUSTER_PROPERTY  = "dfs.ha.namenodes.%s";
+  public static final String NAMENODE_RPC_NN1_INSTANCE_PROPERTY = "webhdfs.ha.namenode.rpc-address.nn1";
+  public static final String NAMENODE_RPC_NN2_INSTANCE_PROPERTY = "webhdfs.ha.namenode.rpc-address.nn2";
+
+  public static final String NAMENODE_RPC_NN_CLUSTER_PROPERTY   = "dfs.namenode.rpc-address.%s.%s";
+  public static final String NAMENODE_HTTP_NN1_INSTANCE_PROPERTY = "webhdfs.ha.namenode.http-address.nn1";
+  public static final String NAMENODE_HTTP_NN2_INSTANCE_PROPERTY = "webhdfs.ha.namenode.http-address.nn2";
+
+  public static final String NAMENODE_HTTP_NN_CLUSTER_PROPERTY   = "dfs.namenode.http-address.%s.%s";
+  public static final String FAILOVER_PROXY_PROVIDER_INSTANCE_PROPERTY = "webhdfs.client.failover.proxy.provider";
+  public static final String FAILOVER_PROXY_PROVIDER_CLUSTER_PROPERTY  = "dfs.client.failover.proxy.provider.%s";
+
+  private Configuration conf = new Configuration();
+  private ViewContext context;
+  private AmbariApi ambariApi = null;
+
+  /**
+   * Constructor of ConfigurationBuilder based on ViewContext
+   * @param context ViewContext
+   */
+  public ConfigurationBuilder(ViewContext context) {
+    this.context = context;
+    ambariApi = new AmbariApi(context);
+  }
+
+  private void parseProperties() throws HdfsApiException {
+    String defaultFS = getDefaultFS(context);
+
+    try {
+
+      if (isHAEnabled(defaultFS)) {
+        copyHAProperties(defaultFS);
+
+        LOG.info("HA HDFS cluster found.");
+      } else {
+        if (!hasPort(defaultFS)) {
+          defaultFS = addPortIfMissing(defaultFS);
+        }
+      }
+
+      } catch (URISyntaxException e) {
+      throw new HdfsApiException("HDFS060 Invalid " + DEFAULT_FS_INSTANCE_PROPERTY +
+          "='" + defaultFS + "' URI", e);
+    }
+
+    conf.set("fs.defaultFS", defaultFS);
+    LOG.info(String.format("HdfsApi configured to connect to defaultFS='%s'", defaultFS));
+  }
+
+  private String getDefaultFS(ViewContext context) throws HdfsApiException {
+    String defaultFS = getProperty(CORE_SITE, DEFAULT_FS_CLUSTER_PROPERTY, DEFAULT_FS_INSTANCE_PROPERTY);
+
+    if (defaultFS == null || defaultFS.isEmpty())
+      throw new HdfsApiException("HDFS070 fs.defaultFS is not configured");
+
+    defaultFS = addProtocolIfMissing(defaultFS);
+    return defaultFS;
+  }
+
+  private String getProperty(String type, String key, String instanceProperty) {
+    String value;
+    try {
+      value = ambariApi.getCluster().getConfigurationValue(type, key);
+    } catch (NoClusterAssociatedException e) {
+      value = context.getProperties().get(instanceProperty);
+    }
+    return value;
+  }
+
+  private void copyHAProperties(String defaultFS) throws URISyntaxException, HdfsApiException {
+    URI uri = new URI(defaultFS);
+    String nameservice = uri.getHost();
+
+    copyClusterProperty(NAMESERVICES_CLUSTER_PROPERTY, NAMESERVICES_INSTANCE_PROPERTY);
+    String namenodeIDs = copyClusterProperty(String.format(HA_NAMENODES_CLUSTER_PROPERTY, nameservice),
+                                             HA_NAMENODES_INSTANCE_PROPERTY);
+
+    String[] namenodes = namenodeIDs.split(",");
+    if (namenodes.length != 2) {
+      throw new HdfsApiException("HDFS080 " + HA_NAMENODES_INSTANCE_PROPERTY + " namenodes count is not exactly 2");
+    }
+    //NN1
+    copyClusterProperty(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[0]),
+                        NAMENODE_RPC_NN1_INSTANCE_PROPERTY);
+    copyClusterProperty(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[0]),
+                        NAMENODE_HTTP_NN1_INSTANCE_PROPERTY);
+
+    //NN2
+    copyClusterProperty(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[1]),
+                        NAMENODE_RPC_NN2_INSTANCE_PROPERTY);
+    copyClusterProperty(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[1]),
+                        NAMENODE_HTTP_NN2_INSTANCE_PROPERTY);
+
+    copyClusterProperty(String.format(FAILOVER_PROXY_PROVIDER_CLUSTER_PROPERTY, nameservice),
+                        FAILOVER_PROXY_PROVIDER_INSTANCE_PROPERTY);
+  }
+
+  private String copyClusterProperty(String propertyName, String instancePropertyName) {
+    String value = getProperty(HDFS_SITE, propertyName, instancePropertyName);
+    conf.set(propertyName, value);
+    return value;
+  }
+
+  private boolean isHAEnabled(String defaultFS) throws URISyntaxException {
+    URI uri = new URI(defaultFS);
+    String nameservice = uri.getHost();
+    String namenodeIDs = getProperty(HDFS_SITE, String.format(HA_NAMENODES_CLUSTER_PROPERTY, nameservice),
+                                     HA_NAMENODES_INSTANCE_PROPERTY);
+    return namenodeIDs != null;
+  }
+
+  private static boolean hasPort(String url) throws URISyntaxException {
+    URI uri = new URI(url);
+    return uri.getPort() != -1;
+  }
+
+  protected static String addPortIfMissing(String defaultFs) throws URISyntaxException {
+    if (!hasPort(defaultFs)) {
+      defaultFs = defaultFs + ":50070";
+    }
+
+    return defaultFs;
+  }
+
+  protected static String addProtocolIfMissing(String defaultFs) {
+    if (!defaultFs.matches("^[^:]+://.*$")) {
+      defaultFs = "webhdfs://" + defaultFs;
+    }
+
+    return defaultFs;
+  }
+
+  /**
+   * Build the HDFS configuration
+   * @return configured HDFS Configuration object
+   * @throws HdfsApiException if configuration parsing failed
+   */
+  public Configuration build() throws HdfsApiException {
+    parseProperties();
+
+    conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
+    conf.set("fs.webhdfs.impl", WebHdfsFileSystem.class.getName());
+    conf.set("fs.file.impl", LocalFileSystem.class.getName());
+
+    return conf;
+  }
+}
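
For reference, a minimal usage sketch of the new builder (the ViewContext instance and the wrapper class below are assumed for illustration and are not part of this patch):

import org.apache.ambari.view.ViewContext;
import org.apache.ambari.view.utils.hdfs.ConfigurationBuilder;
import org.apache.ambari.view.utils.hdfs.HdfsApiException;
import org.apache.hadoop.conf.Configuration;

public class ConfigurationBuilderUsageExample {
  public Configuration buildHdfsConfiguration(ViewContext context) throws HdfsApiException {
    // Resolves fs.defaultFS (and the HA nameservice properties when they are present)
    // from the associated cluster's core-site/hdfs-site, falling back to view instance
    // properties such as webhdfs.url and webhdfs.nameservices when no cluster is associated.
    return new ConfigurationBuilder(context).build();
  }
}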

+ 69 - 59
contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/HdfsApi.java → contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java

@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.ambari.view.filebrowser;
+package org.apache.ambari.view.utils.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
@@ -25,59 +25,47 @@ import org.apache.hadoop.fs.permission.FsPermission;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.json.simple.JSONArray;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.LinkedHashMap;
 
 /**
  * Hdfs Business Delegate
  */
 public class HdfsApi {
-  protected static final Logger logger = LoggerFactory.getLogger(HdfsApi.class);
-
-  private final Configuration conf = new Configuration();
-  private final Map<String, String> params;
+  private final Configuration conf;
+  private final Map<String, String> authParams;
 
   private FileSystem fs;
   private UserGroupInformation ugi;
 
   /**
    * Constructor
-   * @param defaultFs hdfs uri
-   * @param params map of parameters
+   * @param configurationBuilder hdfs configuration builder
+   * @param username user to proxy as
+   * @param authParams authentication configuration builder
    * @throws IOException
    * @throws InterruptedException
    */
-  public HdfsApi(final String defaultFs, String username, Map<String, String> params) throws IOException,
-      InterruptedException {
-    logger.info("Files View HdfsApi is connecting to '%s'", defaultFs);
-    this.params = params;
-    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
-    conf.set("fs.webhdfs.impl", "org.apache.hadoop.hdfs.web.WebHdfsFileSystem");
-    conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
+  public HdfsApi(ConfigurationBuilder configurationBuilder, String username, AuthConfigurationBuilder authParams) throws IOException,
+      InterruptedException, HdfsApiException {
+    this.authParams = authParams.build();
+    conf = configurationBuilder.build();
 
     ugi = UserGroupInformation.createProxyUser(username, getProxyUser());
 
     fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws IOException {
-        return FileSystem.get(URI.create(defaultFs), conf);
+        return FileSystem.get(conf);
       }
     });
   }
 
   private UserGroupInformation getProxyUser() throws IOException {
     UserGroupInformation proxyuser;
-    if (params.containsKey("proxyuser")) {
-      proxyuser = UserGroupInformation.createRemoteUser(params.get("proxyuser"));
+    if (authParams.containsKey("proxyuser")) {
+      proxyuser = UserGroupInformation.createRemoteUser(authParams.get("proxyuser"));
     } else {
       proxyuser = UserGroupInformation.getCurrentUser();
     }
@@ -88,8 +76,9 @@ public class HdfsApi {
 
   private UserGroupInformation.AuthenticationMethod getAuthenticationMethod() {
     UserGroupInformation.AuthenticationMethod authMethod;
-    if (params.containsKey("auth")) {
-      authMethod = UserGroupInformation.AuthenticationMethod.valueOf(params.get("auth"));
+    if (authParams.containsKey("auth")) {
+      String authName = authParams.get("auth");
+      authMethod = UserGroupInformation.AuthenticationMethod.valueOf(authName.toUpperCase());
     } else {
       authMethod = UserGroupInformation.AuthenticationMethod.SIMPLE;
     }
@@ -190,6 +179,19 @@ public class HdfsApi {
     });
   }
 
+  /**
+   * HDFS status
+   * @return file system status
+   * @throws Exception
+   */
+  public synchronized FsStatus getStatus() throws Exception {
+    return ugi.doAs(new PrivilegedExceptionAction<FsStatus>() {
+      public FsStatus run() throws IOException {
+        return fs.getStatus();
+      }
+    });
+  }
+
   /**
    * Trash directory
    * @return trash directory
@@ -204,35 +206,35 @@ public class HdfsApi {
       }
     });
   }
- 
-   /**
-    * Trash directory path.
-    *
-    * @return trash directory path
-    * @throws Exception
-    */
+
+  /**
+   * Trash directory path.
+   *
+   * @return trash directory path
+   * @throws Exception
+   */
   public String getTrashDirPath() throws Exception {
     Path trashDir = getTrashDir();
-    
-    return  trashDir.toUri().getRawPath();
+
+    return trashDir.toUri().getRawPath();
   }
 
-   /**
-    * Trash directory path.
-    *
-    * @param    filePath        the path to the file
-    * @return trash directory path for the file
-    * @throws Exception
-    */
+  /**
+   * Trash directory path.
+   *
+   * @param    filePath        the path to the file
+   * @return trash directory path for the file
+   * @throws Exception
+   */
   public String getTrashDirPath(String filePath) throws Exception {
-      String trashDirPath = getTrashDirPath();
+    String trashDirPath = getTrashDirPath();
+
+    Path path = new Path(filePath);
+    trashDirPath = trashDirPath + "/" + path.getName();
 
-      Path path = new Path(filePath);
-      trashDirPath = trashDirPath+"/"+path.getName();
-      
-    return  trashDirPath;
+    return trashDirPath;
   }
-      
+
   /**
    * Empty trash
    * @return
@@ -339,16 +341,25 @@ public class HdfsApi {
    * Copy file
    * @param src source path
    * @param dest destination path
-   * @return success
-   * @throws IOException
+   * @throws java.io.IOException
    * @throws InterruptedException
    */
-  public boolean copy(final String src, final String dest) throws IOException,
-      InterruptedException {
+  public synchronized void copy(final String src, final String dest) throws IOException, InterruptedException, HdfsApiException {
+    boolean result = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
+      public Boolean run() throws Exception {
+        return FileUtil.copy(fs, new Path(src), fs, new Path(dest), false, conf);
+      }
+    });
+
+    if (!result) {
+      throw new HdfsApiException("HDFS010 Can't copy source file from \" + src + \" to \" + dest");
+    }
+  }
+
+  public synchronized boolean exists(final String newFilePath) throws IOException, InterruptedException {
     return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
       public Boolean run() throws Exception {
-        return FileUtil
-            .copy(fs, new Path(src), fs, new Path(dest), false, conf);
+        return fs.exists(new Path(newFilePath));
       }
     });
   }
@@ -377,7 +388,6 @@ public class HdfsApi {
    *          Hadoop file status.
    * @return The JSON representation of the file status.
    */
-
   public Map<String, Object> fileStatusToJSON(FileStatus status) {
     Map<String, Object> json = new LinkedHashMap<String, Object>();
     json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath())

+ 29 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApiException.java

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.hdfs;
+
+public class HdfsApiException extends Exception {
+  public HdfsApiException(String message) {
+    super(message);
+  }
+
+  public HdfsApiException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}

+ 150 - 0
contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java

@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.hdfs;
+
+
+import org.apache.ambari.view.ViewContext;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class HdfsUtil {
+  private final static Logger LOG =
+      LoggerFactory.getLogger(HdfsUtil.class);
+
+  /**
+   * Write a string to a file, overwriting any existing content
+   * @param hdfs HdfsApi instance
+   * @param filePath path to the file
+   * @param content new content of the file
+   */
+  public static void putStringToFile(HdfsApi hdfs, String filePath, String content) throws HdfsApiException {
+    FSDataOutputStream stream;
+    try {
+      synchronized (hdfs) {
+        stream = hdfs.create(filePath, true);
+        stream.writeBytes(content);
+        stream.close();
+      }
+    } catch (IOException e) {
+      throw new HdfsApiException("HDFS020 Could not write file " + filePath, e);
+    } catch (InterruptedException e) {
+      throw new HdfsApiException("HDFS021 Could not write file " + filePath, e);
+    }
+  }
+
+  /**
+   * Read a file into a string
+   * @param hdfs HdfsApi instance
+   * @param filePath path to the file
+   * @return file contents as a string
+   */
+  public static String readFile(HdfsApi hdfs, String filePath) throws HdfsApiException {
+    FSDataInputStream stream;
+    try {
+      stream = hdfs.open(filePath);
+      return IOUtils.toString(stream);
+    } catch (IOException e) {
+      throw new HdfsApiException("HDFS060 Could not read file " + filePath, e);
+    } catch (InterruptedException e) {
+      throw new HdfsApiException("HDFS061 Could not read file " + filePath, e);
+    }
+  }
+
+
+  /**
+   * Increment an index appended to the filename until the first unallocated name is found
+   * @param hdfs HdfsApi instance
+   * @param fullPathAndFilename path and filename prefix
+   * @param extension file extension
+   * @return first free path; e.g. for fullPathAndFilename="/tmp/file" and extension=".txt", something like "/tmp/file_42.txt"
+   */
+  public static String findUnallocatedFileName(HdfsApi hdfs, String fullPathAndFilename, String extension)
+      throws HdfsApiException {
+    int triesCount = 0;
+    String newFilePath;
+    boolean isUnallocatedFilenameFound;
+
+    try {
+      do {
+        newFilePath = String.format(fullPathAndFilename + "%s" + extension, (triesCount == 0) ? "" : "_" + triesCount);
+        LOG.debug("Trying to find free filename " + newFilePath);
+
+        isUnallocatedFilenameFound = !hdfs.exists(newFilePath);
+        if (isUnallocatedFilenameFound) {
+          LOG.debug("File created successfully!");
+        }
+
+        triesCount += 1;
+        if (triesCount > 1000) {
+          throw new HdfsApiException("HDFS100 Can't find unallocated file name " + fullPathAndFilename + "...");
+        }
+      } while (!isUnallocatedFilenameFound);
+    } catch (IOException e) {
+      throw new HdfsApiException("HDFS030 Error in creation " + fullPathAndFilename + "...", e);
+    } catch (InterruptedException e) {
+      throw new HdfsApiException("HDFS031 Error in creation " + fullPathAndFilename + "...", e);
+    }
+
+    return newFilePath;
+  }
+
+  /**
+   * Factory of HdfsApi for specific ViewContext
+   * @param context ViewContext that contains connection credentials
+   * @return HdfsApi object
+   */
+  public static synchronized HdfsApi connectToHDFSApi(ViewContext context) throws HdfsApiException {
+    HdfsApi api = null;
+    Thread.currentThread().setContextClassLoader(null);
+
+    ConfigurationBuilder configurationBuilder = new ConfigurationBuilder(context);
+    AuthConfigurationBuilder authConfigurationBuilder = new AuthConfigurationBuilder(context);
+
+    try {
+      api = new HdfsApi(configurationBuilder, getHdfsUsername(context), authConfigurationBuilder);
+      LOG.info("HdfsApi connected OK");
+    } catch (IOException e) {
+      String message = "HDFS040 Couldn't open connection to HDFS";
+      LOG.error(message);
+      throw new HdfsApiException(message, e);
+    } catch (InterruptedException e) {
+      String message = "HDFS041 Couldn't open connection to HDFS";
+      LOG.error(message);
+      throw new HdfsApiException(message, e);
+    }
+    return api;
+  }
+
+  /**
+   * Returns the username for HdfsApi from the "webhdfs.username" property if set;
+   * otherwise falls back to the current Ambari username
+   * @param context ViewContext
+   * @return username
+   */
+  public static String getHdfsUsername(ViewContext context) {
+    String userName = context.getProperties().get("webhdfs.username");
+    if (userName == null || userName.isEmpty() || "null".equals(userName)) {
+      userName = context.getUsername();
+    }
+    return userName;
+  }
+}
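
A minimal sketch of the intended call pattern for these helpers (the path and content are placeholders):

import org.apache.ambari.view.ViewContext;
import org.apache.ambari.view.utils.hdfs.HdfsApi;
import org.apache.ambari.view.utils.hdfs.HdfsApiException;
import org.apache.ambari.view.utils.hdfs.HdfsUtil;

public class HdfsUtilUsageExample {
  public static String roundTrip(ViewContext context) throws HdfsApiException {
    // Connects as the webhdfs.username instance property, or the current Ambari user if unset.
    HdfsApi hdfs = HdfsUtil.connectToHDFSApi(context);
    HdfsUtil.putStringToFile(hdfs, "/tmp/example.txt", "hello");
    return HdfsUtil.readFile(hdfs, "/tmp/example.txt");
  }
}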

+ 137 - 0
contrib/views/utils/src/test/java/org/apache/ambari/view/utils/ambari/RemoteClusterTest.java

@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+import org.apache.ambari.view.URLStreamProvider;
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.cluster.Cluster;
+import org.apache.commons.collections4.map.PassiveExpiringMap;
+import org.easymock.IAnswer;
+import org.json.simple.JSONObject;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.easymock.EasyMock.*;
+import static org.junit.Assert.*;
+
+public class RemoteClusterTest {
+  public static final String AMBARI_CLUSTER_REST_URL = "http://example.com:8080/api/v1/clusters/c1";
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void testGetRemoteClusterThatIsNotPresent() throws Exception {
+    ViewContext viewContext = createNiceMock(ViewContext.class);
+    Map<String, String> instanceProperties = new HashMap<String, String>();
+    expect(viewContext.getProperties()).andReturn(instanceProperties).anyTimes();
+    replay(viewContext);
+
+    AmbariApi ambariApi = new AmbariApi(viewContext);
+    Cluster cluster = ambariApi.getRemoteCluster();
+    assertNull(cluster);
+  }
+
+  @Test
+  public void testGetRemoteClusterNoCredentials() throws Exception {
+    ViewContext viewContext = createNiceMock(ViewContext.class);
+    Map<String, String> instanceProperties = new HashMap<String, String>();
+    instanceProperties.put(AmbariApi.AMBARI_SERVER_URL_INSTANCE_PROPERTY,
+        AMBARI_CLUSTER_REST_URL);
+    expect(viewContext.getProperties()).andReturn(instanceProperties).anyTimes();
+    replay(viewContext);
+
+    thrown.expect(AmbariApiException.class);
+    AmbariApi ambariApi = new AmbariApi(viewContext);
+    ambariApi.getRemoteCluster();
+  }
+
+  @Test
+  public void testGetRemoteClusterThatIsPresent() throws Exception {
+    ViewContext viewContext = createNiceMock(ViewContext.class);
+    Map<String, String> instanceProperties = new HashMap<String, String>();
+    instanceProperties.put(AmbariApi.AMBARI_SERVER_URL_INSTANCE_PROPERTY,
+        AMBARI_CLUSTER_REST_URL);
+    instanceProperties.put(AmbariApi.AMBARI_SERVER_USERNAME_INSTANCE_PROPERTY, "admin");
+    instanceProperties.put(AmbariApi.AMBARI_SERVER_PASSWORD_INSTANCE_PROPERTY, "admin");
+    expect(viewContext.getProperties()).andReturn(instanceProperties).anyTimes();
+    replay(viewContext);
+
+    AmbariApi ambariApi = new AmbariApi(viewContext);
+    Cluster cluster = ambariApi.getRemoteCluster();
+    assertNotNull(cluster);
+    assertEquals(cluster.getName(), "c1");
+  }
+
+  @Test
+  public void testGetConfigurationValue() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+
+    final String desiredConfigsString = "{\"Clusters\": {\"desired_configs\": {\"test-site\": {\"tag\": \"TAG\"}}}}";
+    final String configurationString = "{\"items\": [{\"properties\": {\"test.property.name\": \"test property value\"}}]}";
+    final int[] desiredConfigPolls = {0};
+    final int[] testConfigPolls = {0};
+
+    expect(urlStreamProvider.readFrom(eq(AMBARI_CLUSTER_REST_URL + "?fields=services/ServiceInfo,hosts,Clusters"),
+        eq("GET"), (String) isNull(), (Map<String, String>) anyObject())).andAnswer(new IAnswer<InputStream>() {
+      @Override
+      public InputStream answer() throws Throwable {
+        desiredConfigPolls[0] += 1;
+        return new ByteArrayInputStream(desiredConfigsString.getBytes());
+      }
+    }).anyTimes();
+
+    expect(urlStreamProvider.readFrom(eq(AMBARI_CLUSTER_REST_URL + "/configurations?(type=test-site&tag=TAG)"),
+        eq("GET"), (String)isNull(), (Map<String, String>) anyObject())).andAnswer(new IAnswer<InputStream>() {
+      @Override
+      public InputStream answer() throws Throwable {
+        testConfigPolls[0] += 1;
+        return new ByteArrayInputStream(configurationString.getBytes());
+      }
+    }).anyTimes();
+
+    replay(urlStreamProvider);
+
+    RemoteCluster cluster = new RemoteCluster(AMBARI_CLUSTER_REST_URL, urlStreamProvider);
+    PassiveExpiringMap<String, JSONObject> cache = new PassiveExpiringMap<String, JSONObject>(10000L);
+    cluster.configurationCache = cache;
+
+    String value = cluster.getConfigurationValue("test-site", "test.property.name");
+    assertEquals(value, "test property value");
+    assertEquals(desiredConfigPolls[0], 1);
+    assertEquals(testConfigPolls[0], 1);
+
+    value = cluster.getConfigurationValue("test-site", "test.property.name");
+    assertEquals(value, "test property value");
+    assertEquals(desiredConfigPolls[0], 1);  // cache hit
+    assertEquals(testConfigPolls[0], 1);
+
+    cache.clear();
+    value = cluster.getConfigurationValue("test-site", "test.property.name");
+    assertEquals(value, "test property value");
+    assertEquals(desiredConfigPolls[0], 2);
+    assertEquals(testConfigPolls[0], 2);
+  }
+}
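
The test above exercises the documented call pattern; a hedged sketch of how a view might read a cluster configuration value through RemoteCluster (URL and property names are placeholders):

import org.apache.ambari.view.URLStreamProvider;
import org.apache.ambari.view.utils.ambari.RemoteCluster;

public class RemoteClusterUsageExample {
  public static String readDefaultFs(URLStreamProvider streamProvider) throws Exception {
    // streamProvider is expected to carry credentials, e.g. wrapped in URLStreamProviderBasicAuth.
    RemoteCluster cluster = new RemoteCluster(
        "http://ambari.example.com:8080/api/v1/clusters/MyCluster", streamProvider);
    // Resolved configurations are cached (PassiveExpiringMap), so repeated reads within
    // the expiry window do not re-poll the Ambari REST API.
    return cluster.getConfigurationValue("core-site", "fs.defaultFS");
  }
}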

+ 159 - 0
contrib/views/utils/src/test/java/org/apache/ambari/view/utils/ambari/URLStreamProviderBasicAuthTest.java

@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.utils.ambari;
+
+import org.apache.ambari.view.URLStreamProvider;
+import org.easymock.EasyMock;
+import org.easymock.IArgumentMatcher;
+
+import org.junit.Test;
+
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.easymock.EasyMock.*;
+
+public class URLStreamProviderBasicAuthTest {
+
+  @Test
+  public void testReadFrom() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readFrom(anyString(), anyString(), anyString(), HeadersMatcher.mapContainsAuthHeader())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readFrom("http://example.com", "GET",
+        (String) null, null);
+    urlStreamProviderBasicAuth.readFrom("http://example.com", "GET",
+        (String)null, new HashMap<String, String>());
+  }
+
+  @Test
+  public void testReadFrom1() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readFrom(anyString(), anyString(), (InputStream)anyObject(),
+        HeadersMatcher.mapContainsAuthHeader())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readFrom("http://example.com", "GET",
+        (InputStream) null, null);
+    urlStreamProviderBasicAuth.readFrom("http://example.com", "GET",
+        (InputStream)null, new HashMap<String, String>());
+  }
+
+  @Test
+  public void testReadAs() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readAs(anyString(), anyString(), anyString(),
+        HeadersMatcher.mapContainsAuthHeader(), anyString())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readAs("http://example.com", "GET",
+        (String) null, null, "admin");
+    urlStreamProviderBasicAuth.readAs("http://example.com", "GET",
+        (String) null, new HashMap<String, String>(), "admin");
+  }
+
+  @Test
+  public void testReadAs1() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readAs(anyString(), anyString(), (InputStream) anyObject(),
+        HeadersMatcher.mapContainsAuthHeader(), anyString())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readAs("http://example.com", "GET",
+        (InputStream) null, null, "admin");
+    urlStreamProviderBasicAuth.readAs("http://example.com", "GET",
+        (InputStream) null, new HashMap<String, String>(), "admin");
+  }
+
+  @Test
+  public void testReadAsCurrent() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readAsCurrent(anyString(), anyString(), anyString(),
+        HeadersMatcher.mapContainsAuthHeader())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readAsCurrent("http://example.com", "GET",
+        (String) null, null);
+    urlStreamProviderBasicAuth.readAsCurrent("http://example.com", "GET",
+        (String) null, new HashMap<String, String>());
+  }
+
+  @Test
+  public void testReadAsCurrent1() throws Exception {
+    URLStreamProvider urlStreamProvider = createNiceMock(URLStreamProvider.class);
+    expect(urlStreamProvider.readAsCurrent(anyString(), anyString(), (InputStream) anyObject(),
+        HeadersMatcher.mapContainsAuthHeader())).andReturn(null);
+    URLStreamProviderBasicAuth urlStreamProviderBasicAuth =
+        new URLStreamProviderBasicAuth(urlStreamProvider, "user", "pass");
+
+    replay(urlStreamProvider);
+
+    urlStreamProviderBasicAuth.readAsCurrent("http://example.com", "GET",
+        (InputStream) null, null);
+    urlStreamProviderBasicAuth.readAsCurrent("http://example.com", "GET",
+        (InputStream)null, new HashMap<String, String>());
+  }
+
+
+  public static class HeadersMatcher implements IArgumentMatcher {
+
+    public static Map<String, String> mapContainsAuthHeader() {
+      EasyMock.reportMatcher(new HeadersMatcher());
+      return null;
+    }
+
+    public void appendTo(StringBuffer buffer) {
+      buffer.append("Authentication header matcher");
+    }
+
+    public boolean matches(Object headers) {
+      if (!(headers instanceof Map)) {
+        return false;
+      }
+
+      Map<String, String> headersMap = (Map<String, String>) headers;
+
+      if (!headersMap.containsKey("Authorization"))
+        return false;
+      String authHeader = headersMap.get("Authorization");
+
+      if (!authHeader.startsWith("Basic "))
+        return false;
+
+      return true;
+    }
+  }
+}
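
The matcher only verifies that an Authorization header of the Basic scheme is present. For reference, a standard way to build such a header value (not necessarily the exact code inside URLStreamProviderBasicAuth; java.util.Base64 requires Java 8+):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthHeaderExample {
  public static String basicAuthHeader(String username, String password) {
    // "Basic " followed by base64("user:password"), per the HTTP Basic authentication scheme.
    String credentials = username + ":" + password;
    return "Basic " + Base64.getEncoder()
        .encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
  }
}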

+ 13 - 11
contrib/views/files/src/test/java/org/apache/ambari/view/filebrowser/HdfsServiceTest.java → contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java

@@ -16,34 +16,36 @@
  * limitations under the License.
  */
 
-package org.apache.ambari.view.filebrowser;
+package org.apache.ambari.view.utils.hdfs;
 
 import org.junit.Test;
 
+import java.net.URI;
+
 import static org.junit.Assert.*;
 
-public class HdfsServiceTest {
+public class ConfigurationBuilderTest {
   @Test
-  public void testNormalizeFsUrlWithoutProtocol() throws Exception {
-    String normalized = HdfsService.normalizeFsUrl("namenode.example.com:50070");
+  public void testAddProtocolMissing() throws Exception {
+    String normalized = ConfigurationBuilder.addProtocolIfMissing("namenode.example.com:50070");
     assertEquals(normalized, "webhdfs://namenode.example.com:50070");
   }
 
   @Test
-  public void testNormalizeFsUrlWithoutPort() throws Exception {
-    String normalized = HdfsService.normalizeFsUrl("webhdfs://namenode.example.com");
-    assertEquals(normalized, "webhdfs://namenode.example.com:50070");
+  public void testAddProtocolPresent() throws Exception {
+    String normalized = ConfigurationBuilder.addProtocolIfMissing("webhdfs://namenode.example.com");
+    assertEquals(normalized, "webhdfs://namenode.example.com");
   }
 
   @Test
-  public void testNormalizeFsUrlOnlyHostname() throws Exception {
-    String normalized = HdfsService.normalizeFsUrl("namenode.example.com");
+  public void testAddPortMissing() throws Exception {
+    String normalized = ConfigurationBuilder.addPortIfMissing("webhdfs://namenode.example.com");
     assertEquals(normalized, "webhdfs://namenode.example.com:50070");
   }
 
   @Test
-  public void testNormalizeFsUrlFixNoCorrectUrl() throws Exception {
-    String normalized = HdfsService.normalizeFsUrl("webhdfs://namenode.example.com:50070");
+  public void testAddPortPresent() throws Exception {
+    String normalized = ConfigurationBuilder.addPortIfMissing("webhdfs://namenode.example.com:50070");
     assertEquals(normalized, "webhdfs://namenode.example.com:50070");
   }
 }