Explorar o código

AMBARI-9658: Enhance parameter configs for Pig, Files and CapSch views

sposetti hai 10 anos
pai
achega
3e0a9c9ce0

+ 4 - 4
contrib/views/capacity-scheduler/readme.md

@@ -23,7 +23,7 @@ This View provides a UI to manage queues for the YARN Capacity Scheduler.
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - YARN
 
 Build
@@ -59,9 +59,9 @@ From the Ambari Administration interface, create a view instance.
 | Details: Instance Name | CS_1 |
 | Details: Display Name | Queue Manager |
 | Details: Description | Browse and manage YARN Capacity Scheduler queues |
-| Properties: ambari.server.url | http://c6401.ambari.apache.org:8080/api/v1/clusters/MyCluster |
-| Properties: ambari.server.username | admin |
-| Properties: ambari.server.password | password |
+| Properties: Ambari Cluster URL | http://c6401.ambari.apache.org:8080/api/v1/clusters/MyCluster |
+| Properties: Operator Username | admin |
+| Properties: Operator Password | password |
 
 Login to Ambari and browse to the view instance.
 

+ 4 - 3
contrib/views/capacity-scheduler/src/main/resources/view.xml

@@ -18,24 +18,25 @@
     <name>CAPACITY-SCHEDULER</name>
     <label>Capacity Scheduler</label>
     <version>0.3.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <parameter>
         <name>ambari.server.url</name>
-        <description>The Ambari REST API cluster resource.</description>
+        <description>Enter the Ambari REST API cluster resource.</description>
         <label>Ambari Cluster URL</label>
         <placeholder>http://ambari.server:8080/api/v1/clusters/MyCluster</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>ambari.server.username</name>
-        <description>The Cluster Operator username (for example: admin).</description>
+        <description>Enter the Cluster Operator username (for example: admin).</description>
         <label>Operator Username</label>
         <placeholder>admin</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>ambari.server.password</name>
-        <description>The Cluster Operator password (for example: password).</description>
+        <description>Enter the Cluster Operator password (for example: password).</description>
         <label>Operator Password</label>
         <required>true</required>
         <masked>true</masked>

+ 2 - 16
contrib/views/files/readme.md

@@ -23,7 +23,7 @@ This View provides a UI to browse HDFS, create directories and upload + download
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - HDFS with WebHDFS configured
 
 Build
@@ -42,20 +42,6 @@ Place the view archive on the Ambari Server and restart to deploy.
     cp files-0.1.0-SNAPSHOT.jar /var/lib/ambari-server/resources/views/
     ambari-server restart
 
-View Definition
------
-
-    <parameter>
-        <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
-        <required>true</required>
-    </parameter>
-    <parameter>
-        <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
-        <required>false</required>
-    </parameter>
-
 Cluster Configuration
 -----
 
@@ -93,7 +79,7 @@ From the Ambari Administration interface, create a Files view instance.
 | Details: Instance Name | FILES_1 |
 | Details: Display Name | Files |
 | Details: Description | Browse HDFS files and directories |
-| Properties: webhdfs.url | webhdfs://c6401.ambari.apache.org:50070 |
+| Properties: WebHDFS FileSystem URI | webhdfs://c6401.ambari.apache.org:50070 |
 
 Login to Ambari as "admin" and browse to the view instance.
 

+ 6 - 2
contrib/views/files/src/main/resources/view.xml

@@ -18,15 +18,19 @@
     <name>FILES</name>
     <label>Files</label>
     <version>0.1.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <parameter>
         <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
+        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
+        <label>WebHDFS FileSystem URI</label>
+        <placeholder>webhdfs://namenode:50070</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
+        <description>User and doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
+        <label>WebHDFS Username</label>
         <required>false</required>
     </parameter>
 

+ 5 - 59
contrib/views/pig/readme.md

@@ -25,7 +25,7 @@ UDFs with your pig scripts.
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - HDFS with WebHDFS configured
 - WebHCat with Pig configured
 
@@ -45,60 +45,6 @@ Place the view archive on the Ambari Server and restart to deploy.
     cp pig-0.1.0-SNAPSHOT.jar /var/lib/ambari-server/resources/views/
     ambari-server restart
 
-View Definition
------
-
-    <!-- HDFS Configs -->
-    <parameter>
-        <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URL (example: webhdfs://namenode.host:50070)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
-        <required>false</required>
-    </parameter>
-
-    <!-- WebHCat Configs -->
-    <parameter>
-        <name>webhcat.url</name>
-        <description>WebHCat URL (example: http://webhcat.host:50111/templeton/v1)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>webhcat.username</name>
-        <description>User and doAs for proxy user for WebHCat</description>
-        <required>false</required>
-    </parameter>
-
-    <!-- General Configs -->
-    <parameter>
-        <name>dataworker.username</name>
-        <description>The username (defaults to ViewContext username)</description>
-        <required>false</required>
-    </parameter>
-
-    <parameter>
-        <name>scripts.dir</name>
-        <description>HDFS directory path to store Pig scripts (example: /tmp/${username}/scripts)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>jobs.dir</name>
-        <description>HDFS directory path to store Pig job status (example: /tmp/${username}/jobs)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>store.dir</name>
-        <description>HDFS directory to store meta information about Pig scripts and jobs (example: /tmp/${username}/store)</description>
-        <required>false</required>
-    </parameter>
-
 Cluster Configuration
 -----
 Configure HDFS for a proxy user. In core-site.xml, add the following properties:
@@ -158,10 +104,10 @@ From the Ambari Administration interface, create a Pig view instance.
 | Details: Instance Name | PIG_1 |
 | Details: Display Name | Pig |
 | Details: Description | Save and execute Pig scripts |
-| Properties: webhdfs.url | webhdfs://c6401.ambari.apache.org:50070 |
-| Properties: webhcat.url | http://c6401.ambari.apache.org:50111/templeton/v1 |
-| Properties: scripts.dir | /tmp/${username}/scripts |
-| Properties: jobs.dir | /tmp/${username}/jobs |
+| Properties: WebHDFS FileSystem URI | webhdfs://c6401.ambari.apache.org:50070 |
+| Properties: WebHCat URL | http://c6401.ambari.apache.org:50111/templeton/v1 |
+| Properties: Scripts HDFS Directory | /tmp/${username}/scripts |
+| Properties: Jobs HDFS Directory | /tmp/${username}/jobs |
 
 Login to Ambari as "admin" and browse to the view instance.
 

+ 24 - 8
contrib/views/pig/src/main/resources/view.xml

@@ -18,55 +18,71 @@
     <name>PIG</name>
     <label>Pig</label>
     <version>0.1.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <!-- HDFS Configs -->
     <parameter>
         <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
+        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
+        <label>WebHDFS FileSystem URI</label>
+        <placeholder>webhdfs://namenode:50070</placeholder>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
+        <description>User and doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
+        <label>WebHDFS Username</label>
         <required>false</required>
     </parameter>
 
     <!-- WebHCat Configs -->
     <parameter>
         <name>webhcat.url</name>
-        <description>WebHCat URL (example: http://webhcat.host:50111/templeton/v1)</description>
+        <description>Enter the WebHCat URL for accessing WebHCat. URL must be accessible from Ambari Server.</description>
+        <label>WebHCat URL</label>
+        <placeholder>http://webhcat.host:50111/templeton/v1</placeholder>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>webhcat.username</name>
-        <description>User and doAs for proxy user for WebHCat</description>
+        <description>User and doAs for proxy user for WebHCat. By default, uses the currently logged-in Ambari user.</description>
+        <label>WebHCat Username</label>
         <required>false</required>
     </parameter>
 
     <!-- General Configs -->
     <parameter>
         <name>dataworker.username</name>
-        <description>The username (defaults to ViewContext username)</description>
+        <description>The dataworker username. By default, uses the currently logged-in Ambari user.</description>
+        <label>Dataworker Username</label>
         <required>false</required>
     </parameter>
 
     <parameter>
         <name>scripts.dir</name>
-        <description>HDFS directory to store Pig scripts (example: /user/${username}/scripts)</description>
+        <description>HDFS directory to store Pig scripts.</description>
+        <label>Scripts HDFS Directory</label>
+        <placeholder>/user/${username}/pig/scripts</placeholder>
+        <default-value>/user/${username}/pig/scripts</default-value>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>jobs.dir</name>
-        <description>HDFS directory to store Pig job status (example: /user/${username}/jobs)</description>
+        <description>HDFS directory to store Pig job status.</description>
+        <label>Jobs HDFS Directory</label>
+        <placeholder>/user/${username}/pig/jobs</placeholder>
+        <default-value>/user/${username}/pig/jobs</default-value>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>store.dir</name>
-        <description>HDFS directory to store meta information about Pig scripts and jobs (example: /user/${username}/store)</description>
+        <description>HDFS directory to store meta information about Pig scripts and jobs.</description>
+        <label>Meta HDFS Directory</label>
+        <placeholder>/user/${username}/pig/store</placeholder>
         <required>false</required>
     </parameter>