
Backporting patches for 1.2.2 release. (yusaku)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-1.2@1459041 13f79535-47bb-0310-9956-ffa450edef68
Yusaku Sako
commit e4dfbe8990
100 changed files with 4108 additions and 1362 deletions
+ 552 - 7  CHANGES.txt
+ 0 - 5  KEYS
+ 8 - 3  NOTICE.txt
+ 1 - 1  ambari-agent/conf/unix/ambari-agent.ini
+ 7 - 2  ambari-agent/pom.xml
+ 18 - 14  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
+ 16 - 4  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
+ 1 - 1  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
+ 1 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
+ 10 - 2  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
+ 1 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
+ 65 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
+ 51 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
+ 1 - 1  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
+ 2 - 13  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+ 70 - 32  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
+ 3 - 3  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+ 2 - 2  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
+ 2 - 2  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+ 0 - 22  ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hiveSmoke.sh
+ 0 - 35  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/client.pp
+ 0 - 54  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hcat/service_check.pp
+ 0 - 54  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hive/service_check.pp
+ 0 - 72  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
+ 0 - 46  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/mysql-connector.pp
+ 0 - 59  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/params.pp
+ 0 - 61  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/server.pp
+ 0 - 65  ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/service.pp
+ 0 - 25  ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hcat-env.sh.erb
+ 0 - 53  ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hive-env.sh.erb
+ 21 - 2  ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
+ 1 - 1  ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
+ 45 - 43  ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
+ 8 - 7  ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
+ 2 - 2  ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
+ 11 - 3  ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
+ 16 - 5  ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+ 1 - 1  ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+ 0 - 13  ambari-agent/src/main/python/ambari_agent/imports.txt
+ 0 - 55  ambari-agent/src/main/python/ambari_agent/rolesToClass.dict
+ 0 - 18  ambari-agent/src/main/python/ambari_agent/serviceStates.dict
+ 0 - 34  ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
+ 8 - 4  ambari-agent/src/test/python/TestHostname.py
+ 9 - 2  ambari-project/pom.xml
+ 94 - 93  ambari-server/docs/api/v1/clusters-cluster.md
+ 11 - 8  ambari-server/docs/api/v1/clusters.md
+ 54 - 43  ambari-server/docs/api/v1/components-component.md
+ 35 - 34  ambari-server/docs/api/v1/components.md
+ 54 - 0  ambari-server/docs/api/v1/host-component.md
+ 28 - 1  ambari-server/docs/api/v1/host-components.md
+ 70 - 0  ambari-server/docs/api/v1/hosts-host.md
+ 19 - 0  ambari-server/docs/api/v1/hosts.md
+ 46 - 30  ambari-server/docs/api/v1/index.md
+ 40 - 38  ambari-server/docs/api/v1/services-service.md
+ 25 - 25  ambari-server/docs/api/v1/services.md
+ 0 - 1  ambari-server/pass.txt
+ 25 - 3  ambari-server/pom.xml
+ 5 - 1  ambari-server/sbin/ambari-server
+ 2 - 0  ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
+ 10 - 8  ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
+ 34 - 16  ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
+ 2 - 6  ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+ 9 - 1  ambari-server/src/main/java/org/apache/ambari/server/api/handlers/BaseManagementHandler.java
+ 9 - 2  ambari-server/src/main/java/org/apache/ambari/server/api/handlers/ReadHandler.java
+ 43 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/InvalidQueryException.java
+ 49 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/PredicateCompiler.java
+ 501 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java
+ 514 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java
+ 110 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/Token.java
+ 106 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java
+ 89 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
+ 61 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpression.java
+ 47 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpressionFactory.java
+ 65 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java
+ 52 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/RelationalExpression.java
+ 69 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AbstractOperator.java
+ 64 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AndOperator.java
+ 22 - 13  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java
+ 50 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java
+ 50 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java
+ 54 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
+ 51 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperator.java
+ 50 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java
+ 50 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java
+ 35 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
+ 48 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperatorFactory.java
+ 51 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java
+ 63 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotOperator.java
+ 63 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
+ 64 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/OrOperator.java
+ 37 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
+ 57 - 0  ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperatorFactory.java
+ 3 - 4  ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
+ 39 - 129  ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
+ 2 - 2  ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceImpl.java
+ 11 - 72  ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
+ 3 - 1  ambari-server/src/main/java/org/apache/ambari/server/api/services/Request.java
+ 21 - 0  ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+ 7 - 2  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+ 2 - 1  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

+ 552 - 7
CHANGES.txt

@@ -1,6 +1,4 @@
-Ambari Change log
-
-AMBARI-1.2 branch - date: 11/01/2013
+Ambari Change Log
 
 Notes:
  - Committers should be listed using their login and non-committers
@@ -8,20 +6,566 @@ should be listed by their full name.
  - Please keep the file to a max of 80 characters wide.
  - Put latest commits first in each section.
 
-  Merging AMBARI-666 to trunk.
+Trunk (unreleased changes):
+
+ INCOMPATIBLE CHANGES 
+
+ NEW FEATURES
+
+ AMBARI-1349. Expose host-specific Nagios alerts in Ambari Web. (yusaku)
+
+ AMBARI-1294. Add isEmpty() query operator support. (jspeidel)
+
+ AMBARI-1280. Support explicit predicate grouping in API queries. (jspeidel)
+
+ AMBARI-1180. Display host check status results given by the agent as part
+ of host registration. (yusaku)
+
+ AMBARI-1252. Fetch Nagios alerts through Ambari Server and not directly
+ from Nagios Server. (srimanth via yusaku)
+
+ AMBARI-1237. Expose Nagios alerts via Rest API. (Nate Cole via jspeidel)
+
+ AMBARI-1163. During agent registration and heartbeat, send information about
+ various hadoop artifacts back to Ambari. (Nate Cole via mahadev)
+
+ AMBARI-1194. API support for cascade delete of a specified cluster
+ (Tom Beerbower via mahadev)
+
+ AMBARI-1255. Make the agent hostname determination scriptable. 
+ (mahadev)
+
+ AMBARI-1267. Store example Hive Queries somewhere in Ambari that's easily
+ accessible for demo/test purposes. (mahadev)
+
+ IMPROVEMENTS
+
+ AMBARI-1437. Update stack version. (yusaku)
+
+ AMBARI-1429. Update API docs. (jspeidel)
+
+ AMBARI-1430. Increase UI timeout for long running API operations. (yusaku)
+
+ AMBARI-1427. Add ability to increase the time range for the zoomed-in graphs
+ beyond last one hour. (yusaku) 
+
+ AMBARI-1375. Remove text from templates (main). (jaimin)
+
+ AMBARI-1374. Add filter by alerts on the Hosts page. (jaimin)
+
+ AMBARI-1373. Since there is the ability to log in to Ambari Web as
+ different users, the current user should be indicated. (jaimin)
+
+ AMBARI-1366. Nagios alert tweaks. (jaimin)
+
+ AMBARI-1365. Make Hosts table update dynamically. (jaimin)
+
+ AMBARI-1361. Install progress dialog WARN icon + color. (jaimin)
+
+ AMBARI-1347. Expose host-level alerts via nagios_alerts.php with associated
+ service component names. (yusaku)
+   
+ AMBARI-1348. Externalize strings to messages.js. (yusaku)
+
+ AMBARI-1342. Hive client is not installed on Nagios server host.
+ (jaimin)
+
+ AMBARI-1341. Add Hosts: update the API call for new operator precedence.
+ (yusaku) 
+
+ AMBARI-1340. Enhance Install/Start/Test progress display. (yusaku) 
+
+ AMBARI-1339. Validate usernames in Misc section of Customize Services step
+ in Install Wizard. (yusaku)
+
+ AMBARI-1335. Show validation error when the user specifies target hosts that
+ are already part of the cluster. (yusaku)
+
+ AMBARI-1337. Refactor Job Browser filter. (yusaku)
+
+ AMBARI-1336. Externalize text to messages.js. (yusaku)
+
+ AMBARI-1334. Show hosts that have failed install tasks as "red" to allow the
+ user to easily identify source of failure. (yusaku)
+
+ AMBARI-1333. Add username validation for Ambari local users. (yusaku) 
+
+ AMBARI-1329. Adjust job browser column sizing. (yusaku)
+ 
+ AMBARI-1327. Add Hosts: remove existing hosts display. (Alexandr Antonenko via jspeidel)
+
+ AMBARI-1326. Remake clearFilters function in app_view (part 3). (srimanth)
+ 
+ AMBARI-1305. Make sure that Ambari Web renders all elements correctly when
+ the browser width is 1024px or narrower (refactor). (Arun Kandregula via 
+ yusaku) 
+
+ AMBARI-1312. Remake clearFilters function in app_view (part2). (Arun Kandregula
+ via yusaku) 
+ 
+ AMBARI-1309. Remove all text from Apps views, controllers, templates to 
+ messages.js. (Arun Kandregula via yusaku)
+
+ AMBARI-1308. Properly display Apps page aggregate summary and data table when
+ there is no data to show. (Arun Kandregula via yusaku)
+
+ AMBARI-1306. Change color of rack_local_map to #66B366. (yusaku)
+
+ AMBARI-1311. Host health indicator should have a tooltip showing few details - 
+ refactoring. (Arun Kandregula via yusaku)
+
+ AMBARI-1303. Remake clearFilters function in app_view. (Arun Kandregula via
+ yusaku)
+
+ AMBARI-1302. Minor label cleanup on Jobs Charts popup. (Arun Kandregula via
+ yusaku)
+
+ AMBARI-1296. Task log popup footer should be statically placed; only the
+ content should scroll vertically. (Jaimin Jetly via yusaku)
+
+ AMBARI-1295. Move cluster name display from the main nav to the top nav.
+ (Jaimin Jetly via yusaku)
+
+ AMBARI-1268. Improve DAG UI. (billie via yusaku)
+
+ AMBARI-1289. App page: remove old code and fix test mode. (srimanth via
+ yusaku)
+
+ AMBARI-1279. Make sure that Ambari Web renders all elements correctly when
+ the browser width is 1024px or narrower. (srimanth via yusaku)
+
+ AMBARI-1274. Shrink top nav height. (srimanth)
+ 
+ AMBARI-1272. Controller javascripts need comments. (srimanth)
+ 
+ AMBARI-1271. On Confirm Hosts page, add a link to show the Host Checks popup 
+ in the success message. (yusaku via srimanth)
+
+ AMBARI-1193. If Install fails, allow user to go back to any previous step so 
+ that the user can retry install with different configuration parameters.
+ (yusaku via srimanth)
+
+ AMBARI-1265. Job Browser - Filter by Input, output and duration. (yusaku)
+
+ AMBARI-1263. Refactoring of User Management code. (yusaku)
+
+ AMBARI-1254. Modify App Browser to use server-side paging/sorting/filtering.
+ (yusaku)
+
+ AMBARI-1258. Minor refactoring of User Management code. (yusaku)
+
+ AMBARI-1253. Use ember-precompiler-brunch npm plugin. (yusaku)
+
+ AMBARI-1236. Display a progress bar during deploy prep. (yusaku)
+
+ AMBARI-1249. Update mock data to make App.testMode work. (yusaku)
+
+ AMBARI-1239. Host health status should show orange when there is at least one
+ slave component on the host with state!=STARTED. (yusaku)
+
+ AMBARI-1248. Refactoring of update, services and hosts mapper. (yusaku)
+
+ AMBARI-1247. Disable links for previous steps in left nav on Summary step.
+ (yusaku)
+
+ AMBARI-1246. Add user minor improvements. (yusaku)
+
+ AMBARI-1245. Do not let the user go back to the previous step while host
+ bootstrap is in progress. (yusaku)
+
+ AMBARI-1244. Install Options - line up the Target Hosts section with the rest
+ of the page. (yusaku)
+
+ AMBARI-1235. Host health indicator should have a tooltip showing details.
+ (yusaku)
+ 
+ AMBARI-1234. On Heatmap host hover, include a list of components running.
+ (yusaku)
+
+ AMBARI-1229. Dashboard - make disk usage pie chart in HDFS summary easier
+ to understand. (yusaku)
+
+ AMBARI-1228. During Install, show "warn" on hosts that have tasks cancelled.
+ (yusaku)
+
+ AMBARI-1225. Add Hosts wizard popup is too small. (yusaku)
+
+ AMBARI-1224. Drop the "all" option from Hosts > Component Filter and
+ Jobs > Users Filter. (yusaku)
+
+ AMBARI-1223. Confirm Hosts page: It looks like hosts disappear if you are
+ on "Fail" filter and click on "Retry Failed" button. (yusaku)
+
+ AMBARI-1222. DAG, Jobs Timeline, and Tasks graphs UI cleanup. (yusaku)
+
+ AMBARI-1221. There is no default sort order on Hosts table and the order
+ changes on every page refresh - should sort by hostname. (yusaku)
+
+ AMBARI-1220. Oozie service summary update. (yusaku)
+
+ AMBARI-1218. Refactor Job Browser User filter. (yusaku)
+
+ AMBARI-1217. Tighten up spacing for the rows in the Hosts table. (yusaku)
+
+ AMBARI-1216. Add filters module. (yusaku)
+
+ AMBARI-1215. Refactor hostComponent isSlaves and isMaster and add update
+ methods for server mapper. (yusaku)
+
+ AMBARI-1214. If any start fails, "warn" the host and the overall install.
+ (yusaku)
+
+ AMBARI-1204. Install Wizard: Re-enable configuration of user/group names for
+ master component daemons. (yusaku)
+
+ AMBARI-1197. Refactor code for graphs. (yusaku)
+
+ AMBARI-1196. Automatically update host-level popup info/logs. (yusaku)
+
+ AMBARI-1189. Add App.Job class. (yusaku)
+
+ AMBARI-1188. Refactor isClient computed property for HostComponent class.
+ (yusaku)
+
+ AMBARI-1186. Add Run class to represent a job run. (yusaku)
+
+ AMBARI-1185. Refactor the method to check if the user is an admin.
+ (yusaku)
+
+ AMBARI-1183. Directories in the service config textarea should not wrap.
+ (yusaku)
+
+ AMBARI-1182. Clean up table header UI for sorting and filter clear "x" for
+ Jobs table. (yusaku)
+
+ AMBARI-1181. Clean up table header UI for sorting and filter clear "x" for
+ Hosts table. (yusaku)
+
+ AMBARI-1198. Ambari API Performance: Parsing of Ganglia json data is slow.
+ (jspeidel via mahadev)
+
+ AMBARI-1213. Cleanup python test cases and introduce third party library for
+ mock testing python code. (mahadev)
+
+ AMBARI-1206. Expose missing metrics on host components. (tbeerbower via
+ mahadev)
+
+ AMBARI-1205. Cannot persist service configuration when service is started
+ (Siddharth Wagle via mahadev)
+
+ AMBARI-1262. Apache Ambari point to dev url, need fix in pom.xml. 
+ (mahadev)
+
+ AMBARI-1207. Remove /hdp as the httpd conf for any of the nagios urls -
+ should replace it with ambarinagios or something else.
+ (mahadev)
+
+ AMBARI-1277. Failing build due to url moved on Suse. (mahadev)
+
+ AMBARI-1288. Change "authorization" to "authentication" in props setup for
+ LDAP. (mahadev)
+
+ AMBARI-1269. Refactor ResourceProvider SPI. (tbeerbower)
+ 
+ AMBARI-1270. Add predicate objects for checking empty resource category.
+ (tbeerbower)
+
+ AMBARI-1286. Set version number property in gsInstaller cluster resource
+ provider. (tbeerbower)
+
+ AMBARI-1287. Monitor for component/service state for gsInstaller resource provider. (tbeerbower)
+
+ AMBARI-1260. Remove hard coded JMX port mappings. (Siddharth Wagle via
+ mahadev)
+
+ AMBARI-1411. Missing unit test coverage for resource providers. (tbeerbower)
+
+ AMBARI-1433. Allow capacity scheduler to be configurable via the API's.
+ (mahadev)
+
+ AMBARI-1435. L2 Cache does not work due to Eclipse Link exception.
+ (Sid Wagle via mahadev)
+
+ AMBARI-1436. Threads blocking on ClustersImpl.getHost for several minutes.
+ (Sid Wagle via mahadev)
+
+ AMBARI-1438. Add new stack definition for new stacks. (mahadev)
+
+ AMBARI-1448. Enabling stack upgrade via Ambari Server. (mahadev)
+
+ AMBARI-1439. rrd file location should be read from global config. (Siddharth
+ Wagle via mahadev).
+
+ AMBARI-1466. Optimize ganglia rrd script to be able to respond within
+ reasonable time to queries made by the UI. (mahadev)
+
+ AMBARI-1474. Upgrade stack definition for HBase for 1.2.2 since the version
+ is upgraded. (mahadev)
+
+ AMBARI-1475. Update the version of ambari artifacts to 1.2.2 snapshot.
+ (mahadev)
+
+ AMBARI-1489. Add hadoop-lzo to be one of the rpms to check for before
+ installation. (mahadev)
+
+ BUG FIXES
+
+ AMBARI-1463. State of HBase region server not updated when instance is shut down on a cluster not installed via Ambari. (tbeerbower)
+
+ AMBARI-1446. URL used by API to invoke Ganglia rrd script may exceed max length 
+ for query string for large clusters. (jspeidel)
+
+ AMBARI-1431. Hosts table no longer allows sorting. (yusaku)
+
+ AMBARI-1376. Wrong calculation of duration filter on apps page. (jaimin via
+ yusaku)
+
+ AMBARI-1165. Change the dashboard graph for HBase since its using cumulative
+ metrics. (yusaku)
+
+ AMBARI-1372. Three sorting states on Jobs table. (jaimin)
+ 
+ AMBARI-1350. UI screen shifts left-right depending on scrollbar. (jaimin)
+
+ AMBARI-1367. Job# for Mapreduce jobs is seen as x. (jaimin)
+
+ AMBARI-1363. Graphs jump around upon loading. (jaimin)
+
+ AMBARI-1362. Alerts for the hosts with ZooKeeper Server grow on every poll. (jaimin)
+
+ AMBARI-1360. Mouse cursor hover behavior is strange on Job Browser. (jaimin) 
+
+ AMBARI-1359. App Browser row colours should alternate from dark grey to light
+ grey and back. (jaimin)
+
+ AMBARI-1356. Error in filtering Configuration properties maintained at UI for 
+ WebHcat service. (jaimin)
+
+ AMBARI-1352. Host-level alert badges should only show the total number
+ of CRIT and WARN alerts for the host excluding OK. (jaimin)
+
+ AMBARI-1355. Inconsistent casing and component name for alert title. (jaimin)
+
+ AMBARI-1354. "No alerts" badge on the Host Detail page should be green, not red. (jaimin)
+
+ AMBARI-1353. "Missing translation" shown in Job Browser. (jaimin)
+
+ AMBARI-1351. Provide consistent ordering of hosts in heatmap. (jaimin)
+
+ AMBARI-1344. mapred.tasktracker.reduce.tasks.maximum in mapred-site.xml is not
+ taking effect. (yusaku)
+
+ AMBARI-1345. Alerts are not showing up at all in Service pages. (yusaku)
+
+ AMBARI-1346. The number of task trackers does not reflect the actual number
+ in MapReduce service summary after new TaskTrackers have been added until
+ page refresh. (yusaku)
+
+ AMBARI-1331. Step 8 hangs on deploy task 2 of 59; server has exception. (tbeerbower)
+
+ AMBARI-1164. Disk info ganglia metrics is broken for some OS. (Dmytro Shkvyra via jspeidel)
+
+ AMBARI-1325. Left border is missing from the main nav. (srimanth)
+ 
+ AMBARI-1324. Job Browser default sort order should be Run Date DESC. (srimanth)
+ 
+ AMBARI-1323. Job Browser's column sizing needs to be improved on Firefox. (srimanth)
+
+ AMBARI-1321. Switching out of Jobs page does not launch popup anymore.
+ (srimanth via yusaku) 
+
+ AMBARI-1313. Alert time jumps between 'less than a minute ago' and 'about a
+ minute ago'. (srimanth via yusaku) 
+
+ AMBARI-1304. When switching jobs in timeline + tasks charts, blank charts show.
+ (Arun Kandregula via yusaku) 
+
+ AMBARI-1317. Deploy progress returns to deploy screen (momentarily).
+ (Arun Kandregula via yusaku) 
+
+ AMBARI-1316. Vertical scrollbar shows regardless of how tall the browser height
+ is (content height is always slightly taller than viewport). (Arun Kandregula
+ via yusaku)
+
+ AMBARI-1315. Inconsistent error/warning status in Deploy step; install
+ stalls. (Arun Kandregula via yusaku)
+
+ AMBARI-1281. Heatmap does not show up if the cluster was installed by going
+ back to a previous step from the Deploy step after an install failure.
+ (yusaku)
+
+ AMBARI-1300. Service status / host component status can get stuck in the
+ green blinking state if stop fails - no further operation can be performed.
+ (srimanth via yusaku) 
+
+ AMBARI-1297. Edit User: if "old password" is not specified and "new
+ password" is specified, password update silently fails. (Jaimin Jetly via
+ yusaku)
+
+ AMBARI-1282. Admin user can lose its own admin privilege. (Jaimin Jetly
+ via yusaku)
+
+ AMBARI-1292. Add hosts should skip host checks on existing list of cluster
+ nodes. (srimanth via yusaku)
+
+ AMBARI-1290. Left border is missing from the summary section on Jobs page.
+ (srimanth via yusaku)
+
+ AMBARI-1278. Cannot proceed from Step 3 to Step 4 in App.testMode (Next 
+ button is disabled). (srimanth)
+
+ AMBARI-1276. Job Graphs need to show x-axis ticks for elapsed time since 
+ submission. (srimanth)
+
+ AMBARI-1275. Incorrect display of the "Background operations" window after
+ changing state of component. (srimanth)
+
+ AMBARI-1273. Edit User: No error message is shown when the user does not 
+ enter the correct "old password". (srimanth)
+
+ AMBARI-1172. Alert status change does not change time for the alerts.
+ (srimanth via yusaku) 
+
+ AMBARI-1264. Service graphs refresh with spinners. (yusaku)
+
+ AMBARI-1257. Separator missing in between Oozie and ZooKeeper. (yusaku)
+
+ AMBARI-1251. Fix routing issues on Add Host Wizard. (yusaku)
+
+ AMBARI-1230. There is a big gap in the lower part of the Jobs table header.
+ (yusaku)
+
+ AMBARI-1212. After successful install with Ambari, the user is taken to the
+ welcome page of the Install Wizard upon browser relaunch if the HTTP session
+ is expired. (yusaku)
+
+ AMBARI-1227. Host-level task popup is not showing the display name for
+ components. (yusaku)
+
+ AMBARI-1226. On Dashboard, links to host components are missing. (yusaku)
+
+ AMBARI-1219. After adding hosts, the number of live TaskTrackers is not
+ updated. (yusaku)
+
+ AMBARI-1176. In some cases, once Add Hosts wizard has run once, it requires
+ a log out before the Add Hosts wizard can be run again. (yusaku)
+
+ AMBARI-1203. mapred-site.xml default system directory is not set
+ to /mapred/system. (yusaku)
+
+ AMBARI-1200. On some clusters, Nagios alerts show up about 30 seconds after
+ page load, while on others the alerts show up immediately. (srimanth via
+ yusaku)
+
+ AMBARI-1190. Detailed log view dialogs are not center-aligned. (yusaku)
+
+ AMBARI-1187. Dashboard > MapReduce mini chart sometimes shows partial graph and hides recent data. (yusaku)
+
+ AMBARI-1184. After adding hosts, the host count shown in the Dashboard is
+ incorrect. (yusaku)
+
+ AMBARI-1178. Fix use of IP address for JMX metrics request. (tbeerbower
+ via mahadev)
+
+ AMBARI-1191. Datatable API needs work. (Billie Rinaldi via mahadev)
+
+ AMBARI-1211. Ability to configure the same username for all the services in
+ Ambari. (mahadev)
+
+ AMBARI-1231. Replace sudo with su in the ambari setup script since ambari
+ server setup is already run as root. (mahadev)
+
+ AMBARI-1201. Improve Agent Registration and Heartbeat json. (Nate Cole via
+ mahadev)
+
+ AMBARI-1238. AmbariMetaInfoTest getServices() acceptance test failure. 
+ (Siddharth Wagle via mahadev)
+
+ AMBARI-1243. Remove unwanted import causing the builds to fail on linux.
+ (mahadev)
+
+ AMBARI-1233.  Directory permissions on httpd /var/www/cgi-bin should not be
+ touched by Ambari. (mahadev)
+
+ AMBARI-1170. For live status checks we should only look at the run
+ directories that we get from the server (only for hadoop and its ecosystem)
+ and not all. (mahadev)
+
+ AMBARI-1250. Upgrade the postgres connector to 9.1.
+ (mahadev)
+
+ AMBARI-1259. Fix the host roles live status so it goes back to INSTALLED if
+ it was in START_FAILED state. (mahadev)
+
+ AMBARI-1210. Allow capacity scheduler to be attached to host role configs for
+ CS configurability in the API's. (mahadev)
+
+ AMBARI-1256. Host registration can fail due to mount point info not fitting
+ ambari.hosts::disks_info column. (Sumit Mohanty via mahadev)
+
+ AMBARI-1266. Agent checks packages as part of host check but doesn't tell
+ which ones are needed or conflicting. (mahadev)
+
+ AMBARI-1291. Incorrect directory for MySQL component on SLES-11.1sp1.
+ (mahadev)
+
+ AMBARI-1301. Live status checks don't get triggered on server restart.
+ (mahadev)
+
+ AMBARI-1285. Some host Ganglia metrics may be missing in some cases. (tbeerbower)
+
+ AMBARI-1310. Get rid of mvn warnings. (Arun Kumar via mahadev)
+
+ AMBARI-1314. Hostname test is failing in some environments. (Nate Cole via
+ mahadev) 
+
+ AMBARI-1330. Cluster missing hosts after successful install and restart.
+ (mahadev)
+
+ AMBARI-1358. Clean up alert messages. (Yusaku Sako via mahadev)
+
+ AMBARI-1432. Ambari Agent registration hangs due to Acceptor bug in Jetty for
+ not reading through accepted connections. (mahadev)
+
+ AMBARI-1434. Change state to installed from start_failed if there is any
+ issue in starting a host component. (mahadev)
+
+ AMBARI-1476. Change webhcat-env.sh to export HADOOP_HOME.
+ (mahadev)
+
+ AMBARI-1486. Fix TestHostName to take care of issues when gethostname and
+ getfqdn do not match. (mahadev)
+
+ AMBARI-1495. Out of Memory Issues on Ambari Server when server is running on
+ single core. (mahadev)
+
+ AMBARI-1487. Fix host-level alerts not to alert for TaskTrackers not running
+ when MapReduce is not selected. (mahadev)
+
+ AMBARI-1488. Nagios script causes unwanted Datanode logs. (mahadev)
+
+ AMBARI-1497. Fix start up option for ambari-server where there is a missing
+ space. (mahadev)
+
+AMBARI-1.2.0 branch:
 
  INCOMPATIBLE CHANGES
  
  NEW FEATURES
 
- AMBARI-1108 - PUT call to change the state on host_components collection
+ AMBARI-1108. PUT call to change the state on host_components collection
  returns 200 (no op), even though GET with the same predicate returns a number
  of host_components. (Tom Beerbower via mahadev)
 
  AMBARI-1114. BootStrap fails but the api says thats its done and exit status
  is 0. (Nate Cole via mahadev)
 
-  AMBARI-1136 - Add gsInstaller resource provider. (Tom Beerbower via mahadev)
+ AMBARI-1136. Add gsInstaller resource provider. (Tom Beerbower via mahadev)
+
+ AMBARI-1202. Unnecessary use of the xml tree Python library in ambari-server
+ setup. It's not being used. (Siddharth Wagle via mahadev)
+
+ IMPROVEMENTS
 
  BUG FIXES
 
@@ -31,7 +575,7 @@ should be listed by their full name.
  AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (nate cole via
  mahadev)
 
-AMBARI-666 branch (unreleased changes)
+AMBARI-666 branch:
 
   INCOMPATIBLE CHANGES
 
@@ -1016,3 +1560,4 @@ AMBARI-666 branch (unreleased changes)
   AMBARI-684. Remove non-required dependencies from pom files (hitesh via jitendra)
 
   AMBARI-680. Fix pom structure. (hitesh)
+

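Note on the AMBARI-1280 and AMBARI-1294 entries above: both are backed by the new org.apache.ambari.server.api.predicate package (QueryLexer, QueryParser, and the operator classes) added later in this commit. A minimal sketch of what such queries could look like, assuming illustrative endpoint, field, and status names (the exact grammar is defined by QueryLexer/QueryParser, not by this sketch):

    # Python sketch; predicates are ordinary query strings on the REST API.
    base = "http://ambari-server:8080/api/v1/hosts"

    # AMBARI-1280: explicit grouping of OR'd predicates with parentheses
    grouped = "(Hosts/host_status=HEALTHY|Hosts/host_status=UNHEALTHY)&Hosts/cpu_count=4"

    # AMBARI-1294: isEmpty() tests that a resource category has no entries
    empty_check = "host_components.isEmpty()"

    for predicate in (grouped, empty_check):
        print(base + "?" + predicate)  # a real client would URL-encode this
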
+ 0 - 5
KEYS

@@ -65,11 +65,6 @@ q8NjvW09GT7Ls9llrf4IXG8kjX2PZRIfaGSa556PJjdD3xJWgTEP78i0zJTWQLku
 IWC8DKsT+np6ZGfoE58=
 =zb0j
 -----END PGP PUBLIC KEY BLOCK-----
-
-pub   4096R/8EE2F25C 2011-10-24
-uid                  Mahadev Konar (CODE SIGNING KEY) <mahadev@apache.org>
-sub   4096R/1F35DF4C 2011-10-24
-
 -----BEGIN PGP PUBLIC KEY BLOCK-----
 Version: SKS 1.1.0
 

+ 8 - 3
NOTICE.txt

@@ -1,9 +1,14 @@
-Apache Ambari
-Copyright 2011-2013 The Apache Software Foundation
+Apache [2012]
+Copyright [2012] The Apache Software Foundation
 
-This product includes software developed at The Apache Software
+This product includes software developed by The Apache Software
 Foundation (http://www.apache.org/).
 
+Component stdlib in puppet modules are under the following copyright: 
+Copyright (C) 2011 Puppet Labs Inc
+and some parts:
+Copyright (C) 2011 Krzysztof Wilczynski
+
 Component ambari-common/src/test/python are under the following copyright:
 
 Copyright (c) 2003-2012, Michael Foord

+ 1 - 1
ambari-agent/conf/unix/ambari-agent.ini

@@ -49,4 +49,4 @@ dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie,
   /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/run/templeton,/var/log/hive,
   /var/log/nagios
 rpms=nagios,ganglia,
-  hadoop,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
+  hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
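
The hadoop-lzo entry added to the rpms list above pairs with AMBARI-1489 in the change log (check for hadoop-lzo before installation). A hedged sketch of how such a host check could be performed on an RPM-based host; this is illustrative, not the agent's actual implementation:

    # Python sketch: flag rpms from the ini list that are not installed.
    # `rpm -q <name>` exits non-zero when the package is missing.
    import subprocess

    rpms = ("nagios,ganglia,hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,"
            "zookeeper,hive,libconfuse,ambari-log4j")

    missing = [name for name in rpms.split(",")
               if subprocess.run(["rpm", "-q", name],
                                 stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL).returncode != 0]
    print("missing rpms:", missing or "none")
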
+ 7 - 2
ambari-agent/pom.xml

@@ -19,17 +19,18 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
-    <version>1.2.1-SNAPSHOT</version>
+    <version>1.2.2-SNAPSHOT</version>
     <relativePath>../ambari-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-agent</artifactId>
   <packaging>pom</packaging>
-  <version>1.2.1-SNAPSHOT</version>
+  <version>1.2.2-SNAPSHOT</version>
   <name>Ambari Agent</name>
   <description>Ambari Agent</description>
   <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <final.name>${project.artifactId}-${project.version}</final.name>
     <package.release>1</package.release>
     <package.prefix>/usr</package.prefix>
@@ -59,6 +60,10 @@
   </profiles>
   <build>
     <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
       <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
         <configuration>

+ 18 - 14
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh

@@ -220,6 +220,19 @@ collection_group {
   }
 }
 
+/* This collection group will send general info about this host's total memory
+   every 180 secs.
+   This information doesn't change between reboots and is only collected
+   once. It is needed for the heatmap display. */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
 /* This collection group will send general info about this host every
    1200 secs.
    This information doesn't change between reboots and is only collected
@@ -235,10 +248,6 @@ collection_group {
     name = "cpu_speed"
     title = "CPU Speed"
   }
-  metric {
-    name = "mem_total"
-    title = "Memory Total"
-  }
   /* Should this be here? Swap can be added/removed between reboots. */
   metric {
     name = "swap_total"
@@ -426,16 +435,6 @@ collection_group {
   }
 }
 
-/* Different than 2.5.x default since the old config made no sense */
-collection_group {
-  collect_every = 1800
-  time_threshold = 3600
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
 
 collection_group {
   collect_every = 40
@@ -450,6 +449,11 @@ collection_group {
     value_threshold = 1.0
     title = "Maximum Disk Space Used"
   }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
 }
 
 include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")

+ 16 - 4
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py

@@ -139,6 +139,14 @@ if "pt" in queryString:
 else:
   pointInTime = False
 
+
+host_metrics = ["boottime", "bytes_in", "bytes_out", "cpu_aidle", "cpu_idle",
+                "cpu_nice", "cpu_num", "cpu_speed", "cpu_system", "cpu_user",
+                "cpu_wio", "disk_free", "disk_total", "load_fifteen", "load_five",
+                "load_one", "mem_buffers", "mem_cached", "mem_free", "mem_shared",
+                "mem_total", "part_max_used", "pkts_in", "pkts_out", "proc_run",
+                "proc_total", "swap_free", "swap_total"]
+
 for cluster in clusterParts:
   for path, dirs, files in os.walk(rrdPath + cluster):
     pathParts = path.split("/")
@@ -146,10 +154,14 @@ for cluster in clusterParts:
       for file in files:
         for metric in metricParts:
           if file.endswith(metric + ".rrd"):
-
-            printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                os.path.join(path, file), cf, start, end, resolution, pointInTime)
-
+            if not (metric in host_metrics):
+              printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
+            else:
+              if (cluster == "HDPSlaves"):
+                 printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
+                
 sys.stdout.write("[AMBARI_END]\n")
 # write end time
 sys.stdout.write(str(time.mktime(time.gmtime())))

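The hunk above restricts per-host OS metrics (cpu, memory, disk, network) to the "HDPSlaves" cluster, which appears to back AMBARI-1466 in the change log (keeping rrd.py responsive for UI queries). The nested ifs reduce to a single condition, restated here as a sketch:

    # Python sketch: a metric is printed unless it is a per-host OS metric
    # belonging to a cluster other than "HDPSlaves".
    def should_print(metric, cluster, host_metrics):
        return metric not in host_metrics or cluster == "HDPSlaves"
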
+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh

@@ -39,7 +39,7 @@ then
     su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
              -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
              -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b /var/lib/ganglia/rrds -B"
+             -b ${RRDCACHED_BASE_DIR} -B"
 
     # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
     # this, but it doesn't take sometimes due to a lack of permissions,

+ 1 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp

@@ -29,4 +29,5 @@ class hdp-ganglia::params() inherits hdp::params
   $gmond_user = $hdp::params::gmond_user
 
   $webserver_group = hdp_default("hadoop/gangliaEnv/webserver_group","apache")
+  $rrdcached_base_dir = hdp_default("rrdcached_base_dir", "/var/lib/ganglia/rrds")
 }

+ 10 - 2
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -126,8 +126,6 @@ class hdp-ganglia::server::files(
   $ensure = present 
 )
 {
-
-
   $rrd_py_path = $hdp::params::rrd_py_path [$hdp::params::hdp_os_type]
   hdp::directory_recursive_create{$rrd_py_path:
     ensure => "directory", 
@@ -142,6 +140,16 @@ class hdp-ganglia::server::files(
     mode   => '0755',
     require => Hdp::Directory_recursive_create[$rrd_py_path]
   }
+
+  $rrd_files_dir = $hdp-ganglia::params::rrdcached_base_dir
+  $rrd_file_owner = $hdp-ganglia::params::gmetad_user
+  hdp::directory_recursive_create{ $rrd_files_dir :
+    ensure => "directory",
+    owner => $rrd_file_owner,
+    group => $rrd_file_owner,
+    mode => 755
+  }
+
 }
 
 

+ 1 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb

@@ -22,6 +22,7 @@ cd `dirname ${0}`;
 
 GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
 GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
+RRDCACHED_BASE_DIR=<%=scope.function_hdp_template_var("rrdcached_base_dir")%>;
 
 # This file contains all the info about each Ganglia Cluster in our Grid.
 GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;

+ 65 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb

@@ -0,0 +1,65 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#to handle differences in how args passed in
+module Puppet::Parser::Functions
+  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
+  
+    dir = args[0]
+
+    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
+    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
+    
+    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
+    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
+    
+    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
+    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
+    
+    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
+    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
+    
+    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
+    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
+    
+    modes = []
+    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
+    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
+    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
+    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
+    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
+
+    modes_grouped = {}
+    modes.each do |item|
+      if modes_grouped[item[:dir]].nil?
+        modes_grouped[item[:dir]]=[]
+      end
+      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
+    end
+
+    modes_max = {}
+    
+    modes_grouped.each_key do |key|
+      modes_max[key] = modes_grouped[key].max
+    end
+
+    modes_max[dir]
+  end
+end

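The function above returns, for a given HDFS user directory, the maximum of the modes requested by every service that maps to that directory, so the most permissive mode wins when services share a directory. A minimal Python restatement of the group-then-max logic (the directory names and modes below are made-up examples, not values from this diff):

    # Group candidate (dir, mode) pairs by dir, then take the max mode per dir.
    pairs = [("/user/oozie", "775"), ("/user/hcat", "755"), ("/user/hcat", "775")]

    modes = {}
    for d, mode in pairs:
        modes.setdefault(d, []).append(mode)

    print({d: max(ms) for d, ms in modes.items()})
    # {'/user/oozie': '775', '/user/hcat': '775'}
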
+ 51 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb

@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#to handle differences in how args passed in
+module Puppet::Parser::Functions
+  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
+  
+    dir = args[0]
+    
+    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
+    oozie_user = lookupvar("::hdp::params::oozie_user") 
+
+    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
+    hcat_user = lookupvar("::hdp::params::hcat_user") 
+
+    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
+    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
+
+    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
+    hive_user = lookupvar("::hdp::params::hive_user") 
+
+    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
+    smoke_user = lookupvar("::hdp::params::smokeuser") 
+
+    dirs_to_owners = {}
+    dirs_to_owners[oozie_dir] = oozie_user
+    dirs_to_owners[hcat_dir] = hcat_user
+    dirs_to_owners[webhcat_dir] = webhcat_user
+    dirs_to_owners[hive_dir] = hive_user
+    dirs_to_owners[smoke_dir] = smoke_user
+
+    dirs_to_owners[dir]
+  end
+end

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp

@@ -60,7 +60,7 @@ define hdp-hadoop::hdfs::directory(
   
     if ($mode != undef) {
       #TODO: see if there is a good 'unless test'
-      if ($recursive_mode == true) {
+      if ($recursive_chmod == true) {
         $chmod_cmd = "fs -chmod -R ${mode} ${name}"
       } else {
         $chmod_cmd = "fs -chmod ${mode} ${name}"

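The one-line fix above makes the define test its actual recursive_chmod parameter rather than the apparently unset recursive_mode, so recursive chmods now take effect. The resulting command selection, sketched in Python:

    # Python sketch of the chmod command built by hdfs::directory.
    def chmod_cmd(name, mode, recursive_chmod):
        return "fs -chmod %s%s %s" % ("-R " if recursive_chmod else "", mode, name)

    print(chmod_cmd("/apps/hive/warehouse", "777", True))
    # fs -chmod -R 777 /apps/hive/warehouse
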
+ 2 - 13
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -101,19 +101,8 @@ debug('##Configs generation for hdp-hadoop')
       configuration => $configuration['capacity-scheduler'],
       owner => $hdp-hadoop::params::hdfs_user,
       group => $hdp::params::user_group,
-      replace => true,
     }
-  } else { #   This file will just be a static file for now. - BUG-3195
-    file {"capacity-scheduler.xml":
-      ensure  => present,
-      source => "puppet:///modules/hdp-hadoop/capacity-scheduler.xml",
-      mode => '0744',
-      path => "${hdp-hadoop::params::conf_dir}/capacity-scheduler.xml",
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group,
-      replace => true,
-    }
-  }
+  } 
 
 
   if has_key($configuration, 'hdfs-site') {
@@ -184,7 +173,7 @@ class hdp-hadoop(
     hdp::directory_recursive_create { $logdirprefix: 
         owner => 'root'
     }
-    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
+    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
     hdp::directory_recursive_create { $piddirprefix: 
         owner => 'root'
     }

+ 70 - 32
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -85,10 +85,14 @@ class hdp-hadoop::namenode(
        service_state => $service_state
     }
 
+    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
+       service_state => $service_state
+    }
+
     #top level does not need anchors
     Class['hdp-hadoop'] ->  Hdp-hadoop::Service['namenode']
     Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['namenode'] 
-    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||>
+    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||> -> Hdp-hadoop::Namenode::Create_user_directories<||>
     if ($service_state == 'running' and $format == true) {
       Class['hdp-hadoop'] -> Class['hdp-hadoop::namenode::format'] -> Hdp-hadoop::Service['namenode']
       Hdp-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp-hadoop::namenode::format']
@@ -111,15 +115,8 @@ define hdp-hadoop::namenode::create_name_dirs($service_state)
 
 define hdp-hadoop::namenode::create_app_directories($service_state)
 {
+
   if ($service_state == 'running') {
-    $smoke_test_user = $hdp::params::smokeuser
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-    hdp-hadoop::hdfs::directory{ $smoke_hdfs_user_dir:
-      service_state => $service_state,
-      owner => $smoke_test_user,
-      mode  => '770',
-      recursive_chmod => true
-    }
    
     hdp-hadoop::hdfs::directory{ "/tmp" :
       service_state => $service_state,
@@ -147,46 +144,87 @@ define hdp-hadoop::namenode::create_app_directories($service_state)
 
     if ($hdp::params::hive_server_host != "") {
       $hive_user = $hdp::params::hive_user
+      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
 
-      hdp-hadoop::hdfs::directory{ '/apps/hive/warehouse':
+      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
         service_state   => $service_state,
         owner            => $hive_user,
         mode             => '777',
         recursive_chmod  => true
       }
-      hdp-hadoop::hdfs::directory{ $hive_hdfs_user_dir:
+    }
+
+    if ($hdp::params::webhcat_server_host != "") {
+      $webhcat_user = $hdp::params::webhcat_user
+      $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
+
+      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
         service_state => $service_state,
-        owner         => $hive_user
+        owner => $webhcat_user,
+        mode  => '755',
+        recursive_chmod => true
       }
     }
+  }
+}
+
+
+define hdp-hadoop::namenode::create_user_directories($service_state)
+{
+  if ($service_state == 'running') {
+    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
+
+    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
+
+    if ($hdp::params::hive_server_host != "") {
+      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
+      $hive_dir_item="$hive_hdfs_user_dir,"
+    } else {
+      $hive_dir_item=""
+    }
 
     if ($hdp::params::oozie_server != "") {
-      $oozie_user = $hdp::params::oozie_user
       $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      hdp-hadoop::hdfs::directory{ $oozie_hdfs_user_dir:
-        service_state => $service_state,
-        owner => $oozie_user,
-        mode  => '775',
-        recursive_chmod => true
-      }
+      $oozie_dir_item="$oozie_hdfs_user_dir,"
+    } else {
+      $oozie_dir_item=""
     }
     
     if ($hdp::params::webhcat_server_host != "") {
-      $templeton_user = $hdp::params::templeton_user
       $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      hdp-hadoop::hdfs::directory{ $hcat_hdfs_user_dir:
-        service_state => $service_state,
-        owner => $templeton_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-
-      hdp-hadoop::hdfs::directory{ '/apps/webhcat':
-        service_state => $service_state,
-        owner => $templeton_user,
-        mode  => '755',
-        recursive_chmod => true
+      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
+      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
+      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
+        $hcat_dir_item="$hcat_hdfs_user_dir,"
+      } else {
+        $hcat_dir_item=""
       }
+    } else {
+      $webhcat_dir_item=""
     }
+
+    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
+
+    #Get unique users directories set
+    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
+
+    hdp-hadoop::namenode::create_user_directory{$users_dirs_set:
+      service_state => $service_state}
   }
+  
 }
+
+define hdp-hadoop::namenode::create_user_directory($service_state)
+{
+  
+  $owner = hdp_hadoop_get_owner($name)
+  $mode = hdp_hadoop_get_mode($name)
+  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
+  hdp-hadoop::hdfs::directory{ $name:
+   service_state   => $service_state,
+   mode            => $mode,
+   owner           => $owner,
+   recursive_chmod => true
+  }
+}
+

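The refactor above replaces per-service directory resources with a generic flow: build a comma-separated list of each service's HDFS user directory, collapse it to a unique set via hdp_set_from_comma_list, then create each directory with owner and mode resolved by the new hdp_hadoop_get_owner/hdp_hadoop_get_mode functions. A sketch of the dedupe step (paths are illustrative):

    # Python sketch: empty items (services not installed) and duplicates
    # (e.g. hcat and webhcat sharing a user dir) collapse to a unique set.
    items = "/user/ambari-qa, /user/hive, , /user/hcat, /user/hcat,"
    unique_dirs = {d.strip() for d in items.split(",") if d.strip()}
    print(sorted(unique_dirs))  # ['/user/ambari-qa', '/user/hcat', '/user/hive']
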
+ 3 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -45,7 +45,7 @@ class hdp-hadoop::params(
     $enable_security_authorization = false
     $security_type = "simple"
     $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = 50010
+    $dfs_datanode_address = 50075
     $dfs_datanode_http_address = 50075
   }
 
@@ -58,8 +58,8 @@ class hdp-hadoop::params(
 
   $hdfs_log_dir_prefix = hdp_default("hadoop/hadoop-env/hdfs_log_dir_prefix","/var/log/hadoop")
 
-  $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
-  $run_dir = $hadoop_piddirprefix
+  $hadoop_pid_dir_prefix = hdp_default("hadoop/hadoop-env/hadoop_pid_dir_prefix","/var/run/hadoop")
+  $run_dir = $hadoop_pid_dir_prefix
 
   $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
 

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp

@@ -30,7 +30,7 @@ define hdp-hadoop::service(
   $security_enabled = $hdp::params::security_enabled
 
   #NOTE does not work if namenode and datanode are on same host 
-  $pid_dir = "${hdp-hadoop::params::hadoop_piddirprefix}/${user}"
+  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
   
   if (($security_enabled == true) and ($name == 'datanode')) {
     $run_as_root = true
@@ -40,7 +40,7 @@ define hdp-hadoop::service(
 
   if (($security_enabled == true) and ($name == 'datanode')) {
     $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_piddirprefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
+    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
   } else {
     $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
   } 

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -78,8 +78,8 @@ export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir
 # export HADOOP_SLAVE_SLEEP=0.1
 
 # The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
 
 # A string representing this instance of hadoop. $USER by default.
 export HADOOP_IDENT_STRING=$USER

+ 0 - 22
ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hiveSmoke.sh

@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-echo 'CREATE EXTERNAL TABLE IF NOT EXISTS hivesmoke ( foo INT, bar STRING );' | hive
-echo 'DESCRIBE hivesmoke;' | hive

+ 0 - 35
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/client.pp

@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $hcat_server_host = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state == 'installed_and_configured') {
-    include hdp-hcat #installs package, creates user, sets configuration
-    if ($hcat_server_host != undef) {
-      Hdp-Hcat::Configfile<||>{hcat_server_host => $hcat_server_host}
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 54
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hcat/service_check.pp

@@ -1,54 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::hcat::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $output_file = "/apps/hive/warehouse/hcatsmoke"
-
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hcat::hcat::service_check::begin':}
-
-  file { '/tmp/hcatSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hcat/hcatSmoke.sh",
-    mode => '0755',
-  }
-
-  exec { '/tmp/hcatSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'sh /tmp/hcatSmoke.sh'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/hcatSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hcat::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hcatSmoke.sh'],
-    before      => Anchor['hdp-hcat::hcat::service_check::end'] 
-  }
-  
-  anchor{ 'hdp-hcat::hcat::service_check::end':}
-}

+ 0 - 54
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hive/service_check.pp

@@ -1,54 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::hive::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $output_file = "/apps/hive/warehouse/hivesmoke"
-
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hcat::hive::service_check::begin':}
-
-  file { '/tmp/hiveSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hcat/hiveSmoke.sh",
-    mode => '0755',
-  }
-
-  exec { '/tmp/hiveSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'sh /tmp/hiveSmoke.sh'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/hiveSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hive::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hiveSmoke.sh'],
-    before      => Anchor['hdp-hcat::hive::service_check::end'] 
-  }
-  
-  anchor{ 'hdp-hcat::hive::service_check::end':}
-}
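
Note: both deleted service checks (hcat above, hive here) share one
smoke-test shape: stage a script, run it as the smoke user with retries,
and notify a refreshonly HDFS probe that fires only when the smoke run
reports a change. A condensed sketch of the retry/notify idiom, with
hypothetical names and a plain exec standing in for hdp-hadoop::exec-hadoop:

    file { '/tmp/smoke.sh':
      ensure  => present,
      mode    => '0755',
      content => "#!/bin/sh\nexit 0\n",   # stand-in for hcatSmoke.sh/hiveSmoke.sh
    }

    exec { 'run-smoke':
      command   => 'sh /tmp/smoke.sh',
      tries     => 3,                     # retry transient failures
      try_sleep => 5,                     # seconds between attempts
      path      => '/bin:/usr/bin',
      require   => File['/tmp/smoke.sh'],
      notify    => Exec['verify-smoke'],
    }

    exec { 'verify-smoke':
      command     => '/bin/true',         # stand-in for "fs -test -e <output>"
      refreshonly => true,                # runs only when notified above
      path        => '/bin:/usr/bin',
    }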

+ 0 - 72
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp

@@ -1,72 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat(
-  $server = false
-) 
-{
-  include hdp-hcat::params
-
-# Configs generation  
-
-  if has_key($configuration, 'hdp_hcat_old__hive_site') {
-    configgenerator::configfile{'hive_site_xml': 
-      filename => 'hive-site.xml',
-      module => 'hdp-hcat-old',
-      configuration => $configuration['hdp_hcat_old__hive_site']
-    }
-  }
-
-  $hcat_user = $hdp::params::hcat_user
-  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
- 
-  hdp::package { 'hcat-base' : }
-  if ($server == true ) {
-    hdp::package { 'hcat-server':} 
-    class { 'hdp-hcat::mysql-connector': }
-  }
-  
-  hdp::user{ $hcat_user:}
-  
-  hdp::directory { $hcat_config_dir: }
-
-  hdp-hcat::configfile { ['hcat-env.sh','hive-env.sh','hive-site.xml']: }
-  
-  anchor { 'hdp-hcat::begin': } -> Hdp::Package['hcat-base'] -> Hdp::User[$hcat_user] -> 
-   Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> ->  anchor { 'hdp-hcat::end': }
-
-   if ($server == true ) {
-     Hdp::Package['hcat-base'] -> Hdp::Package['hcat-server'] ->  Hdp::User[$hcat_user] -> Class['hdp-hcat::mysql-connector'] -> Anchor['hdp-hcat::end']
-  }
-}
-
-### config files
-define hdp-hcat::configfile(
-  $mode = undef,
-  $hcat_server_host = undef
-) 
-{
-  hdp::configfile { "${hdp-hcat::params::hcat_conf_dir}/${name}":
-    component        => 'hcat',
-    owner            => $hdp::params::hcat_user,
-    mode             => $mode,
-    hcat_server_host => $hcat_server_host 
-  }
-}
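
Note: the begin/end anchors in the deleted init.pp are the standard
containment workaround from before Puppet's contain function existed:
chaining resources between two anchors lets outside code depend on
Anchor['...::end'] and know the whole sequence has been applied. A minimal
sketch with hypothetical names:

    class demo {
      anchor { 'demo::begin': } ->
      package { 'demo-pkg': ensure => installed } ->
      user { 'demo': ensure => present } ->
      anchor { 'demo::end': }
    }

    # A consumer orders itself after the entire chain, not just one resource
    class demo::consumer {
      include demo
      exec { 'uses-demo':
        command => '/bin/true',
        path    => '/bin',
        require => Anchor['demo::end'],
      }
    }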

+ 0 - 46
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/mysql-connector.pp

@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::mysql-connector()
-{
-  include hdp-hcat::params
-
-  $url = $hdp-hcat::params::mysql_connector_url
-  $zip_name = regsubst($url,'^.+/([^/]+$)','\1')
-  $jar_name = regsubst($zip_name,'zip$','-bin.jar')
-  $target = "${hdp::params::artifact_dir}/${zip_name}"
-  $hcat_lib = $hdp-hcat::params::hcat_lib
-  
-  exec{ "curl ${url}":
-    command => "mkdir -p ${artifact_dir} ; curl -f --retry 10 ${url} -o ${target} ",
-    creates => $target,
-    path    => ["/bin","/usr/bin/"]
-  }
-  exec{ "unzip ${target}":
-    command => "unzip -o -j ${target} '*.jar' -x */lib/*",
-    cwd     => $hcat_lib,
-    user    => $hdp::params::hcat_user,
-    group   => $hdp::params::hadoop_user_group,
-    creates => "${hcat_lib}/${$jar_name}",
-    path    => ["/bin","/usr/bin/"]
-  }
-
-  Exec["curl ${url}"] -> Exec["unzip ${target}"]
-}
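
Note: the deleted mysql-connector class is the usual download-and-extract
idiom: each exec carries a creates guard, so repeated runs become no-ops
once the artifact is on disk. A stripped-down sketch with a hypothetical
URL and paths:

    $url    = 'http://repo.example.com/connector.zip'   # hypothetical
    $target = '/var/tmp/connector.zip'

    exec { "curl ${url}":
      command => "curl -f --retry 10 ${url} -o ${target}",
      creates => $target,                       # skip once downloaded
      path    => ['/bin', '/usr/bin'],
    }

    exec { "unzip ${target}":
      command => "unzip -o -j ${target} '*.jar'",
      cwd     => '/var/tmp',
      creates => '/var/tmp/connector-bin.jar',  # hypothetical extracted jar
      path    => ['/bin', '/usr/bin'],
      require => Exec["curl ${url}"],
    }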

+ 0 - 59
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/params.pp

@@ -1,59 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::params() inherits hdp::params
-{
-
-  #TODO: will move to globals
-  $hcat_metastore_user_name = hdp_default("hadoop/hive-site/hcat_metastore_user_name","dbusername")
-  $hcat_metastore_user_passwd = hdp_default("hadoop/hive-site/hcat_metastore_user_passwd","dbpassword")
- 
- ####### users
- 
-  
-  ### common
-  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
-  $hcat_lib = hdp_default("hcat_lib","/usr/share/hcatalog/lib") #TODO: should I remove and just use hcat_dbroot
-
-  ### hcat-env
-  $hcat_conf_dir = hdp_default("hadoop/hcat-env/hcat_conf_dir","/etc/hcatalog")
-
-  $hcat_dbroot = hdp_default("hadoop/hcat-env/hcat_dbroot",$hcat_lib)
-
-  $hcat_log_dir = hdp_default("hadoop/hcat-env/hcat_log_dir","/var/log/hcatalog")
-
-  $hcat_pid_dir = hdp_default("hadoop/hcat-env/hcat_pid_dir","/var/run/hcatalog")
-#  $hcat_pid_dir = "${hcat_piddirprefix}/${hdp::params::hcat_user}"
-  
-  ### hive-site
-  $hcat_database_name = hdp_default("hadoop/hive-site/hcat_database_name","hive")
-
-  $hcat_metastore_principal = hdp_default("hadoop/hive-site/hcat_metastore_principal")
-
-  $hcat_metastore_sasl_enabled = hdp_default("hadoop/hive-site/hcat_metastore_sasl_enabled",false)
-
-  #TODO: using instead hcat_server_host in hdp::params $hcat_metastore_server_host = hdp_default("hadoop/hive-site/hcat_metastore_server_host")
-
-  $keytab_path = hdp_default("hadoop/hive-site/keytab_path")
-  
-  ###mysql connector
-  $download_url = $hdp::params::gpl_artifacts_download_url
-  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
-}

+ 0 - 61
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/server.pp

@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-hcat::params
-{ 
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) { 
-    class{ 'hdp-hcat' : server => true} #installs package, creates user, sets configuration
-  
-    Hdp-Hcat::Configfile<||>{hcat_server_host => $hdp::params::host_address}
-
-    class { 'hdp-hcat::hdfs-directories' : 
-      service_state => $service_state
-    }
-
-    class { 'hdp-hcat::service' :
-      ensure => $service_state
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hcat'] -> Class['hdp-hcat::hdfs-directories'] -> Class['hdp-hcat::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-class hdp-hcat::hdfs-directories($service_state)
-{
-  $hcat_user = $hdp::params::hcat_user
- 
-  hdp-hadoop::hdfs::directory{ '/apps/hive/warehouse':
-    service_state   => $service_state,
-    owner            => $hcat_user,
-    mode             => '770',
-    recursive_chmod  => true
-  }  
-  hdp-hadoop::hdfs::directory{ "/usr/${hcat_user}":
-    service_state => $service_state,
-    owner         => $hcat_user
-  }
-}
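
Note: the deleted server class follows the module-wide convention of
dispatching on $service_state: an explicitly empty no_op branch, one branch
for the supported states, and a hard failure for anything else. A minimal
sketch:

    class demo::server($service_state = 'no_op') {
      if ($service_state == 'no_op') {
        # deliberately do nothing
      } elsif ($service_state in ['running', 'stopped', 'installed_and_configured']) {
        notify { "demo::server applying ${service_state}": }
      } else {
        fail("unsupported service_state = ${service_state}")  # stand-in for hdp_fail
      }
    }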

+ 0 - 65
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/service.pp

@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::service(
-  $ensure,
-  $initial_wait = undef
-)
-{
-  include $hdp-hcat::params
-  
-  $user = $hdp::params::hcat_user
-  $hadoop_home = $hdp::hadoop_home
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/hcat_server.sh"
-  $pid_file = "${hdp-hcat::params::hcat_pid_dir}/hcat.pid" 
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} start'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} stop'"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-hcat::service::directory { $hdp-hcat::params::hcat_pid_dir : }
-  hdp-hcat::service::directory { $hdp-hcat::params::hcat_log_dir : }
-
-  anchor{'hdp-hcat::service::begin':} -> Hdp-hcat::Service::Directory<||> -> anchor{'hdp-hcat::service::end':}
-  
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    Hdp-hcat::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-hcat::service::end']
-  }
-}
-
-define hdp-hcat::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp::params::hcat_user,
-    mode => '0755'
-  }
-}
-
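
Note: the deleted service class issues the start command only when a
pid-file liveness probe fails, via exec's unless guard; the stop branch
sets no guard, so it always runs. A condensed sketch with hypothetical
daemon paths:

    $user     = 'demo'
    $pid_file = '/var/run/demo/demo.pid'
    $start    = "su - ${user} -c '/usr/sbin/demo_server.sh start'"
    $is_alive = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"

    exec { 'demo-start':
      command => $start,
      unless  => $is_alive,   # no-op while the recorded pid is still running
      path    => '/bin:/usr/bin:/sbin:/usr/sbin',
    }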

+ 0 - 25
ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hcat-env.sh.erb

@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME=<%=scope.function_hdp_java_home()%>
-HCAT_PID_DIR=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/
-HCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
-HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
-USER=<%=scope.function_hdp_user("hcat_user")%>
-METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>

+ 0 - 53
ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hive-env.sh.erb

@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=

+ 21 - 2
ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp

@@ -23,6 +23,7 @@ class hdp-hcat(
 ) inherits hdp-hcat::params
 {
   $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
+  $hcat_pid_dir = $hdp-hcat::params::hcat_pid_dir
 
   if ($hdp::params::use_32_bits_on_slaves == false) {
     $size = 64
@@ -42,7 +43,12 @@ class hdp-hcat(
       force => true
     }
 
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir]
+    hdp::directory { $hcat_pid_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory[$hcat_pid_dir]
 
   } elsif ($service_state == 'installed_and_configured') {
     hdp::package { 'hcat' : 
@@ -54,9 +60,22 @@ class hdp-hcat(
       force => true
     }
 
+    hdp::directory_recursive_create { $hcat_pid_dir:
+      owner => $webhcat_user,
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp::user{ $webhcat_user:}
+
+    if ($webhcat_user != $hcat_user) {
+      hdp::user { $hcat_user:}
+    }
+
     hdp-hcat::configfile { 'hcat-env.sh':}
   
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> 
+    Hdp::Package['hcat'] -> Hdp::User<|title == $webhcat_user or title == $hcat_user|>  -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory_recursive_create[$hcat_pid_dir] -> Hdp-hcat::Configfile<||> 
+
  } else {
     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
   }
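
Note: the new ordering chain above uses a title-filtered collector so the
package is applied before whichever user resources were actually declared
(the webhcat user always, the hcat user only when it differs). A small
sketch of that collector form with hypothetical titles:

    user { 'webhcat_demo': ensure => present }
    user { 'hcat_demo':    ensure => present }

    package { 'demo-pkg': ensure => installed }

    # Order the package before every user whose title matches the filter
    Package['demo-pkg'] -> User<| title == 'webhcat_demo' or title == 'hcat_demo' |>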

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp

@@ -149,7 +149,7 @@ class hdp-nagios::server(
     if ($service_state == 'installed_and_configured') {
       $webserver_state = 'restart'
     } elsif ($service_state == 'running') {
-      $webserver_state = 'running'
+      $webserver_state = 'restart'
     } else {
       # We are never stopping httpd
       #$webserver_state = $service_state

+ 45 - 43
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb

@@ -47,7 +47,7 @@ define service {
         use                     hadoop-service
         service_description     HDFS::Percent DataNodes storage full
         servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::Storage full"!10%!30%
+        check_command           check_aggregate!"DATANODE::DataNode storage full"!10%!30%
         normal_check_interval   2
         retry_check_interval    1 
         max_check_attempts      1
@@ -58,7 +58,7 @@ define service {
         use                     hadoop-service
         service_description     HDFS::Percent DataNodes down
         servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::Process down"!10%!30%
+        check_command           check_aggregate!"DATANODE::DataNode process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -70,7 +70,7 @@ define service {
         use                     hadoop-service
         service_description     MAPREDUCE::Percent TaskTrackers down
         servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::Process down"!10%!30%
+        check_command           check_aggregate!"TASKTRACKER::TaskTracker process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -81,9 +81,9 @@ define service {
 define service {
         hostgroup_name          nagios-server
         use                     hadoop-service
-        service_description     ZOOKEEPER::Percent zookeeper servers down
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers down
         servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZKSERVERS::ZKSERVERS Process down"!35%!70%
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process down"!35%!70%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -95,9 +95,9 @@ define service {
 define service {
         hostgroup_name          nagios-server
         use                     hadoop-service
-        service_description     HBASE::Percent region servers down
+        service_description     HBASE::Percent RegionServers down
         servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::Process down"!10%!30%
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -110,7 +110,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia [gmetad] Process down
+        service_description     GANGLIA::Ganglia [gmetad] process down
         servicegroups           GANGLIA
         check_command           check_tcp!8651!-w 1 -c 1
         normal_check_interval   0.25
@@ -121,7 +121,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for slaves
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for slaves
         servicegroups           GANGLIA
         check_command           check_tcp!8660!-w 1 -c 1
         normal_check_interval   0.25
@@ -132,7 +132,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for namenode
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for NameNode
         servicegroups           GANGLIA
         check_command           check_tcp!8661!-w 1 -c 1
         normal_check_interval   0.25
@@ -143,7 +143,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker
         servicegroups           GANGLIA
         check_command           check_tcp!8662!-w 1 -c 1
         normal_check_interval   0.25
@@ -155,7 +155,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master
         servicegroups           GANGLIA
         check_command           check_tcp!8663!-w 1 -c 1
         normal_check_interval   0.25
@@ -170,7 +170,7 @@ define service {
 define service {
         hostgroup_name          snamenode
         use                     hadoop-service
-        service_description     NAMENODE::Secondary Namenode Process down
+        service_description     NAMENODE::Secondary NameNode process down
         servicegroups           HDFS
         check_command           check_tcp!50090!-w 1 -c 1
         normal_check_interval   0.5
@@ -183,7 +183,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Web UI down
+        service_description     NAMENODE::NameNode Web UI down
         servicegroups           HDFS
         check_command           check_webui!namenode
         normal_check_interval   1
@@ -194,7 +194,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Edit logs directory status
+        service_description     NAMENODE::NameNode edit logs directory status
         servicegroups           HDFS
         check_command           check_name_dir_status!50070
         normal_check_interval   0.5
@@ -205,7 +205,7 @@ define service {
 define service {        
         hostgroup_name          namenode        
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Host CPU utilization
+        service_description     NAMENODE::NameNode host CPU utilization
         servicegroups           HDFS
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -217,7 +217,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Process down
+        service_description     NAMENODE::NameNode process down
         servicegroups           HDFS
         check_command           check_tcp!8020!-w 1 -c 1
         normal_check_interval   0.5
@@ -239,7 +239,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     HDFS::HDFS Capacity utilization
+        service_description     HDFS::HDFS capacity utilization
         servicegroups           HDFS
         check_command           check_hdfs_capacity!50070!80%!90%
         normal_check_interval   10
@@ -250,7 +250,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     HDFS::Namenode RPC Latency
+        service_description     HDFS::NameNode RPC latency
         servicegroups           HDFS
         check_command           check_rpcq_latency!NameNode!50070!3000!5000
         normal_check_interval   5
@@ -286,7 +286,7 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     JOBTRACKER::Jobtracker CPU utilization
+        service_description     JOBTRACKER::JobTracker CPU utilization
         servicegroups           MAPREDUCE
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -298,7 +298,7 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     JOBTRACKER::Jobtracker Process down
+        service_description     JOBTRACKER::JobTracker process down
         servicegroups           MAPREDUCE
         check_command           check_tcp!50030!-w 1 -c 1
         normal_check_interval   0.5
@@ -309,13 +309,26 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC Latency
+        service_description     MAPREDUCE::JobTracker RPC latency
         servicegroups           MAPREDUCE
         check_command           check_rpcq_latency!JobTracker!50030!3000!5000
         normal_check_interval   5
         retry_check_interval    1 
         max_check_attempts      5
 }
+
+# MAPREDUCE::TASKTRACKER Checks 
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     TASKTRACKER::TaskTracker process down
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!50060!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
 <%end-%>
 
 <%if scope.function_hdp_nagios_members_exist('slaves')-%>
@@ -323,7 +336,7 @@ define service {
 define service {
         hostgroup_name          slaves
         use                     hadoop-service
-        service_description     DATANODE::Process down
+        service_description     DATANODE::DataNode process down
         servicegroups           HDFS
         check_command           check_tcp!<%=scope.function_hdp_template_var("dfs_datanode_address")%>!-w 1 -c 1
         normal_check_interval   1
@@ -334,7 +347,7 @@ define service {
 define service {
         hostgroup_name          slaves
         use                     hadoop-service
-        service_description     DATANODE::Storage full
+        service_description     DATANODE::DataNode storage full
         servicegroups           HDFS
         check_command           check_datanode_storage!<%=scope.function_hdp_template_var("dfs_datanode_http_address")%>!90%!90%
         normal_check_interval   5
@@ -342,17 +355,6 @@ define service {
         max_check_attempts      2
 }
 
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     TASKTRACKER::Process down
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!50060!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
 <%end-%>
 
 <%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
@@ -360,7 +362,7 @@ define service {
 define service {
         hostgroup_name          zookeeper-servers
         use                     hadoop-service
-        service_description     ZKSERVERS::ZKSERVERS Process down
+        service_description     ZOOKEEPER::ZooKeeper Server process down
         servicegroups           ZOOKEEPER
         check_command           check_tcp!2181!-w 1 -c 1
         normal_check_interval   1
@@ -374,7 +376,7 @@ define service {
 define service {
         hostgroup_name          region-servers
         use                     hadoop-service
-        service_description     REGIONSERVER::Process down
+        service_description     REGIONSERVER::RegionServer process down
         servicegroups           HBASE
         check_command           check_tcp!60020!-w 1 -c 1
         normal_check_interval   1
@@ -386,7 +388,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBase Web UI down
+        service_description     HBASEMASTER::HBase Master Web UI down
         servicegroups           HBASE
         check_command           check_webui!hbase
         normal_check_interval   1
@@ -397,7 +399,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBaseMaster CPU utilization
+        service_description     HBASEMASTER::HBase Master CPU utilization
         servicegroups           HBASE
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -408,7 +410,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBaseMaster Process down
+        service_description     HBASEMASTER::HBase Master process down
         servicegroups           HBASE
         check_command           check_tcp!60000!-w 1 -c 1
         normal_check_interval   0.5
@@ -422,7 +424,7 @@ define service {
 define service {
         hostgroup_name          hiveserver
         use                     hadoop-service
-        service_description     HIVE-METASTORE::HIVE-METASTORE status check
+        service_description     HIVE-METASTORE::Hive Metastore status check
         servicegroups           HIVE-METASTORE
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
@@ -439,7 +441,7 @@ define service {
 define service {
         hostgroup_name          oozie-server
         use                     hadoop-service
-        service_description     OOZIE::Oozie status check
+        service_description     OOZIE::Oozie Server status check
         servicegroups           OOZIE
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
@@ -456,7 +458,7 @@ define service {
 define service {
         hostgroup_name          webhcat-server
         use                     hadoop-service
-        service_description     WEBHCAT::WEBHCAT status check
+        service_description     WEBHCAT::WebHCat Server status check
         servicegroups           WEBHCAT 
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_templeton_status!50111!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>

+ 8 - 7
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp

@@ -75,7 +75,8 @@ class hdp-templeton::server(
 
 class hdp-templeton::copy-hdfs-directories($service_state)
 {
- $templeton_user = $hdp-templeton::params::templeton_user
+ $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
+ $webhcat_user = $hdp::params::webhcat_user
 # $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
 
 #  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
@@ -86,22 +87,22 @@ class hdp-templeton::copy-hdfs-directories($service_state)
 #  }
   hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
    service_state => $service_state,
-   owner => $hdp-templeton::params::templeton_user,
+   owner => $webhcat_user,
    mode  => '755',
-   dest_dir => '/apps/webhcat/hadoop-streaming.jar'
+   dest_dir => "$webhcat_apps_dir/hadoop-streaming.jar"
   }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
     service_state => $service_state,
-    owner => $hdp-templeton::params::templeton_user,
+    owner => $webhcat_user,
     mode  => '755',
-    dest_dir => '/apps/webhcat/pig.tar.gz'
+    dest_dir => "$webhcat_apps_dir/pig.tar.gz"
   }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::hive_tar_name} instead
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/hive.tar.gz' :
     service_state => $service_state,
-    owner => $hdp-templeton::params::templeton_user,
+    owner => $webhcat_user,
     mode  => '755',
-    dest_dir => '/apps/webhcat/hive.tar.gz'
+    dest_dir => "$webhcat_apps_dir/hive.tar.gz"
   }
 }

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb

@@ -21,7 +21,7 @@
 #
 
 # The file containing the running pid
-PID_FILE=<%=scope.function_hdp_template_var("templeton_pid_dir")%>/webhcat.pid
+PID_FILE=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/webhcat.pid
 
 TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
 
@@ -41,4 +41,4 @@ CONSOLE_LOG=<%=scope.function_hdp_template_var("hcat_log_dir")%>/webhcat-console
 #HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
 
 # Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=/usr/lib/hadoop
+export HADOOP_HOME=/usr/lib/hadoop

+ 11 - 3
ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hcatSmoke.sh → ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb

@@ -18,6 +18,14 @@
 # under the License.
 #
 #
-hcat -e 'show tables'
-hcat -e 'drop table IF EXISTS hcatsmoke'
-hcat -e 'create table hcatsmoke ( id INT, name string ) stored as rcfile ;'
+# to handle differences in how args are passed in
+require 'set'
+module Puppet::Parser::Functions
+  newfunction(:hdp_set_from_comma_list, :type => :rvalue) do |args|
+    list = function_hdp_array_from_comma_list(args)
+    list.each_index { |i| list[i] = list[i].strip }
+    # Delete empty strings
+    list.reject! { |e| e.empty? }
+    list.uniq   
+  end
+end
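
Note: reading the new function body, hdp_set_from_comma_list trims each
element, drops empties left by stray commas, and de-duplicates; the last
expression (list.uniq) is the returned value. A hypothetical call from a
manifest:

    $hosts = hdp_set_from_comma_list('a.example.com, b.example.com, ,a.example.com')
    # -> ['a.example.com', 'b.example.com']
    notify { "hosts: ${hosts}": }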

+ 16 - 5
ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp

@@ -122,6 +122,7 @@ class hdp::params()
 
   $hive_user = hdp_default("hive_user","hive")
   $hcat_user = hdp_default("hcat_user","hcat")
+  $webhcat_user = hdp_default("webhcat_user","hcat")
 
   $oozie_user = hdp_default("oozie_user","oozie")
   $templeton_user = hdp_default("templeton_user","hcat")
@@ -132,12 +133,22 @@ class hdp::params()
   $smokeuser = hdp_default("smokeuser","ambari_qa")
   $smoke_user_group = hdp_default("smoke_user_group","users")
   
-  ############ Hdfs directories
-  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
-  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/oozie")
-  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/hcat")
-  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/hive")
+  ############ Hdfs users directories
+  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/${oozie_user}")
+  $oozie_hdfs_user_mode = 775
+  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${hcat_user}")
+  $hcat_hdfs_user_mode = 755
+  $webhcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${webhcat_user}")
+  $webhcat_hdfs_user_mode = 755
+  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/${hive_user}")
+  $hive_hdfs_user_mode = 700
   $smoke_hdfs_user_dir = hdp_default("smoke_hdfs_user_dir", "/user/${smokeuser}")
+  $smoke_hdfs_user_mode = 770
+  
+  ############ Hdfs apps directories
+  $hive_apps_whs_dir = hdp_default("hive_apps_whs_dir", "/apps/hive/warehouse")
+  $webhcat_apps_dir = hdp_default("webhcat_apps_dir", "/apps/webhcat")
+  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
 
   #because of Puppet user resource issue make sure that $hadoop_user is different from user_group
   if ($security_enabled == true) {
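
Note: every value in this hunk goes through hdp_default(key, default), so
site configuration can override each directory and the literal default only
applies otherwise. Worth flagging: the new $webhcat_hdfs_user_dir looks up
the key "hcat_hdfs_user_dir" rather than a webhcat-specific key, so the two
settings share one override knob; whether that is an intentional fallback or
a carried-over typo is not clear from this diff. A minimal sketch of the
lookup pattern with hypothetical keys:

    class demo::params {
      # the site override wins when present; otherwise the default applies
      $conf_dir = hdp_default('demo_conf_dir', '/etc/demo')
      $log_dir  = hdp_default('demo_log_dir', '/var/log/demo')
    }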

+ 1 - 1
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -140,7 +140,7 @@ servicesToPidNames = {
   'GANGLIA_MONITOR': 'gmond.pid',
   'HBASE_MASTER': 'hbase-[A-Za-z0-9_]+-master.pid',
   'HBASE_REGIONSERVER': 'hbase-[A-Za-z0-9_]+-regionserver.pid',
-  'HCATALOG_SERVER': 'hcat.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
   'KERBEROS_SERVER': 'kadmind.pid',
   'HIVE_SERVER': 'hive-server.pid',
   'HIVE_METASTORE': 'hive.pid',

+ 0 - 13
ambari-agent/src/main/python/ambari_agent/imports.txt

@@ -1,13 +0,0 @@
-hdp/manifests/*.pp
-hdp-hadoop/manifests/*.pp
-hdp-hbase/manifests/*.pp
-hdp-zookeeper/manifests/*.pp
-hdp-oozie/manifests/*.pp
-hdp-pig/manifests/*.pp
-hdp-sqoop/manifests/*.pp
-hdp-templeton/manifests/*.pp
-hdp-hive/manifests/*.pp
-hdp-hcat/manifests/*.pp
-hdp-mysql/manifests/*.pp
-hdp-monitor-webserver/manifests/*.pp
-hdp-repos/manifests/*.pp

+ 0 - 55
ambari-agent/src/main/python/ambari_agent/rolesToClass.dict

@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-NAMENODE = hdp-hadoop::namenode
-DATANODE = hdp-hadoop::datanode
-SECONDARY_NAMENODE = hdp-hadoop::snamenode
-JOBTRACKER = hdp-hadoop::jobtracker
-TASKTRACKER = hdp-hadoop::tasktracker
-HDFS_CLIENT = hdp-hadoop::client
-MAPREDUCE_CLIENT = hdp-hadoop::client
-ZOOKEEPER_SERVER = hdp-zookeeper
-ZOOKEEPER_CLIENT = hdp-zookeeper::client
-HBASE_MASTER = hdp-hbase::master
-HBASE_REGIONSERVER = hdp-hbase::regionserver
-HBASE_CLIENT = hdp-hbase::client
-PIG = hdp-pig
-SQOOP = hdp-sqoop
-OOZIE_SERVER = hdp-oozie::server
-OOZIE_CLIENT = hdp-oozie::client
-HIVE_CLIENT = hdp-hive::client
-HCAT = hdp-hcat
-HIVE_SERVER = hdp-hive::server
-HIVE_METASTORE = hdp-hive::metastore
-MYSQL_SERVER = hdp-mysql::server
-WEBHCAT_SERVER = hdp-templeton::server
-DASHBOARD = hdp-dashboard
-NAGIOS_SERVER = hdp-nagios::server
-GANGLIA_SERVER = hdp-ganglia::server
-GANGLIA_MONITOR = hdp-ganglia::monitor
-HTTPD = hdp-monitor-webserver
-HDFS_SERVICE_CHECK = hdp-hadoop::hdfs::service_check
-MAPREDUCE_SERVICE_CHECK = hdp-hadoop::mapred::service_check
-ZOOKEEPER_SERVICE_CHECK = hdp-zookeeper::zookeeper::service_check
-ZOOKEEPER_QUORUM_SERVICE_CHECK = hdp-zookeeper::quorum::service_check
-HBASE_SERVICE_CHECK = hdp-hbase::hbase::service_check
-HIVE_SERVICE_CHECK = hdp-hive::hive::service_check
-HCAT_SERVICE_CHECK = hdp-hcat::hcat::service_check
-OOZIE_SERVICE_CHECK = hdp-oozie::oozie::service_check
-PIG_SERVICE_CHECK = hdp-pig::pig::service_check
-SQOOP_SERVICE_CHECK = hdp-sqoop::sqoop::service_check
-WEBHCAT_SERVICE_CHECK = hdp-templeton::templeton::service_check
-DASHBOARD_SERVICE_CHECK = hdp-dashboard::dashboard::service_check
-DECOMMISSION_DATANODE = hdp-hadoop::hdfs::decommission

+ 0 - 18
ambari-agent/src/main/python/ambari_agent/serviceStates.dict

@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-START = running
-INSTALL = installed_and_configured
-STOP = stopped

+ 0 - 34
ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict

@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-NAMENODE=hadoop-[a-z_]+-namenode.pid$
-SECONDARY_NAMENODE=hadoop-[a-z_]+-secondarynamenode.pid$
-DATANODE=hadoop-[a-z_]+-datanode.pid$
-JOBTRACKER=hadoop-[a-z_]+-jobtracker.pid$
-TASKTRACKER=hadoop-[a-z_]+-tasktracker.pid$
-OOZIE_SERVER=oozie.pid
-ZOOKEEPER_SERVER=zookeeper_server.pid
-TEMPLETON_SERVER=templeton.pid
-NAGIOS_SERVER=nagios.pid
-GANGLIA_SERVER=gmetad.pid
-GANGLIA_MONITOR=gmond.pid
-HBASE_MASTER=hbase-[a-z_]+-master.pid
-HBASE_REGIONSERVER=hbase-[a-z_]+-regionserver.pid
-NAGIOS_SERVER=nagios.pid
-HCATALOG_SERVER=hcat.pid
-KERBEROS_SERVER=kadmind.pid
-HIVE_SERVER=hive-server.pid
-HIVE_METASTORE=hive.pid
-MYSQL_SERVER=mysqld.pid

+ 8 - 4
ambari-agent/src/test/python/TestHostname.py

@@ -20,7 +20,7 @@ limitations under the License.
 
 from unittest import TestCase
 import ambari_agent.hostname as hostname
-from ambari_agent.AmbariConfig import AmbariConfig
+import ambari_agent.AmbariConfig as AmbariConfig
 import socket
 import tempfile
 import shutil
@@ -29,25 +29,29 @@ import os, pprint, json,stat
 class TestHostname(TestCase):
 
   def test_hostname(self):
-    self.assertEquals(hostname.hostname(), socket.gethostname(), "hostname should equal the socket-based hostname")
+    self.assertEquals(hostname.hostname(), socket.getfqdn(), 
+                      "hostname should equal the socket-based hostname")
     pass
 
   def test_hostname_override(self):
-    tmpname = tempfile.mkstemp(text=True)[1]
+    fd = tempfile.mkstemp(text=True)
+    tmpname = fd[1]
+    os.close(fd[0])
     os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
 
     tmpfile = file(tmpname, "w+")
 
+    config = AmbariConfig.config
     try:
       tmpfile.write("#!/bin/sh\n\necho 'test.example.com'")
       tmpfile.close()
 
-      config = AmbariConfig().getConfig()
       config.set('agent', 'hostname_script', tmpname)
 
       self.assertEquals(hostname.hostname(), 'test.example.com', "expected hostname 'test.example.com'")
     finally:
       os.remove(tmpname)
+      config.remove_option('agent', 'hostname_script')
 
     pass
 

+ 9 - 2
ambari-project/pom.xml

@@ -17,14 +17,17 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari</artifactId>
-    <version>1.2.1-SNAPSHOT</version>
+    <version>1.2.2-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-project</artifactId>
-  <version>1.2.1-SNAPSHOT</version>
+  <version>1.2.2-SNAPSHOT</version>
   <description>Apache Ambari Project POM</description>
   <name>Apache Ambari Project POM</name>
   <packaging>pom</packaging>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
   <pluginRepositories>
     <pluginRepository>
       <id>maven2-repository.dev.java.net</id>
@@ -360,6 +363,10 @@
       </plugins>
     </pluginManagement>
     <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
       <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>

+ 94 - 93
ambari-server/docs/api/v1/clusters-cluster.md

@@ -1,4 +1,4 @@
-<!---
+	<!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
@@ -28,97 +28,98 @@ Returns information for the specified cluster identified by ":name"
 
     200 OK
     {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
-      "Clusters" : {
-        "cluster_name" : "MyCluster",
-        "cluster_id" : 1,
-        "version" : "HDP-1.2.0"
-      },
-      "services" : [
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/NAGIOS",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "NAGIOS"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HCATALOG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HCATALOG"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/PIG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "PIG"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/MAPREDUCE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "MAPREDUCE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/GANGLIA",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "GANGLIA"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HIVE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HIVE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS",
-        "ServiceInfo" : {
-          "cluster_name" : "MyIE9",
-          "service_name" : "HDFS"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/ZOOKEEPER",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "ZOOKEEPER"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HBASE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HBASE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/OOZIE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "OOZIE"
-          }
-        } ],
-    "hosts" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host",
-      "Hosts" : {
-        "cluster_name" : "MyCluster",
-        "host_name" : "some.cluster.host"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/another.cluster.host",
-      "Hosts" : {
-        "cluster_name" : "MyCluster",
-        "host_name" : "another.cluster.host"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1",
+      	"Clusters" : {
+        	"cluster_name" : "c1",
+        	"cluster_id" : 1,
+        	"version" : "HDP-1.2.0"
+      	},
+      	"services" : [
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/NAGIOS",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "NAGIOS"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HCATALOG"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+         			"service_name" : "PIG"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/MAPREDUCE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "MAPREDUCE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/GANGLIA",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "GANGLIA"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HIVE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HIVE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HDFS"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/ZOOKEEPER",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+         	 		"service_name" : "ZOOKEEPER"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HBASE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HBASE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/OOZIE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "OOZIE"
+          		}
+        	} 
+    	],
+    	"hosts" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host1"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host2"
+        		}
+      		}
+    	]
     }
 

+ 11 - 8
ambari-server/docs/api/v1/clusters.md

@@ -1,3 +1,4 @@
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -29,11 +30,13 @@ Returns a collection of the currently configured clusters.
     200 OK
     {
       "href" : "http://your.ambari.server/api/v1/clusters",
-      "items" : [ {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
-        "Clusters" : {
-          "cluster_name" : "MyCluster",
-          "version" : "HDP-1.2.0"
-        }
-      } ]
-    }
+      "items" : [ 
+      		{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1",
+        		"Clusters" : {
+          			"cluster_name" : "c1",
+          			"version" : "HDP-1.2.0"
+        		}
+      		} 	
+    	]
+	}

+ 54 - 43
ambari-server/docs/api/v1/components-component.md

@@ -28,47 +28,58 @@ Refers to a specific component identified by ":componentName" for a given servic
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
-    "metrics" : {
-      "rpc" : {
-        ...
-      },
-      "dfs" : {
-        "datanode" : {
-          ...
-        }
-      },
-      "disk" : {
-        ...
-      },
-      "cpu" : {
-        ...
-      },
-      "jvm" : {
-        ...
-      },
-      "load" : {
-        ...
-      },
-      "memory" : {
-        ...
-      },
-      "network" : {
-        ...
-      },
-    },
-    "ServiceComponentInfo" : {
-      "cluster_name" : "MyCluster",
-      "component_name" : "DATANODE",
-      "service_name" : "HDFS"
-    },
-    "host_components" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host/host_components/DATANODE",
-      "HostRoles" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "DATANODE",
-        "host_name" : "some.cluster.host"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"rpc" : {
+        		...
+      		},
+      		"ugi" : {
+      			...
+      		},
+      		"dfs" : {
+        		"datanode" : {
+          		...
+        		}
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"rpcdetailed" : {
+      			...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		},
+    	},
+    	"ServiceComponentInfo" : {
+      		"cluster_name" : "c1",
+      		"component_name" : "DATANODE",
+      		"service_name" : "HDFS",
+      		"state" : "STARTED"
+    	},
+    	"host_components" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+        		}
+      		}
+       	]
     }

+ 35 - 34
ambari-server/docs/api/v1/components.md

@@ -28,38 +28,39 @@ Refers to a collection of all components for a given service.
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components",
-    "items" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "DATANODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/SECONDARY_NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "SECONDARY_NAMENODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "NAMENODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/HDFS_CLIENT",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "HDFS_CLIENT",
-        "service_name" : "HDFS"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components",
+    	"items" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "SECONDARY_NAMENODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "NAMENODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "HDFS_CLIENT",
+        			"service_name" : "HDFS"
+        		}
+      		}
+      	]
     }

+ 54 - 0
ambari-server/docs/api/v1/host-component.md

@@ -27,3 +27,57 @@ Returns information for a specific role on the given host.
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+    	"HostRoles" : {
+    		"cluster_name" : "c1",
+      		"component_name" : "DATANODE",
+      		"host_name" : "host1",
+      		"state" : "STARTED"
+    	},
+    	"host" : {
+    		"href" : "http://localhost:8080/api/v1/clusters/c1/hosts/dev.hortonworks.com"
+    	},
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"ugi" : {
+      			...
+      		},
+      		"dfs" : {
+        		"datanode" : {
+          		...
+        		}
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		}
+    	},
+    	"component" : [
+      		{
+    	      	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"service_name" : "HDFS"
+        		}
+      		}
+       	]
+    }
+

+ 28 - 1
ambari-server/docs/api/v1/host-components.md

@@ -27,4 +27,31 @@ Returns a collection of components running on a given host.
 **Response**
 
     200 OK
-
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components",
+    	items" : [
+    		{
+      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+      			},
+      			"host" : {
+        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
+      			}
+    		},
+			{
+      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/HBASE_CLIENT",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "HBASE_CLIENT",
+        			"host_name" : "host1"
+      			},
+      			"host" : {
+        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
+      			}
+    		},
+    		...
+		]
+	}
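
Since this commit also introduces a query compiler (see the new QueryLexer and
QueryParser below), collection resources such as host_components can be
filtered with a predicate expression in the query string. A hedged example
(the property path is an assumption based on the HostRoles category shown
above):

    GET http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components?HostRoles/state=STARTED

The lexer recognizes the operators <=, >=, !=, =, <, >, the logical
connectives &, |, !, and the functions .in( and .isEmpty(, while ignoring
non-query parameters such as "fields".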

+ 70 - 0
ambari-server/docs/api/v1/hosts-host.md

@@ -27,4 +27,74 @@ Returns information about a single host in a given cluster.
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"rpc" : {
+        		...
+      		},
+      		"ugi" : {
+      			...
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"rpcdetailed" : {
+      			...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		}
+    	},
+    	"Hosts" : {
+      		"cluster_name" : "c1",
+      		"host_name" : "host1",
+      		"host_state" : "HEALTHY",
+      		"public_host_name" : "host1.yourDomain.com",
+      		"cpu_count" : 1,
+      		"rack_info" : "rack-name",
+      		"os_arch" : "x86_64",
+      		"disk_info" : [
+      			{
+      				"available" : "41497444",
+        			"used" : "9584560",
+        			"percent" : "19%",
+        			"size" : "51606140",
+        			"type" : "ext4",
+       	 			"mountpoint" : "/"
+      			}
+      		],
+      		"ip" : "10.0.2.15",
+      		"os_type" : "rhel6",
+      		"total_mem" : 2055208,
+      		...        	      		
+    	},
+    	"host_components" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+        		}
+      		},
+      		...
+       	]
+    }
+
 

+ 19 - 0
ambari-server/docs/api/v1/hosts.md

@@ -27,3 +27,22 @@ Returns a collection of all hosts in a given cluster.
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/",
+    	"items" : [
+    		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host1"
+      			}
+    		},
+    		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host2"
+      			}
+    		}
+    	]
+	}  	

File diff suppressed because it is too large
+ 46 - 30
ambari-server/docs/api/v1/index.md


+ 40 - 38
ambari-server/docs/api/v1/services-service.md

@@ -28,43 +28,45 @@ Refers to a specific service identified by ":serviceName" for a given cluster.
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS",
-    "ServiceInfo" : {
-      "cluster_name" : "MyCluster",
-      "service_name" : "HDFS"
-      },
-    "components" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "NAMENODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "DATANODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/HDFS_CLIENT",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "HDFS_CLIENT",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/SECONDARY_NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "SECONDARY_NAMENODE",
-        "service_name" : "HDFS"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS",
+    	"ServiceInfo" : {
+      		"cluster_name" : "c1",
+      		"service_name" : "HDFS",
+      		"state" : "STARTED"      		
+      	},
+    	"components" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "NAMENODE",
+        			"service_name" : "HDFS"
+       			}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "HDFS_CLIENT",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE",
+     			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "SECONDARY_NAMENODE",
+        			"service_name" : "HDFS"
+        		}
+      		}
+      	]
     }
 

+ 25 - 25
ambari-server/docs/api/v1/services.md

@@ -28,28 +28,28 @@ Returns a collection of the services in a given cluster.
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services",
-    "items" : [
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/NAGIOS",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "NAGIOS"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HCATALOG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HCATALOG"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/PIG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "PIG"
-          }
-        }
-      ]
-    }
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services",
+    	"items" : [
+    		{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/NAGIOS",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "NAGIOS"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
+        		"ServiceInfo" : {
+        	  		"cluster_name" : "c1",
+        	  		"service_name" : "HCATALOG"
+        	  	}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
+        		"ServiceInfo" : {
+        	  		"cluster_name" : "c1",
+        	  		"service_name" : "PIG"
+        	  	}	
+        	}
+        ]
+    }    

+ 0 - 1
ambari-server/pass.txt

@@ -1 +0,0 @@
-k0n9LEBvrNOBzCw4drmUBSnCykzL0ZVzt5cZyLXvJtlsQZpUNq

+ 25 - 3
ambari-server/pom.xml

@@ -16,7 +16,7 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
-    <version>1.2.1-SNAPSHOT</version>
+    <version>1.2.2-SNAPSHOT</version>
     <relativePath>../ambari-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
@@ -24,13 +24,18 @@
   <artifactId>ambari-server</artifactId>
   <packaging>jar</packaging>
   <name>Ambari Server</name>
-  <version>1.2.1-SNAPSHOT</version>
+  <version>1.2.2-SNAPSHOT</version>
   <description>Ambari Server</description>
   <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
   </properties>
   <build>
     <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
       <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
         <configuration>
@@ -248,7 +253,24 @@
               <groupname>root</groupname>
               <sources>
                 <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.1.sql</location>
+                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql</location>
+                </source>
+                <source>
+                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql</location>
+                </source>
+                <source>
+                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/ambari-server/resources/upgrade/dml</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql</location>
                 </source>
               </sources>
             </mapping>

+ 5 - 1
ambari-server/sbin/ambari-server

@@ -77,6 +77,10 @@ case "$1" in
         echo -e "Upgrading ambari-server"
         $PYTHON /usr/sbin/ambari-server.py $@
         ;;
+  upgradestack)
+        echo -e "Upgrading stack of ambari-server"
+        $PYTHON /usr/sbin/ambari-server.py $@
+        ;;
   setup)
         echo -e "Run postgresql initdb"
         initdb_res=`/sbin/service postgresql initdb`
@@ -89,7 +93,7 @@ case "$1" in
         $PYTHON /usr/sbin/ambari-server.py $@
         ;;
   *)
-        echo "Usage: /usr/sbin/ambari-server {start|stop|restart|setup|upgrade} [options]"
+        echo "Usage: /usr/sbin/ambari-server {start|stop|restart|setup|upgrade|upgradestack} [options]"
         exit 1
 esac
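
With this change the new subcommand is invoked like the existing ones (per the
updated usage string), and any options are passed straight through to
ambari-server.py:

    /usr/sbin/ambari-server upgradestack [options]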
 

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java

@@ -181,6 +181,8 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
         hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId());
         ExecutionCommandEntity executionCommandEntity = hostRoleCommand.constructExecutionCommandEntity();
         executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity);
+
+        executionCommandEntity.setTaskId(hostRoleCommandEntity.getTaskId());
         hostRoleCommandEntity.setExecutionCommand(executionCommandEntity);
 
         executionCommandDAO.create(hostRoleCommandEntity.getExecutionCommand());

+ 10 - 8
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java

@@ -17,11 +17,9 @@
  */
 package org.apache.ambari.server.actionmanager;
 
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import com.google.inject.name.Named;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.controller.HostsMap;
@@ -30,9 +28,10 @@ import org.apache.ambari.server.utils.StageUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.name.Named;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 
 
 /**
@@ -75,6 +74,9 @@ public class ActionManager {
       LOG.info("Persisting stage into db: " + s.toString());
     }
     db.persistActions(stages);
+
+    // Now scheduler should process actions
+    scheduler.awake();
   }
 
   public List<Stage> getRequestStatus(long requestId) {

+ 34 - 16
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java

@@ -17,32 +17,25 @@
  */
 package org.apache.ambari.server.actionmanager;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.*;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpFailedEvent;
-import org.apache.ambari.server.utils.StageUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-//This class encapsulates the action scheduler thread.
-//Action schedule frequently looks at action database and determines if
-//there is an action that can be scheduled.
+import java.util.*;
+
+/**
+ * This class encapsulates the action scheduler thread.
+ * The action scheduler frequently looks at the action database and determines
+ * whether there is an action that can be scheduled.
+ */
 class ActionScheduler implements Runnable {
 
   private static Logger LOG = LoggerFactory.getLogger(ActionScheduler.class);
@@ -56,6 +49,14 @@ class ActionScheduler implements Runnable {
   private final Clusters fsmObject;
   private boolean taskTimeoutAdjustment = true;
   private final HostsMap hostsMap;
+  private final Object wakeupSyncObject = new Object();
+
+  /**
+   * true if scheduler should run ASAP.
+   * We need this flag to avoid sleep in situations, when
+   * we receive awake() request during running a scheduler iteration.
+   */
+  private boolean activeAwakeRequest = false;
 
   public ActionScheduler(long sleepTimeMilliSec, long actionTimeoutMilliSec,
       ActionDBAccessor db, ActionQueue actionQueue, Clusters fsmObject,
@@ -79,11 +80,28 @@ class ActionScheduler implements Runnable {
     schedulerThread.interrupt();
   }
 
+  /**
+   * Should be called from another thread when we want scheduler to
+   * make a run ASAP (for example, to process desired configs of SCHs).
+   * The method is guaranteed to return quickly.
+   */
+  public void awake() {
+    synchronized (wakeupSyncObject) {
+      activeAwakeRequest = true;
+      wakeupSyncObject.notify();
+    }
+  }
+
   @Override
   public void run() {
     while (shouldRun) {
       try {
-        Thread.sleep(sleepTime);
+        synchronized (wakeupSyncObject) {
+          if (!activeAwakeRequest) {
+              wakeupSyncObject.wait(sleepTime);
+          }
+          activeAwakeRequest = false;
+        }
         doWork();
       } catch (InterruptedException ex) {
         LOG.warn("Scheduler thread is interrupted going to stop", ex);

+ 2 - 6
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java

@@ -223,13 +223,9 @@ public class HeartBeatHandler {
               if (prevState.equals(State.INSTALLED)
                   || prevState.equals(State.START_FAILED)
                   || prevState.equals(State.STARTED)
+                  || prevState.equals(State.STARTING)
+                  || prevState.equals(State.STOPPING)
                   || prevState.equals(State.STOP_FAILED)) {
-                if (prevState == State.START_FAILED
-                        && liveState == State.INSTALLED) {
-                  LOG.info("Ignoring INSTALLED state update for " +
-                          "START_FAILED component");
-                  continue;
-                }
                 scHost.setState(liveState);
                 if (!prevState.equals(liveState)) {
                   LOG.info("State of service component " + componentName

+ 9 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/handlers/BaseManagementHandler.java

@@ -18,10 +18,12 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.Request;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
+import org.apache.ambari.server.api.services.ResultStatus;
 import org.apache.ambari.server.api.services.persistence.PersistenceManager;
 import org.apache.ambari.server.api.services.persistence.PersistenceManagerImpl;
 import org.apache.ambari.server.api.util.TreeNode;
@@ -57,7 +59,13 @@ public abstract class BaseManagementHandler implements RequestHandler {
 
   public Result handleRequest(Request request) {
     ResourceInstance resource = request.getResource();
-    Predicate queryPredicate = request.getQueryPredicate();
+    Predicate queryPredicate;
+    try {
+      queryPredicate = request.getQueryPredicate();
+    } catch (InvalidQueryException e) {
+      return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
+          "Invalid Request: " + e.getMessage()));
+    }
     if (queryPredicate != null) {
       resource.getQuery().setUserPredicate(queryPredicate);
     }

+ 9 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/handlers/ReadHandler.java

@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.services.Request;
 import org.apache.ambari.server.api.services.ResultImpl;
 import org.apache.ambari.server.api.services.ResultStatus;
@@ -51,9 +52,12 @@ public class ReadHandler implements RequestHandler {
       return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
     }
 
-    query.setUserPredicate(request.getQueryPredicate());
     Result result;
+    Predicate p = null;
     try {
+      p = request.getQueryPredicate();
+      query.setUserPredicate(p);
+
       result = query.execute();
       result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
     } catch (SystemException e) {
@@ -63,7 +67,7 @@ public class ReadHandler implements RequestHandler {
     } catch (UnsupportedPropertyException e) {
       result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
     } catch (NoSuchResourceException e) {
-      if (request.getQueryPredicate() == null) {
+      if (p == null) {
         // no predicate specified, resource requested by id
         result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
       } else {
@@ -74,6 +78,9 @@ public class ReadHandler implements RequestHandler {
     } catch (IllegalArgumentException e) {
       result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
           "Invalid Request: " + e.getMessage()));
+    } catch (InvalidQueryException e) {
+      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
+          "Invalid Request: " + e.getMessage()));
     } catch (RuntimeException e) {
       if (LOG.isErrorEnabled()) {
         LOG.error("Caught a runtime exception executing a query", e);

+ 43 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/InvalidQueryException.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate;
+
+/**
+ * Exception indicating that a query compilation error occurred.
+ */
+public class InvalidQueryException extends Exception {
+  /**
+   * Constructor.
+   *
+   * @param msg msg
+   */
+  public InvalidQueryException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param msg        msg
+   * @param throwable  root cause
+   */
+  public InvalidQueryException(String msg, Throwable throwable) {
+    super(msg, throwable);
+  }
+}

+ 49 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/PredicateCompiler.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate;
+
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Compiler which takes a query expression as input and produces a predicate instance as output.
+ */
+public class PredicateCompiler {
+
+  /**
+   * Lexer instance used to translate expressions into stream of tokens.
+   */
+  private QueryLexer lexer = new QueryLexer();
+
+  /**
+   * Parser instance used to produce a predicate instance from a stream of tokens.
+   */
+  private QueryParser parser = new QueryParser();
+
+  /**
+   * Generate a predicate from a query expression.
+   *
+   * @param exp  query expression
+   *
+   * @return a predicate instance
+   * @throws InvalidQueryException if unable to compile the expression
+   */
+  public Predicate compile(String exp) throws InvalidQueryException {
+    return parser.parse(lexer.tokens(exp));
+  }
+}
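
A hedged usage sketch for the new compiler (it compiles only against the
ambari-server module; the query expression itself is illustrative):

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.PredicateCompiler;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class CompileExample {
        public static void main(String[] args) throws InvalidQueryException {
            PredicateCompiler compiler = new PredicateCompiler();
            // Sample expression: state equals STARTED or INSTALLED.
            Predicate predicate = compiler.compile(
                    "HostRoles/state=STARTED|HostRoles/state=INSTALLED");
            System.out.println(predicate);
        }
    }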

+ 501 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java

@@ -0,0 +1,501 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate;
+
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Scans a query expression and generates an array of tokens.
+ * Each token contains type and value information.
+ *
+ * First, the query expression is broken down into string tokens using
+ * a regular expression which splits on a set of delimiters, including
+ * operators and brackets.
+ *
+ * Second, each string token is converted into a Token with type and value information.
+ */
+public class QueryLexer {
+  /**
+   * All valid delimiters.
+   */
+  private static final String[] ALL_DELIMS =
+      {".in\\(",".isEmpty\\(","<=",">=","!=","=","<",">","&","|","!","(", ")"};
+
+  /**
+   * Map of token type to list of valid handlers for next token.
+   */
+  private static final Map<Token.TYPE, List<TokenHandler>> TOKEN_HANDLERS =
+      new HashMap<Token.TYPE, List<TokenHandler>>();
+
+  /**
+   * Set of property names to ignore.
+   */
+  private static final Set<String> SET_IGNORE = new HashSet<String>();
+
+  /**
+   * Constructor.
+   * Register token handlers.
+   */
+  public QueryLexer() {
+    //todo: refactor handler registration
+    List<TokenHandler> listHandlers = new ArrayList<TokenHandler>();
+    listHandlers.add(new LogicalUnaryOperatorTokenHandler());
+    listHandlers.add(new OpenBracketTokenHandler());
+    listHandlers.add(new PropertyOperandTokenHandler());
+
+    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_OPEN, listHandlers);
+    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_OPERATOR, listHandlers);
+    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_UNARY_OPERATOR, listHandlers);
+
+    listHandlers= new ArrayList<TokenHandler>();
+    listHandlers.add(new RelationalOperatorTokenHandler());
+    listHandlers.add(new RelationalOperatorFuncTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.PROPERTY_OPERAND, listHandlers);
+
+    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers.add(new ValueOperandTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR, listHandlers);
+
+    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers.add(new CloseBracketTokenHandler());
+    listHandlers.add(new ValueOperandTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR_FUNC, listHandlers);
+
+    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers.add(new CloseBracketTokenHandler());
+    listHandlers.add(new LogicalOperatorTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.VALUE_OPERAND, listHandlers);
+    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_CLOSE, listHandlers);
+  }
+
+
+  /**
+   * Scan the provided query and generate a token stream to be used by the query parser.
+   *
+   * @param exp  the query expression to scan
+   *
+   * @return an array of tokens
+   * @throws InvalidQueryException if the query is invalid
+   */
+  public Token[] tokens(String exp) throws InvalidQueryException {
+
+    ScanContext ctx = new ScanContext();
+    for (String tok : parseStringTokens(exp)) {
+      List<TokenHandler> listHandlers = TOKEN_HANDLERS.get(ctx.getLastTokenType());
+      boolean            processed    = false;
+      int                idx          = 0;
+
+      while (!processed && idx < listHandlers.size()) {
+        processed = listHandlers.get(idx++).handleToken(tok, ctx);
+      }
+
+      if (! processed) {
+        throw new InvalidQueryException("Invalid Query Token: token='" +
+            tok + "\', previous token type=" + ctx.getLastTokenType());
+      }
+    }
+    return ctx.getTokenList().toArray(new Token[ctx.getTokenList().size()]);
+  }
+
+  /**
+   * Uses a regular expression to scan a query expression and produce a list of string tokens.
+   * These tokens are the exact strings that exist in the original syntax.
+   *
+   * @param exp  the query expression
+   *
+   * @return list of string tokens from the query expression
+   */
+  private List<String> parseStringTokens(String exp) {
+    Pattern      pattern       = generatePattern();
+    Matcher      matcher       = pattern.matcher(exp);
+    List<String> listStrTokens = new ArrayList<String>();
+    int pos = 0;
+
+    while (matcher.find()) { // while there's a delimiter in the string
+      if (pos != matcher.start()) {
+        // add anything between the current and previous delimiter to the tokens list
+        listStrTokens.add(exp.substring(pos, matcher.start()));
+      }
+      listStrTokens.add(matcher.group()); // add the delimiter
+      pos = matcher.end(); // Remember end of delimiter
+    }
+    if (pos != exp.length()) {
+      // Add any chars remaining in the string after last delimiter
+      listStrTokens.add(exp.substring(pos));
+    }
+    return listStrTokens;
+  }
+
+  /**
+   * Generate the regex pattern to tokenize the query expression.
+   *
+   * @return the regex pattern
+   */
+  private Pattern generatePattern() {
+    StringBuilder sb = new StringBuilder();
+    sb.append('(');
+    for (String delim : ALL_DELIMS) { // For each delimiter
+      if (sb.length() != 1) sb.append('|');
+      sb.append('\\');
+      sb.append(delim);
+    }
+    sb.append(')');
+
+    return Pattern.compile(sb.toString());
+  }
+
+  /**
+   * Add property names that the lexer should ignore.
+   */
+  static {
+    // ignore values
+    SET_IGNORE.add("fields");
+    SET_IGNORE.add("_");
+  }
+
+  /**
+   * Scan context.  Provides contextual information related to the current scan.
+   */
+  private class ScanContext {
+    /**
+     * The last token type scanned.
+     */
+    private Token.TYPE m_lastType;
+
+    /**
+     * The last property operand value
+     */
+    private String m_propertyName;
+
+    /**
+     * List of tokens generated by the scan
+     */
+    private List<Token> m_listTokens = new ArrayList<Token>();
+
+    /**
+     * Whether the current expression should be ignored.
+     * This is used to ignore portions of the query string that are
+     * not query specific.
+     */
+    private boolean m_ignore = false;
+
+    /**
+     * Constructor.
+     */
+    private ScanContext() {
+      //init last type to the logical op type
+      m_lastType = Token.TYPE.LOGICAL_OPERATOR;
+    }
+
+    /**
+     * Set the ignore tokens flag.
+     *
+     * @param ignore  true to ignore tokens; false otherwise
+     */
+    public void setIgnoreTokens(boolean ignore) {
+      m_ignore = ignore;
+    }
+
+    /**
+     * Get the type of the last token.
+     *
+     * @return the type of the last token
+     */
+    public Token.TYPE getLastTokenType() {
+      return m_lastType;
+    }
+
+    /**
+     * Set the type of the last token.
+     *
+     * @param lastType  the type of the last token
+     */
+    public void setLastTokenType(Token.TYPE lastType) {
+      m_lastType = lastType;
+    }
+
+    /**
+     * Get the current property operand value.
+     * This is used to hold the property operand name until it is added, since
+     * the following relational operator token is added first.
+     *
+     * @return the current property operand value
+     */
+    public String getPropertyOperand() {
+      return m_propertyName;
+    }
+
+    /**
+     * Set the current property operand value.
+     * This is used to hold the property operand name until it is added, since
+     * the following relational operator token is added first.
+     */
+    public void setPropertyOperand(String prop) {
+      m_propertyName = prop;
+    }
+
+    /**
+     * Add a token.
+     *
+     * @param token  the token to add
+     */
+    public void addToken(Token token) {
+      if (! m_ignore) {
+        m_listTokens.add(token);
+      }
+    }
+
+    /**
+     * Get the list of generated tokens.
+     *
+     * @return the list of generated tokens
+     */
+    public List<Token> getTokenList() {
+      return m_listTokens;
+    }
+  }
+
+  /**
+   * Token handler base class.
+   * Token handlers are responsible for processing specific token type.
+   */
+  private abstract class TokenHandler {
+    /**
+     * Provides base token handler functionality then delegates to the individual concrete handlers.
+     *
+     * @param token   the token to process
+     * @param ctx     the scan context
+     *
+     * @return true if this handler processed the token; false otherwise
+     * @throws InvalidQueryException  if an invalid token is encountered
+     */
+    public boolean handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      if (handles(token, ctx.getLastTokenType())) {
+        _handleToken(token, ctx);
+        ctx.setLastTokenType(getType());
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    /**
+     * Process a token.
+     *
+     * @param token  the token to process
+     * @param ctx    the current scan context
+     * @throws InvalidQueryException if an invalid token is encountered
+     */
+    public abstract void _handleToken(String token, ScanContext ctx) throws InvalidQueryException;
+
+    /**
+     * Get the token handler type.
+     *
+     * @return the token handler type
+     */
+    public abstract Token.TYPE getType();
+
+    /**
+     * Determine if a handler handles a specific token type.
+     *
+     * @param token              the token type
+     * @param previousTokenType  the previous token type
+     *
+     * @return true if the handler handles the specified type; false otherwise
+     */
+    public abstract boolean handles(String token, Token.TYPE previousTokenType);
+  }
+
+  /**
+   * Property Operand token handler.
+   */
+  private class PropertyOperandTokenHandler extends TokenHandler {
+
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      //don't add prop name token until after operator token
+      if (! SET_IGNORE.contains(token)) {
+        ctx.setPropertyOperand(token);
+      } else {
+        ctx.setIgnoreTokens(true);
+        if (!ctx.getTokenList().isEmpty()) {
+        // remove '&' token that separates ignored token and query
+          ctx.getTokenList().remove(ctx.getTokenList().size() -1);
+        }
+      }
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.PROPERTY_OPERAND;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("[^!&\\|<=|>=|!=|=|<|>\\(\\)]+");
+    }
+  }
+
+  /**
+   * Value Operand token handler.
+   */
+  private class ValueOperandTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.VALUE_OPERAND, token));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.VALUE_OPERAND;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("[^!&\\|<=|>=|!=|=|<|>]+");
+    }
+  }
+
+  /**
+   * Open Bracket token handler.
+   */
+  private class OpenBracketTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.BRACKET_OPEN, token));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.BRACKET_OPEN;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("\\(");
+    }
+  }
+
+  /**
+   * Close Bracket token handler.
+   */
+  private class CloseBracketTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.BRACKET_CLOSE, token));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.BRACKET_CLOSE;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("\\)");
+    }
+  }
+
+  /**
+   * Relational Operator token handler.
+   */
+  private class RelationalOperatorTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.RELATIONAL_OPERATOR, token));
+      ctx.addToken(new Token(Token.TYPE.PROPERTY_OPERAND, ctx.getPropertyOperand()));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.RELATIONAL_OPERATOR;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("<=|>=|!=|=|<|>");
+    }
+  }
+
+  /**
+   * Relational Operator function token handler.
+   */
+  private class RelationalOperatorFuncTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, token));
+      ctx.addToken(new Token(Token.TYPE.PROPERTY_OPERAND, ctx.getPropertyOperand()));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.RELATIONAL_OPERATOR_FUNC;
+    }
+
+    //todo: add a unary relational operator func
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("\\.[a-zA-Z]+\\(");
+    }
+  }
+
+
+  /**
+   * Logical Operator token handler.
+   */
+  private class LogicalOperatorTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.LOGICAL_OPERATOR, token));
+      ctx.setIgnoreTokens(false);
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.LOGICAL_OPERATOR;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return token.matches("[!&\\|]");
+    }
+  }
+
+  /**
+   * Logical Unary Operator token handler.
+   */
+  private class LogicalUnaryOperatorTokenHandler extends TokenHandler {
+    @Override
+    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
+      ctx.addToken(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, token));
+    }
+
+    @Override
+    public Token.TYPE getType() {
+      return Token.TYPE.LOGICAL_UNARY_OPERATOR;
+    }
+
+    @Override
+    public boolean handles(String token, Token.TYPE previousTokenType) {
+      return "!".equals(token);
+    }
+  }
+}
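
Tracing the handlers above on a small expression shows the token stream the
lexer produces; note that each relational operator is emitted before its
property operand (the handler holds the property name back until the operator
is seen), and that ignored parameters such as "fields" never reach the output:

    a=1&b!=2
      =>  RELATIONAL_OPERATOR "="    PROPERTY_OPERAND "a"   VALUE_OPERAND "1"
          LOGICAL_OPERATOR    "&"
          RELATIONAL_OPERATOR "!="   PROPERTY_OPERAND "b"   VALUE_OPERAND "2"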

+ 514 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java

@@ -0,0 +1,514 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate;
+
+import org.apache.ambari.server.api.predicate.expressions.Expression;
+import org.apache.ambari.server.api.predicate.expressions.LogicalExpressionFactory;
+import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
+import org.apache.ambari.server.api.predicate.operators.*;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+import java.util.*;
+
+/**
+ * Parser which produces a predicate instance from an array of tokens,
+ * which is generated by the lexer.
+ */
+public class QueryParser {
+
+  /**
+   * Map of token type to token handlers.
+   */
+  private static final Map<Token.TYPE, TokenHandler> TOKEN_HANDLERS =
+      new HashMap<Token.TYPE, TokenHandler>();
+
+  /**
+   * Constructor.
+   * Register token handlers.
+   *
+   */
+  public QueryParser() {
+    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_OPEN, new BracketOpenTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_CLOSE, new BracketCloseTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR, new RelationalOperatorTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_OPERATOR, new LogicalOperatorTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_UNARY_OPERATOR, new LogicalUnaryOperatorTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.PROPERTY_OPERAND, new PropertyOperandTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.VALUE_OPERAND, new ValueOperandTokenHandler());
+    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR_FUNC, new RelationalOperatorFuncTokenHandler());
+  }
+
+  /**
+   * Generate a Predicate instance from an array of tokens.
+   * Each input token contains a type and a value.
+   *
+   * Based on the token type and location, the tokens are first translated into a list of
+   * expressions, both relational and logical.  These expressions are then merged into a tree
+   * of expressions with a single root following operator precedence and explicit grouping rules.
+   * Depending on the query, this merging of expressions into a tree of expressions may occur in
+   * several passes, one pass per level of precedence starting at the highest level of precedence.
+   *
+   *  The predicate is built by traversing the expression tree in-order with each node expressing itself
+   *  as a predicate.
+   *
+   * @param tokens  an array of tokens which represent the query,
+   *                each token contains type and value information
+   *
+   * @return a new predicate instance based on the supplied tokens
+   * @throws InvalidQueryException if unable to parse the tokens and produce a predicate
+   */
+  public Predicate parse(Token[] tokens) throws InvalidQueryException {
+    ParseContext ctx = parseExpressions(tokens);
+
+    List<Expression> listExpressions       = ctx.getExpressions();
+    List<Expression> listMergedExpressions = mergeExpressions(listExpressions, ctx.getMaxPrecedence());
+
+    return listMergedExpressions.isEmpty() ? null :
+        listMergedExpressions.get(0).toPredicate();
+  }
+
+  /**
+   * Create parse context from an array of tokens. The parse context contains a list of expressions
+   * and other information about the expressions and parsed tokens.
+   *
+   * @param tokens  an array of tokens which represent the query,
+   *                each token contains type and value information
+   *
+   * @return a parse context which contains a list of expressions
+   * @throws InvalidQueryException if unable to properly parse the tokens into a parse context
+   */
+  private ParseContext parseExpressions(Token[] tokens) throws InvalidQueryException {
+    ParseContext ctx = new ParseContext(tokens);
+
+    while (ctx.getCurrentTokensIndex() < tokens.length) {
+      TOKEN_HANDLERS.get(tokens[ctx.getCurrentTokensIndex()].getType()).handleToken(ctx);
+    }
+
+    if (ctx.getPrecedenceLevel() != 0) {
+      throw new InvalidQueryException("Invalid query string: mismatched parentheses.");
+    }
+
+    return ctx;
+  }
+
+  /**
+   * Merge list of expressions into a tree of logical/relational expressions.
+   * This is done recursively in several passes, one per level of precedence starting at the
+   * highest precedence level. Recursion exits when a single expression remains.
+   *
+   * @param listExpressions  list of expressions to merge
+   * @param precedenceLevel  the precedence level that is to be merged
+   *
+   * @return  tree of expressions with a single root expression
+   */
+  private List<Expression> mergeExpressions(List<Expression> listExpressions, int precedenceLevel) {
+    if (listExpressions.size() > 1) {
+      Stack<Expression> stack = new Stack<Expression>();
+
+      stack.push(listExpressions.remove(0));
+      while (! listExpressions.isEmpty()) {
+        Expression exp = stack.pop();
+        Expression left = stack.empty() ? null : stack.pop();
+        Expression right = listExpressions.remove(0);
+        stack.addAll(exp.merge(left, right, precedenceLevel));
+      }
+      return mergeExpressions(new ArrayList<Expression>(stack), precedenceLevel - 1);
+    }
+    return listExpressions;
+  }
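+
+  // Illustration (assuming "&" is assigned a higher precedence value than
+  // "|"): for the expression list [a=1, &, b=2, |, c=3], the first pass
+  // merges the "&" level into (a=1 AND b=2), and the recursive pass merges
+  // the "|" level, leaving the single root ((a=1 AND b=2) OR c=3).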
+
+  /**
+   * A parse context which contains information related to parsing the provided tokens into expressions.
+   */
+  private class ParseContext {
+    /**
+     * The current context precedence level.  This is dictated by bracket tokens.
+     */
+    private int m_precedence = 0;
+
+    /**
+     * Current position in tokens array
+     */
+    private int m_tokensIdx = 0;
+
+    /**
+     * Tokens
+     */
+    private Token[] m_tokens;
+
+    /**
+     * The type of the previous token used in validation.
+     */
+    private Token.TYPE m_previousTokenType = null;
+
+    /**
+     * The list of expressions which are generated from the tokens.
+     */
+    private List<Expression> m_listExpressions = new ArrayList<Expression>();
+
+    /**
+     * Highest precedence level in expression.
+     */
+    int m_maxPrecedence = 0;
+
+    public ParseContext(Token[] tokens) {
+      m_tokens = tokens;
+    }
+
+    /**
+     * Get array of all tokens.
+     * @return token array
+     */
+    public Token[] getTokens() {
+      return m_tokens;
+    }
+
+    /**
+     * Get the current position in the tokens array.
+     * @return the current tokens index
+     */
+    public int getCurrentTokensIndex() {
+      return m_tokensIdx;
+    }
+
+    /**
+     * Set the current position in the tokens array.
+     * Each handler should set this value after processing a token(s).
+     * @param idx  current tokens index
+     */
+    public void setCurrentTokensIndex(int idx) {
+      m_tokensIdx = idx;
+    }
+
+    /**
+     * Increment the context precedence level.
+     *
+     * @param val  how much the level is increased by
+     */
+    public void incPrecedenceLevel(int val) {
+      m_precedence += val;
+    }
+
+    /**
+     * Decrement the context precedence level.
+     *
+     * @param val  how much the level is decremented by
+     * @throws InvalidQueryException if the level is decremented below 0
+     */
+    public void decPrecedenceLevel(int val) throws InvalidQueryException {
+      m_precedence -= val;
+      if (m_precedence < 0) {
+        throw new InvalidQueryException("Invalid query string: mismatched parentheses.");
+      }
+    }
+
+    /**
+     * Get the current context precedence level.
+     *
+     * @return current context precedence level
+     */
+    public int getPrecedenceLevel() {
+      return m_precedence;
+    }
+
+    /**
+     * Get the list of generated expressions.
+     *
+     * @return the list of generated expressions
+     */
+    public List<Expression> getExpressions() {
+      return m_listExpressions;
+    }
+
+    /**
+     * Get the last expression.
+     *
+     * @return the last expression
+     */
+    public Expression getPrecedingExpression() {
+      return m_listExpressions == null ? null :
+          m_listExpressions.get(m_listExpressions.size() - 1);
+    }
+
+    /**
+     * Get the highest operator precedence in the list of generated expressions.
+     *
+     * @return the max operator precedence
+     */
+    public int getMaxPrecedence() {
+      return m_maxPrecedence;
+    }
+
+    /**
+     * Update the max precedence level.
+     * The max precedence level is only updated if the provided level > the current level.
+     *
+     * @param precedenceLevel the new value
+     */
+    public void updateMaxPrecedence(int precedenceLevel) {
+      if (precedenceLevel > m_maxPrecedence) {
+        m_maxPrecedence = precedenceLevel;
+      }
+    }
+
+    /**
+     * Add a generated expression.
+     *
+     * @param exp  the expression to add
+     */
+    public void addExpression(Expression exp) {
+      m_listExpressions.add(exp);
+    }
+
+    /**
+     * Set the token type of the current token
+     *
+     * @param type  type of the current token
+     */
+    private void setTokenType(Token.TYPE type) {
+      m_previousTokenType = type;
+    }
+
+    /**
+     * Get the last token type set.
+     *
+     * @return the last token type set
+     */
+    public Token.TYPE getPreviousTokenType() {
+      return m_previousTokenType;
+    }
+  }
+
+
+  /**
+   * Base token handler.
+   * Token handlers are responsible for handling the processing of a specific token type.
+   */
+  private abstract class TokenHandler {
+    /**
+     * Process a token. Handles common token processing functionality then delegates to the individual
+     * concrete handlers.
+     *
+     * @param ctx    the current parse context
+     * @throws InvalidQueryException if unable to process the token
+     */
+    public void handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
+      if (! validate(ctx.getPreviousTokenType())) {
+        throw new InvalidQueryException("Unexpected token encountered in query string. Last Token Type=" +
+            ctx.getPreviousTokenType() + ", Current Token[type=" + token.getType() +
+            ", value='" + token.getValue() + "']");
+      }
+      ctx.setTokenType(token.getType());
+
+      int idxIncrement = _handleToken(ctx);
+      ctx.setCurrentTokensIndex(ctx.getCurrentTokensIndex() + idxIncrement);
+    }
+
+    /**
+     * Process a token.
+     *
+     * @param ctx    the current parse context
+     * @throws InvalidQueryException if unable to process the token
+     */
+    public abstract int _handleToken(ParseContext ctx) throws InvalidQueryException;
+
+    /**
+     * Validate the token based on the previous token.
+     *
+     * @param previousTokenType  the type of the previous token
+     * @return true if validation is successful, false otherwise
+     */
+    public abstract boolean validate(Token.TYPE previousTokenType);
+  }
+
+  /**
+   * Open Bracket token handler.
+   */
+  private class BracketOpenTokenHandler extends TokenHandler {
+
+    @Override
+    public int _handleToken(ParseContext ctx) {
+      ctx.incPrecedenceLevel(Operator.MAX_OP_PRECEDENCE);
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+     return previousTokenType == null                        ||
+            previousTokenType == Token.TYPE.BRACKET_OPEN     ||
+            previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
+            previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
+    }
+  }
+
+  /**
+   * Close Bracket token handler
+   */
+  private class BracketCloseTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException{
+      ctx.decPrecedenceLevel(Operator.MAX_OP_PRECEDENCE);
+
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == Token.TYPE.VALUE_OPERAND ||
+             previousTokenType == Token.TYPE.BRACKET_CLOSE;
+    }
+  }
+
+  /**
+   * Relational Operator token handler
+   */
+  private class RelationalOperatorTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
+      RelationalOperator relationalOp = RelationalOperatorFactory.createOperator(token.getValue());
+      //todo: use factory to create expression
+      ctx.addExpression(new RelationalExpression(relationalOp));
+
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == null                     ||
+          previousTokenType == Token.TYPE.BRACKET_OPEN     ||
+          previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
+          previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
+    }
+  }
+
+  /**
+   * Relational Operator function token handler
+   */
+  private class RelationalOperatorFuncTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token[]            tokens       = ctx.getTokens();
+      int                idx          = ctx.getCurrentTokensIndex();
+      Token              token        = tokens[idx];
+      RelationalOperator relationalOp = RelationalOperatorFactory.createOperator(token.getValue());
+
+      ctx.addExpression(new RelationalExpression(relationalOp));
+      ctx.setCurrentTokensIndex(++idx);
+
+      TokenHandler propertyHandler = new PropertyOperandTokenHandler();
+      propertyHandler.handleToken(ctx);
+
+      // handle right operand if applicable to operator
+      idx = ctx.getCurrentTokensIndex();
+      if (idx < tokens.length &&
+          tokens[idx].getType().equals(Token.TYPE.VALUE_OPERAND)) {
+        TokenHandler valueHandler = new ValueOperandTokenHandler();
+        valueHandler.handleToken(ctx);
+      }
+
+      // skip closing bracket
+      idx = ctx.getCurrentTokensIndex();
+      if (idx >= tokens.length || tokens[idx].getType() != Token.TYPE.BRACKET_CLOSE) {
+        throw new InvalidQueryException("Missing closing bracket for in expression.") ;
+      }
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == null                     ||
+          previousTokenType == Token.TYPE.BRACKET_OPEN     ||
+          previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
+          previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
+    }
+  }
+
+  /**
+   * Logical Operator token handler
+   */
+  private class LogicalOperatorTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
+      LogicalOperator logicalOp = LogicalOperatorFactory.createOperator(token.getValue(), ctx.getPrecedenceLevel());
+      ctx.updateMaxPrecedence(logicalOp.getPrecedence());
+      ctx.addExpression(LogicalExpressionFactory.createLogicalExpression(logicalOp));
+
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == Token.TYPE.VALUE_OPERAND ||
+             previousTokenType == Token.TYPE.BRACKET_CLOSE;
+    }
+  }
+
+  /**
+   * Logical Unary Operator token handler
+   */
+  private class LogicalUnaryOperatorTokenHandler extends LogicalOperatorTokenHandler {
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == null                 ||
+          previousTokenType == Token.TYPE.BRACKET_OPEN ||
+          previousTokenType == Token.TYPE.LOGICAL_OPERATOR;
+    }
+  }
+
+  /**
+   * Property Operand token handler
+   */
+  private class PropertyOperandTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
+      ctx.getPrecedingExpression().setLeftOperand(token.getValue());
+
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == Token.TYPE.RELATIONAL_OPERATOR ||
+          previousTokenType == Token.TYPE.RELATIONAL_OPERATOR_FUNC;
+    }
+  }
+
+  /**
+   * Value Operand token handler
+   */
+  private class ValueOperandTokenHandler extends TokenHandler {
+    @Override
+    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
+      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
+      ctx.getPrecedingExpression().setRightOperand(token.getValue());
+
+      return 1;
+    }
+
+    @Override
+    public boolean validate(Token.TYPE previousTokenType) {
+      return previousTokenType == Token.TYPE.PROPERTY_OPERAND;
+    }
+  }
+}
+

+ 110 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/Token.java

@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate;
+
+/**
+ * Token representation which is generated by the lexer.
+ * Contains type and value information.
+ */
+public class Token {
+
+  /**
+   * Token types.
+   */
+  public enum TYPE {
+    /** Property name operand.  This is the left operand in relational expressions. */
+    PROPERTY_OPERAND,
+    /** Value operand.  This is the right operand in relational expressions. */
+    VALUE_OPERAND,
+    /** Relational operator */
+    RELATIONAL_OPERATOR,
+    /** Relational operator function */
+    RELATIONAL_OPERATOR_FUNC,
+    /** Logical operator */
+    LOGICAL_OPERATOR,
+    /** Logical unary operator such as ! */
+    LOGICAL_UNARY_OPERATOR,
+    /** Opening bracket */
+    BRACKET_OPEN,
+    /** Closing bracket */
+    BRACKET_CLOSE
+  }
+
+  /**
+   * Token type.
+   */
+  private TYPE m_type;
+
+  /**
+   * Token value.
+   */
+  private String m_value;
+
+
+  /**
+   * Constructor.
+   *
+   * @param type   type
+   * @param value  value
+   */
+  public Token(TYPE type, String value) {
+    m_type = type;
+    m_value = value;
+  }
+
+  /**
+   * Get the token type.
+   * @return token type
+   */
+  public TYPE getType() {
+    return m_type;
+  }
+
+  /**
+   * Get the token value.
+   * @return token value
+   */
+  public String getValue() {
+    return m_value;
+  }
+
+  @Override
+  public String toString() {
+    return "Token{ type=" + m_type + ", value='" + m_value + "' }";
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    Token token = (Token) o;
+
+    return m_type == token.m_type &&
+        (m_value == null ? token.m_value == null : m_value.equals(token.m_value));
+  }
+
+  @Override
+  public int hashCode() {
+    int result = m_type.hashCode();
+    result = 31 * result + (m_value != null ? m_value.hashCode() : 0);
+
+    return result;
+  }
+}

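A minimal sketch, not part of the patch, of the token stream that the handlers earlier in this commit consume for the query a=1&!(b=2|c=3). The ordering (each relational operator first, then its property and value operands) is inferred from the validate() rules: PropertyOperandTokenHandler requires a preceding RELATIONAL_OPERATOR and ValueOperandTokenHandler a preceding PROPERTY_OPERAND.

    import org.apache.ambari.server.api.predicate.Token;

    public class TokenStreamSketch {
      public static void main(String[] args) {
        // lexed form of "a=1&!(b=2|c=3)": operator tokens precede their operands
        Token[] tokens = {
            new Token(Token.TYPE.RELATIONAL_OPERATOR, "="),
            new Token(Token.TYPE.PROPERTY_OPERAND, "a"),
            new Token(Token.TYPE.VALUE_OPERAND, "1"),
            new Token(Token.TYPE.LOGICAL_OPERATOR, "&"),
            new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"),
            new Token(Token.TYPE.BRACKET_OPEN, "("),
            new Token(Token.TYPE.RELATIONAL_OPERATOR, "="),
            new Token(Token.TYPE.PROPERTY_OPERAND, "b"),
            new Token(Token.TYPE.VALUE_OPERAND, "2"),
            new Token(Token.TYPE.LOGICAL_OPERATOR, "|"),
            new Token(Token.TYPE.RELATIONAL_OPERATOR, "="),
            new Token(Token.TYPE.PROPERTY_OPERAND, "c"),
            new Token(Token.TYPE.VALUE_OPERAND, "3"),
            new Token(Token.TYPE.BRACKET_CLOSE, ")")
        };
        for (Token t : tokens) {
          System.out.println(t);  // e.g. Token{ type=PROPERTY_OPERAND, value='a' }
        }
      }
    }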
+ 106 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java

@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.operators.Operator;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Base class for expressions.
+ */
+public abstract class AbstractExpression<T> implements Expression<T> {
+
+  /**
+   * The operator.
+   */
+  private final Operator m_op;
+
+  /**
+   * The left operand.
+   */
+  private T m_left = null;
+
+  /**
+   * The right operand.
+   */
+  private T m_right = null;
+
+  /**
+   * Constructor.
+   *
+   * @param op  the expression's operator
+   */
+  protected AbstractExpression(Operator op) {
+    m_op = op;
+  }
+
+  @Override
+  public void setLeftOperand(T left) {
+    m_left = left;
+  }
+
+  @Override
+  public void setRightOperand(T right) {
+    m_right = right;
+  }
+
+  @Override
+  public T getLeftOperand() {
+    return m_left;
+  }
+
+  @Override
+  public T getRightOperand() {
+    return m_right;
+  }
+
+  @Override
+  public Operator getOperator() {
+    return m_op;
+  }
+
+  @Override
+  public List<Expression> merge(Expression left, Expression right, int precedence) {
+    return defaultMerge(left, right);
+  }
+
+  /**
+   * Base merge implementation.
+   * No merge is performed; the left expression, this expression and the right
+   * expression are simply returned, in that order.
+   *
+   * @param left   the expression to the left of this expression
+   * @param right  the expression to the right of this expression
+   *
+   * @return a list containing the un-merged left expression, this and right expression
+   */
+  protected List<Expression> defaultMerge(Expression left, Expression right) {
+    List<Expression> listExpressions = new ArrayList<Expression>();
+    if (left != null) {
+      listExpressions.add(left);
+    }
+    listExpressions.add(this);
+    if (right != null) {
+      listExpressions.add(right);
+    }
+
+    return listExpressions;
+  }
+}

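A short sketch, not part of the patch, of the default (non-)merge above, using RelationalExpression and EqualsOperator from later in this commit; the names r1..r3 are illustrative. Relational expressions inherit this default, so merge() just returns its neighbors and itself in order.

    import org.apache.ambari.server.api.predicate.expressions.Expression;
    import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
    import org.apache.ambari.server.api.predicate.operators.EqualsOperator;

    import java.util.List;

    public class DefaultMergeSketch {
      public static void main(String[] args) {
        RelationalExpression r1 = new RelationalExpression(new EqualsOperator());
        RelationalExpression r2 = new RelationalExpression(new EqualsOperator());
        RelationalExpression r3 = new RelationalExpression(new EqualsOperator());

        // RelationalExpression does not override merge(), so no merging occurs
        List<Expression> out = r2.merge(r1, r3, 2);
        System.out.println(out.size());  // 3: [r1, r2, r3]
      }
    }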
+ 89 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.operators.Operator;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+import java.util.List;
+
+/**
+ * Expression representation.
+ * There are two types of expressions: relational and logical.
+ * Each expression has an operator and either 2 operands for binary
+ * expressions or 1 operand for unary expressions.
+ */
+public interface Expression<T> {
+
+  /**
+   * Merge expression with surrounding expressions.
+   *
+   * @param left        the preceding expression
+   * @param right       the following expression
+   * @param precedence  the precedence level being merged.  Only expressions at this precedence level
+   *                    should be merged. Others should simply return the left expression, themselves and
+   *                    the right expression in that order.
+   *
+   * @return a list of expressions after merging.  Do not return any null elements.
+   */
+  public List<Expression> merge(Expression left, Expression right, int precedence);
+
+
+  /**
+   * Get the predicate representation of the expression.
+   * @return a predicate instance for the expression
+   */
+  public Predicate toPredicate() throws InvalidQueryException;
+
+  /**
+   * Set the expression's left operand.
+   *
+   * @param left  the left operand
+   */
+  public void setLeftOperand(T left);
+
+  /**
+   * Set the expression's right operand.
+   *
+   * @param right  the right operand
+   */
+  public void setRightOperand(T right);
+
+  /**
+   * Get the left operand expression.
+   *
+   * @return the left operand
+   */
+  public T getLeftOperand();
+
+  /**
+   * Get the right operand expression.
+   *
+   * @return the right operand.
+   */
+  public T getRightOperand();
+
+  /**
+   * Get the expression operator.
+   *
+   * @return the logical operator for the expression
+   */
+  public Operator getOperator();
+}

+ 61 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpression.java

@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Logical expression implementation.
+ * Always a binary expression that consists of a logical operator and
+ * expressions for the left and right operands.
+ */
+public class LogicalExpression extends AbstractExpression<Expression> {
+
+  /**
+   * Constructor.
+   *
+   * @param op  the logical operator of the expression
+   */
+  public LogicalExpression(LogicalOperator op) {
+    super(op);
+  }
+
+
+  @Override
+  public Predicate toPredicate() throws InvalidQueryException {
+    return ((LogicalOperator) getOperator()).
+        toPredicate(getLeftOperand().toPredicate(), getRightOperand().toPredicate());
+  }
+
+  @Override
+  public List<Expression> merge(Expression left, Expression right, int precedence) {
+    if (getOperator().getPrecedence() == precedence && getLeftOperand() == null) {
+      setLeftOperand(left);
+      setRightOperand(right);
+      return Collections.<Expression>singletonList(this);
+    } else {
+      return defaultMerge(left, right);
+    }
+  }
+}

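A sketch, not part of the patch, of the precedence-driven merge above, assuming the operator classes introduced later in this commit. An AndOperator created at context precedence 0 has precedence 2 (its base value), so merging at level 2 absorbs both neighbors into the single AND expression.

    import org.apache.ambari.server.api.predicate.expressions.Expression;
    import org.apache.ambari.server.api.predicate.expressions.LogicalExpression;
    import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
    import org.apache.ambari.server.api.predicate.operators.AndOperator;
    import org.apache.ambari.server.api.predicate.operators.EqualsOperator;

    import java.util.List;

    public class LogicalMergeSketch {
      public static void main(String[] args) {
        RelationalExpression a = new RelationalExpression(new EqualsOperator());
        a.setLeftOperand("a");
        a.setRightOperand("1");
        RelationalExpression b = new RelationalExpression(new EqualsOperator());
        b.setLeftOperand("b");
        b.setRightOperand("2");

        LogicalExpression and = new LogicalExpression(new AndOperator(0));
        // the requested level matches AndOperator(0).getPrecedence() == 2,
        // so a and b become the AND expression's operands
        List<Expression> out = and.merge(a, b, and.getOperator().getPrecedence());
        System.out.println(out.size());  // 1: just the merged AND expression
      }
    }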
+ 47 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpressionFactory.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
+
+/**
+ * Factory of logical expression instances.
+ */
+public class LogicalExpressionFactory {
+  /**
+   * Create a logical expression instance.
+   *
+   * @param op  the logical operator
+   *
+   * @return a new logical expression instance
+   * @throws InvalidQueryException if an invalid logical operator type is encountered
+   */
+  public static LogicalExpression createLogicalExpression(LogicalOperator op) throws InvalidQueryException {
+    switch (op.getType()) {
+      case AND:
+      case OR:
+        return new LogicalExpression(op);
+      case NOT:
+        return new NotLogicalExpression(op);
+      default:
+        throw new InvalidQueryException("An invalid logical operator type was encountered: " + op);
+    }
+  }
+}

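Hypothetical usage of the factory above, with LogicalOperatorFactory from later in this commit; the concrete expression type depends on the operator type.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.expressions.LogicalExpression;
    import org.apache.ambari.server.api.predicate.expressions.LogicalExpressionFactory;
    import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
    import org.apache.ambari.server.api.predicate.operators.LogicalOperatorFactory;

    public class ExpressionFactorySketch {
      public static void main(String[] args) throws InvalidQueryException {
        LogicalOperator and = LogicalOperatorFactory.createOperator("&", 0);
        // AND and OR map to LogicalExpression
        LogicalExpression e1 = LogicalExpressionFactory.createLogicalExpression(and);

        LogicalOperator not = LogicalOperatorFactory.createOperator("!", 0);
        // NOT maps to the NotLogicalExpression subclass
        LogicalExpression e2 = LogicalExpressionFactory.createLogicalExpression(not);

        System.out.println(e1.getClass().getSimpleName());  // LogicalExpression
        System.out.println(e2.getClass().getSimpleName());  // NotLogicalExpression
      }
    }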
+ 65 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
+import org.apache.ambari.server.controller.predicate.BasePredicate;
+import org.apache.ambari.server.controller.predicate.NotPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A 'NOT' logical expression representation.
+ * Negates a corresponding right operand.
+ */
+public class NotLogicalExpression extends LogicalExpression {
+  /**
+   * Constructor.
+   *
+   * @param op  the logical operator
+   */
+  public NotLogicalExpression(LogicalOperator op) {
+    super(op);
+  }
+
+  @Override
+  public List<Expression> merge(Expression left, Expression right, int precedence) {
+    if (getOperator().getPrecedence() == precedence && getRightOperand() == null) {
+      List<Expression> listExpressions = new ArrayList<Expression>();
+      if (left != null) {
+        listExpressions.add(left);
+      }
+      setRightOperand(right);
+      listExpressions.add(this);
+      return listExpressions;
+    } else {
+      // wrong precedence level or already merged; fall back to the default merge
+      return defaultMerge(left, right);
+    }
+  }
+
+  @Override
+  public Predicate toPredicate() throws InvalidQueryException {
+    //todo: remove need to down cast to BasePredicate
+    return new NotPredicate((BasePredicate) getRightOperand().toPredicate());
+  }
+}

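A sketch, not part of the patch, of the asymmetric merge above: NOT consumes only its right neighbor, so the left expression passes through unchanged. NotOperator (later in this commit) has base precedence 3.

    import org.apache.ambari.server.api.predicate.expressions.Expression;
    import org.apache.ambari.server.api.predicate.expressions.NotLogicalExpression;
    import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
    import org.apache.ambari.server.api.predicate.operators.EqualsOperator;
    import org.apache.ambari.server.api.predicate.operators.NotOperator;

    import java.util.List;

    public class NotMergeSketch {
      public static void main(String[] args) {
        RelationalExpression left  = new RelationalExpression(new EqualsOperator());
        RelationalExpression right = new RelationalExpression(new EqualsOperator());

        NotLogicalExpression not = new NotLogicalExpression(new NotOperator(0));
        // level 3 matches NotOperator(0).getPrecedence(); only 'right' is consumed
        List<Expression> out = not.merge(left, right, 3);
        System.out.println(out.size());                     // 2: [left, not]
        System.out.println(not.getRightOperand() == right); // true
      }
    }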
+ 52 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/RelationalExpression.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.expressions;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.operators.RelationalOperator;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Relational Expression.
+ * Consists of a property name for the left operand, a relational operator
+ * and a value as the right operand.
+ */
+public class RelationalExpression extends AbstractExpression<String> {
+
+  /**
+   * Constructor.
+   *
+   * @param op  relational operator
+   */
+  public RelationalExpression(RelationalOperator op) {
+    super(op);
+  }
+
+  @Override
+  public Predicate toPredicate() throws InvalidQueryException {
+    return ((RelationalOperator) getOperator()).
+        toPredicate(getLeftOperand(), getRightOperand());
+  }
+
+  @Override
+  public String toString() {
+    return "RelationalExpression{ property='" + getLeftOperand() + "\', value='"
+        + getRightOperand() + "\', op=" + getOperator() + " }";
+  }
+}

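A sketch, not part of the patch, of turning a relational expression into a predicate; the property id and value are illustrative.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
    import org.apache.ambari.server.api.predicate.operators.RelationalOperatorFactory;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class RelationalExpressionSketch {
      public static void main(String[] args) throws InvalidQueryException {
        RelationalExpression exp =
            new RelationalExpression(RelationalOperatorFactory.createOperator("="));
        exp.setLeftOperand("Clusters/cluster_name");  // illustrative property id
        exp.setRightOperand("c1");

        Predicate p = exp.toPredicate();  // an EqualsPredicate for the pair
        System.out.println(exp);  // RelationalExpression{ property='...', value='c1', ... }
      }
    }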
+ 69 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AbstractOperator.java

@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+/**
+ * Base operator implementation.
+ */
+public abstract class AbstractOperator implements Operator {
+  /**
+   * The precedence value for the current context.
+   */
+  private final int m_ctxPrecedence;
+
+
+  /**
+   * Constructor.
+   *
+   * @param ctxPrecedence  the context precedence value
+   */
+  protected AbstractOperator(int ctxPrecedence) {
+    m_ctxPrecedence = ctxPrecedence;
+  }
+
+  /**
+   * Return the base precedence for this operator.
+   * This is the value that is specific to the operator
+   * type and doesn't take context into account.
+   *
+   * @return the base precedence for this operator type
+   */
+  public int getBasePrecedence() {
+    // this value is used for all relational operators
+    // logical operators override this value
+    return -1;
+  }
+
+  @Override
+  public int getPrecedence() {
+    return getBasePrecedence() + m_ctxPrecedence;
+  }
+
+  @Override
+  public String toString() {
+    return getName();
+  }
+
+  /**
+   * Get the name of the operator.
+   *
+   * @return the operator name
+   */
+  public abstract String getName();
+}

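A sketch, not part of the patch, of the precedence arithmetic: getPrecedence() adds the context value (raised by MAX_OP_PRECEDENCE for every open bracket, per BracketOpenTokenHandler earlier in this commit) to the operator's base precedence, so a bracketed AND outranks a top-level AND.

    import org.apache.ambari.server.api.predicate.operators.AndOperator;
    import org.apache.ambari.server.api.predicate.operators.Operator;

    public class PrecedenceSketch {
      public static void main(String[] args) {
        AndOperator topLevel  = new AndOperator(0);
        AndOperator bracketed = new AndOperator(Operator.MAX_OP_PRECEDENCE);  // one bracket deep

        System.out.println(topLevel.getPrecedence());   // 2 (base 2 + context 0)
        System.out.println(bracketed.getPrecedence());  // 5 (base 2 + context 3), merges first
      }
    }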
+ 64 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AndOperator.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.AndPredicate;
+import org.apache.ambari.server.controller.predicate.BasePredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * And operator implementation.
+ */
+public class AndOperator extends AbstractOperator implements LogicalOperator {
+
+  /**
+   * Constructor.
+   *
+   * @param ctxPrecedence  precedence value for the current context
+   */
+  public AndOperator(int ctxPrecedence) {
+    super(ctxPrecedence);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.AND;
+  }
+
+  @Override
+  public String getName() {
+    return "AndOperator";
+  }
+
+  @Override
+  public int getBasePrecedence() {
+    return 2;
+  }
+
+  @Override
+  public Predicate toPredicate(Predicate left, Predicate right) {
+    //todo: refactor to not need down casts
+    return new AndPredicate((BasePredicate) left, (BasePredicate) right);
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "[precedence=" + getPrecedence() + "]";
+  }
+}

+ 22 - 13
ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestHostMappingProvider.java → ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java

@@ -16,26 +16,35 @@
  * limitations under the License.
  */
 
-package org.apache.ambari.server.controller.jmx;
+package org.apache.ambari.server.api.predicate.operators;
 
-import java.util.HashMap;
-import java.util.Map;
+import org.apache.ambari.server.controller.predicate.EqualsPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
 
 /**
- *
+ * Equals operator implementation.
  */
-public class TestHostMappingProvider {
+public class EqualsOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public EqualsOperator() {
+    super(0);
+  }
 
-  private static Map<String, String> HOST_MAPPING = new HashMap<String, String>();
+  @Override
+  public TYPE getType() {
+    return TYPE.EQUAL;
+  }
 
-  static {
-    HOST_MAPPING.put("domu-12-31-39-0e-34-e1.compute-1.internal", "ec2-50-17-129-192.compute-1.amazonaws.com");
-    HOST_MAPPING.put("ip-10-190-186-15.ec2.internal",             "ec2-23-21-8-226.compute-1.amazonaws.com");
-    HOST_MAPPING.put("domu-12-31-39-14-ee-b3.compute-1.internal", "ec2-23-23-71-42.compute-1.amazonaws.com");
-    HOST_MAPPING.put("ip-10-110-157-51.ec2.internal",             "ec2-107-22-121-67.compute-1.amazonaws.com");
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new EqualsPredicate<String>(prop, val);
   }
 
-  public static Map<String, String> getHostMap() {
-    return HOST_MAPPING;
+  @Override
+  public String getName() {
+    return "EqualsOperator";
   }
 }

+ 50 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.GreaterEqualsPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Greater Than Or Equals operator implementation.
+ */
+public class GreaterEqualsOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public GreaterEqualsOperator() {
+    super(0);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.GREATER_EQUAL;
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new GreaterEqualsPredicate<String>(prop, val);
+  }
+
+  @Override
+  public String getName() {
+    return "GreaterEqualsOperator";
+  }
+}

+ 50 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.GreaterPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Greater Than operator implementation.
+ */
+public class GreaterOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public GreaterOperator() {
+    super(0);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.GREATER;
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new GreaterPredicate<String>(prop, val);
+  }
+
+  @Override
+  public String getName() {
+    return "GreaterOperator";
+  }
+}

+ 54 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java

@@ -0,0 +1,54 @@
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.controller.predicate.BasePredicate;
+import org.apache.ambari.server.controller.predicate.EqualsPredicate;
+import org.apache.ambari.server.controller.predicate.OrPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * IN relational operator.
+ * This is a binary operator which takes a comma-delimited right operand and
+ * creates an equals predicate from the left operand and each right operand token.
+ * The equals predicates are combined with an OR predicate.
+ */
+public class InOperator extends AbstractOperator implements RelationalOperator {
+
+  public InOperator() {
+    super(0);
+  }
+
+  @Override
+  public String getName() {
+    return "InOperator";
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) throws InvalidQueryException {
+
+    if (val == null) {
+      throw new InvalidQueryException("IN operator is missing a required right operand.");
+    }
+
+    String[] tokens = val.split(",");
+    List<EqualsPredicate> listPredicates = new ArrayList<EqualsPredicate>();
+    for (String token : tokens) {
+      listPredicates.add(new EqualsPredicate(prop, token.trim()));
+    }
+    return listPredicates.size() == 1 ? listPredicates.get(0) :
+        buildOrPredicate(listPredicates);
+  }
+
+  private OrPredicate buildOrPredicate(List<EqualsPredicate> listPredicates) {
+    return new OrPredicate(listPredicates.toArray(new BasePredicate[listPredicates.size()]));
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.IN;
+  }
+}

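A sketch, not part of the patch, of the IN expansion; the property id and values are illustrative. Right operand tokens are trimmed, and a single-element list collapses to a plain equals predicate.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.InOperator;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class InOperatorSketch {
      public static void main(String[] args) throws InvalidQueryException {
        InOperator in = new InOperator();

        // OrPredicate of two EqualsPredicates; the space after the comma is trimmed
        Predicate multi = in.toPredicate("HostRoles/state", "INSTALLED, STARTED");

        // a single value collapses to the EqualsPredicate itself
        Predicate single = in.toPredicate("HostRoles/state", "STARTED");

        // in.toPredicate("HostRoles/state", null) would throw InvalidQueryException
        System.out.println(multi + " / " + single);
      }
    }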
+ 51 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperator.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.controller.predicate.CategoryIsEmptyPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Operator that is used to determine if a category is empty, meaning that it doesn't
+ * contain any properties.
+ */
+public class IsEmptyOperator extends AbstractOperator implements RelationalOperator {
+  public IsEmptyOperator() {
+    super(0);
+  }
+
+  @Override
+  public String getName() {
+    return "IsEmptyOperator";
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) throws InvalidQueryException {
+    if (val != null) {
+      throw new InvalidQueryException("'isEmpty' operator shouldn't have a right operand but one exists: " + val);
+    }
+    return new CategoryIsEmptyPredicate(prop);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.IS_EMPTY;
+  }
+}

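A sketch, not part of the patch, of the isEmpty contract above: the operator is unary, so any right operand is rejected. The category name is illustrative.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.IsEmptyOperator;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class IsEmptySketch {
      public static void main(String[] args) throws InvalidQueryException {
        // matches resources whose "metrics" category contains no properties
        Predicate p = new IsEmptyOperator().toPredicate("metrics", null);

        // new IsEmptyOperator().toPredicate("metrics", "x") would throw InvalidQueryException
        System.out.println(p);
      }
    }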
+ 50 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.LessEqualsPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Less Than or Equals operator implementation.
+ */
+public class LessEqualsOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public LessEqualsOperator() {
+    super(0);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.LESS_EQUAL;
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new LessEqualsPredicate<String>(prop, val);
+  }
+
+  @Override
+  public String getName() {
+    return "LessEqualsOperator";
+  }
+}

+ 50 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.LessPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Less Than operator implementation.
+ */
+public class LessOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public LessOperator() {
+    super(0);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.LESS;
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new LessPredicate<String>(prop, val);
+  }
+
+  @Override
+  public String getName() {
+    return "LessOperator";
+  }
+}

+ 35 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Logical operator external representation.
+ */
+public interface LogicalOperator extends Operator {
+  /**
+   * Create a predicate for this logical operator.
+   *
+   * @param left   left operand
+   * @param right  right operand
+   * @return a predicate instance for this operator
+   */
+  public Predicate toPredicate(Predicate left, Predicate right);
+}

+ 48 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperatorFactory.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+
+/**
+ * Factory of Logical Operators.
+ */
+public class LogicalOperatorFactory {
+  /**
+   * Creates a logical operator based on the operator token.
+   *
+   * @param operator      string representation of operator
+   * @param ctxPrecedence precedence value of current context
+   *
+   * @return a logical operator instance
+   * @throws InvalidQueryException if the operator string is invalid
+   */
+  public static LogicalOperator createOperator(String operator, int ctxPrecedence)
+      throws InvalidQueryException {
+    if ("&".equals(operator)) {
+      return new AndOperator(ctxPrecedence);
+    } else if ("|".equals(operator)) {
+      return new OrOperator(ctxPrecedence);
+    } else if ("!".equals(operator)) {
+      return new NotOperator(ctxPrecedence);
+    } else {
+      throw new RuntimeException("Invalid Logical Operator Type: " + operator);
+    }
+  }
+}

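A sketch, not part of the patch, of the symbol-to-operator mapping above and how the context precedence feeds through to the created operator.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.LogicalOperatorFactory;

    public class LogicalOperatorFactorySketch {
      public static void main(String[] args) throws InvalidQueryException {
        System.out.println(LogicalOperatorFactory.createOperator("&", 0));  // AndOperator[precedence=2]
        System.out.println(LogicalOperatorFactory.createOperator("|", 0));  // OrOperator[precedence=1]
        System.out.println(LogicalOperatorFactory.createOperator("!", 3));  // NotOperator[precedence=6]
      }
    }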
+ 51 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.EqualsPredicate;
+import org.apache.ambari.server.controller.predicate.NotPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Not Equals relational operator implementation.
+ */
+public class NotEqualsOperator extends AbstractOperator implements RelationalOperator {
+
+  /**
+   * Constructor.
+   */
+  public NotEqualsOperator() {
+    super(0);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.NOT_EQUAL;
+  }
+
+  @Override
+  public Predicate toPredicate(String prop, String val) {
+    return new NotPredicate(new EqualsPredicate<String>(prop, val));
+  }
+
+  @Override
+  public String getName() {
+    return "NotEqualsOperator";
+  }
+}

+ 63 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotOperator.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.BasePredicate;
+import org.apache.ambari.server.controller.predicate.NotPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Not unary operator implementation.
+ */
+public class NotOperator extends AbstractOperator implements LogicalOperator {
+
+  /**
+   * Constructor.
+   *
+   * @param ctxPrecedence  the precedence value of the current context
+   */
+  public NotOperator(int ctxPrecedence) {
+    super(ctxPrecedence);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.NOT;
+  }
+
+  @Override
+  public String getName() {
+    return "NotOperator";
+  }
+
+  @Override
+  public int getBasePrecedence() {
+    return 3;
+  }
+
+  @Override
+  public Predicate toPredicate(Predicate left, Predicate right) {
+    return new NotPredicate((BasePredicate) right);
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "[precedence=" + getPrecedence() + "]";
+  }
+}

+ 63 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+/**
+ * Operator representation.
+ */
+public interface Operator {
+
+  /**
+   * Operator types.
+   */
+  public enum TYPE {
+    LESS,
+    LESS_EQUAL,
+    GREATER,
+    GREATER_EQUAL,
+    EQUAL,
+    NOT_EQUAL,
+    AND,
+    OR,
+    NOT,
+    IN,
+    IS_EMPTY
+  }
+
+  /**
+   * The highest base operator precedence level.
+   */
+  public static final int MAX_OP_PRECEDENCE = 3;
+
+  /**
+   * Get the operator type.
+   *
+   * @return the operator type
+   */
+  public TYPE getType();
+
+  /**
+   * Obtain the precedence of the operator.
+   * This value is calculated from the operator's base precedence and the context of the
+   * surrounding expressions.  Higher values indicate higher precedence.
+   *
+   * @return  the precedence of this operator in its current context
+   */
+  public int getPrecedence();
+}

+ 64 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/OrOperator.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.controller.predicate.BasePredicate;
+import org.apache.ambari.server.controller.predicate.OrPredicate;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Or operator implementation.
+ */
+public class OrOperator extends AbstractOperator implements LogicalOperator {
+
+  /**
+   * Constructor.
+   *
+   * @param ctxPrecedence  precedence value for the current context
+   */
+  public OrOperator(int ctxPrecedence) {
+    super(ctxPrecedence);
+  }
+
+  @Override
+  public TYPE getType() {
+    return TYPE.OR;
+  }
+
+  @Override
+  public String getName() {
+    return "OrOperator";
+  }
+
+  @Override
+  public int getBasePrecedence() {
+    return 1;
+  }
+
+  @Override
+  public Predicate toPredicate(Predicate left, Predicate right) {
+    //todo: refactor to remove down casts
+    return new OrPredicate((BasePredicate) left, (BasePredicate) right);
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "[precedence=" + getPrecedence() + "]";
+  }
+}

+ 37 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.controller.spi.Predicate;
+
+/**
+ * Relational operator external representation.
+ */
+public interface RelationalOperator extends Operator {
+  /**
+   * Create a predicate for this relational operator.
+   *
+   * @param prop  left operand
+   * @param val   right operand
+   * @return  a predicate instance for this operator.
+   * @throws  InvalidQueryException if unable to build the predicate because of invalid operands
+   */
+  public Predicate toPredicate(String prop, String val) throws InvalidQueryException;
+}

+ 57 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperatorFactory.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.predicate.operators;
+
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+
+/**
+ * Factory of relational operators.
+ */
+public class RelationalOperatorFactory {
+  /**
+   * Create a relational operator based on the string representation
+   * of the operator.
+   *
+   * @param operator  the string representation of the operator
+   *
+   * @return relational operator for the given string
+   * @throws InvalidQueryException if an invalid operator is passed in
+   */
+  public static RelationalOperator createOperator(String operator) throws InvalidQueryException {
+    if ("!=".equals(operator)) {
+      return new NotEqualsOperator();
+    } else if ("=".equals(operator)) {
+      return new EqualsOperator();
+    } else if ("<=".equals(operator)) {
+      return new LessEqualsOperator();
+    } else if ("<".equals(operator)) {
+      return new LessOperator();
+    } else if (">=".equals(operator)) {
+      return new GreaterEqualsOperator();
+    } else if (">".equals(operator)) {
+      return new GreaterOperator();
+    } else if (".in(".equals(operator)) {
+      return new InOperator();
+    } else if (".isEmpty(".equals(operator)) {
+      return new IsEmptyOperator();
+    } else {
+      throw new RuntimeException("Invalid Operator Type: " + operator);
+    }
+  }
+}

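A sketch, not part of the patch, of the string-to-operator mapping above; note that the function-style operators arrive with their leading dot and opening parenthesis.

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.RelationalOperatorFactory;

    public class RelationalOperatorFactorySketch {
      public static void main(String[] args) throws InvalidQueryException {
        System.out.println(RelationalOperatorFactory.createOperator("!="));         // NotEqualsOperator
        System.out.println(RelationalOperatorFactory.createOperator(">="));         // GreaterEqualsOperator
        System.out.println(RelationalOperatorFactory.createOperator(".in("));       // InOperator
        System.out.println(RelationalOperatorFactory.createOperator(".isEmpty(")); // IsEmptyOperator
      }
    }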
+ 3 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java

@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.query;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.controller.spi.*;
 
-import java.util.Map;
 import java.util.Set;
 
 
@@ -41,13 +40,13 @@ public interface Query {
   public void addProperty(String group, String property, TemporalInfo temporalInfo);
 
   /**
-   * Add a property to the query.
+   * Add a local (not sub-resource) property to the query.
    * This is the select portion of the query.
    *
    * @param property the property id which contains the group, property name
    *                 and whether the property is temporal
    */
-  public void addProperty(String property);
+  public void addLocalProperty(String property);
 
   /**
    * Obtain the properties of the query.
@@ -56,7 +55,7 @@ public interface Query {
    *
    * @return the query properties
    */
-  public Map<String, Set<String>> getProperties();
+  public Set<String> getProperties();
 
   /**
    * Execute the query.

+ 39 - 129
ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java

@@ -20,7 +20,6 @@ package org.apache.ambari.server.api.query;
 
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
 import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.controller.predicate.AndPredicate;
@@ -47,7 +46,12 @@ public class QueryImpl implements Query {
   /**
    * Properties of the query which make up the select portion of the query.
    */
-  private Map<String, Set<String>> m_mapQueryProperties = new HashMap<String, Set<String>>();
+  private Set<String> m_setQueryProperties = new HashSet<String>();
+
+  /**
+   * Indicates that the query should include all available properties.
+   */
+  private boolean allProperties = false;
 
   /**
    * Map that associates each property set on the query to temporal data.
@@ -59,16 +63,6 @@ public class QueryImpl implements Query {
    */
   private Map<String, TemporalInfo> m_mapCategoryTemporalInfo = new HashMap<String, TemporalInfo>();
 
-  /**
-   * All properties that are available for the resource.
-   */
-  private Map<String, Set<String>> m_mapAllProperties;
-
-  /**
-   * Tree index of m_mapAllProperties.  Used to match sub-categories.
-   */
-  TreeNode<Set<String>> m_treeAllProperties = new TreeNodeImpl<Set<String>>(null, new HashSet<String>(), null);
-
   /**
    * Sub-resources of the resource which is being operated on.
    */
@@ -92,43 +86,27 @@ public class QueryImpl implements Query {
    */
   public QueryImpl(ResourceInstance resource) {
     m_resource = resource;
-    m_mapAllProperties = Collections.unmodifiableMap(getClusterController().
-        getSchema(resource.getResourceDefinition().getType()).getCategoryProperties());
-    buildAllPropertiesTree();
   }
 
   @Override
-  //todo: consider requiring a path and a property.  For categories the property name '*' could be used.
-  public void addProperty(String category, String property, TemporalInfo temporalInfo) {    
-    if (category == null && property.equals("*")) {
+  public void addProperty(String category, String name, TemporalInfo temporalInfo) {
+    if (category == null && name.equals("*")) {
       // wildcard
       addAllProperties(temporalInfo);
-    } else if (m_mapAllProperties.containsKey(category) && m_mapAllProperties.get(category).contains(property)) {
-      // local property
-      Set<String> setProps = m_mapQueryProperties.get(category);
-      if (setProps == null) {
-        setProps = new HashSet<String>();
-        m_mapQueryProperties.put(category, setProps);
-      }
-      setProps.add(property);
-      if (temporalInfo != null) {
-        m_mapPropertyTemporalInfo.put(PropertyHelper.getPropertyId(category, property), temporalInfo);
-      }
-    } else if (! addCategory(category, property, temporalInfo)){
-      // not a local category/property
-      boolean success = addPropertyToSubResource(category, property, temporalInfo);
-      if (!success) {
-        //TODO.  Remove when handled by back end
-        String propString = category == null ? property : property == null ? category : category + '/' + property;
-        throw new IllegalArgumentException("An invalid resource property was requested.  Resource: " +
-            m_resource.getResourceDefinition().getType() + ", Property: " + propString);
+    } else {
+      if (!addPropertyToSubResource(category, name, temporalInfo)) {
+        String propertyId = PropertyHelper.getPropertyId(category, name.equals("*") ? null : name);
+        addLocalProperty(propertyId);
+        if (temporalInfo != null) {
+          m_mapCategoryTemporalInfo.put(propertyId, temporalInfo);
+        }
       }
     }
   }
 
   @Override
-  public void addProperty(String property) {
-    addProperty(PropertyHelper.getPropertyCategory(property), PropertyHelper.getPropertyName(property), null);
+  public void addLocalProperty(String property) {
+    m_setQueryProperties.add(property);
   }
 
   @Override
@@ -142,7 +120,7 @@ public class QueryImpl implements Query {
       result.getResultTree().setProperty("isCollection", "true");
     }
 
-    if (m_mapQueryProperties.isEmpty() && m_mapSubResources.isEmpty()) {
+    if (m_setQueryProperties.isEmpty() && m_mapSubResources.isEmpty()) {
       //Add sub resource properties for default case where no fields are specified.
       m_mapSubResources.putAll(m_resource.getSubResources());
     }
@@ -184,8 +162,8 @@ public class QueryImpl implements Query {
   }
 
   @Override
-  public Map<String, Set<String>> getProperties() {
-    return Collections.unmodifiableMap(m_mapQueryProperties);
+  public Set<String> getProperties() {
+    return Collections.unmodifiableSet(m_setQueryProperties);
   }
 
   @Override
@@ -200,7 +178,8 @@ public class QueryImpl implements Query {
   private void addCollectionProperties(Resource.Type resourceType) {
     Schema schema = getClusterController().getSchema(resourceType);
     // add pk
-    addProperty(schema.getKeyPropertyId(resourceType));
+    String property = schema.getKeyPropertyId(resourceType);
+    addProperty(PropertyHelper.getPropertyCategory(property), PropertyHelper.getPropertyName(property), null);
 
     for (Resource.Type type : m_resource.getIds().keySet()) {
       // add fk's
@@ -209,21 +188,15 @@ public class QueryImpl implements Query {
       //todo: component sub-resources.  Component will not have host fk.
       //todo: refactor so that null check is not required.
       if (keyPropertyId != null) {
-        addProperty(keyPropertyId);
+        addProperty(PropertyHelper.getPropertyCategory(keyPropertyId), PropertyHelper.getPropertyName(keyPropertyId), null);
       }
     }
   }
 
   private void addAllProperties(TemporalInfo temporalInfo) {
-    if (temporalInfo == null) {
-      m_mapQueryProperties.putAll(m_mapAllProperties);
-    } else {
-      for (Map.Entry<String, Set<String>> entry : m_mapAllProperties.entrySet()) {
-        String path = entry.getKey();
-        Set<String> setProps = entry.getValue();
-        m_mapQueryProperties.put(path, setProps);
-        m_mapCategoryTemporalInfo.put(path, temporalInfo);
-      }
+    allProperties = true;
+    if (temporalInfo != null) {
+      m_mapCategoryTemporalInfo.put(null, temporalInfo);
     }
 
     for (Map.Entry<String, ResourceInstance> entry : m_resource.getSubResources().entrySet()) {
@@ -234,39 +207,6 @@ public class QueryImpl implements Query {
     }
   }
 
-  private boolean addCategory(String category, String name, TemporalInfo temporalInfo) {
-    if (category != null) {
-      if (name != null && ! name.isEmpty()) {
-        name = category + '/' + name;
-      } else  {
-        name = category;
-      }
-    }
-    TreeNode<Set<String>> node = m_treeAllProperties.getChild(name);
-    if (node == null) {
-      return false;
-    }
-
-    addCategory(node, name, temporalInfo);
-    return true;
-  }
-
-  private void addCategory(TreeNode<Set<String>> node, String category, TemporalInfo temporalInfo) {
-    if (node != null) {
-      Set<String> setProps = m_mapQueryProperties.get(category);
-      if (setProps == null) {
-        setProps = new HashSet<String>();
-        m_mapQueryProperties.put(category, setProps);
-      }
-      setProps.addAll(node.getObject());
-      m_mapCategoryTemporalInfo.put(category, temporalInfo);
-
-      for (TreeNode<Set<String>> child : node.getChildren()) {
-        addCategory(child, category + '/' + child.getName(), temporalInfo);
-      }
-    }
-  }
-
   private boolean addPropertyToSubResource(String path, String property, TemporalInfo temporalInfo) {
     // cases:
     // - path is null, property is path (all sub-resource props will have a path)
@@ -336,51 +276,23 @@ public class QueryImpl implements Query {
     return predicate;
   }
 
-  private void buildAllPropertiesTree() {
-    // build index
-    for (String category : m_mapAllProperties.keySet()) {
-      TreeNode<Set<String>> node = m_treeAllProperties.getChild(category);
-      if (node == null) {
-        if (category == null) {
-          node = m_treeAllProperties.addChild(new HashSet<String>(), null);
-        } else {
-          String[] tokens = category.split("/");
-          node = m_treeAllProperties;
-          for (String t : tokens) {
-            TreeNode<Set<String>> child = node.getChild(t);
-            if (child == null) {
-              child = node.addChild(new HashSet<String>(), t);
-            }
-            node = child;
-          }
-        }
-      }
-      node.getObject().addAll(m_mapAllProperties.get(category));
-    }
-  }
-
   private Request createRequest() {
     Set<String> setProperties = new HashSet<String>();
 
-    Map<String, TemporalInfo> mapTemporalInfo = new HashMap<String, TemporalInfo>();
+    Map<String, TemporalInfo> mapTemporalInfo    = new HashMap<String, TemporalInfo>();
+    TemporalInfo              globalTemporalInfo = m_mapCategoryTemporalInfo.get(null);
 
-    for (Map.Entry<String, Set<String>> entry : m_mapQueryProperties.entrySet()) {
-      String group = entry.getKey();
-      for (String property : entry.getValue()) {
-        String propertyId = PropertyHelper.getPropertyId(group, property);
-
-        TemporalInfo temporalInfo = m_mapCategoryTemporalInfo.get(group);
-        if (temporalInfo == null) {
-          temporalInfo = m_mapPropertyTemporalInfo.get(propertyId);
-        }
-        if (temporalInfo != null) {
-          mapTemporalInfo.put(propertyId, temporalInfo);
-        }
-        setProperties.add(propertyId);
+    for (String group : m_setQueryProperties) {
+      TemporalInfo temporalInfo = m_mapCategoryTemporalInfo.get(group);
+      if (temporalInfo != null) {
+        mapTemporalInfo.put(group, temporalInfo);
+      } else if (globalTemporalInfo != null) {
+        mapTemporalInfo.put(group, globalTemporalInfo);
       }
+      setProperties.add(group);
     }
 
-    return PropertyHelper.getReadRequest(setProperties, mapTemporalInfo);
+    return PropertyHelper.getReadRequest(allProperties ? Collections.<String>emptySet() : setProperties, mapTemporalInfo);
   }
 
   private void setParentIdsOnSubResource(Resource resource, ResourceInstance r) {
@@ -416,10 +328,9 @@ public class QueryImpl implements Query {
 
     QueryImpl that = (QueryImpl) o;
 
-    return m_mapAllProperties.equals(that.m_mapAllProperties) &&
-           m_mapCategoryTemporalInfo.equals(that.m_mapCategoryTemporalInfo) &&
+    return m_mapCategoryTemporalInfo.equals(that.m_mapCategoryTemporalInfo) &&
            m_mapPropertyTemporalInfo.equals(that.m_mapPropertyTemporalInfo) &&
-           m_mapQueryProperties.equals(that.m_mapQueryProperties) &&
+           m_setQueryProperties.equals(that.m_setQueryProperties) &&
            m_mapSubResources.equals(that.m_mapSubResources) &&
            m_resource.equals(that.m_resource) &&
            m_userPredicate == null ? that.m_userPredicate == null : m_userPredicate.equals(that.m_userPredicate);
@@ -428,10 +339,9 @@ public class QueryImpl implements Query {
   @Override
   public int hashCode() {
     int result = m_resource.hashCode();
-    result = 31 * result + m_mapQueryProperties.hashCode();
+    result = 31 * result + m_setQueryProperties.hashCode();
     result = 31 * result + m_mapPropertyTemporalInfo.hashCode();
     result = 31 * result + m_mapCategoryTemporalInfo.hashCode();
-    result = 31 * result + m_mapAllProperties.hashCode();
     result = 31 * result + m_mapSubResources.hashCode();
     result = 31 * result + (m_userPredicate != null ? m_userPredicate.hashCode() : 0);
     return result;
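The net effect of this rewrite is that QueryImpl no longer pre-builds and validates against a schema-wide property tree; it keeps a flat set of requested property ids plus an allProperties flag, and a wildcard select is communicated to the providers as an empty property set. Below is a runnable sketch of that empty-set convention, under the assumption (taken from createRequest() above) that providers treat an empty id set as "return everything"; the class and method names are hypothetical:

    import java.util.*;

    class ReadRequestSketch {
      static Set<String> propertyIds(boolean allProperties, Set<String> selected) {
        // An empty set signals "all properties" to the resource providers,
        // so a wildcard query no longer has to enumerate the schema.
        return allProperties ? Collections.<String>emptySet()
                             : Collections.unmodifiableSet(selected);
      }

      public static void main(String[] args) {
        Set<String> selected = new HashSet<String>(Arrays.asList("HostRoles/state"));
        System.out.println(propertyIds(false, selected)); // [HostRoles/state]
        System.out.println(propertyIds(true,  selected)); // [] -> everything
      }
    }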

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceImpl.java

@@ -110,11 +110,11 @@ public class ResourceInstanceImpl implements ResourceInstance {
         ResourceInstance resource = m_resourceFactory.createResource(subResDef.getType(), getIds());
 
         // ensure pk is returned
-        resource.getQuery().addProperty(m_controller.getSchema(
+        resource.getQuery().addLocalProperty(m_controller.getSchema(
             subResDef.getType()).getKeyPropertyId(subResDef.getType()));
         // add additionally required fk properties
         for (Resource.Type fkType : subResDef.getAdditionalForeignKeys()) {
-          resource.getQuery().addProperty(m_controller.getSchema(subResDef.getType()).getKeyPropertyId(fkType));
+          resource.getQuery().addLocalProperty(m_controller.getSchema(subResDef.getType()).getKeyPropertyId(fkType));
         }
 
         String subResourceName = subResDef.isCollection() ? resource.getResourceDefinition().getPluralName() :

+ 11 - 72
ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java

@@ -18,13 +18,14 @@
 
 package org.apache.ambari.server.api.services;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
+import org.apache.ambari.server.api.predicate.PredicateCompiler;
 import org.apache.ambari.server.api.resources.*;
 import org.apache.ambari.server.api.services.parsers.JsonPropertyParser;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.JsonSerializer;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
 import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.predicate.*;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 
@@ -57,11 +58,6 @@ public abstract class BaseRequest implements Request {
   private String m_body;
 
 
-  /**
-   * Predicate operators.
-   */
-  private Pattern m_pattern = Pattern.compile("!=|>=|<=|=|>|<");
-
   /**
    * Associated resource definition
    */
@@ -104,37 +100,12 @@ public abstract class BaseRequest implements Request {
   }
 
   @Override
-  public Predicate getQueryPredicate() {
-    //todo: parse during init
-    //not using getQueryParameters because it assumes '=' operator
-    String uri = getURI();
-    int qsBegin = uri.indexOf("?");
-
-    if (qsBegin == -1) return null;
-
-    String[] tokens = uri.substring(qsBegin + 1).split("&");
-
-    Set<BasePredicate> setPredicates = new HashSet<BasePredicate>();
-    for (String outerToken : tokens) {
-      if (outerToken.startsWith("_=")) {
-        // NOTE: This is to enable the UI to pass a _= parameter for a unique
-        // query string even though the backend doesn't need it.
-        continue;
-      }
-      
-      if (outerToken != null &&  !outerToken.startsWith("fields")) {
-        setPredicates.add(outerToken.contains("|") ?
-            handleOrPredicate(outerToken) : createPredicate(outerToken));
-      }
-    }
+  public Predicate getQueryPredicate() throws InvalidQueryException {
+    String uri     = getURI();
+    int    qsBegin = uri.indexOf("?");
 
-    if (setPredicates.size() == 1) {
-      return setPredicates.iterator().next();
-    } else if (setPredicates.size() > 1) {
-      return new AndPredicate(setPredicates.toArray(new BasePredicate[setPredicates.size()]));
-    } else {
-      return null;
-    }
+    return (qsBegin == -1) ? null :
+        getPredicateCompiler().compile(uri.substring(qsBegin + 1));
   }
 
   @Override
@@ -205,43 +176,11 @@ public abstract class BaseRequest implements Request {
     return new ResultPostProcessorImpl(this);
   }
 
-  private BasePredicate createPredicate(String token) {
-
-    Matcher m = m_pattern.matcher(token);
-    m.find();
-
-    String propertyId = token.substring(0, m.start());
-    String     value      = token.substring(m.end());
-    String     operator   = m.group();
-
-    if (operator.equals("=")) {
-      return new EqualsPredicate<String>(propertyId, value);
-    } else if (operator.equals("!=")) {
-      return new NotPredicate(new EqualsPredicate<String>(propertyId, value));
-    } else if (operator.equals("<")) {
-      return new LessPredicate<String>(propertyId, value);
-    } else if (operator.equals(">"))  {
-      return new GreaterPredicate<String>(propertyId, value);
-    } else if (operator.equals("<=")) {
-      return new LessEqualsPredicate<String>(propertyId, value);
-    } else if (operator.equals(">=")) {
-      return new GreaterEqualsPredicate<String>(propertyId, value);
-    } else {
-      throw new RuntimeException("Unknown operator provided in predicate: " + operator);
-    }
-  }
-
-  private BasePredicate handleOrPredicate(String predicate) {
-    Set<BasePredicate> setPredicates = new HashSet<BasePredicate>();
-    String[] tokens = predicate.split("\\|");
-    for (String tok : tokens) {
-      setPredicates.add(createPredicate(tok));
-    }
-
-    return new OrPredicate(setPredicates.toArray(new BasePredicate[setPredicates.size()]));
-  }
-
   protected RequestBodyParser getHttpBodyParser() {
     return new JsonPropertyParser();
   }
+
+  protected PredicateCompiler getPredicateCompiler() {
+    return new PredicateCompiler();
+  }
 }

+ 3 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/services/Request.java

@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.api.services;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.resources.ResourceDefinition;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
@@ -79,8 +80,9 @@ public interface Request {
    * such as 'AND'.
    *
    * @return the user defined predicate
+   * @throws InvalidQueryException if the query syntax is invalid
    */
-  public Predicate getQueryPredicate();
+  public Predicate getQueryPredicate() throws InvalidQueryException;
 
   /**
    * Obtain the partial response fields and associated temporal information which were provided
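Declaring the checked exception on the interface pushes malformed-query handling onto every caller. A hypothetical handler fragment showing the intended mapping from a syntax error to a client-side fault rather than a server error:

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.services.Request;
    import org.apache.ambari.server.controller.spi.Predicate;

    class PredicateHandling {
      static Predicate predicateOrBadRequest(Request request) {
        try {
          return request.getQueryPredicate();
        } catch (InvalidQueryException e) {
          // A real handler would answer 400 Bad Request with e.getMessage().
          throw new IllegalArgumentException("Invalid query: " + e.getMessage(), e);
        }
      }
    }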

+ 21 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -109,6 +109,10 @@ public class Configuration {
   public static final String SRVR_HOSTS_MAPPING = 
       "server.hosts.mapping";
 
+  public static final String SSL_TRUSTSTORE_PATH_KEY = "ssl.trustStore.path";
+  public static final String SSL_TRUSTSTORE_PASSWORD_KEY = "ssl.trustStore.password";
+  public static final String SSL_TRUSTSTORE_TYPE_KEY = "ssl.trustStore.type";
+
   private static final String SRVR_KSTR_DIR_DEFAULT = ".";
   public static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
   public static final String SRVR_KEY_NAME_DEFAULT = "ca.key";
@@ -209,6 +213,23 @@ public class Configuration {
       }
     }
     configsMap.put(SRVR_CRT_PASS_KEY, randStr);
+
+    loadSSLParams();
+  }
+
+  /**
+   * Loads the trusted certificate store properties into the corresponding
+   * javax.net.ssl system properties.
+   */
+  private void loadSSLParams() {
+    if (properties.getProperty(SSL_TRUSTSTORE_PATH_KEY) != null) {
+      System.setProperty("javax.net.ssl.trustStore", properties.getProperty(SSL_TRUSTSTORE_PATH_KEY));
+    }
+    if (properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY) != null) {
+      System.setProperty("javax.net.ssl.trustStorePassword", properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
+    }
+    if (properties.getProperty(SSL_TRUSTSTORE_TYPE_KEY) != null) {
+      System.setProperty("javax.net.ssl.trustStoreType", properties.getProperty(SSL_TRUSTSTORE_TYPE_KEY));
+    }
   }
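The three new keys are plain entries in ambari.properties; at load time each configured value is mirrored into the standard javax.net.ssl system property consumed by the JVM's SSL stack. A runnable sketch of that wiring; the file path and password are placeholders, and only the three key names come from the change above:

    import java.util.Properties;

    public class TrustStoreWiring {
      public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder values; the key names are the constants added above.
        props.setProperty("ssl.trustStore.path", "/var/lib/ambari-server/keys/truststore.jks");
        props.setProperty("ssl.trustStore.password", "changeit");
        props.setProperty("ssl.trustStore.type", "JKS");

        // Same propagation as loadSSLParams(): copy any configured value into
        // the JVM-wide trust store properties.
        String path = props.getProperty("ssl.trustStore.path");
        if (path != null) {
          System.setProperty("javax.net.ssl.trustStore", path);
        }
        String password = props.getProperty("ssl.trustStore.password");
        if (password != null) {
          System.setProperty("javax.net.ssl.trustStorePassword", password);
        }
        String type = props.getProperty("ssl.trustStore.type");
        if (type != null) {
          System.setProperty("javax.net.ssl.trustStoreType", type);
        }
      }
    }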
 
 

+ 7 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -1151,7 +1151,12 @@ public class AmbariManagementControllerImpl implements
       hosts = clusters.getHosts();
     } else {
       hosts = new ArrayList<Host>();
-      hosts.add(clusters.getHost(request.getHostname()));
+      try {
+        hosts.add(clusters.getHost(request.getHostname()));
+      } catch (HostNotFoundException e) {
+        // add cluster name
+        throw new HostNotFoundException(clusterName, hostName);
+      }
     }
 
     for (Host h : hosts) {
@@ -1161,7 +1166,7 @@ public class AmbariManagementControllerImpl implements
           r.setClusterName(clusterName);
           response.add(r);
         } else if (hostName != null) {
-          throw new HostNotFoundException(hostName);
+          throw new HostNotFoundException(clusterName, hostName);
         }
       } else {
         HostResponse r = h.convertToResponse();

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -214,7 +214,8 @@ public class AmbariServer {
       // sslConnectorOneWay.setNeedClientAuth(false);
       SslSelectChannelConnector sslConnectorOneWay = new SslSelectChannelConnector(contextFactory);
       sslConnectorOneWay.setPort(AGENT_ONE_WAY_AUTH);
-
+      sslConnectorOneWay.setAcceptors(2);
+      sslConnectorTwoWay.setAcceptors(2);
       serverForAgent.setConnectors(new Connector[]{ sslConnectorOneWay, sslConnectorTwoWay});
 
       ServletHolder sh = new ServletHolder(ServletContainer.class);
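Setting an explicit acceptor count pins the number of accept threads on each agent-facing SSL connector rather than relying on Jetty's default. A minimal sketch using the same Jetty 7/8-era classes that appear above; the keystore path and port are placeholders:

    import org.eclipse.jetty.server.Connector;
    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ssl.SslSelectChannelConnector;
    import org.eclipse.jetty.util.ssl.SslContextFactory;

    public class AgentConnectorSketch {
      public static void main(String[] args) throws Exception {
        SslContextFactory contextFactory = new SslContextFactory();
        contextFactory.setKeyStorePath("/var/lib/ambari-server/keys/keystore.p12"); // placeholder

        SslSelectChannelConnector connector = new SslSelectChannelConnector(contextFactory);
        connector.setPort(8440);     // placeholder for the one-way auth port
        connector.setAcceptors(2);   // fixed, predictable accept-thread count

        Server server = new Server();
        server.setConnectors(new Connector[]{ connector });
        server.start();
        server.join();
      }
    }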

Some files were not shown because too many files changed in this diff