
Backporting for branch-1.2.3. (yusaku)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-1.2.3@1481380 13f79535-47bb-0310-9956-ffa450edef68
Yusaku Sako
commit 4d8de1421f
86 changed files with 1103 additions and 3380 deletions
  1. CHANGES.txt (+0 -912)
  2. ambari-agent/pom.xml (+2 -2)
  3. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp (+0 -37)
  4. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp (+0 -6)
  5. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb (+1 -7)
  6. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb (+0 -4)
  7. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb (+13 -25)
  8. ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp (+2 -2)
  9. ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp (+0 -24)
  10. ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp (+0 -40)
  11. ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py (+0 -93)
  12. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp (+0 -48)
  13. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp (+0 -24)
  14. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp (+1 -5)
  15. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp (+0 -33)
  16. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp (+0 -48)
  17. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp (+4 -9)
  18. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp (+5 -0)
  19. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp (+0 -24)
  20. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp (+5 -15)
  21. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp (+0 -60)
  22. ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp (+0 -34)
  23. ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh (+1 -1)
  24. ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp (+0 -6)
  25. ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp (+0 -12)
  26. ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp (+3 -47)
  27. ambari-agent/src/main/python/ambari_agent/AmbariConfig.py (+1 -10)
  28. ambari-agent/src/main/python/ambari_agent/Heartbeat.py (+2 -0)
  29. ambari-client/pom.xml (+2 -2)
  30. ambari-project/pom.xml (+2 -2)
  31. ambari-server/docs/api/v1/cluster-resources.md (+0 -24)
  32. ambari-server/docs/api/v1/component-resources.md (+0 -26)
  33. ambari-server/docs/api/v1/create-cluster.md (+0 -40)
  34. ambari-server/docs/api/v1/create-component.md (+0 -33)
  35. ambari-server/docs/api/v1/create-host.md (+0 -33)
  36. ambari-server/docs/api/v1/create-hostcomponent.md (+0 -33)
  37. ambari-server/docs/api/v1/create-service.md (+0 -33)
  38. ambari-server/docs/api/v1/delete-cluster.md (+0 -33)
  39. ambari-server/docs/api/v1/host-component-resources.md (+0 -118)
  40. ambari-server/docs/api/v1/host-resources.md (+0 -23)
  41. ambari-server/docs/api/v1/index.md (+0 -0)
  42. ambari-server/docs/api/v1/service-resources.md (+0 -114)
  43. ambari-server/docs/api/v1/update-hostcomponent.md (+0 -94)
  44. ambari-server/docs/api/v1/update-service.md (+0 -73)
  45. ambari-server/docs/api/v1/update-services.md (+0 -48)
  46. ambari-server/pom.xml (+3 -3)
  47. ambari-server/src/main/java/org/apache/ambari/server/Role.java (+1 -8)
  48. ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java (+4 -6)
  49. ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java (+46 -49)
  50. ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentResource.java (+4 -1)
  51. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java (+1 -6)
  52. ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaMetric.java (+3 -84)
  53. ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java (+10 -3)
  54. ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java (+0 -26)
  55. ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java (+1 -1)
  56. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProvider.java (+2 -1)
  57. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java (+18 -71)
  58. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapBindAuthenticator.java (+0 -133)
  59. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java (+0 -10)
  60. ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java (+0 -3)
  61. ambari-server/src/main/python/ambari-server.py (+0 -76)
  62. ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hdfs-site.xml (+0 -10)
  63. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml (+0 -15)
  64. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/capacity-scheduler.xml (+112 -0)
  65. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-site.xml (+0 -18)
  66. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/yarn-site.xml (+172 -0)
  67. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml (+2 -2)
  68. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/YARN/configuration/mapred-queue-acls.xml (+39 -0)
  69. ambari-server/src/main/resources/stacks/HDP/2.0.1/services/YARN/configuration/mapred-site.xml (+531 -0)
  70. ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml (+1 -1)
  71. ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml (+0 -10)
  72. ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java (+0 -94)
  73. ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java (+0 -130)
  74. ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaMetricTest.java (+0 -116)
  75. ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProviderTest.java (+0 -32)
  76. ambari-server/src/test/resources/test_api.sh (+12 -13)
  77. ambari-web/app/config.js (+3 -3)
  78. ambari-web/app/controllers/main/admin/user.js (+2 -1)
  79. ambari-web/pom.xml (+2 -2)
  80. contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php (+3 -3)
  81. docs/pom.xml (+2 -50)
  82. docs/src/site/apt/index.apt (+52 -25)
  83. docs/src/site/apt/irc.apt (+0 -20)
  84. docs/src/site/apt/whats-new.apt (+25 -6)
  85. docs/src/site/site.xml (+7 -33)
  86. pom.xml (+1 -58)

+ 0 - 912
CHANGES.txt

@@ -12,217 +12,22 @@ Trunk (unreleased changes):
 
  NEW FEATURES
 
- AMBARI-2096. Create smoke test for HISTORYSERVER. (swagle)
-
- AMBARI-2094. Create smoke test for NODEMANAGER component as a part of 
- MapReduce V2 Service check. (swagle)
-
- AMBARI-2093. Add Tez as a configurable Service in Hadoop 2.0 stack. (swagle)
-
- AMBARI-2031. AMBARI-2031. Add clover code coverage profile. 
- (Giridharan Kesavan via swagle)
-
- AMBARI-2050. Create smoke test for RESOURCEMANAGER component. (swagle)
-
- AMBARI-2049. Create ambari agent scripts for MAPREDUCEv2_CLIENT. (swagle)
-
- AMBARI-2048. Create ambari agent scripts for historyserver. (swagle)
-
- AMBARI-2046. Create ambari agent scripts for Hadoop 2.0 installation, node 
- manager. (swagle)
-
- AMBARI-2047. Create ambari agent scripts for yarn client. (swagle)
-
- AMBARI-1679. Create ambari agent scripts for Hadoop 2.0 installation, 
- configuration and management. (swagle)
-
- AMBARI-1680. Add Hadoop 2.0 stack definition to Ambari. (swagle)
-
- AMBARI-1908. HDFS Mirroring: Add Bread Crumbs and Validation. (Arun Kandregula
- via yusaku)
-
- AMBARI-1558. Script to add host components to existing hosts.
- (yusaku via jaimi)
-
- AMBARI-1936. Support for installing on mixed OS versions install + mgmt.
- (swagle)
-
- AMBARI-1924. Allow for users to customize Ganglia gmetad + gmond user 
- accounts. (Sumit Mohanty via swagle)
-
- AMBARI-1923. Allow for users to customize Nagios user accounts. 
- (Sumit Mohanty via swagle)
-
- AMBARI-1922. Support not root ssh via a user that can sudo in as root. 
- (Sumit Mohanty via swagle)
- 
- AMBARI-1914. Add Nagios alerts for Hue service. (swagle)
-
- AMBARI-1895. Refactor ajax requests. (srimanth)
-
- AMBARI-1868. Include stack version as a parameter in manifest. (swagle)
-
- AMBARI-1847. Make single PUT call for multiple host overrides. (srimanth)
-
- AMBARI-1857. Capacity Scheduler: field order for Add/Edit popup. (yusaku via
- srimanth)
-
- AMBARI-1855. Capacity Scheduler: when adding a new queue, populate 
- fields. (yusaku via srimanth)
- 
- AMBARI-1850. Update unit tests. (yusaku via srimanth)
-
- AMBARI-1829. HDFS Mirroring: Display Status and handle maintenance operations 
- like Stop, Suspend, Activate etc. (Arun Kandregula via srimanth)
-
- AMBARI-1840. For global properties show restart for appropriate services
- only. (srimanth)
-
- AMBARI-1800. Add "Admin > Misc" section to Ambari Web to show service user
- accounts. (yusaku)
-
- AMBARI-1756. Add ability to install and edit HUE as a service. (srimanth via 
- yusaku)
-
- AMBARI-1742. HDFS Mirroring: Edit/Delete Cluster. (Arun Kandregula via yusaku)
-
- AMBARI-1723. HDFS Mirroring: Edit/Delete Data Set. (srimanth via yusaku)
-
- AMBARI-1717. Add ability to start and stop all services from Services
- page. (Xi Wang via yusaku)
-
- AMBARI-1716. HDFS Mirroring: Add a cluster. (Arun Kandregula via yusaku)
-
- AMBARI-1710. HDFS Mirroring: Edit/Delete Data Set. (srimanth via yusaku)
-
- AMBARI-1699. HDFS Mirroring: Side Panel of individual jobs page.
- (yusaku)
-
- AMBARI-1698. Host Detail page needs to allow upgrade for host components
- that failed to upgrade. (yusaku)
-
- AMBARI-1696. Capacity Scheduler configuration UI. (yusaku)
-
- AMBARI-1693. HDFS Mirroring: Display Jobs table. (yusaku)
-
- AMBARI-1691. Add filtering by host-level status on Step 9 of Installer.
- (Xi Wang via yusaku)
-
- AMBARI-1668. HDFS Mirroring: Add Data Set Popup. (Arun Kandregula via
- yusaku)
-
- AMBARI-1650. Add Oracle and MySQL option for Oozie during Ambari cluster
- install. (Xi Wang via yusaku)
-
- AMBARI-1610. Expose ability to customize Hive Metastore log dir.
- (yusaku)
-
- AMBARI-1729. Creating smoke test for Hue service. (swagle)
-
- AMBARI-1776. ZooKeeper Servers needs to store correct kerberos principal 
- in zookeeper_jaas.conf. (swagle)
-
- AMBARI-1424. Upgrade enhancements for Ambari 1.3.0. (smohanty)
-
- AMBARI-1763. Integrate Frontend security work to enable security on
- HBase and ZooKeeper. (jaimin)
-
- AMBARI-1754. Add support to ensure that Ambari Server/Agent/Store are all of 
- compatible version. (smohanty)
-
- AMBARI-1752. Backend support for MySQL and Oracle for Oozie and Hive. (swagle)
-
- AMBARI-1751. Ambari oracle-linux as a supported OS type. (swagle)
-
- AMBARI-1728. Cleanup INFO Logging at the ambari agent to make it more useful 
- and less verbose. (swagle)
-
- AMBARI-1676. Ambari upgrade to 1.3.0 (core support). (smohanty)
-
- AMBARI-1708. Remove all hardcoded ports from agent scripts to read from 
- configs. (swagle)
-
- AMBARI-1692. Make changes to agent scripts to support secure HBase and Zk. (swagle)
-
- AMBARI-1707. Upgrade should check if another upgrade request is active as well as 
- if any MASTER components have not stopped. (Sumit Mohanty via swagle)
-
- AMBARI-1673. Configuring Hue to work with a secure HDP cluster and making changes 
- to the Enable Security feature. (swagle)
-
- AMBARI-1663. Allow adding host components to existing hosts. (Xi Wang via
- yusaku)
-
- AMBARI-1653. HDFS Mirroring: Display DataSets table. (yusaku)
-
- AMBARI-1658. Implement API/Service Provider for HDFS mirroring. (tbeerbower)
-
  AMBARI-1704. Add ability for host components to provide their current actual configs. (ncole)
 
  AMBARI-1422. Allow client to specify a "context" value for asynchronous requests (jspeidel)
 
  AMBARI-1599. Add ability to report actual configuration applied to a host. (ncole)
 
- AMBARI-1647. Integrate server and agent changes for upgrade on cluster. 
- (Sumit Mohanty via swagle)
-
- AMBARI-1626. API support to upgrade host component. (Sumit Mohanty via swagle)
-
- AMBARI-1601. Server level action support. (Sumit Mohanty via swagle)
-
- AMBARI-1620. Add heatmaps for Host and Hbase section. (jaimin)
- 
- AMBARI-1634. Integrate Frontend Security work to enable security on
- Oozie, Hive, and WebHCat Server. (jaimin)
-
- AMBARI-1633. Reassign Master Wizard - Step 5. (yusaku)
-
- AMBARI-1585. Creating the agent scripts for Hue server installation and 
- configuration on the Hue host. (swagle)
-
- AMBARI-1618. HDFS Mirroring: Create Mapper, Model, Mock Data for Cluster.
- (Arun Kandregula via yusaku)
-
- AMBARI-1607. HDFS Mirroring: Create Mapper, Model and Mock Data.
- (Arun Kandregula via yusaku)
-
- AMBARI-1602. Edit User - drop the requirement to specify the old 
- password. (swagle)
-
  AMBARI-1406. Provide API support for including query string in http message body. (jspeidel)
 
  AMBARI-1592. Change how configurations are propagated (ncole)
 
  AMBARI-1593. Change host override JSON to include version tag (ncole)
 
- AMBARI-1545. Integrate Frontend Security work to enable security on HDFS
-
- AMBARI-1555. Upgrade should validate that the from->to version is an allowed 
- combination. (Sumit Mohanty via swagle)
-
  AMBARI-1568. Update the version of ambari artifacts to 1.3.0 snapshot (ncole)
 
- AMBARI-1563. API Support:  Host-component resource should include its current 
- HA active/passive status. (Sumit Mohanty via swagle)
-
- AMBARI-1560. Upgrade action/task support in server. (Sumit Mohanty via swagle)
-
  AMBARI-1553. List cluster-level configurations with host-level, if any (ncole)
 
- AMBARI-1557. Adding Hue service to the HDP stack definition along with the 
- necessary configuration properties. (swagle)
-
- AMBARI-1554. API support for current version of Stack and available versions 
- to upgrade (Sumit Mohanty via swagle) 
-
- AMBARI-1511. Add ability to override configurations at the host level (ncole)
-
- AMBARI-1550. Modify existing puppet manifests to allow installing/configuring 
- multiple masters. (swagle)
-
- AMBARI-1545. Integrate Frontend Security work to enable security on HDFS
- and MapReduce installed cluster. (jaimin)
-
  AMBARI-1528. Upgrade request support at Ambari. (Sumit Mohanty via swagle)
 
  AMBARI-1541. Upgrade task support in agent. (Sumit Mohanty via swagle)
@@ -290,8 +95,6 @@ Trunk (unreleased changes):
 
  IMPROVEMENTS
 
- AMBARI-2111. Enable customization of smoke test user. (yusaku)
-
  AMBARI-2110. Update hive-site.xml, set fs.file.impl.disable.cache=true.
  (mahadev)
 
@@ -825,688 +628,8 @@ Trunk (unreleased changes):
  AMBARI-1489. Add hadoop-lzo to be one of the rpms to check for before
  installation. (mahadev)
 
- AMBARI-1642. Add ability for maintainence mode in Host Role Component in
- Ambari. (mahadev)
-
- AMBARI-1797. For global site properties, need property to services effected
- map. (mahadev)
-
- AMBARI-1384. WorkflowJsonService service doesn't use the API framework and is
- inconsistent with other API's. (billie)
-
- AMBARI-1871. ambari-agent RPM does not claim ownership of
- /var/lib/ambari-agent. (Matthew Farrellee via mahadev)
-
- AMBARI-1870. ambari-agent RPM claims ownership of /usr/sbin. (Matthew
- Farrellee via mahadev)
-
  BUG FIXES
 
- AMBARI-2109. Sanitize KEYS and NOTICE.txt on trunk. (yusaku)
- 
- AMBARI-2108. Fix apache rat check issues for ambari (top-level dir).
- (yusaku)
-
- AMBARI-2105. Assign Slaves page allows the user to specify a host with no
- components on it. (jaimin)
-
- AMBARI-2106. Fix apache rat check issues for ambari-server and ambari-agent.
- (swagle)
-
- AMBARI-2107. Cluster CPU Chart is off the charts. (swagle)
-
- AMBARI-2102. Confusing message "ls: cannot access /usr/share/java/*oracle*:
- No such file or directory". (smohanty)
-
- AMBARI-2104. Fix apache rat check issues for ambari-web. (yusaku)
-
- AMBARI-2100. HBase throws AccessDeniedException. (yusaku)
-
- AMBARI-2099. Cluster install failed due to timeout and the user can proceed
- to cluster management; the user was not presented an option to retry install.
- (yusaku)
-
- AMBARI-2101. Hive service check (still) failing with file permissions.
- (swagle)
-
- AMBARI-2095. It's possible to get into a state where install retry is not
- possible if the agent stops heartbeating. (jaimin via yusaku)
-
- AMBARI-2091. Custom JDK path not used when adding new hosts. (yusaku)
-
- AMBARI-2089. Post Ambari upgrade, Hive and Oozie fail to start after
- reconfigure. (Xi Wang via yusaku)
-
- AMBARI-2084. Wrong host mapping in Assign Masters step. (yusaku)
-
- AMBARI-2098. Customizing webcat pid run directory fails service status. 
- (swagle)
-
- AMBARI-2076. DataNode install failed with custom users. (swagle)
-
- AMBARI-2085. UI allows user to set empty value for configs in
- Advanced category. (jaimin)
-
- AMBARI-2087. Tasks are not filtered by parent request id. (smohanty)
-
- AMBARI-2086. Agent on host with clients and DATANODE only seems to schedule 
- STATUS commands for several other services. (swagle)
-
- AMBARI-2088. Cluster installation times out at server side too fast. (swagle)
-
- AMBARI-2083. Upgrade fails on Sles. (smohanty)
-
- AMBARI-2082. Oozie service check fails. (jaimin)
-
- AMBARI-2081. changeUid.sh failing during installation. (swagle)
-
- AMBARI-2079. Can't change service configuration if heartbeat lost from
- service component host. (yusaku)
-
- AMBARI-2075. Admin role can't be assigned to LDAP user. (yusaku)
-
- AMBARI-2080. Cluster name and Background operations indicator should
- disappear on logout. (jaimin)
-
- AMBARI-2078. Hive Metastore host not changing on Assign Masters page. (jaimin)
-
- AMBARI-2077. Update stack mock data to make testMode functional on step4 of
- installer wizard. (jaimin)
-
- AMBARI-2076. DataNode install failed with custom users. (smohanty)
-
- AMBARI-2074. Deployment of HDP 1.2.1 fails on Sles. (smohanty)
-
- AMBARI-2073. After Ambari upgrade to 1.2.3, MapReduce service check fails 
- because uid of ambari_qa changed. (swagle)
-
- AMBARI-2067. hive-site.xml cannot be readonly for clients. (swagle)
-
- AMBARI-2068. "Preparing to install <component>" message needs spacing.
- (yusaku)
-
- AMBARI-1979. Last HeartBeat time and heartbeat status for agent take around 2-3 
- minutes to update on a server restart. (swagle)
-
- AMBARI-1983. Add new parameters to improve HBase MTTR. HDPLocal fixes. 
- (swagle)
-
- AMBARI-2066. HDFS shortcircuit skip checksum should be removed. (smohanty)
-
- AMBARI-2056. Show proper error message while user tries to save configurations 
- of partially stopped service. (srimanth)
-
- AMBARI-2064. Legend for zoomed-in graphs do not render properly in IE9.
- (yusaku)
-
- AMBARI-2063. Admin features not available for user with admin rights under
- certain conditions. (yusaku)
-
- AMBARI-2060. Initiate a recommission, on success, the operations dialog says
- decommission, not recommission. (yusaku)
-
- AMBARI-2058. Host Detail page: if the host component is in INSTALL_FAILED
- state, we should let the user reinstall it. (yusaku)
-
- AMBARI-2055. Oozie reconfig forces the user to enter bogus values for two
- parameters in order to save any changes. (yusaku)
-
- AMBARI-2054. If "Install from Local Repository" selected in install wizard,
- Add Host wizard not working. (yusaku)
-
- AMBARI-2053. Align "add hosts" button vertically with host health filter.
- (yusaku)
-
- AMBARI-2052. Fix delete user popup. (yusaku)
-
- AMBARI-2065. Hadoop group customization does not take affect. (smohanty)
-
- AMBARI-2062. Service versions shown during install dont match installed
- versions. (smohanty)
-
- AMBARI-2038. Services links on Dashboard connected to incorrect pages.
- (yusaku)
-
- AMBARI-2059. Add dependency for Nagios server on Hive Client install. (swagle)
-
- AMBARI-2044. hive-site.xml permission denied exception. (swagle)
-
- AMBARI-2057. Gmond left in init after install. (smohanty)
-
- AMBARI-2051. Remove hard-coded ports from agent scripts - Nagios. (swagle)
-
- AMBARI-2045. Add Unit test to verify, client re-install for install failed 
- client. (swagle)
-
- AMBARI-2044. hive-site.xml permission denied exception. (swagle)
-
- AMBARI-2041. If a host that has a service client installed and the host is down, 
- service start will fail. (swagle)
-
- AMBARI-2039. Service check should be scheduled on a client that is on
- a host in HEALTHY state - use correct state enum. (smohanty)
-
- AMBARI-2035. "Add local user" button is enabled but nothing happens upon
- clicking it under certain conditions. (yusaku)
-
- AMBARI-2034. Disable "Add Component" button in the Host Details page if the
- host is in UNKNOWN state or !isHeartbeating. (yusaku)
-
- AMBARI-2033. Decommission DataNode does not have any request context.
- (yusaku)
-
- AMBARI-2029. Error when loading /main/services directly. (yusaku)
- 
- AMBARI-2039. Service check should be scheduled on a client that is on
- a host in HEALTHY state. (smohanty)
-
- AMBARI-2037. Nagios web not installing as expected on Sles11. (swagle)
-
- AMBARI-1924. Allow for users to customize Ganglia gmetad + gmond user
- accounts. (smohanty)
-
- AMBARI-2024. Ambari Server becomes unresponsive after crashing on http reads 
- on jersey. (swagle)
-
- AMBARI-2020. Incorrect behavior of "Services" page. (yusaku)
-
- AMBARI-2018. Hosts page: no filter selection is shown after clicking on
- "Alerts" filter, navigating away, and coming back to Hosts page. (yusaku)
-
- AMBARI-2016. Hide Maintenance pulldown if no operation can be performed.
- (yusaku)
-
- AMBARI-2015. Host component start/stop causes "Uncaught TypeError: Cannot call
- method 'call' of undefined". (yusaku)
-
- AMBARI-2011. Add Hosts gets stuck at 33% (some hosts in the cluster were
- down). (yusaku)
-
- AMBARI-2014. Install Wizard/Add Host Wizard Review page: local repo option
- is always displayed as "No", even when it is enabled. (yusaku)
-
- AMBARI-2019. Cannot decommission data node (ensure recommission also works).
- (swagle)
- 
- AMBARI-2021. Hadoop installation on cluster with SUSE-11 failed. (smohanty)
-
- AMBARI-2010. Tasks do not timeout for failed hosts. (swagle)
-
- AMBARI-2012. Check Ambari-agent process - nagios alert is only being
- configured on the nagios-server host. (smohanty)
-
- AMBARI-2001. Filtering on Jobs table does not work under certain situations.
- (yusaku)
-
- AMBARI-2000. Undo links still remain after the config changes are saved.
- (yusaku)
-
- AMBARI-1999. Clicking on Cancel on the Service Config page should not reload
- the entire app. (yusaku)
-
- AMBARI-1998. Action buttons on host details page not formatted properly on
- Firefox. (yusaku)
-
- AMBARI-1997. Filtered hosts get out of sync with the filter selection. (yusaku)
-
- AMBARI-2009. task-log4j.properties file ownership should not be
- root. (smohanty)
-
- AMBARI-2008. Using mixed OS overwrites ambari.repo during install. (smohanty)
-
- AMBARI-1952. hadoop dependency version for ambari-log4j is hardcoded, making
- it regular expression based to pick latest from the repository. (smohanty)
-
- AMBARI-2007. Decom DataNode throws JS error. (smohanty)
-
- AMBARI-1994. Adding component to Host should should wire-up + adjust
- associated Nagios alerts. (smohanty)
-
- AMBARI-1753. Puppet paramter configuration not working as expected. (swagle)
-
- AMBARI-1978. Deploying HDP-1.3.0 results in several alerts - is it related to 
- hard-coded port. Incremental update. (swagle)
-
- AMBARI-1990. After successful registration, going back to the Confirm Hosts
- or re-installing agents from Install Options page causes host registration
- to fail. (smohanty)
-
- AMBARI-1991. Remove unused python files from ambari-agent. (smohanty)
-
- AMBARI-1984. WebHCat log and pid dirs configs should be under WebHCat >
- Advanced. (yusaku)
-
- AMBARI-1989. Add component shows the same component again even if the
- component is already added/installed/started. (yusaku)
-
- AMBARI-1988. Hostname pattern expression is broken. (yusaku)
-
- AMBARI-1986. HDFS General section has disappeared from Customize Services 
- step of the Install Wizard. (yusaku)
-
- AMBARI-1985. Incorrect behavior of "Undo" button for password fields. (yusaku)
-
- AMBARI-1702. Ambari/GSInstallers need to set the value of 
- mapred.jobtracker.completeuserjobs.maximum. New recommended value. (swagle)
-
- AMBARI-1983. Add new parameters to improve HBase MTTR. (swagle)
-
- AMBARI-1979. Last HeartBeat time and heartbeat status for agent take around 2-3 
- minutes to update on a server restart. (swagle)
-
- AMBARI-1978. Deploying HDP-1.3.0 results in several alerts - is it related to 
- hard-coded port. (swagle)
-
- AMBARI-1974. BootStrapTest is failing on the master build. (smohanty)
-
- AMBARI-1968. Hadoop Classpath is being overwridden which causes hive
- server/metastore to fail. (smohanty)
-
- AMBARI-1973. log4j Appender for RCA should be able to write the same database
- being used for Ambari Server (oracle/MySql). (smohanty)
-
- AMBARI-1972. Stacks2 api implemenation using the standard framework is not
- complete - does not show configuration tags. (smohanty)
-
- AMBARI-1954. Dashboard does not come up if the upgrade stack does not contain
- a service with the same name. (yusaku)
-
- AMBARI-1953. On Add Hosts, the request context for the start phase shows up
- as "Request Name Not Specified". (yusaku)
-
- AMBARI-1966. Client install tasks are shown twice in progress popup during
- start phase of install wizard (update API call to include
- params/reconfigure_client). (yusaku)
-
- AMBARI-1965. core-site properties are incorrectly populated in Advanced/
- General category of MapReduce service. (yusaku)
-
- AMBARI-1963. Deploying progress bar shows 0 tasks after installation failure
- and going back to a previous step to retry. (yusaku)
-
- AMBARI-1962. Host Check popup keeps the "rerun check" button disabled even
- after it is done and its hard to know if its actually run or not. (yusaku)
-
- AMBARI-1961. Select Services: clicking on "all" selects HUE even when HUE
- support is toggled off. (yusaku)
-
- AMBARI-1960. "Back" button can be pressed while host registration is taking
- process, even though the button seems disabled. (yusaku)
-
- AMBARI-1959. Cannot login to Ambari after login failure. (yusaku)
-
- AMBARI-1957. Hosts table: whether the alert filter is in effect or not is
- not clear. (yusaku)
-
- AMBARI-1956. Wrong install status shown in Add Service Wizard. (yusaku)
-
- AMBARI-1951. Ambari agent setup during bootstrap should install the same
- version of agent as the server. (smohanty)
-
- AMBARI-1950. Hadoop install was failed on SUSE-11.1sp1 cluster with all 
- services except Hue. (smohanty)
-
- AMBARI-1949. Reconfiguration of Services has issues and the configurations 
- save button does not take affect. (srimanth)
-
- AMBARI-1948. System logs are not present on tasktracker. (swagle)
-
- AMBARI-1947. Oozie Smoke test fails with errors on the start services/install 
- page. (swagle)
-
- AMBARI-1946. Heatmap memory should not include cached memory as part of
- "used". (Jeff Sposetti via yusaku)
-
- AMBARI-1944. All Service Smoke tests fail when run with service start. (swagle)
-
- AMBARI-1939. Make service restart feedback based on supports functionality. 
- (srimanth)
-
- AMBARI-1943. Properties that do not map to any global property are not being
- sent to server. (jaimin)
-
- AMBARI-1937. Ambari-web installer wizard doesn't work in test mode. (jaimin)
-
- AMBARI-1927. In background operations popup, requests with same context
- are showing hosts/tasks info from last request. (yusaku via jaimin)
-
- AMBARI-1907. Service check commands are not getting created on a
- cluster install -> start. (yusaku via jaimin)
-
- AMBARI-1942. Nagios server failed to start. (swagle)
-
- AMBARI-1938. Update mock data for stack HDP-1.3.0. (jaimin)
-
- AMBARI-1934. Security vulnerability with Ganglia and Nagios. (smohanty)
-
- AMBARI-1933. Test failure : testCascadeDeleteStages. (smohanty)
-
- AMBARI-1931. params/run_smoke_test=true is not taking effect. (smohanty)
-
- AMBARI-1919. JobTracker History Server failed to come up on 1.3.0 stack
- and the request for service stall is stalled. (smohanty)
-
- AMBARI-1900. Update the DDL update script to modify the table to includei
- ph_cpu_count. (smohanty)
-
- AMBARI-1926. One HBase master should have active HA status at all time. 
- (smohanty)
-
- AMBARI-1925. Remove "hadoop_deploy" user. (smohanty)
-
- AMBARI-1915. Client install tasks are shown twice in install progress 
- popup. (swagle)
-
- AMBARI-1916. Filter for showing only properties which need restart is 
- broken. (srimanth)
-
- AMBARI-1918. Set correct Oozie property for security instead of deprecated
- property. (jaimin)
-
- AMBARI-1917. Ambari Core-Site.xml Missing Property for LZO (enabled) -
- io.compression.codecs (jaimin).
-
- AMBARI-1889. Added documentation for configuration (ncole)
-
- AMBARI-1912: HBase master doesn't come up after disabling security. (jaimin)
-
- AMBARI-1902: RegionServer does not start in secure cluster. (jaimin)
-
- AMBARI-1903. Host Exception Popup layout and cosmetic issues. (srimanth)
-
- AMBARI-1901. Add additional tests for verifying request behavior based on 
- host role command results. (smohanty)
-
- AMBARI-1899. ambari-reset does not respect -s. (swagle)
-
- AMBARI-1898. Update stack definitions for 1.3.0. (smohanty)
-
- AMBARI-1886. Derived properties not being overridden for hosts. (srimanth)
-
- AMBARI-1896. Disable editing Capacity Scheduler on host configs. (srimanth)
-
- AMBARI-1894. Refactor configs of Capacity Scheduler category. (srimanth)
-
- AMBARI-1893. Parsing new alerts format fails. (srimanth)
-
- AMBARI-1891. Impossibility to scroll metric window after browser width 
- changing. (srimanth)
-
- AMBARI-1880. stacks2 API uses "type" to refer to config tags and no longer
- exposes "filename" as a property. (srimanth via yusaku)
-
- AMBARI-1873. HUE pid and log dir labels are flip flopped. (yusaku)
-
- AMBARI-1878. Host overrides functionality broken in wizard Step7 controller.
- (yusaku)
-
- AMBARI-1875. Restart Service tooltip overlaps another tooltip. (yusaku)
-
- AMBARI-1874. Add Service Wizard: remove the ability to install master
- components for already installed services. (yusaku)
-
- AMBARI-1872. Ambari FE is not setting proper value for 
- fs.checkpoint.edits.dir (jaimin)
-
- AMBARI-1869. Permission on agent site.pp files needs to be 660. (swagle)
-
- AMBARI-1867. Processing API requests takes too long. (swagle)
-
- AMBARI-1856. Queries for metrics to populate the dashboard graphs don't work
- with updated Ganglia. (tbeerbower)
-
- AMBARI-1862. Nagios credentials are freely available at ambari-agent.log.
- (smohanty)
-
- AMBARI-1726. It seems upgrades available at the FE is hard-coded to 1.3.0. 
- (yusaku via srimanth)
-
- AMBARI-1854. Wizards available for a non-administrator user. (yusaku via srimanth)
-
- AMBARI-1852. Upon clicking Services > Service > Config, a call to 
- "configurations resource is made and the server throws 400. (yusaku via srimanth)
-
- AMBARI-1851. Ambari Web behaves strangely when there is no Active HBase 
- Master. (yusaku via srimanth)
-
- AMBARI-1849. Cosmetic problems on HBase Dashboard. (yusaku via srimanth)
-
- AMBARI-1848. Install Wizard, Step 7: Oozie Database Derby option should say 
- "New Derby Database", not "Current Derby Database". (Xi Wang via srimanth)
-
- AMBARI-1860. Master broken - Cannot deploy services. (smohanty)
-
- AMBARI-1859. Cannot load Nagios Alerts due to 400 Bad Request. (smohanty)
-
- AMBARI-1842. Collapsable service restart message section should have pointer 
- cursor. (srimanth)
-
- AMBARI-1841. Properties that should be exposed in Advanced category
- are populated in Custom categories. (jaimin)
-
- AMBARI-1837. Few core-site properties vanished after seemingly benign 
- reconfiguration. (jaimin)
-
- AMBARI-1838. Cluster Management > Services > MapReduce > Config throws JS error
- and the page comes up blank. (jaimin)
-
- AMBARI-1836. Remove hard-coded ports from agent scripts. (swagle)
-
- AMBARI-1834. Reduce the number of states that a host component can be in.
- (smohanty)
-
- AMBARI-1789. Stopping and then Starting all services doesn't start 
- NameNode. (smohanty)
-
- AMBARI-1822. Hue service link points to wrong URL and no smoke test drop
- down is shown. (yusaku)
-
- AMBARI-1821. Upgrading component is not very clear and Upgrade action
- is not available. (yusaku)
-
- AMBARI-1820. Installer Step 7 - DataNode hosts, TaskTracker hosts, and
- RegionServer hosts not displayed correctly. (yusaku)
-
- AMBARI-1819. Ambari Installer: page refreshes upon hitting enter in text
- fields (Step 1 and Step 7). (yusaku)
-
- AMBARI-1813. The back button seems disabled during host registration (step 3),
- but you can actually click it to go back. (yusaku)
-
- AMBARI-1812. Unable to re-configure core-site. (yusaku)
-
- AMBARI-1811. Start/Stop service doesn't work. (yusaku)
-
- AMBARI-1810. Security Wizard - Progress popup is not filtering tasks
- correctly. (yusaku)
-
- AMBARI-1806. Maintenance checks issued from frontend does not have request
- context set appropriately. (yusaku)
-
- AMBARI-1804. Reassign master should show only the hosts that do not have
- another instance of the master for HBase. (yusaku)
-
- AMBARI-1803. Reassign HBase master menu displays multiple entries with no
- distinction when there are multiple HBase masters. (yusaku)
-
- AMBARI-1802. Install wizard and subsequent reconfig screens lose 'confirm'
- password content and show up as red even if the use is not editing these
- fields. (yusaku)
-
- AMBARI-1801. After adding hosts successfully, you need to refresh the hosts
- page manually to see the new hosts. (yusaku)
-
- AMBARI-1799. On service reconfig, Save button can be clicked even when there
- are validation errors. (yusaku)
-
- AMBARI-1796. Specific custom configs do not display after reload.
- (srimanth via yusaku)
-
- AMBARI-1768. Cluster install wizard does not succeed at service start.
- (yusaku)
-
- AMBARI-1755. Provide context for background operations. (srimanth via yusaku)
-
- AMBARI-1744. isAdmin doesn't switch after login/out. (srimanth via yusaku)
-
- AMBARI-1709. When all hosts are assigned a master component, the last host
- should have all slave components and clients (Step 6). (srimanth via yusaku)
-
- AMBARI-1695. Customize Services page - validation error count is not reflect
- in the service tab for host exceptions. (yusaku)
-
- AMBARI-1675. ASF license header missing from
- app/templates/main/admin/security/add/step2.hbs. (yusaku)
-
- AMBARI-1670. Changing service user name from web UI should also change
- configuration properties that depends on those user name. (jaimin via yusaku)
-
- AMBARI-1826. Use service stop and start for Nagios/Ganglia/MySQL rather than
- puppet artifacts for starting stopping these services. (smohanty)
-
- AMBARI-1818. HBase master shuts down immediately after start in a secure 
- cluster. (swagle)
-
- AMBARI-1816. Security wizard: Add missing secure configs to Hbase service and
- make "zookeeper" as default primary name for zookeeper principal. (jaimin)
-
- AMBARI-1791. Can not specify request context for smoke test request. (swagle)
-
- AMBARI-1788. JMX getSpec error filling up server logs. (swagle)
-
- AMBARI-1787. Nagios script causes Datanode error. (swagle)
- 
- AMBARI-1674. Jobtracker metric for maps_completed shows wrong value
- (tbeerbower)
-
- AMBARI-1786. Ambari server start fail after reset. (smohanty)
-
- AMBARI-1784. MapReduce service damaging after hadoop installation with 
- custom MapReduce user which contains symbol '-'. (smohanty)
-
- AMBARI-1774. Ambari does not push the config updates to the client/gateway 
- node. (swagle)
-
- AMBARI-1780. POSTing new cluster returns 500 exception. (smohanty)
-
- AMBARI-1781. Ambari Server should work with MySQL and Oracle where the 
- Ambari Server data might be stored. (smohanty)
-
- AMBARI-1775. Security wizard - Javascript error is thrown when zooKeeper
- is included as a secure service. (jaimin)
-
- AMBARI-1771. On clicking master component host on Oozie and Hive
- service page javascript error is encountered.(jaimin)
-
- AMBARI-1767. Add ability to customize "ambari_qa" user. (smohanty)
-
- AMBARI-1770. Hue installation fails due to manifest errors. (swagle)
-
- AMBARI-1764. Unable to get all tasks from more than one request_id by one
- request (tbeerbower)
-
- AMBARI-1766. Hide Java Home option on step-7 of Installer wizard. (jaimin)
-
- AMBARI-1765. Enable the Ganglia rrd files location to be configurable
- when Ganglia is selected as service. (jaimin)
-
- AMBARI-1762. SUSE_Unable to start hive. (swagle)
-
- AMBARI-1761. Update the DDL update script to modify the table to
- include ph_cpu_count. (smohanty)
-
- AMBARI-1759. Error in creating host component. (smohanty)
-
- AMBARI-1757. Add support for Stack 1.2.2 to Ambari. (smohanty)
-
- AMBARI-1749. Set default heap size for zookeeper. (swagle)
-
- AMBARI-1748. JDK option on the UI when used is not passed onto the global 
- parameters. (srimanth)
-
- AMBARI-1747. Added executable permission of generate monitor/server scripts.
- (smohanty)
-
- AMBARI-1747. File ownership needs more consistency for those installations 
- where root access is hard to get. (smohanty)
-
- AMBARI-1561. API should return nagios_alerts as a JSON, not a stringified 
- JSON. (smohanty)
-
- AMBARI-1507. Should not install HDPHBaseMaster, HDPNameNode and HDPJobTracker
- ganglia configs on every node. (smohanty)
-
- AMBARI-1746. Backend support for LDAP Group to Ambari Role Mapping. 
- (smohanty)
-
- AMBARI-1506. Installs HBase ganglia configs when HBase not installed.
- (smohanty)
-
- AMBARI-1739. HBase and Zk failed to start on secure install. (swagle)
-
- AMBARI-1732. Oozie service check fails in secure cluster. (jaimin)
-
- AMBARI 1733. Add service/component specific upgrade puppet files. (smohanty)
-
- AMBARI-1731. WebHcat smoke test fails for the secure cluster. (jaimin)
-
- AMBARI-1730. Hive Service check fails in non secure cluster. (jaimin)
-
- AMBARI-1724. Agent has it hard-coded that HDP repo file can only be 
- downloaded once. (smohanty)
-
- AMBARI-1715. Ambari Agent Unit Test Failure: TestFileUtil.py. (smohanty)
-
- AMBARI-1533. Add Nagios check for ambari-agent process for each host in 
- the cluster. (smohanty)
-
- AMBARI-1713. Need to delete private ssh key from /var/run/ambari-server
- /bootstrap/* on Ambari Server after bootstrap is complete. (swagle)
-
- AMBARI-1711. Trunk is broken due  to invalid argument to a puppet custom 
- function hdp_default. (swagle)
-
- AMBARI-1706. Security wizard: "Done" and "back" buttons on Apply step 
- should be disabled while step is in progress. (jaimin)
-
- AMBARI-1705. Remove redundant API calls to update service configuration
- while disabling security. (jaimin)
-
- AMBARI-1661. For custom advanced properties, a new config with an empty key
- can be added. (yusaku)
-
- AMBARI-1659. Arrows often do not show up on config category expander. (yusaku)
-
- AMBARI-1645. Undo should not be allowed on component hosts. (yusaku)
-
- AMBARI-1644. Service summary page flickers. (yusaku)
-
- AMBARI-1689. 500 Exception creating service component during install. (Sumit 
- Mohanty via swagle)
-
- AMBARI-1504. Hosts show physical CPUs, instead of cores. (Sumit Mohanty 
- via swagle)
-
- AMBARI-1685. Remove running of smoke tests by default when services or 
- master components are started. (Sumit Mohanty via swagle)
-
- AMBARI-1688. API support to return 10 most recent requests. (swagle)
-
- AMBARI-1439. rrd file location should be read from global config. 
- New patch for reopened bug. (swagle)
-
- AMBARI-1667. Starting all services fails on secure cluster (excluding 
- HBase and ZooKeeper). (swagle)
-
- AMBARI-1666. Oozie properties for principal and keytab not read from 
- oozie-site. (swagle)
-
- AMBARI-1660. Server seems to ignore failures if the prior stage has failed 
- before the next iteration of the scheduler. (Sumit Mohanty via swagle)
-
- AMBARI-1657. User directories on HDFS do not get created with custom names 
- provided from Ambari UI. (swagle)
-
  AMBARI-2072. Fix to remove actual_configs from cluster response. (ncole)
 
  AMBARI-2036. Fix to send global configs with status_commands to agents. (ncole)
@@ -1529,39 +652,12 @@ Trunk (unreleased changes):
 
  AMBARI-1655. DELETE is not successful against ClusterStateEntity (ncole)
 
- AMBARI-1439. rrd file location should be read from global config. (swagle)
-
- AMBARI-1648. Hue configuration - DB properties cannot be empty. (swagle)
-
- AMBARI-1641. Some map and reduce task metrics are missing for the
- tasktrackers in the API. (tbeerbower)
-
- AMBARI-1640. Erroneos property is not highlighted while landing on step7
- of Installer wizard. (jaimin)
-
- AMBARI-1637. JCE test for policy files fails during secure install. (swagle)
-
- AMBARI-1621. Config/Reconfig UI should not allow certain configs to have
- host-level overrides. (yusaku)
-
- AMBARI-1597. Templeton smoke test fails for secure cluster. (swagle)
-
- AMBARI-1600. Make component naming consistent. (yusaku)
-
- AMBARI-1625. Oozie start fails on secure cluster. (swagle)
-
  AMBARI-1627. Fix to remove host configuration overrides. (ncole)
 
  AMBARI-1592. Fix configuration propagation.
 
  AMBARI-1619. Fix for category path separators.
 
- AMBARI-1616. Error during upgrading Ambari Server from 1.2.0/1.2.1 to 
- 1.2.2. (Sumit Mohanty via swagle)
-
- AMBARI-1603. JCE install on ambari-server fails if /tmp/HDP-artifacts does 
- not exists. (swagle)
-
  AMBARI-1612. Parameterizing nagios and ganglia monitoring rpm version.
  (Ashish Singh via yusaku)
 
@@ -1923,9 +1019,6 @@ Trunk (unreleased changes):
  AMBARI-1547. Fix ambari agent test cases that are failing due to missing
  directory. (mahadev)
 
- AMBARI-1617. Host check is broken because of changing the serialization from
- jackson to gson. (mahadev)
-
 AMBARI-1.2.0 branch:
 
  INCOMPATIBLE CHANGES
@@ -1944,8 +1037,6 @@ AMBARI-1.2.0 branch:
  AMBARI-1202. Unncessary use of xml tree python library in ambari-server
  setup. Its not being used. (Siddharth Wagle via mahadev)
 
- AMBARI-1769. Python REST client to invoke REST calls. (Subin M via mahadev)
-
  IMPROVEMENTS
 
  BUG FIXES
@@ -2635,9 +1726,6 @@ AMBARI-666 branch:
 
   BUG FIXES
 
-  AMBARI-1628. Tasktracker remains in STARTING state preventing Ambari 
-  to display proper status. (Sumit Mohanty via swagle)
-
   AMBARI-1160. Cannot add a hostname that has a number next to . in it.
   (yusaku)
 

+ 2 - 2
ambari-agent/pom.xml

@@ -19,14 +19,14 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../ambari-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-agent</artifactId>
   <packaging>pom</packaging>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
   <name>Ambari Agent</name>
   <description>Ambari Agent</description>
   <properties>

+ 0 - 37
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -165,43 +165,6 @@ define hdp-hadoop::namenode::create_app_directories($service_state)
         recursive_chmod => true
       }
     }
-
-    if $stack_version in ("2.0.1") {
-      if ($hdp::params::nm_hosts != "") {
-        if ($hdp::params::yarn_log_aggregation_enabled == "true") {
-          $yarn_user = $hdp::params::yarn_user
-          $yarn_nm_app_log_dir = $hdp::params::yarn_nm_app_log_dir
-
-          hdp-hadoop::hdfs::directory{ $yarn_nm_app_log_dir:
-            service_state => $service_state,
-            owner => $yarn_user,
-            mode  => '744',
-            recursive_chmod => true
-          }
-        }
-      }
-
-
-      if ($hdp::params::hs_host != "") {
-        $mapred_user = $hdp::params::mapred_user
-        $mapreduce_jobhistory_intermediate_done_dir = $hdp::params::mapreduce_jobhistory_intermediate_done_dir
-        $mapreduce_jobhistory_done_dir = $hdp::params::mapreduce_jobhistory_done_dir
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_intermediate_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          mode  => '777',
-          recursive_chmod => true
-        }
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          mode  => '750',
-          recursive_chmod => true
-        }
-      }
-    }
   }
 }
 

+ 0 - 6
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -77,12 +77,6 @@ class hdp-hadoop::params(
   
   $hadoop_libexec_dir = hdp_default("hadoop_libexec_dir","/usr/lib/hadoop/libexec")
   
-  $mapreduce_libs_path = hdp_default("mapreduce_libs_path","/usr/lib/hadoop-mapreduce/*")
-  
-  $mapred_log_dir_prefix = hdp_default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-  $mapred_pid_dir_prefix = hdp_default("mapreduce_libs_path","/var/run/hadoop-mapreduce")
-
   ### compression related
   if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
     $mapred_compress_map_output = true

+ 1 - 7
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -62,8 +62,6 @@ export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
 # Where log files are stored.  $HADOOP_HOME/logs by default.
 export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
 
-# History server logs
-export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("mapred_log_dir_prefix")%>/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
@@ -83,9 +81,6 @@ export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir
 export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
 export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
 
-# History server pid
-export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("mapred_pid_dir_prefix")%>/$USER
-
 # A string representing this instance of hadoop. $USER by default.
 export HADOOP_IDENT_STRING=$USER
 
@@ -106,5 +101,4 @@ do
   JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
 done
 #Add libraries required by nodemanager
-MAPREDUCE_LIBS=<%=scope.function_hdp_template_var("mapreduce_libs_path")%> 
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}

+ 0 - 4
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb

@@ -34,10 +34,6 @@ define servicegroup {
   servicegroup_name  ZOOKEEPER
   alias  ZOOKEEPER Checks
 }
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
 define servicegroup {
   servicegroup_name  HUE
   alias  HUE Checks

+ 13 - 25
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb

@@ -76,18 +76,6 @@ define service {
         max_check_attempts      3
 }
 
-# AMBARI AGENT Checks
-define service {
-        hostgroup_name          agent-servers
-        use                     hadoop-service
-        service_description     AMBARI::Check ambari-agent process
-        servicegroups           AMBARI
-        check_command           check_ambari_agent_status
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
 # NAGIOS SERVER ZOOKEEPER Checks
 <%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
 define service {
@@ -124,7 +112,7 @@ define service {
         use                     hadoop-service
         service_description     GANGLIA::Ganglia [gmetad] process down
         servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("ganglia_port")%>!-w 1 -c 1
+        check_command           check_tcp!8651!-w 1 -c 1
         normal_check_interval   0.25
         retry_check_interval    0.25
         max_check_attempts      4
@@ -135,7 +123,7 @@ define service {
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Collector [gmond] process down alert for slaves
         servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("ganglia_collector_slaves_port")%>!-w 1 -c 1
+        check_command           check_tcp!8660!-w 1 -c 1
         normal_check_interval   0.25
         retry_check_interval    0.25
         max_check_attempts      4
@@ -146,7 +134,7 @@ define service {
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Collector [gmond] process down alert for NameNode
         servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("ganglia_collector_namenode_port")%>!-w 1 -c 1
+        check_command           check_tcp!8661!-w 1 -c 1
         normal_check_interval   0.25
         retry_check_interval    0.25
         max_check_attempts      4
@@ -157,7 +145,7 @@ define service {
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker
         servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("ganglia_collector_jobtracker_port")%>!-w 1 -c 1
+        check_command           check_tcp!8662!-w 1 -c 1
         normal_check_interval   0.25
         retry_check_interval    0.25
         max_check_attempts      4
@@ -169,7 +157,7 @@ define service {
         use                     hadoop-service
         service_description     GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master
         servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("ganglia_collector_hbase_port")%>!-w 1 -c 1
+        check_command           check_tcp!8663!-w 1 -c 1
         normal_check_interval   0.25
         retry_check_interval    0.25
         max_check_attempts      4
@@ -231,7 +219,7 @@ define service {
         use                     hadoop-service
         service_description     NAMENODE::NameNode process down
         servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("namenode_metadata_port")%>!-w 1 -c 1
+        check_command           check_tcp!8020!-w 1 -c 1
        normal_check_interval   0.5
        retry_check_interval    0.25
        max_check_attempts      3
@@ -377,7 +365,7 @@ define service {
        use                     hadoop-service
        service_description     ZOOKEEPER::ZooKeeper Server process down
        servicegroups           ZOOKEEPER
-        check_command           check_tcp!<%=scope.function_hdp_template_var("clientPort")%>!-w 1 -c 1
+        check_command           check_tcp!2181!-w 1 -c 1
        normal_check_interval   1
        retry_check_interval    0.5
        max_check_attempts      3
@@ -440,9 +428,9 @@ define service {
        service_description     HIVE-METASTORE::Hive Metastore status check
        servicegroups           HIVE-METASTORE
        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_hive_metastore_status!<%=scope.function_hdp_template_var("hive_metastore_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
+        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
        <%else-%>
-        check_command           check_hive_metastore_status!<%=scope.function_hdp_template_var("hive_metastore_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!false
+        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!false
        <%end-%>
        normal_check_interval   0.5
        retry_check_interval    0.5
@@ -457,9 +445,9 @@ define service {
        service_description     OOZIE::Oozie Server status check
        servicegroups           OOZIE
        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_oozie_status!<%=scope.function_hdp_template_var("oozie_server_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
+        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
        <%else-%>
-        check_command           check_oozie_status!<%=scope.function_hdp_template_var("oozie_server_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!false
+        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!false
        <%end-%>
        normal_check_interval   1
        retry_check_interval    1
@@ -474,9 +462,9 @@ define service {
        service_description     WEBHCAT::WebHCat Server status check
        servicegroups           WEBHCAT 
        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_templeton_status!<%=scope.function_hdp_template_var("templeton_port")%>!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
+        check_command           check_templeton_status!50111!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>/"/usr/bin/kinit"
        <%else-%>
-        check_command           check_templeton_status!<%=scope.function_hdp_template_var("templeton_port")%>!v1!false
+        check_command           check_templeton_status!50111!v1!false
        <%end-%>
        normal_check_interval   1
        retry_check_interval    0.5
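For reference, each `check_tcp!<port>!-w 1 -c 1` command above hands the standard Nagios TCP plugin a port plus warning and critical response-time thresholds in seconds. A minimal Python sketch of that style of probe, as a reading aid only; the host, the sample port, and the 0/1/2 exit-code convention follow common Nagios plugin practice and are assumptions, not code from this commit:

    import socket
    import sys
    import time

    def check_tcp(host, port, warn=1.0, crit=1.0):
        """Time a TCP connect and map it to Nagios-style exit codes."""
        start = time.time()
        try:
            socket.create_connection((host, port), timeout=crit).close()
        except (socket.error, socket.timeout):
            print("CRITICAL - cannot connect to %s:%d" % (host, port))
            return 2
        elapsed = time.time() - start
        if elapsed >= crit:
            print("CRITICAL - %.3fs response from %s:%d" % (elapsed, host, port))
            return 2
        if elapsed >= warn:
            print("WARNING - %.3fs response from %s:%d" % (elapsed, host, port))
            return 1
        print("OK - %.3fs response from %s:%d" % (elapsed, host, port))
        return 0

    if __name__ == "__main__":
        # e.g. the gmond slaves port hardcoded above
        sys.exit(check_tcp("localhost", 8660))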

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp

@@ -39,13 +39,13 @@ class hdp-oozie(
       configuration => $configuration['oozie-site'],
       owner => $oozie_user,
       group => $hdp::params::user_group,
-      mode => '0660'
+      mode => '0664'
     }
   } else {
     file { "${oozie_config_dir}/oozie-site.xml":
       owner => $oozie_user,
       group => $hdp::params::user_group,
-      mode => '0660'
+      mode => '0664'
     }
   }
 

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-tez::initialize()
-{
-}

+ 0 - 40
ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp

@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-tez::tez_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-)
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-
-    include hdp-tez::initialize
-
-    $package_name = 'tez_client'
-
-    hdp::package{ $package_name :
-      ensure       => present,
-      package_type => $package_name
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 93
ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py

@@ -1,93 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import urllib2, urllib
-import json
-
-RESOURCEMANAGER = 'rm'
-HISTORYSERVER ='hs'
-
-STARTED_STATE = 'STARTED'
-
-def validate(component, path, port):
-
-  try:
-    url = 'http://localhost:' + str(port) + path
-    opener = urllib2.build_opener()
-    urllib2.install_opener(opener)
-    request = urllib2.Request(url)
-    handler = urllib2.urlopen(request)
-    response = json.loads(handler.read())
-    is_valid = validateResponse(component, response)
-    if is_valid:
-      exit(0)
-    else:
-      exit(1)
-  except Exception as e:
-    print 'Error checking status of component', e
-    exit(1)
-
-
-def validateResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validation of response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="port", help="Port for rest api of desired component")
-
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-  
-  port = options.port
-  
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validate(component, path, port)
-
-if __name__ == "__main__":
-  main()

+ 0 - 48
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp

@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::historyserver(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  $mapred_user = $hdp-yarn::params::mapred_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-    ##Process package
-    hdp-yarn::package{'mapreduce-historyserver':}
-
-  } elsif ($service_state in ['running','stopped']) {
-
-    include hdp-yarn::initialize
- 
-    hdp-yarn::service{ 'historyserver':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::historyserver::service_check() inherits hdp-yarn::params
-{
-  class { 'hdp-yarn::smoketest': component_name => 'historyserver'}
-}

+ 1 - 5
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp

@@ -22,17 +22,13 @@
 class hdp-yarn::initialize()
 {
   $yarn_user = $hdp-yarn::params::yarn_user
-  $mapred_user = $hdp-yarn::params::mapred_user
   
   
   ##Process package
   hdp-yarn::package{'yarn-common':}
 
-  # Create yarn user
+  # Create user
   hdp::user { $yarn_user:}
 
-  # Create mapred user
-  hdp::user { $mapred_user:}
-  
   #Generate common configs
   hdp-yarn::generate_common_configs{'yarn-common-configs':}
 

+ 0 - 33
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp

@@ -1,33 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-yarn::mapreducev2_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-
-    hdp-yarn::package{'hadoop-mapreduce-client':}
-
-  }
-}

+ 0 - 48
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp

@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::nodemanager(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  $yarn_user = $hdp-yarn::params::yarn_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-    ##Process package
-    hdp-yarn::package{'yarn-nodemanager':}
-
-  } elsif ($service_state in ['running','stopped']) {
-
-    include hdp-yarn::initialize
- 
-    hdp-yarn::service{ 'nodemanager':
-      ensure       => $service_state,
-      user         => $yarn_user
-    }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 4 - 9
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp

@@ -19,12 +19,12 @@
 #
 #
 class hdp-yarn::params(
-) inherits hdp-hadoop::params 
+) inherits hdp::params 
 {
 
   $conf_dir = $hdp::params::yarn_conf_dir 
-    
-  ## yarn-env 
+  
+  ## yarn-env
   $hadoop_libexec_dir = hdp_default("yarn/yarn-env/hadoop_libexec_dir","/usr/lib/hadoop/libexec")
 
   $hadoop_common_home = hdp_default("yarn/yarn-env/hadoop_common_home","/usr/lib/hadoop")
@@ -34,10 +34,5 @@ class hdp-yarn::params(
   
   
   $yarn_log_dir_prefix = hdp_default("hadoop/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
   $yarn_pid_dir_prefix = hdp_default("hadoop/yarn-env/yarn_pid_dir_prefix","/var/run/hadoop-yarn")
-  
-  ## yarn-site
-  $rm_webui_port = hdp_default("yarn-site/yarn.resourcemanager.webapp.address", "8088")
-  $nm_webui_port = hdp_default("yarn-site/yarn.nodemanager.webapp.address", "8042")
-  $hs_webui_port = hdp_default("yarn-site/mapreduce.jobhistory.address", "19888")
-
+ 
 }

+ 5 - 0
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp

@@ -35,6 +35,11 @@ class hdp-yarn::resourcemanager(
 
 
   } elsif ($service_state in ['running','stopped']) {
   
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      hdp_fail("Security not yet implemented for resource manager")
+    }
+
     include hdp-yarn::initialize
  
     hdp-yarn::service{ 'resourcemanager':

+ 0 - 24
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp

@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::resourcemanager::service_check() inherits hdp-yarn::params
-{
-  class { 'hdp-yarn::smoketest': component_name => 'resourcemanager'}
-}

+ 5 - 15
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp

@@ -28,25 +28,15 @@ define hdp-yarn::service(
 {
 
   $security_enabled = $hdp::params::security_enabled
-  
-  if ($name == 'historyserver') {
-    $log_dir = "${hdp-yarn::params::mapred_log_dir_prefix}"
-    $pid_dir = "${hdp-yarn::params::mapred_pid_dir_prefix}/${user}"
-    $daemon = "${hdp::params::mapred_bin}/mr-jobhistory-daemon.sh"
-    $pid_file = "${pid_dir}/mapred-${user}-${name}.pid"
-  } else {
-    $log_dir = "${hdp-yarn::params::yarn_log_dir_prefix}"
-    $pid_dir = "${hdp-yarn::params::yarn_pid_dir_prefix}/${user}"
-    $daemon = "${hdp::params::yarn_bin}/yarn-daemon.sh"
-    $pid_file = "${pid_dir}/yarn-${user}-${name}.pid"
-  }
-  
+  $log_dir = "${hdp-yarn::params::yarn_log_dir_prefix}"
+  $pid_dir = "${hdp-yarn::params::yarn_pid_dir_prefix}/${user}"
+  $yarn_daemon = "${hdp::params::yarn_bin}/yarn-daemon.sh"
   $hadoop_libexec_dir = $hdp-yarn::params::hadoop_libexec_dir
    
-  $cmd = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${daemon} --config ${hdp-yarn::params::conf_dir}"
+  $cmd = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${yarn_daemon} --config ${hdp-yarn::params::conf_dir}"
  
  
-
+  $pid_file = "${pid_dir}/yarn-${user}-${name}.pid"
  
  
  

+ 0 - 60
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp

@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::smoketest(
-  $component_name = undef
-)
-{
-  $rm_webui_port = $hdp-yarn::params::rm_webui_port
-  $nm_webui_port = $hdp-yarn::params::nm_webui_port
-  $hs_webui_port = $hdp-yarn::params::hs_webui_port
-
-  if ($component_name == 'resourcemanager') {
-    $component_type = 'rm'
-    $component_port = $rm_webui_port
-  } elsif ($component_name == 'historyserver') {
-    $component_type = 'hs' 
-    $component_port = $hs_webui_port
-  } else {
-    hdp_fail("Unsupported component name: $component_name")
-  }
-
-  $smoke_test_user = $hdp::params::smokeuser
-  
-  $validateStatusFileName = "validateYarnComponentStatus.py"
-  $validateStatusFilePath = "/tmp/$validateStatusFileName"
-
-  $validateStatusCmd = "su - ${smoke_test_user} -c 'python $validateStatusFilePath $component_type -p $component_port'"
-
-  file { $validateStatusFilePath:
-    ensure => present,
-    source => "puppet:///modules/hdp-yarn/$validateStatusFileName",
-    mode => '0755'
-  }
-
-  exec { $validateStatusFilePath:
-    command   => $validateStatusCmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-  File[$validateStatusFilePath] -> Exec[$validateStatusFilePath]
-}

+ 0 - 34
ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp

@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::yarn_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh

@@ -55,4 +55,4 @@ old_uid=$(id -u $username)
 echo "Changing uid of $username from $old_uid to $newUid"
 echo "Changing uid of $username from $old_uid to $newUid"
 echo "Changing directory permisions for ${dir_array[@]}"
 echo "Changing directory permisions for ${dir_array[@]}"
 usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
 usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0
+exit 0
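The last line of this script pairs `usermod -u $newUid $username` with a `chown -Rh` over each listed directory. As an illustration of the `-Rh` semantics (recurse, and re-own symlinks themselves rather than their targets), a rough Python sketch; the sample path and uid are hypothetical:

    import os

    def chown_tree(root, uid):
        """Recursively re-own a tree; lchown changes symlinks, not targets."""
        os.lchown(root, uid, -1)  # gid -1 leaves the group unchanged
        for dirpath, dirnames, filenames in os.walk(root):
            for name in dirnames + filenames:
                os.lchown(os.path.join(dirpath, name), uid, -1)

    # Example (hypothetical): chown_tree('/home/ambari-qa', 1012)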

+ 0 - 6
ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp

@@ -29,9 +29,6 @@ define hdp::configfile(
   $namenode_host = $hdp::params::namenode_host,
   $jtnode_host = $hdp::params::jtnode_host,
   $snamenode_host = $hdp::params::snamenode_host,
-  $rm_host = $hdp::params::rm_host,
-  $nm_hosts = $hdp::params::nm_hosts,
-  $hs_host = $hdp::params::hs_host,
   $slave_hosts = $hdp::params::slave_hosts,
   $mapred_tt_hosts = $hdp::params::mapred_tt_hosts,
   $all_hosts = $hdp::params::all_hosts,
@@ -49,9 +46,6 @@ define hdp::configfile(
   $gateway_host = $hdp::params::gateway_host,
   $public_namenode_host = $hdp::params::public_namenode_host,
   $public_snamenode_host = $hdp::params::public_snamenode_host,
-  $public_rm_host = $hdp::params::public_rm_host,
-  $public_nm_hosts = $hdp::params::public_nm_hosts,
-  $public_hs_host = $hdp::params::public_hs_host,
   $public_jtnode_host = $hdp::params::public_jtnode_host,
   $public_hbase_master_hosts = $hdp::params::public_hbase_master_hosts,
   $public_zookeeper_hosts = $hdp::params::public_zookeeper_hosts,

+ 0 - 12
ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp

@@ -55,18 +55,6 @@ class hdp(
   $hbase_master_port = hdp_default("hbase-site/hbase.master.info.port","60010")
   $hbase_rs_port = hdp_default("hbase-site/hbase.regionserver.info.port","60030")
   
-  $ganglia_port = hdp_default("ganglia_port","8651")
-  $ganglia_collector_slaves_port = hdp_default("ganglia_collector_slaves_port","8660")
-  $ganglia_collector_namenode_port = hdp_default("ganglia_collector_namenode_port","8661")
-  $ganglia_collector_jobtracker_port = hdp_default("ganglia_collector_jobtracker_port","8662")
-  $ganglia_collector_hbase_port = hdp_default("ganglia_collector_hbase_port","8663")
-
-  $oozie_server_port = hdp_default("oozie_server_port","11000")
-
-  $templeton_port = hdp_default("webhcat-site/templeton.port","50111")
-
-  $namenode_metadata_port = hdp_default("namenode_metadata_port","8020")
-  
   #TODO: think not needed and also there seems to be a puppet bug around this and ldap
   class { 'hdp::snmp': service_state => 'running'}
 

+ 3 - 47
ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp

@@ -36,7 +36,6 @@ class hdp::params()
     $oozie-site = $configuration['oozie-site']
     $sqoop-site = $configuration['sqoop-site']
     $webhcat-site = $configuration['webhcat-site']
-    $yarn-site = $configuration['yarn-site']
   }
 
   ##### global state defaults ####
@@ -62,10 +61,6 @@ class hdp::params()
   $jtnode_host = hdp_default("jtnode_host")
   $slave_hosts = hdp_default("slave_hosts")
 
-  $rm_host = hdp_default("rm_host")
-  $nm_hosts = hdp_default("nm_hosts")
-  $hs_host = hdp_default("hs_host")
-
   $zookeeper_hosts = hdp_default("zookeeper_hosts")
 
   $hbase_master_hosts = hdp_default("hbase_master_hosts", "")
@@ -131,9 +126,6 @@ class hdp::params()
   if ($hostAttributes != undef) {
     $public_namenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$namenode_host)
     $public_snamenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$snamenode_host)
-    $public_rm_host = hdp_host_attribute($hostAttributes,"publicfqdn",$rm_host)
-    $public_nm_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$nm_hosts)
-    $public_hs_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hs_host)
     $public_jtnode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$jtnode_host)
     $public_hbase_master_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$hbase_master_hosts)
     $public_zookeeper_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$zookeeper_hosts)
@@ -146,9 +138,6 @@ class hdp::params()
   } else {
     $public_namenode_host = hdp_default("namenode_host")
     $public_snamenode_host = hdp_default("snamenode_host")
-    $public_rm_host = hdp_default("rm_host")
-    $public_nm_hosts = hdp_default("nm_hosts")
-    $public_hs_host = hdp_default("hs_host")
     $public_jtnode_host = hdp_default("jtnode_host")
     $public_hbase_master_hosts = hdp_default("hbase_master_hosts")
     $public_zookeeper_hosts = hdp_default("zookeeper_hosts")
@@ -203,14 +192,6 @@ class hdp::params()
   $webhcat_apps_dir = hdp_default("webhcat_apps_dir", "/apps/webhcat")
   $hbase_hdfs_root_dir = hdp_default("hbase-site/hbase.hdfs.root.dir","/apps/hbase/data")
 
-  $yarn_nm_app_log_dir = hdp_default("yarn-site/yarn.nodemanager.remote-app-log-dir","/app-logs")
-
-  $yarn_log_aggregation_enabled = hdp_default("yarn-site/yarn.log-aggregation-enable","true")
-
-  $mapreduce_jobhistory_intermediate_done_dir = hdp_default("mapred-site/mapreduce.jobhistory.intermediate-done-dir","/mr-history/tmp")
-  
-  $mapreduce_jobhistory_done_dir = hdp_default("mapred-site/mapreduce.jobhistory.done-dir","/mr-history/done")
-  
   $user_group = hdp_default("user_group","hadoop")
 
   $ganglia_enabled = hdp_default("ganglia_enabled",true) 
@@ -305,7 +286,6 @@ class hdp::params()
       $hadoop_bin = "/usr/lib/hadoop/bin"
     }
     $yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-    $mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
     $hadoop_conf_dir = "/etc/hadoop/conf"
     $yarn_conf_dir = "/etc/hadoop/conf"
     $zk_conf_dir = "/etc/zookeeper/conf"
@@ -396,14 +376,6 @@ class hdp::params()
         }
       },
 
-    hadoop-mapreduce-client => {
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-mapreduce']
-        }
-      }
-    },
-
     yarn-common => { 
       'ALL' => {
         64 => {
@@ -415,7 +387,7 @@ class hdp::params()
     yarn-nodemanager => { 
       'ALL' => {
         64 => {
-          'ALL' => ['hadoop-yarn-nodemanager']
+          'ALL' => ['hadoop-yarn-nodemanager', 'hadoop-yarn-proxyserver', 'hadoop-yarn-resourcemanager']
         }
       }
     },
@@ -423,7 +395,7 @@ class hdp::params()
     yarn-proxyserver => { 
       'ALL' => {
         64 => {
-          'ALL' => ['hadoop-yarn-proxyserver']
+          'ALL' => ['hadoop-yarn-proxyserver', 'hadoop-yarn-resourcemanager']
         }
       }
     },
@@ -431,23 +403,7 @@ class hdp::params()
     yarn-resourcemanager => { 
       'ALL' => {
         64 => {
-          'ALL' => ['hadoop-yarn-resourcemanager', 'hadoop-mapreduce']
-        }
-      }
-    },
-
-    mapreduce-historyserver => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-mapreduce-historyserver']
-        }
-      }
-    },
-
-    tez_client => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['tez']
+          'ALL' => ['hadoop-yarn-resourcemanager']
         }
       }
     },
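The package map edited above nests component name, then OS family, then word size, then OS version, with 'ALL' acting as a wildcard, and resolves to a list of OS packages. A small Python sketch of how such a map could be resolved; the helper and this reading of the inner keys are illustrative, not Ambari's actual resolver:

    def resolve_packages(pkg_map, component, os_family, bits, os_version):
        # Fall back to the 'ALL' wildcard at each level, mirroring the
        # structure shown in the diff above.
        node = pkg_map[component]
        node = node.get(os_family, node.get('ALL'))
        node = node[bits]
        return node.get(os_version, node.get('ALL'))

    pkg_map = {
        'yarn-resourcemanager': {'ALL': {64: {'ALL': ['hadoop-yarn-resourcemanager']}}},
    }
    print(resolve_packages(pkg_map, 'yarn-resourcemanager', 'centos6', 64, '6'))
    # -> ['hadoop-yarn-resourcemanager']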

+ 1 - 10
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -83,12 +83,8 @@ rolesToClass = {
   'JOBTRACKER': 'hdp-hadoop::jobtracker',
   'TASKTRACKER': 'hdp-hadoop::tasktracker',
   'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
   'HDFS_CLIENT': 'hdp-hadoop::client',
   'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCEv2_CLIENT': 'hdp-yarn::mapreducev2_client',
   'ZOOKEEPER_SERVER': 'hdp-zookeeper',
   'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
   'HBASE_MASTER': 'hdp-hbase::master',
@@ -123,10 +119,7 @@ rolesToClass = {
   'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
   'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
   'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client'
+  'HUE_SERVICE_CHECK': 'hdp-hue::service_check'
 }
 
 serviceStates = {
@@ -142,8 +135,6 @@ servicesToPidNames = {
   'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
   'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
   'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
   'OOZIE_SERVER': 'oozie.pid',
   'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
   'TEMPLETON_SERVER': 'templeton.pid',
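These two dictionaries drive the agent's dispatch: `rolesToClass` maps an Ambari role name to the Puppet class that manages it, and `servicesToPidNames` maps a role to a pid-file regex with a `{USER}` placeholder. A hedged usage sketch; the substitution helper is illustrative, since the agent's matching code is not part of this diff:

    import re

    rolesToClass = {'RESOURCEMANAGER': 'hdp-yarn::resourcemanager'}
    servicesToPidNames = {'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$'}

    def pid_pattern(role, user):
        # Substitute the daemon user into the pid-file pattern.
        return re.compile(servicesToPidNames[role].replace('{USER}', user))

    print(rolesToClass['RESOURCEMANAGER'])                # hdp-yarn::resourcemanager
    print(pid_pattern('RESOURCEMANAGER', 'yarn').pattern) # yarn-yarn-resourcemanager.pid$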

+ 2 - 0
ambari-agent/src/main/python/ambari_agent/Heartbeat.py

@@ -57,6 +57,8 @@ class Heartbeat:
       heartbeat['reports'] = queueResult['reports']
       heartbeat['componentStatus'] = queueResult['componentStatus']
       pass
+    logger.info("Sending heartbeat with response id: " + str(id) + " and "
+      "timestamp: " + str(timestamp))
     logger.debug("Heartbeat : " + pformat(heartbeat))
 
     if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
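The added `logger.info` call builds its message eagerly with string concatenation; for reference, the standard `logging` module also accepts deferred `%`-style arguments, so the message is only rendered when INFO is enabled. A self-contained sketch with sample values:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("Heartbeat")

    response_id, timestamp = 12, 1368712000  # sample values only
    logger.info("Sending heartbeat with response id: %s and timestamp: %s",
                response_id, timestamp)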

+ 2 - 2
ambari-client/pom.xml

@@ -19,14 +19,14 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../ambari-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-client</artifactId>
   <packaging>pom</packaging>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
   <name>Ambari client</name>
   <description>Ambari Python client</description>
   <properties>

+ 2 - 2
ambari-project/pom.xml

@@ -17,11 +17,11 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.2.3-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-project</artifactId>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
   <description>Apache Ambari Project POM</description>
   <name>Apache Ambari Project POM</name>
   <packaging>pom</packaging>

+ 0 - 24
ambari-server/docs/api/v1/cluster-resources.md

@@ -1,24 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-# Cluster Resources
-Cluster resources represent named Hadoop clusters.  Clusters are top level resources.
-
-- [List clusters](clusters.md)
-- [View cluster information](clusters-cluster.md)
-- [Create cluster](create-cluster.md)
-- [Delete cluster](delete-cluster.md)

+ 0 - 26
ambari-server/docs/api/v1/component-resources.md

@@ -1,26 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-# Component Resources
-Component resources are the individual components of a service (e.g. HDFS/NameNode and MapReduce/JobTracker).  Components are sub-resources of services.
- 
-
-- [List service components](components.md)
-- [View component information](components-component.md)
-- [Create component](create-component.md)
-
-

+ 0 - 40
ambari-server/docs/api/v1/create-cluster.md

@@ -1,40 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Create Cluster
-=====
-
-[Back to Resources](index.md#resources)
-
-Create a cluster named ‘c1’ with the property ‘Clusters/version’ = ‘HDP-1.2.0’.
-
-
-    POST /clusters/c1
-
-**Body**
-
-    {
-      "Clusters": {
-        "version" : "HDP-1.2.0”
-      }
-    }
-
-**Response**
-
-    200 OK
-    

+ 0 - 33
ambari-server/docs/api/v1/create-component.md

@@ -1,33 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Create Component
-=====
-
-[Back to Resources](index.md#resources)
-
-Create the NAMENODE component.
-
-
-    POST /clusters/c1/services/HDFS/components/NAMENODE
-
-
-**Response**
-
-    200 OK
-    

+ 0 - 33
ambari-server/docs/api/v1/create-host.md

@@ -1,33 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Create Host
-=====
-
-[Back to Resources](index.md#resources)
-
-Create the host named your.ambari.host.
-
-
-    POST /clusters/c1/hosts/your.ambari.host
-
-
-**Response**
-
-    200 OK
-    

+ 0 - 33
ambari-server/docs/api/v1/create-hostcomponent.md

@@ -1,33 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Create Host Component
-=====
-
-[Back to Resources](index.md#resources)
-
-Create the NAMENODE host component on your.ambari.host.
-
-
-    POST clusters/c1/hosts/your.ambari.host/host_components/NAMENODE
-
-
-**Response**
-
-    200 OK
-    

+ 0 - 33
ambari-server/docs/api/v1/create-service.md

@@ -1,33 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Create Service
-=====
-
-[Back to Resources](index.md#resources)
-
-Create the HDFS service.
-
-
-    POST /clusters/c1/services/HDFS
-
-
-**Response**
-
-    200 OK
-    

+ 0 - 33
ambari-server/docs/api/v1/delete-cluster.md

@@ -1,33 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Delete Cluster
-=====
-
-[Back to Resources](index.md#resources)
-
-Delete a cluster named ‘c1’.
-
-
-    DELETE /clusters/c1
-
-
-**Response**
-
-    200 OK
-    

+ 0 - 118
ambari-server/docs/api/v1/host-component-resources.md

@@ -1,118 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-# Host Component Resources
-###States
-
-The current state of a host component resource can be determined by looking at the ServiceComponentInfo/state property.
-
-
-    GET api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=ServiceComponentInfo/state
-
-**Response**
-
-    200 OK
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=ServiceComponentInfo/state",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "c1",
-        "component_name" : "NAMENODE",
-        "state" : "INSTALLED",
-        "service_name" : "HDFS"
-      }
-    }
-
-The following table lists the possible values of the service resource ServiceComponentInfo/state property.
-<table>
-  <tr>
-    <th>State</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>INIT</td>
-    <td>The initial clean state after the component is first created.</td>  
-  </tr>
-  <tr>
-    <td>INSTALLING</td>
-    <td>In the process of installing the component.</td>  
-  </tr>
-  <tr>
-    <td>INSTALL_FAILED</td>
-    <td>The component install failed.</td>  
-  </tr>
-  <tr>
-    <td>INSTALLED</td>
-    <td>The component has been installed successfully but is not currently running.</td>  
-  </tr>
-  <tr>
-    <td>STARTING</td>
-    <td>In the process of starting the component.</td>  
-  </tr>
-  <tr>
-    <td>STARTED</td>
-    <td>The component has been installed and started.</td>  
-  </tr>
-  <tr>
-    <td>STOPPING</td>
-    <td>In the process of stopping the component.</td>  
-  </tr>
-
-  <tr>
-    <td>UNINSTALLING</td>
-    <td>In the process of uninstalling the component.</td>  
-  </tr>
-  <tr>
-    <td>UNINSTALLED</td>
-    <td>The component has been successfully uninstalled.</td>  
-  </tr>
-  <tr>
-    <td>WIPING_OUT</td>
-    <td>In the process of wiping out the installed component.</td>  
-  </tr>
-  <tr>
-    <td>UPGRADING</td>
-    <td>In the process of upgrading the component.</td>  
-  </tr>
-  <tr>
-    <td>MAINTENANCE</td>
-    <td>The component has been marked for maintenance.</td>  
-  </tr>
-  <tr>
-    <td>UNKNOWN</td>
-    <td>The component state can not be determined.</td>  
-  </tr>
-</table>
-
-###Starting
-A component can be started through the API by setting its state to be STARTED (see [update host component](update-hostcomponent.md)).
-
-###Stopping
-A component can be stopped through the API by setting its state to be INSTALLED (see [update host component](update-hostcomponent.md)).
-
-###Maintenance
-
-The user can update the desired state of a component through the API to MAINTENANCE (see [update host component](update-hostcomponent.md)). When a host component is in the MAINTENANCE state, it is effectively taken offline. This state can be used, for example, to move a component such as the NameNode: put the existing NameNode component into MAINTENANCE mode, then create a new NameNode for the service.
-
-
-
-###Examples
-
-
-- [List host components](host-components.md)
-- [View host component information](host-component.md)
-- [Create host component](create-hostcomponent.md)
-- [Update host component](update-hostcomponent.md)

+ 0 - 23
ambari-server/docs/api/v1/host-resources.md

@@ -1,23 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-# Host Resources
- 
-
-- [List hosts](hosts.md)
-- [View host information](hosts-host.md)
-- [Create host](create-host.md)

File diff suppressed because it is too large
+ 0 - 0
ambari-server/docs/api/v1/index.md


+ 0 - 114
ambari-server/docs/api/v1/service-resources.md

@@ -1,114 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-# Service Resources
-Service resources are services of a Hadoop cluster (e.g. HDFS, MapReduce and Ganglia).  Service resources are sub-resources of clusters. 
-
-###States
-
-The current state of a service resource can be determined by looking at the ServiceInfo/state property.
-
-
-    GET api/v1/clusters/c1/services/HDFS?fields=ServiceInfo/state
-
-**Response**
-
-    200 OK
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS?fields=ServiceInfo/state",
-      "ServiceInfo" : {
-        "cluster_name" : "c1",
-        "state" : "INSTALLED",
-        "service_name" : "HDFS"
-      }
-    }
-
-The following table lists the possible values of the service resource ServiceInfo/state property.
-<table>
-  <tr>
-    <th>State</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>INIT</td>
-    <td>The initial clean state after the service is first created.</td>  
-  </tr>
-  <tr>
-    <td>INSTALLING</td>
-    <td>In the process of installing the service.</td>  
-  </tr>
-  <tr>
-    <td>INSTALL_FAILED</td>
-    <td>The service install failed.</td>  
-  </tr>
-  <tr>
-    <td>INSTALLED</td>
-    <td>The service has been installed successfully but is not currently running.</td>  
-  </tr>
-  <tr>
-    <td>STARTING</td>
-    <td>In the process of starting the service.</td>  
-  </tr>
-  <tr>
-    <td>STARTED</td>
-    <td>The service has been installed and started.</td>  
-  </tr>
-  <tr>
-    <td>STOPPING</td>
-    <td>In the process of stopping the service.</td>  
-  </tr>
-
-  <tr>
-    <td>UNINSTALLING</td>
-    <td>In the process of uninstalling the service.</td>  
-  </tr>
-  <tr>
-    <td>UNINSTALLED</td>
-    <td>The service has been successfully uninstalled.</td>  
-  </tr>
-  <tr>
-    <td>WIPING_OUT</td>
-    <td>In the process of wiping out the installed service.</td>  
-  </tr>
-  <tr>
-    <td>UPGRADING</td>
-    <td>In the process of upgrading the service.</td>  
-  </tr>
-  <tr>
-    <td>MAINTENANCE</td>
-    <td>The service has been marked for maintenance.</td>  
-  </tr>
-  <tr>
-    <td>UNKNOWN</td>
-    <td>The service state cannot be determined.</td>  
-  </tr>
-</table>
-
-###Starting
-A service can be started through the API by setting its state to be STARTED (see [update service](update-service.md)).
-
-###Stopping
-A service can be stopped through the API by setting its state to be INSTALLED (see [update service](update-service.md)).
-
-###Examples
-
-- [List services](services.md)
-- [View service information](services-service.md)
-- [Create service](create-service.md)
-- [Update services](update-services.md)
-- [Update service](update-service.md)
-
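
Since start and stop are asynchronous, the 202 Accepted response carries an href to a request resource rather than the final state; a minimal sketch of tracking such an operation, assuming request id 3 on cluster c1 (the exact status strings are version-dependent):

    GET api/v1/clusters/c1/requests/3

The Requests/status field in the response reports the progress of the start or stop operation.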

+ 0 - 94
ambari-server/docs/api/v1/update-hostcomponent.md

@@ -1,94 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Update Host Component
-=====
-
-[Back to Resources](index.md#resources)
-
-###Start the NameNode Component
-Start the NAMENODE component by updating its state to 'STARTED'.
-
-
-    PUT api/v1/clusters/c1/hosts/hostname/host_components/NAMENODE
-    
-    {
-      "HostRoles":{
-        "state":"STARTED"
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server:8080/api/v1/clusters/c1/requests/12",
-      "Requests" : {
-        "id" : 12,
-        "status" : "InProgress"
-      }
-    }
-    
-###Stop the NameNode Component
-Stop the NAMENODE component by updating its state to 'INSTALLED'.
-
-
-    PUT api/v1/clusters/c1/hosts/hostname/host_components/NAMENODE
-    
-    {
-      "HostRoles":{
-        "state":"INSTALLED"
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server:8080/api/v1/clusters/c1/requests/13",
-      "Requests" : {
-        "id" : 13,
-        "status" : "InProgress"
-      }
-    }
-    
-###Set MAINTENANCE Mode    
-Put the NAMENODE component into 'MAINTENANCE' mode.
-
-
-    PUT api/v1/clusters/c1/hosts/hostname/host_components/NAMENODE
-    
-    {
-      "HostRoles":{
-        "state":"MAINTENANCE"
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server:8080/api/v1/clusters/c1/requests/14",
-      "Requests" : {
-        "id" : 14,
-        "status" : "InProgress"
-      }
-    }    

+ 0 - 73
ambari-server/docs/api/v1/update-service.md

@@ -1,73 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Update Service
-=====
-
-[Back to Resources](index.md#resources)
-
-###Start the HDFS Service
-Start the HDFS service (update the state of the HDFS service to be ‘STARTED’).
-
-
-    PUT /clusters/c1/services/HDFS/
-
-**Body**
-
-    {
-      "ServiceInfo": {
-        "state" : "STARTED”
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters/c1/requests/3",
-      "Requests" : {
-        "id" : 3,
-        "status" : "InProgress"
-      } 
-    }
-
-###Stop the HDFS Service
-Stop the HDFS service (update the state of the HDFS service to be ‘INSTALLED’).
-
-
-    PUT /clusters/c1/services/HDFS/
-
-**Body**
-
-    {
-      "ServiceInfo": {
-        "state" : "INSTALLED”
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters/c1/requests/3",
-      "Requests" : {
-        "id" : 4,
-        "status" : "InProgress"
-      } 
-    }

+ 0 - 48
ambari-server/docs/api/v1/update-services.md

@@ -1,48 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Update Services
-=====
-
-[Back to Resources](index.md#resources)
-
-Update the state of all ‘INSTALLED’ services to be ‘STARTED’.
-
-
-    PUT /clusters/c1/services?ServiceInfo/state=INSTALLED
-
-**Body**
-
-    {
-      "ServiceInfo": {
-        "state" : "STARTED”
-      }
-    }
-
-
-**Response**
-
-    202 Accepted
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters/c1/requests/3",
-      "Requests" : {
-        "id" : 3,
-        "status" : "InProgress"
-      } 
-    }
-    
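
The query string here acts as a predicate: only services whose current ServiceInfo/state matches INSTALLED receive the new desired state from the body. By the same pattern, a sketch of the reverse bulk operation (hypothetical, not part of the original page):

    PUT /clusters/c1/services?ServiceInfo/state=STARTED

    {
      "ServiceInfo": {
        "state" : "INSTALLED"
      }
    }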

+ 3 - 3
ambari-server/pom.xml

@@ -16,7 +16,7 @@
  <parent>
    <groupId>org.apache.ambari</groupId>
    <artifactId>ambari-project</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.2.3-SNAPSHOT</version>
    <relativePath>../ambari-project</relativePath>
  </parent>
  <modelVersion>4.0.0</modelVersion>
@@ -24,7 +24,7 @@
  <artifactId>ambari-server</artifactId>
  <packaging>jar</packaging>
  <name>Ambari Server</name>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
  <description>Ambari Server</description>
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -400,7 +400,7 @@
              </environmentVariables>
              <skip>${skipTests}</skip>
            </configuration>
-            <id>python-test</id>
+            <id>default-cli</id>
            <phase>test</phase>
            <goals>
              <goal>exec</goal>

+ 1 - 8
ambari-server/src/main/java/org/apache/ambari/server/Role.java

@@ -35,7 +35,6 @@ public enum Role {
  JOBTRACKER,
  TASKTRACKER,
  MAPREDUCE_CLIENT,
-  MAPREDUCEv2_CLIENT,
  JAVA_JCE,
  HADOOP_CLIENT,
  JOBTRACKER_SERVICE_CHECK,
@@ -70,11 +69,5 @@ public enum Role {
  DECOMMISSION_DATANODE,
  HUE_SERVER,
  AMBARI_SERVER_ACTION,
-  RESOURCEMANAGER,
-  RESOURCEMANAGER_SERVICE_CHECK,
-  HISTORYSERVER_SERVICE_CHECK,
-  NODEMANAGER,
-  YARN_CLIENT,
-  HISTORYSERVER,
-  TEZ_CLIENT
+  RESOURCEMANAGER
}

+ 4 - 6
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java

@@ -105,12 +105,10 @@ public class HeartBeatHandler {
      return createRegisterCommand();
    }
 
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received heartbeat from host"
-          + ", hostname=" + hostname
-          + ", currentResponseId=" + currentResponseId
-          + ", receivedResponseId=" + heartbeat.getResponseId());
-    }
+    LOG.info("Received heartbeat from host"
+        + ", hostname=" + hostname
+        + ", currentResponseId=" + currentResponseId
+        + ", receivedResponseId=" + heartbeat.getResponseId());
 
 
    if (heartbeat.getResponseId() == currentResponseId - 1) {
      LOG.warn("Old responseId received - response was lost - returning cached response");

+ 46 - 49
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java

@@ -178,62 +178,59 @@ public class HeartbeatMonitor implements Runnable {
    List<StatusCommand> cmds = new ArrayList<StatusCommand>();
     
     
    for (Cluster cl : fsm.getClustersForHost(hostname)) {
+      
      for (ServiceComponentHost sch : cl.getServiceComponentHosts(hostname)) {
        String serviceName = sch.getServiceName();
-        Service service = cl.getService(sch.getServiceName());
-        ServiceComponent sc = service.getServiceComponent(sch
-          .getServiceComponentName());
-        // Do not send status commands for client components
-        if (!sc.isClientComponent()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Live status will include status of service " + serviceName + " of cluster " + cl.getClusterName());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Live status will include status of service " + serviceName + " of cluster " + cl.getClusterName());
+        }
+        
+        Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
+        
+        // get the cluster config for type 'global'
+        // apply service overrides, if the tag is not the same
+        // apply host overrides, if any
+        
+        Config clusterConfig = cl.getDesiredConfigByType("global");
+        if (null != clusterConfig) {
+          // cluster config for 'global'
+          Map<String,String> props = new HashMap<String, String>(clusterConfig.getProperties());
+
+          // apply service overrides, only if the tag is not the same (for when service configs are overrides)
+          Service service = cl.getService(sch.getServiceName());
+          Config svcConfig = service.getDesiredConfigs().get("global");
+          if (null != svcConfig && !svcConfig.getVersionTag().equals(clusterConfig.getVersionTag())) {
+            props.putAll(svcConfig.getProperties());
          }
-
-          Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
-
-          // get the cluster config for type 'global'
-          // apply service overrides, if the tag is not the same
+          
          // apply host overrides, if any
-
-          Config clusterConfig = cl.getDesiredConfigByType("global");
-          if (null != clusterConfig) {
-            // cluster config for 'global'
-            Map<String, String> props = new HashMap<String, String>(clusterConfig.getProperties());
-
-            // apply service overrides, only if the tag is not the same (for when service configs are overrides)
-            Config svcConfig = service.getDesiredConfigs().get("global");
-            if (null != svcConfig && !svcConfig.getVersionTag().equals(clusterConfig.getVersionTag())) {
-              props.putAll(svcConfig.getProperties());
+          Host host = fsm.getHost(hostname);
+          DesiredConfig dc = host.getDesiredConfigs(cl.getClusterId()).get("global");
+          if (null != dc) {
+            Config hostConfig = cl.getConfig("global", dc.getVersion());
+            if (null != hostConfig) {
+              props.putAll(hostConfig.getProperties());
            }
-
-            // apply host overrides, if any
-            Host host = fsm.getHost(hostname);
-            DesiredConfig dc = host.getDesiredConfigs(cl.getClusterId()).get("global");
-            if (null != dc) {
-              Config hostConfig = cl.getConfig("global", dc.getVersion());
-              if (null != hostConfig) {
-                props.putAll(hostConfig.getProperties());
-              }
-            }
-
-            configurations.put("global", props);
-          }
-
-          // HACK - if any service exists with global tag, and we have none, use
-          // that instead
-          if (configurations.isEmpty()) {
-            Config config = service.getDesiredConfigs().get("global");
-            if (null != config)
-              configurations.put("global", new HashMap<String, String>(config.getProperties()));
          }
-
-          StatusCommand statusCmd = new StatusCommand();
-          statusCmd.setClusterName(cl.getClusterName());
-          statusCmd.setServiceName(serviceName);
-          statusCmd.setComponentName(sch.getServiceComponentName());
-          statusCmd.setConfigurations(configurations);
-          cmds.add(statusCmd);
+          
+          configurations.put("global", props);
+        }
+        
+        // HACK - if any service exists with global tag, and we have none, use
+        // that instead
+        if (configurations.isEmpty()) {
+          Service service = cl.getService(sch.getServiceName());
+          Config config = service.getDesiredConfigs().get("global");
+          if (null != config)
+            configurations.put("global", new HashMap<String,String>(config.getProperties()));
        }
+        
+        StatusCommand statusCmd = new StatusCommand();
+        statusCmd.setClusterName(cl.getClusterName());
+        statusCmd.setServiceName(serviceName);
+        statusCmd.setComponentName(sch.getServiceComponentName());
+        statusCmd.setConfigurations(configurations);
+        cmds.add(statusCmd);
      }
    }
    return cmds;

+ 4 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentResource.java

@@ -106,7 +106,10 @@ public class AgentResource {
    HeartBeatResponse heartBeatResponse;
    try {
      heartBeatResponse = hh.handleHeartBeat(message);
-      LOG.debug("Sending heartbeat response " + heartBeatResponse);
+      LOG.info("Sending heartbeat response with response id " + heartBeatResponse.getResponseId());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Response details " + heartBeatResponse);
+      }
    } catch (Exception e) {
      LOG.info("Error in HeartBeat", e);
      throw new WebApplicationException(500);

+ 1 - 6
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -3713,8 +3713,7 @@ public class AmbariManagementControllerImpl implements
    for (TaskStatusRequest request : requests) {
      if (request.getTaskId() != null) {
        taskIds.add(request.getTaskId());
-      }
-      if (request.getRequestId() != null) {
+      } else {
        requestIds.add(request.getRequestId());
      }
    }
@@ -3724,10 +3723,6 @@ public class AmbariManagementControllerImpl implements
      responses.add(new TaskStatusResponse(command));
    }
 
 
-    if (responses.size() == 0) {
-      throw new ObjectNotFoundException("Task resource doesn't exist.");
-    }
-
    return responses;
  }
 
 

+ 3 - 84
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaMetric.java

@@ -18,10 +18,6 @@
 
 
 package org.apache.ambari.server.controller.ganglia;
 
 
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 @JsonIgnoreProperties(ignoreUnknown = true)
 
 
@@ -62,22 +58,6 @@ public class GangliaMetric {
   * The temporal data points.
   */
  private Number[][] datapoints;
-  
-  
-  private static final Set<String> PERCENTAGE_METRIC;
-
-  //BUG-3386 Cluster CPU Chart is off the charts
-  // Here can be added other percentage metrics
-  static {
-    Set<String> temp = new HashSet<String>();
-    temp.add("cpu_wio");
-    /*temp.add("cpu_idle");
-    temp.add("cpu_nice");
-    temp.add("cpu_aidle");
-    temp.add("cpu_system");
-    temp.add("cpu_user");*/
-    PERCENTAGE_METRIC = Collections.unmodifiableSet(temp);
-  }
 
 
 
 
  // ----- GangliaMetric -----------------------------------------------------
@@ -126,46 +106,11 @@ public class GangliaMetric {
    return datapoints;
  }
 
 
-
  public void setDatapoints(Number[][] datapoints) {
    this.datapoints = datapoints;
-  } 
-  
-  public void setDatapointsFromList(List<GangliaMetric.TemporalMetric> listTemporalMetrics) { 
-    //this.datapoints = datapoints;
-    Number[][] datapointsArray = new Number[listTemporalMetrics.size()][2];
-    int cnt = 0;
-    if (PERCENTAGE_METRIC.contains(metric_name)) {
-      int firstIndex = 0;
-      int lastIndex = listTemporalMetrics.size() - 1;
-      for (int i = firstIndex; i <= lastIndex; ++i) {
-        GangliaMetric.TemporalMetric m = listTemporalMetrics.get(i);
-        Number val = m.getValue();
-        if (100.0 >= val.doubleValue()) {
-          datapointsArray[cnt][0] = val;
-          datapointsArray[cnt][1] = m.getTime();
-          cnt++;
-        }
-      }
-    } else {
-      int firstIndex = 0;
-      int lastIndex = listTemporalMetrics.size() - 1;
-      for (int i = firstIndex; i <= lastIndex; ++i) {
-        GangliaMetric.TemporalMetric m = listTemporalMetrics.get(i);
-        datapointsArray[i][0] = m.getValue();
-        datapointsArray[i][1] = m.getTime();
-        cnt++;
-      }
-    }
-
-    this.datapoints = new Number[cnt][2];
-    for (int i = 0; i < this.datapoints.length; i++) {
-      this.datapoints[i][0] = datapointsArray[i][0];
-      this.datapoints[i][1] = datapointsArray[i][1];
-    }
-
  }
 
 
+
  // ----- Object overrides --------------------------------------------------
 
 
  @Override
@@ -214,19 +159,9 @@ public class GangliaMetric {
  public static class TemporalMetric {
    private Number m_value;
    private Number m_time;
-    private boolean isInvalid;
-
-    public boolean isIsInvalid() {
-      return isInvalid;
-    }
 
 
-    public TemporalMetric(String value, Number time) {
-      isInvalid = false;
-      try{
-        m_value = convertToNumber(value);
-      } catch (NumberFormatException e) {
-        isInvalid = true;
-      }
+    public TemporalMetric(Number value, Number time) {
+      m_value = value;
      m_time = time;
    }
 
 
@@ -237,21 +172,5 @@ public class GangliaMetric {
    public Number getTime() {
      return m_time;
    }
-    
-    private Number convertToNumber(String s) throws NumberFormatException {
-      Number res;
-      if(s.contains(".")){
-        Double d = Double.parseDouble(s);
-        if(d.isNaN() || d.isInfinite()){
-          throw new NumberFormatException(s);
-        } else {
-          res = d;
-        } 
-      } else {
-        res = Long.parseLong(s);
-      }
-      return res;
-    }
-    
  }
}

+ 10 - 3
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java

@@ -413,13 +413,20 @@ public abstract class GangliaPropertyProvider extends AbstractPropertyProvider {
 
 
          String val = reader.readLine();
          while(! val.equals("[AMBARI_DP_END]")) {
-            GangliaMetric.TemporalMetric tm = new GangliaMetric.TemporalMetric(val, time);
-            if (!tm.isIsInvalid()) listTemporalMetrics.add(tm);
+            listTemporalMetrics.add(
+                new GangliaMetric.TemporalMetric(convertToNumber(val), time));
            time += step;
            val = reader.readLine();
          }
 
 
-          metric.setDatapointsFromList(listTemporalMetrics);
+          //todo: change setter in GangliaMetric to take collection
+          Number[][] datapointsArray = new Number[listTemporalMetrics.size()][2];
+          for (int i = 0; i < listTemporalMetrics.size(); ++i) {
+            GangliaMetric.TemporalMetric m = listTemporalMetrics.get(i);
+            datapointsArray[i][0] = m.getValue();
+            datapointsArray[i][1] = m.getTime();
+          }
+          metric.setDatapoints(datapointsArray);
 
 
          ResourceKey key = new ResourceKey(metric.getHost_name(), metric.getCluster_name());
          Set<Resource> resourceSet = resources.get(key);
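
The convertToNumber helper called in the new loop is not part of this hunk; presumably it keeps the parsing behavior that was dropped from GangliaMetric.TemporalMetric above. A minimal sketch of such a parser, modeled on the removed code (the NaN/infinity rejection mirrors that version and is an assumption here):

    // Parse a Ganglia datapoint: integral strings become Long, decimal strings Double.
    private Number convertToNumber(String s) throws NumberFormatException {
      if (s.contains(".")) {
        Double d = Double.parseDouble(s);
        if (d.isNaN() || d.isInfinite()) {
          throw new NumberFormatException(s); // reject non-finite values, as the old code did
        }
        return d;
      }
      return Long.parseLong(s);
    }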

+ 0 - 26
ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java

@@ -97,18 +97,6 @@ public class RoleCommandOrder {
    // Starts
    addDependency(Role.SECONDARY_NAMENODE, RoleCommand.START, Role.NAMENODE,
        RoleCommand.START);
-    addDependency(Role.RESOURCEMANAGER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.RESOURCEMANAGER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.NODEMANAGER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.NODEMANAGER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.HISTORYSERVER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.HISTORYSERVER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
    addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.ZOOKEEPER_SERVER,
        RoleCommand.START);
    addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.NAMENODE,
@@ -159,8 +147,6 @@ public class RoleCommandOrder {
        Role.JOBTRACKER, RoleCommand.START);
    addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
        Role.TASKTRACKER, RoleCommand.START);
-    addDependency(Role.RESOURCEMANAGER_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.RESOURCEMANAGER, RoleCommand.START);
    addDependency(Role.OOZIE_SERVICE_CHECK, RoleCommand.EXECUTE,
        Role.OOZIE_SERVER, RoleCommand.START);
    addDependency(Role.WEBHCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
@@ -202,22 +188,10 @@ public class RoleCommandOrder {
        Role.JOBTRACKER, RoleCommand.STOP);
    addDependency(Role.NAMENODE, RoleCommand.STOP,
        Role.TASKTRACKER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.RESOURCEMANAGER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.NODEMANAGER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.HISTORYSERVER, RoleCommand.STOP);
    addDependency(Role.DATANODE, RoleCommand.STOP,
        Role.JOBTRACKER, RoleCommand.STOP);
    addDependency(Role.DATANODE, RoleCommand.STOP,
        Role.TASKTRACKER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.RESOURCEMANAGER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.NODEMANAGER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.HISTORYSERVER, RoleCommand.STOP);
 
 
    addDependency(Role.SECONDARY_NAMENODE, RoleCommand.UPGRADE,
        Role.NAMENODE, RoleCommand.UPGRADE);

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java

@@ -73,7 +73,7 @@ public class HostRoleCommandDAO {
  public List<HostRoleCommandEntity> findByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery(
        "SELECT DISTINCT task FROM HostRoleCommandEntity task " +
-            "WHERE task.requestId IN ?1 AND task.taskId IN ?2 " +
+            "WHERE task.requestId IN ?1 OR task.taskId IN ?2 " +
            "ORDER BY task.taskId", HostRoleCommandEntity.class
    );
    return daoUtils.selectList(query, requestIds, taskIds);
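
With OR, a task row is selected when it matches either collection, so callers can mix whole requests and individual tasks in one query. A usage sketch against the signature above (the ids are illustrative):

    // every task belonging to request 12, plus task 42 regardless of its request
    List<HostRoleCommandEntity> tasks = hostRoleCommandDAO.findByRequestAndTaskIds(
        Collections.singletonList(12L), Collections.singletonList(42L));

This pairs with the controller change above, where each TaskStatusRequest now contributes a task id or, failing that, a request id.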

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProvider.java

@@ -28,6 +28,7 @@ import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
 import org.springframework.security.core.userdetails.UsernameNotFoundException;
 import org.springframework.security.ldap.DefaultSpringSecurityContextSource;
+import org.springframework.security.ldap.authentication.BindAuthenticator;
 import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
 import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
 
 
@@ -97,7 +98,7 @@ public class AmbariLdapAuthenticationProvider implements AuthenticationProvider
 
 
      FilterBasedLdapUserSearch userSearch = new FilterBasedLdapUserSearch(userSearchBase, userSearchFilter, springSecurityContextSource);

-      AmbariLdapBindAuthenticator bindAuthenticator = new AmbariLdapBindAuthenticator(springSecurityContextSource, configuration);
+      BindAuthenticator bindAuthenticator = new BindAuthenticator(springSecurityContextSource);
      bindAuthenticator.setUserSearch(userSearch);

      LdapAuthenticationProvider authenticationProvider = new LdapAuthenticationProvider(bindAuthenticator, authoritiesPopulator);

+ 18 - 71
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java

@@ -33,7 +33,7 @@ import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;
 import java.util.Collection;

 /**
- * Provides authorities population for LDAP user from LDAP catalog
+ * Provides authorities population for LDAP user from local DB
  */
 public class AmbariLdapAuthoritiesPopulator implements LdapAuthoritiesPopulator {
   private static final Logger log = LoggerFactory.getLogger(AmbariLdapAuthoritiesPopulator.class);
@@ -43,8 +43,6 @@ public class AmbariLdapAuthoritiesPopulator implements LdapAuthoritiesPopulator
  UserDAO userDAO;
  RoleDAO roleDAO;
 
 
-  private static final String AMBARI_ADMIN_LDAP_ATTRIBUTE_KEY = "ambari_admin";
-
  @Inject
  public AmbariLdapAuthoritiesPopulator(Configuration configuration, AuthorizationHelper authorizationHelper,
                                        UserDAO userDAO, RoleDAO roleDAO) {
@@ -70,80 +68,29 @@ public class AmbariLdapAuthoritiesPopulator implements LdapAuthoritiesPopulator
      newUser.setLdapUser(true);
      newUser.setUserName(username);
 
 
-      //Adding a default "user" role
-      addRole(newUser, configuration.getConfigsMap().
-          get(Configuration.USER_ROLE_NAME_KEY));
-    }
-
-    user = userDAO.findLdapUserByName(username);
+      String roleName = (configuration.getConfigsMap().get(Configuration.USER_ROLE_NAME_KEY));
+      log.info("Using default role name " + roleName);
 
 
-    //Adding an "admin" user role if user is a member of ambari administrators
-    // LDAP group
-    Boolean isAdmin =
-        (Boolean) userData.getObjectAttribute(AMBARI_ADMIN_LDAP_ATTRIBUTE_KEY);
-    if ((isAdmin != null) && isAdmin) {
-      log.info("Adding admin role to LDAP user " + username);
-      addRole(user, configuration.getConfigsMap().
-          get(Configuration.ADMIN_ROLE_NAME_KEY));
-    } else {
-      removeRole(user, configuration.getConfigsMap().
-          get(Configuration.ADMIN_ROLE_NAME_KEY));
-    }
-
-    user = userDAO.findLdapUserByName(username);
-    return authorizationHelper.convertRolesToAuthorities(user.getRoleEntities());
-  }
+      RoleEntity role = roleDAO.findByName(roleName);
 
 
-  /**
-   * Adds role to user's role entities
-   * Adds user to roleName's user entities
-   *
-   * @param user - the user entity to be modified
-   * @param roleName - the role to add to user's roleEntities
-   */
-  private void addRole(UserEntity user, String roleName) {
-    log.info("Using default role name " + roleName);
-
-    RoleEntity roleEntity = roleDAO.findByName(roleName);
-
-    if (roleEntity == null) {
-      log.info("Role " + roleName + " not present in local DB - creating");
-      roleEntity = new RoleEntity();
-      roleEntity.setRoleName(roleName);
-      roleDAO.create(roleEntity);
-      roleEntity = roleDAO.findByName(roleEntity.getRoleName());
-    }
+      if (role == null) {
+        log.info("Role " + roleName + " not present in local DB - creating");
+        role = new RoleEntity();
+        role.setRoleName(roleName);
+        roleDAO.create(role);
+        role = roleDAO.findByName(role.getRoleName());
+      }
 
 
-    UserEntity userEntity = userDAO.findLdapUserByName(user.getUserName());
-    if (userEntity == null) {
-      userDAO.create(user);
-      userEntity = userDAO.findLdapUserByName(user.getUserName());
-    }
+      userDAO.create(newUser);
 
 
-    if (!userEntity.getRoleEntities().contains(roleEntity)) {
-      userEntity.getRoleEntities().add(roleEntity);
-      roleEntity.getUserEntities().add(userEntity);
-      roleDAO.merge(roleEntity);
-      userDAO.merge(userEntity);
-    }
-  }
+      user = userDAO.findLdapUserByName(newUser.getUserName());
 
 
-  /**
-   * Remove role "roleName" from user "user"
-   * @param user
-   * @param roleName
-   */
-  private void removeRole(UserEntity user, String roleName) {
-    UserEntity userEntity = userDAO.findByPK(user.getUserId());
-    RoleEntity roleEntity = roleDAO.findByName(roleName);
-
-    if (userEntity.getRoleEntities().contains(roleEntity)) {
-      log.info("Removing admin role from LDAP user " + user.getUserName());
-      userEntity.getRoleEntities().remove(roleEntity);
-      roleEntity.getUserEntities().remove(userEntity);
-      userDAO.merge(userEntity);
-      roleDAO.merge(roleEntity);
+      user.getRoleEntities().add(role);
+      role.getUserEntities().add(user);
+      roleDAO.merge(role);
+      userDAO.merge(user);
    }
 
 
+    return authorizationHelper.convertRolesToAuthorities(user.getRoleEntities());
  }
}

+ 0 - 133
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapBindAuthenticator.java

@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.springframework.ldap.core.AttributesMapper;
-import org.springframework.ldap.core.DirContextOperations;
-import org.springframework.ldap.core.LdapTemplate;
-import org.springframework.ldap.core.support.BaseLdapPathContextSource;
-import org.springframework.security.core.Authentication;
-import org.springframework.security.ldap.authentication.BindAuthenticator;
-
-import java.util.*;
-import javax.naming.*;
-import javax.naming.directory.Attributes;
-
-
-/**
- * An authenticator which binds as a user and checks if user should get ambari
- * admin authorities according to LDAP group membership
- */
-public class AmbariLdapBindAuthenticator extends BindAuthenticator {
-
-  private Configuration configuration;
-
-  private static final String AMBARI_ADMIN_LDAP_ATTRIBUTE_KEY = "ambari_admin";
-
-  public AmbariLdapBindAuthenticator(BaseLdapPathContextSource contextSource,
-                                     Configuration configuration) {
-    super(contextSource);
-    this.configuration = configuration;
-  }
-
-  @Override
-  public DirContextOperations authenticate(Authentication authentication) {
-
-    DirContextOperations user = super.authenticate(authentication);
-
-    return setAmbariAdminAttr(user);
-  }
-
-  /**
-   *  Checks whether user is a member of ambari administrators group in LDAP. If
-   *  yes, sets user's ambari_admin attribute to true
-   * @param user
-   * @return
-   */
-  private DirContextOperations setAmbariAdminAttr(DirContextOperations user) {
-    LdapServerProperties ldapServerProperties =
-        configuration.getLdapServerProperties();
-
-    String baseDn = ldapServerProperties.getBaseDN().toLowerCase();
-    String groupBase = ldapServerProperties.getGroupBase().toLowerCase();
-    String groupObjectClass = ldapServerProperties.getGroupObjectClass();
-    String groupMembershipAttr = ldapServerProperties.getGroupMembershipAttr();
-    String adminGroupMappingRules =
-        ldapServerProperties.getAdminGroupMappingRules();
-    final String groupNamingAttribute =
-        ldapServerProperties.getGroupNamingAttr();
-    String groupSearchFilter = ldapServerProperties.getGroupSearchFilter();
-
-    //If groupBase is set incorrectly or isn't set - search in BaseDn
-    int indexOfBaseDn = groupBase.indexOf(baseDn);
-    groupBase = indexOfBaseDn <= 0 ? "" : groupBase.substring(0,indexOfBaseDn - 1);
-
-    StringBuilder filterBuilder = new StringBuilder();
-
-    filterBuilder.append("(&(");
-    filterBuilder.append(groupMembershipAttr);
-    filterBuilder.append("=");
-    filterBuilder.append(user.getNameInNamespace());//DN
-
-    if ((groupSearchFilter == null) || groupSearchFilter.equals("") ) {
-      //If groupSearchFilter is not specified, build it from other authorization
-      // group properties
-      filterBuilder.append(")(objectclass=");
-      filterBuilder.append(groupObjectClass);
-      filterBuilder.append(")(|");
-      String[] adminGroupMappingRegexs = adminGroupMappingRules.split(",");
-      for (String adminGroupMappingRegex : adminGroupMappingRegexs) {
-        filterBuilder.append("(");
-        filterBuilder.append(groupNamingAttribute);
-        filterBuilder.append("=");
-        filterBuilder.append(adminGroupMappingRegex);
-        filterBuilder.append(")");
-      }
-      filterBuilder.append(")");
-    } else {
-      filterBuilder.append(")");
-      filterBuilder.append(groupSearchFilter);
-    }
-    filterBuilder.append(")");
-
-    AttributesMapper attributesMapper = new AttributesMapper() {
-      public Object mapFromAttributes(Attributes attrs)
-          throws NamingException {
-        return attrs.get(groupNamingAttribute).get();
-      }
-    };
-
-    LdapTemplate ldapTemplate = new LdapTemplate((getContextSource()));
-    ldapTemplate.setIgnorePartialResultException(true);
-    ldapTemplate.setIgnoreNameNotFoundException(true);
-
-    List<String> ambariAdminGroups = ldapTemplate.search(
-        groupBase,filterBuilder.toString(),attributesMapper);
-
-    //user has admin role granted, if user is a member of at least 1 group,
-    // which matches the rules in configuration
-    if (ambariAdminGroups.size() > 0) {
-      user.setAttributeValue(AMBARI_ADMIN_LDAP_ATTRIBUTE_KEY, true);
-    }
-
-    return user;
-  }
-
-}

+ 0 - 10
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java

@@ -205,11 +205,6 @@ public class Users {
  public synchronized void addRoleToUser(User user, String role)
      throws AmbariException {
 
 
-    if (userDAO.findLdapUserByName(user.getUserName()) != null) {
-      LOG.warn("Trying to add a role to the LDAP user"
-          + ", user=" + user.getUserName());
-      throw new AmbariException("Roles are not editable for LDAP users");
-    }
 
 
    UserEntity userEntity = userDAO.findByPK(user.getUserId());
    if (userEntity == null) {
@@ -239,11 +234,6 @@ public class Users {
  public synchronized void removeRoleFromUser(User user, String role)
      throws AmbariException {
 
 
-    if (userDAO.findLdapUserByName(user.getUserName()) != null) {
-      LOG.warn("Trying to add a role to the LDAP user"
-          + ", user=" + user.getUserName());
-      throw new AmbariException("Roles are not editable for LDAP users");
-    }
 
 
    UserEntity userEntity = userDAO.findByPK(user.getUserId());
    if (userEntity == null) {

+ 0 - 3
ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java

@@ -86,9 +86,6 @@ public class StageUtils {
     componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
     componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
     componentToClusterInfoKeyMap.put("JOBTRACKER", "jtnode_host");
     componentToClusterInfoKeyMap.put("JOBTRACKER", "jtnode_host");
     componentToClusterInfoKeyMap.put("SNAMENODE", "snamenode_host");
     componentToClusterInfoKeyMap.put("SNAMENODE", "snamenode_host");
-    componentToClusterInfoKeyMap.put("RESOURCEMANAGER", "rm_host");
-    componentToClusterInfoKeyMap.put("NODEMANAGER", "nm_hosts");
-    componentToClusterInfoKeyMap.put("HISTORYSERVER", "hs_host");
     componentToClusterInfoKeyMap.put("ZOOKEEPER_SERVER", "zookeeper_hosts");
     componentToClusterInfoKeyMap.put("ZOOKEEPER_SERVER", "zookeeper_hosts");
     componentToClusterInfoKeyMap.put("HBASE_MASTER", "hbase_master_hosts");
     componentToClusterInfoKeyMap.put("HBASE_MASTER", "hbase_master_hosts");
     componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
     componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");

+ 0 - 76
ambari-server/src/main/python/ambari-server.py

@@ -144,11 +144,6 @@ OS_TYPE_PROPERTY = "server.os_type"
JDK_DOWNLOAD_CMD = "curl --create-dirs -o {0} {1}"
JDK_DOWNLOAD_SIZE_CMD = "curl -I {0}"
 
 
-#JCE Policy files
-JCE_POLICY_FILENAME = "jce_policy-6.zip"
-JCE_DOWNLOAD_CMD = "curl -o {0} {1}"
-JCE_MIN_FILESIZE = 5000
-
def configure_pg_hba_ambaridb_users():
  args = optparse.Values()
  configure_postgres_username_password(args)
@@ -569,10 +564,6 @@ def download_jdk(args):
  if args.java_home and os.path.exists(args.java_home):
    print_warning_msg("JAVA_HOME " + args.java_home
                    + " must be valid on ALL hosts")
-    print_warning_msg("Please make sure the JCE Unlimited Strength "
-                      "Jurisdiction Policy Files 6, "
-                      "are dwonloaded on all "
-                      "hosts")
    write_property(JAVA_HOME_PROPERTY, args.java_home)
    return 0
 
 
@@ -656,75 +647,8 @@ def download_jdk(args):
      format(JDK_INSTALL_DIR, jdk_version)
  write_property(JAVA_HOME_PROPERTY, "{0}/{1}".
      format(JDK_INSTALL_DIR, jdk_version))
-  jce_download = download_jce_policy(properties, ok)
-  if (jce_download == -1):
-    print "JCE Policy files are required for secure HDP setup. Please ensure " \
-          " all hosts have the JCE unlimited strength policy 6, files."
  return 0
 
 
-def download_jce_policy(properties, accepted_bcl):
-  try:
-    jce_url = properties['jce_policy.url']
-    resources_dir = properties['resources.dir']
-  except (KeyError), e:
-    print 'Property ' + str(e) + ' is not defined in properties file'
-    return -1
-  dest_file = resources_dir + os.sep + JCE_POLICY_FILENAME
-  if not os.path.exists(dest_file):
-    print 'Downloading JCE Policy archive from ' + jce_url + ' to ' + dest_file
-    try:
-      size_command = JDK_DOWNLOAD_SIZE_CMD.format(jce_url);
-      #Get Header from url,to get file size then
-      retcode, out, err = run_os_command(size_command)
-      if out.find("Content-Length") == -1:
-        print "Request header doesn't contain Content-Length";
-        return -1
-      start_with = int(out.find("Content-Length") + len("Content-Length") + 2)
-      end_with = out.find("\r\n", start_with)
-      src_size = int(out[start_with:end_with])
-      print_info_msg('JCE zip distribution size is ' + str(src_size) + 'bytes')
-      file_exists = os.path.isfile(dest_file)
-      file_size = -1
-      if file_exists:
-        file_size = os.stat(dest_file).st_size
-      if file_exists and file_size == src_size:
-        print_info_msg("File already exists")
-      else:
-        #BCL license before download
-        jce_download_cmd = JCE_DOWNLOAD_CMD.format(dest_file, jce_url)
-        print_info_msg("JCE download cmd: " + jce_download_cmd)
-        if (accepted_bcl == True):
-          retcode, out, err = run_os_command(jce_download_cmd)
-          if retcode == 0:
-            print 'Successfully downloaded JCE Policy archive to ' + dest_file
-          else:
-            return -1
-        else:
-          ok = get_YN_input("To download the JCE Policy archive you must "
-                            "accept the license terms found at "
-                            "http://www.oracle.com/technetwork/java/javase"
-                            "/terms/license/index.html"
-                            "Not accepting might result in failure when "
-                            "setting up HDP security. \nDo you accept the "
-                            "Oracle Binary Code License Agreement [y/n] (y)? ", True)
-          if (ok == True):
-            retcode, out, err = run_os_command(jce_download_cmd)
-            if retcode == 0:
-              print 'Successfully downloaded JCE Policy archive to ' + dest_file
-          else:
-            return -1
-    except Exception, e:
-      print_error_msg('Failed to download JCE Policy archive: ' + str(e))
-      return -1
-    downloaded_size = os.stat(dest_file).st_size
-    if downloaded_size != src_size or downloaded_size < JCE_MIN_FILESIZE:
-      print_error_msg('Size of downloaded JCE Policy archive is '
-                      + str(downloaded_size) + ' bytes, it is probably \
-                    damaged or incomplete')
-      return -1
-  else:
-    print "JCE Policy archive already exists, using " + dest_file
-
class RetCodeException(Exception): pass
 
 
def install_jdk(dest_file):

+ 0 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/hdfs-site.xml

@@ -412,14 +412,4 @@ don't exist, they will be created with this permission.</description>
  <description>Number of failed disks datanode would tolerate</description>
</property>
 
 
-  <property>
-    <name>dfs.namenode.check.stale.datanode</name>
-    <value>true</value>
-    <description>
-      With this setting, the datanodes that have not replied to the heartbeat
-      for more than 30s (i.e. in a stale state) are used for reads only if all
-      other remote replicas have failed.
-    </description>
-  </property>
-
</configuration>

+ 0 - 15
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml

@@ -36,21 +36,6 @@
    <value></value>
    <description>Secondary NameNode.</description>
  </property>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>Resource Manager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of Node Manager Hosts.</description>
-  </property>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
  <property>
    <name>fs_checkpoint_dir</name>
    <value>/hadoop/hdfs/namesecondary</value>

+ 112 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/capacity-scheduler.xml

@@ -0,0 +1,112 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run 
+      application masters i.e. controls number of concurrent running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues capacity should add up to their parent queue's capacity
+      or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue i.e. change sub-queue 
+      allocations.
+    </description>
+  </property>
+
+</configuration>
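
Per the descriptions above, sibling queue capacities must add up to their parent queue's capacity (100 for root here); a hypothetical sketch of splitting root between two queues (the batch queue name and percentages are illustrative, not part of this stack definition):

    <property>
      <name>yarn.scheduler.capacity.root.queues</name>
      <value>default,batch</value>
    </property>
    <property>
      <name>yarn.scheduler.capacity.root.default.capacity</name>
      <value>70</value>
    </property>
    <property>
      <name>yarn.scheduler.capacity.root.batch.capacity</name>
      <value>30</value>
    </property>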

+ 0 - 18
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-site.xml

@@ -528,22 +528,4 @@ user)</description>
  <description>Comma separated list of queues configured for this jobtracker.</description>
</property>
 
 
-<property>
-  <name>mapreduce.shuffle.port</name>
-  <value>8081</value>
-  <description>Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobhistory.intermediate-done-dir</name>
-  <value>/mr-history/tmp</value>
-  <description>Directory where history files are written by MapReduce jobs.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobhistory.done-dir</name>
-  <value>/mr-history/done</value>
-  <description>Directory where history files are managed by the MR JobHistory Server.</description>
-</property>
-
</configuration>

+ 172 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/yarn-site.xml

@@ -0,0 +1,172 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>TODO-RMNODE-HOSTNAME:8025</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>TODO-RMNODE-HOSTNAME:8030</value>
+  </property>
+  
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>TODO-RMNODE-HOSTNAME:8050</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>TODO-RMNODE-HOSTNAME:8141</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>1024</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>8192</value>
+  </property>
+
+<!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>TODO-YARN-LOCAL-DIR</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>8192</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+    setting memory limits for containers. Container allocations are
+    expressed in terms of physical memory, and virtual memory usage
+    is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+ 
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce.shuffle</value>
+    <description>Auxiliary services of the NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>TODO-YARN-LOG-DIR</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>The interval, in milliseconds, for which the node manager
+    waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-seconds</name>
+    <value>604800</value>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value> 
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>36000</value>
+  </property>
+
+
+</configuration>
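
The memory settings above interact as follows: with yarn.scheduler.minimum-allocation-mb at 1024 and yarn.nodemanager.resource.memory-mb at 8192, a NodeManager can host at most 8192 / 1024 = 8 minimum-size containers, and the 2.1 vmem-pmem ratio lets an 8192 MB container use roughly 8192 x 2.1 = 17203 MB of virtual memory before enforcement. The override below is a hedged sketch for a hypothetical node with 24 GB reserved for containers; the values are illustrative, not part of this change:

  <!-- Hypothetical overrides for a node with 24 GB usable for containers. -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>24576</value>  <!-- 24576 / 1024 = up to 24 minimum-size containers -->
  </property>

  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>24576</value>  <!-- a single container may claim the whole node -->
  </property>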

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/TEZ/metainfo.xml

@@ -17,12 +17,12 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Tez is the next generation Hadoop Query Processing framework written on top of YARN</comment>
+    <comment>This is a comment for the TEZ service</comment>
     <version>0.1.0.22-1</version>
 
     <components>
         <component>
-            <name>TEZ_CLIENT</name>
+            <name>TEZ</name>
             <category>CLIENT</category>
         </component>
     </components>

+ 39 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/YARN/configuration/mapred-queue-acls.xml

@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>
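
Both ACLs above are set to "*", which grants access to all users. Hadoop queue ACLs also accept a "user1,user2 group1,group2" form (users, then a space, then groups). The sketch below restricts submission to the default queue; the user and group names are hypothetical, not part of this change:

  <!-- Hypothetical: only user "alice" and members of group "analysts" may submit. -->
  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <value>alice analysts</value>
  </property>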

+ 531 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/YARN/configuration/mapred-site.xml

@@ -0,0 +1,531 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.sort.mb</name>
+    <value></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.record.percent</name>
+    <value>.2</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+
+<!-- map/reduce properties -->
+
+<property>
+  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <value>250</value>
+  <description>Normally, this is the amount of time before killing
+  processes, and the recommended default is 5 seconds - a value of
+  5000 here.  In this case, we are using it solely to blast tasks before
+  killing them, and killing them very quickly (1/4 second) to guarantee
+  that we do not leave VMs around for later jobs.
+  </description>
+</property>
+
+  <property>
+    <name>mapred.job.tracker.handler.count</name>
+    <value>50</value>
+    <description>
+    The number of server threads for the JobTracker. This should be roughly
+    4% of the number of tasktracker nodes.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>/mapred/system</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.http.address</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <!-- cluster specific -->
+    <name>mapred.local.dir</name>
+    <value></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+  <name>mapreduce.cluster.administrators</name>
+  <value> hadoop</value>
+  </property>
+
+  <property>
+    <name>mapred.reduce.parallel.copies</name>
+    <value>30</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>tasktracker.http.threads</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <name>mapred.map.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.slowstart.completed.maps</name>
+    <value>0.05</value>
+  </property>
+
+  <property>
+    <name>mapred.inmem.merge.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+  for the in-memory merge process. When we accumulate the threshold number of
+  files, we initiate the in-memory merge and spill to disk. A value of 0 or
+  less indicates that there is no threshold, and the merge instead depends
+  only on the ramfs's memory consumption.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+  initiated, expressed as a percentage of the total memory allocated to
+  storing in-memory map outputs, as defined by
+  mapred.job.shuffle.input.buffer.percent.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+  size to storing map outputs during the shuffle.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.map.output.compression.codec</name>
+    <value></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+<property>
+  <name>mapred.output.compression.type</name>
+  <value>BLOCK</value>
+  <description>If the job outputs are to be compressed as SequenceFiles, how should
+               they be compressed? Should be one of NONE, RECORD or BLOCK.
+  </description>
+</property>
+
+
+  <property>
+    <name>mapred.jobtracker.completeuserjobs.maximum</name>
+    <value>5</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.taskScheduler</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.restart.recover</name>
+    <value>false</value>
+    <description>"true" to enable (job) recovery upon restart,
+               "false" to start afresh
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>The percentage of memory- relative to the maximum heap size- to
+  retain map outputs during the reduce. When the shuffle is concluded, any
+  remaining map outputs in memory must consume less than this threshold before
+  the reduce can begin.
+  </description>
+  </property>
+
+ <property>
+  <name>mapreduce.reduce.input.limit</name>
+  <value>10737418240</value>
+  <description>The limit on the input size of the reduce. (This value
+  is 10 GB.)  If the estimated input size of the reduce is greater than
+  this value, the job is failed. A value of -1 means that there is no limit
+  set. </description>
+</property>
+
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapred.compress.map.output</name>
+    <value></value>
+  </property>
+
+
+  <property>
+    <name>mapred.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+  terminated if it neither reads an input, writes an output, nor
+  updates its status string.
+  </description>
+  </property>
+
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.tracker.task-controller</name>
+    <value></value>
+   <description>
+     TaskController which is used to launch and manage task execution.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+
+  <property>
+    <name>mapred.child.java.opts</name>
+    <value></value>
+
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.map.memory.mb</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.reduce.memory.mb</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.job.map.memory.mb</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.memory.mb</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.map.memory.mb</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.reduce.memory.mb</name>
+    <value></value>
+  </property>
+
+<property>
+  <name>mapred.hosts</name>
+  <value></value>
+</property>
+
+<property>
+  <name>mapred.hosts.exclude</name>
+  <value></value>
+</property>
+
+<property>
+  <name>mapred.max.tracker.blacklists</name>
+  <value>16</value>
+  <description>
+    If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
+  </description>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.path</name>
+  <value></value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.interval</name>
+  <value>135000</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.timeout</name>
+  <value>60000</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.active</name>
+  <value>false</value>
+  <description>Indicates if persistency of job status information is
+  active or not.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <value>1</value>
+  <description>The number of hours job status information is persisted in DFS.
+    The job status information will be available after it drops off the memory
+    queue and between jobtracker restarts. With a zero value the job status
+    information is not persisted at all in DFS.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.dir</name>
+  <value></value>
+  <description>The directory where the job status information is persisted
+   in a file system to be available after it drops off the memory queue and
+   between jobtracker restarts.
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.check</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.interval</name>
+  <value>0</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.history.completed.location</name>
+  <value>/mapred/history/done</value>
+  <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.task.maxvmem</name>
+  <value></value>
+  <final>true</final>
+   <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <value></value>
+  <final>true</final>
+  <description>The maximum number of tasks for a single job.
+  A value of -1 indicates that there is no maximum.  </description>
+</property>
+
+<property>
+  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>mapred.userlog.retain.hours</name>
+  <value></value>
+</property>
+
+<property>
+  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <value>1</value>
+  <description>
+    How many tasks to run per jvm. If set to -1, there is no limit
+  </description>
+  <final>true</final>
+</property>
+
+<property>
+  <name>mapreduce.jobtracker.kerberos.principal</name>
+  <value></value>
+  <description>
+      JT user name key.
+ </description>
+</property>
+
+<property>
+  <name>mapreduce.tasktracker.kerberos.principal</name>
+   <value></value>
+  <description>
+       tt user name key. "_HOST" is replaced by the host name of the task tracker.
+   </description>
+</property>
+
+
+  <property>
+    <name>hadoop.job.history.user.location</name>
+    <value>none</value>
+    <final>true</final>
+  </property>
+
+
+ <property>
+   <name>mapreduce.jobtracker.keytab.file</name>
+   <value></value>
+   <description>
+       The keytab for the jobtracker principal.
+   </description>
+
+</property>
+
+ <property>
+   <name>mapreduce.tasktracker.keytab.file</name>
+   <value></value>
+    <description>The filename of the keytab for the task tracker</description>
+ </property>
+
+ <property>
+   <name>mapreduce.jobtracker.staging.root.dir</name>
+   <value>/user</value>
+ <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+   name. It is a path in the default file system.</description>
+ </property>
+
+ <property>
+      <name>mapreduce.tasktracker.group</name>
+      <value>hadoop</value>
+      <description>The group that the task controller uses for accessing the task-controller binary. The mapred user must be a member; other users should *not* be members.</description>
+
+ </property>
+
+  <property>
+    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
+    <value>50000000</value>
+    <final>true</final>
+     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+    initialization.
+   </description>
+  </property>
+  <property>
+    <name>mapreduce.history.server.embedded</name>
+    <value>false</value>
+    <description>Should the job history server be embedded within the JobTracker
+    process?</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.history.server.http.address</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>Http address of the history server</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.kerberos.principal</name>
+    <!-- cluster variant -->
+  <value></value>
+    <description>Job history user name key. (must map to same user as JT
+user)</description>
+  </property>
+
+ <property>
+   <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+   <value></value>
+   <description>The keytab for the job history server principal.</description>
+ </property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+  <value>180</value>
+  <description>
+    3-hour sliding window (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+  <value>15</value>
+  <description>
+    15-minute bucket size (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.queue.names</name>
+  <value>default</value>
+  <description> Comma separated list of queues configured for this jobtracker.</description>
+</property>
+
+</configuration>
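
Several of the memory properties above ship with empty values because they are filled in per cluster. Their intended relationship: mapred.cluster.map.memory.mb is the size of one map slot, mapred.job.map.memory.mb is what a job requests per map task, and mapred.cluster.max.map.memory.mb caps that request. The sketch below uses hypothetical values, not part of this change:

  <!-- Hypothetical per-cluster values showing the intended relationships. -->
  <property>
    <name>mapred.cluster.map.memory.mb</name>
    <value>1536</value>  <!-- size of one map slot -->
  </property>

  <property>
    <name>mapred.job.map.memory.mb</name>
    <value>1536</value>  <!-- per-task request: exactly one slot -->
  </property>

  <property>
    <name>mapred.cluster.max.map.memory.mb</name>
    <value>6144</value>  <!-- ceiling: a task may request at most 4 slots (4 x 1536) -->
  </property>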

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml

@@ -160,7 +160,7 @@
   </property>
   <property>
     <name>zookeeper.session.timeout</name>
-    <value>60000</value>
+    <value></value>
     <description>ZooKeeper session timeout.
       HBase passes this to the zk quorum as suggested maximum time for a
       session (This setting becomes zookeeper's 'maxSessionTimeout').  See

+ 0 - 10
ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml

@@ -412,14 +412,4 @@ don't exist, they will be created with this permission.</description>
   <description>Number of failed disks datanode would tolerate</description>
 </property>
 
-  <property>
-    <name>dfs.namenode.check.stale.datanode</name>
-    <value>true</value>
-    <description>
-      With this setting, the datanodes that have not replied to the heartbeat
-      for more than 30s (i.e. in a stale state) are used for reads only if all
-      other remote replicas have failed.
-    </description>
-  </property>
-
 </configuration>

+ 0 - 94
ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java

@@ -18,7 +18,6 @@
 package org.apache.ambari.server.agent;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.eq;
@@ -167,99 +166,6 @@ public class TestHeartbeatMonitor {
     assertTrue("HeartbeatMonitor should not generate StatusCommands for host2 because it has no services", cmds.isEmpty());
     assertTrue("HeartbeatMonitor should not generate StatusCommands for host2 because it has no services", cmds.isEmpty());
   }
   }
 
 
-  @Test
-  public void testNoStatusCommandForClientComponents() throws Exception {
-    Clusters clusters = injector.getInstance(Clusters.class);
-    clusters.addHost(hostname1);
-    clusters.getHost(hostname1).setOsType("centos6");
-    clusters.getHost(hostname1).persist();
-    clusters.addHost(hostname2);
-    clusters.getHost(hostname2).setOsType("centos6");
-    clusters.getHost(hostname2).persist();
-    clusters.addCluster(clusterName);
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Set<String> hostNames = new HashSet<String>() {{
-      add(hostname1);
-      add(hostname2);
-    }};
-
-    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(cluster, "global",
-      new HashMap<String, String>() {{
-        put("a", "b");
-      }});
-    config.setVersionTag("version1");
-    cluster.addConfig(config);
-    cluster.addDesiredConfig(config);
-
-
-    clusters.mapHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
-      (hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost
-      (hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).
-      addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
-      (hostname1).persist();
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
-      (hostname2).persist();
-
-    ActionQueue aq = new ActionQueue();
-    ActionManager am = mock(ActionManager.class);
-    HeartbeatMonitor hm = new HeartbeatMonitor(clusters, aq, am,
-      heartbeatMonitorWakeupIntervalMS);
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-    Register reg = new Register();
-    reg.setHostname(hostname1);
-    reg.setResponseId(12);
-    reg.setTimestamp(System.currentTimeMillis() - 300);
-    reg.setAgentVersion(ambariMetaInfo.getServerVersion());
-    HostInfo hi = new HostInfo();
-    hi.setOS("Centos5");
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setHostname(hostname1);
-    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, "cool"));
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(12);
-    handler.handleHeartBeat(hb);
-
-    List<StatusCommand> cmds = hm.generateStatusCommands(hostname1);
-    assertTrue("HeartbeatMonitor should generate StatusCommands for host1",
-      cmds.size() == 3);
-    assertEquals("HDFS", cmds.get(0).getServiceName());
-    boolean containsDATANODEStatus = false;
-    boolean containsNAMENODEStatus = false;
-    boolean containsSECONDARY_NAMENODEStatus = false;
-    boolean containsHDFS_CLIENTStatus = false;
-    for (StatusCommand cmd : cmds) {
-      containsDATANODEStatus |= cmd.getComponentName().equals("DATANODE");
-      containsNAMENODEStatus |= cmd.getComponentName().equals("NAMENODE");
-      containsSECONDARY_NAMENODEStatus |= cmd.getComponentName().
-        equals("SECONDARY_NAMENODE");
-      containsHDFS_CLIENTStatus |= cmd.getComponentName().equals
-        ("HDFS_CLIENT");
-      assertTrue(cmd.getConfigurations().size() > 0);
-    }
-    assertTrue(containsDATANODEStatus);
-    assertTrue(containsNAMENODEStatus);
-    assertTrue(containsSECONDARY_NAMENODEStatus);
-    assertFalse(containsHDFS_CLIENTStatus);
-
-    cmds = hm.generateStatusCommands(hostname2);
-    assertTrue("HeartbeatMonitor should not generate StatusCommands for host2" +
-      " because it has only client components", cmds.isEmpty());
-  }
 
 
   @Test
   @Test
   public void testHeartbeatStateCommandsEnqueueing() throws AmbariException, InterruptedException,
+ 0 - 130
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -36,7 +36,6 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.ParentObjectNotFoundException;
-import org.apache.ambari.server.ObjectNotFoundException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.StackAccessException;
@@ -75,9 +74,7 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent
 import org.apache.ambari.server.utils.StageUtils;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import com.google.inject.Guice;
@@ -122,9 +119,6 @@ public class AmbariManagementControllerTest {
   private Users users;
   private EntityManager entityManager;
 
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
   @Before
   public void setup() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -6048,128 +6042,4 @@ public class AmbariManagementControllerTest {
     }
   }
 
-  @Test
-  public void testGetTasksByRequestId() throws AmbariException {
-    final long requestId1 = 1;
-    final long requestId2 = 2;
-    final String clusterName = "c1";
-    final String hostName1 = "h1";
-    final String context = "Test invocation";
-
-    clusters.addCluster(clusterName);
-    clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost(hostName1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost(hostName1).persist();
-
-    clusters.mapHostsToCluster(new HashSet<String>(){
-      {add(hostName1);}}, clusterName);
-
-
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(new Stage(requestId1, "/a1", clusterName, context));
-    stages.get(0).setStageId(1);
-    stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_MASTER,
-            RoleCommand.START,
-            new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-                    hostName1, System.currentTimeMillis(),
-                    new HashMap<String, String>()),
-            clusterName, "HBASE");
-
-    stages.add(new Stage(requestId1, "/a2", clusterName, context));
-    stages.get(1).setStageId(2);
-    stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
-            RoleCommand.START,
-            new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
-                    hostName1, System.currentTimeMillis(),
-                    new HashMap<String, String>()), clusterName, "HBASE");
-
-    stages.add(new Stage(requestId1, "/a3", clusterName, context));
-    stages.get(2).setStageId(3);
-    stages.get(2).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
-            RoleCommand.START,
-            new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
-                    hostName1, System.currentTimeMillis(),
-                    new HashMap<String, String>()), clusterName, "HBASE");
-
-
-    stages.add(new Stage(requestId2, "/a4", clusterName, context));
-    stages.get(3).setStageId(4);
-    stages.get(3).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
-            RoleCommand.START,
-            new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
-                    hostName1, System.currentTimeMillis(),
-                    new HashMap<String, String>()), clusterName, "HBASE");
-
-    stages.add(new Stage(requestId2, "/a5", clusterName, context));
-    stages.get(4).setStageId(5);
-    stages.get(4).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
-            RoleCommand.START,
-            new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
-                    hostName1, System.currentTimeMillis(),
-                    new HashMap<String, String>()), clusterName, "HBASE");
-
-    actionDB.persistActions(stages);
-
-    Set<TaskStatusRequest> taskStatusRequests;
-    Set<TaskStatusResponse> taskStatusResponses;
-
-    //check count of tasks by requestId1
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId1, null));
-      }
-    };
-    taskStatusResponses = controller.getTaskStatus(taskStatusRequests);
-    assertEquals(3, taskStatusResponses.size());
-
-    //check a taskId that requested by requestId1 and task id
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId1, 2L));
-      }
-    };
-    taskStatusResponses = controller.getTaskStatus(taskStatusRequests);
-    assertEquals(1, taskStatusResponses.size());
-    assertEquals(2L, taskStatusResponses.iterator().next().getTaskId());
-
-    //check count of tasks by requestId2
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId2, null));
-      }
-    };
-    taskStatusResponses = controller.getTaskStatus(taskStatusRequests);
-    assertEquals(2, taskStatusResponses.size());
-
-    //check a taskId that requested by requestId2 and task id
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId2, 5L));
-      }
-    };
-    taskStatusResponses = controller.getTaskStatus(taskStatusRequests);
-    assertEquals(5L, taskStatusResponses.iterator().next().getTaskId());
-
-    //verify that task from second request (requestId2) does not present in first request (requestId1)
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId1, 5L));
-      }
-    };
-    expectedException.expect(ObjectNotFoundException.class);
-    expectedException.expectMessage("Task resource doesn't exist.");
-    controller.getTaskStatus(taskStatusRequests);
-
-    //verify that task from first request (requestId1) does not present in second request (requestId2)
-    taskStatusRequests = new HashSet<TaskStatusRequest>(){
-      {
-        add(new TaskStatusRequest(requestId2, 2L));
-      }
-    };
-    expectedException.expect(ObjectNotFoundException.class);
-    expectedException.expectMessage("Task resource doesn't exist.");
-    controller.getTaskStatus(taskStatusRequests);
-  }
-
 }

+ 0 - 116
ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaMetricTest.java

@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-/**
- *
- * @author root
- */
-public class GangliaMetricTest {
-  
-
-
-  /**
-   * Test of setDatapoints method, of class GangliaMetric.
-   */
-  @Test
-  public void testSetDatapointsOfPercentValue() {
-    System.out.println("setDatapoints");
-    List<GangliaMetric.TemporalMetric> listTemporalMetrics =
-              new ArrayList<GangliaMetric.TemporalMetric>();
-    GangliaMetric instance = new GangliaMetric();
-    instance.setDs_name("dsName");
-    instance.setCluster_name("c1");
-    instance.setHost_name("localhost");
-    instance.setMetric_name("cpu_wio");
-    
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("111.0", new Long(1362440880)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("11.0", new Long(1362440881)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("100.0", new Long(1362440882)));
-    instance.setDatapointsFromList(listTemporalMetrics);
-    assertTrue(instance.getDatapoints().length == 2);
-  }
-
-  /**
-   * Test of setDatapoints method, of class GangliaMetric.
-   */
-  //@Test
-  public void testSetDatapointsOfgcTimeMillisValue() {
-    System.out.println("setDatapoints");
-    List<GangliaMetric.TemporalMetric> listTemporalMetrics =
-              new ArrayList<GangliaMetric.TemporalMetric>();
-    GangliaMetric instance = new GangliaMetric();
-    instance.setDs_name("dsName");
-    instance.setCluster_name("c1");
-    instance.setHost_name("localhost");
-    instance.setMetric_name("jvm.metrics.gcTimeMillis");
-    
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(1)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(2)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(3)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("111.0", new Long(4)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("11.0", new Long(5)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("100.0", new Long(6)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(7)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("11.0", new Long(8)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(9)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(10)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(11)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(12)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("11.0", new Long(13)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("100.0", new Long(14)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(15)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(16)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(17)));
-    listTemporalMetrics.add(new GangliaMetric.TemporalMetric("0.0", new Long(18)));
-    instance.setDatapointsFromList(listTemporalMetrics);
-    System.out.println(instance.toString());
-    assertTrue(instance.getDatapoints().length == 11);
-  }  
-  
-    /**
-   * Test of GangliaMetric.TemporalMetric constructor.
-   */
-  @Test
-  public void testTemporalMetricFineValue() {
-    System.out.println("GangliaMetric.TemporalMetric");
-    GangliaMetric.TemporalMetric tm;
-    tm = new GangliaMetric.TemporalMetric("100", new Long(1362440880));
-    assertFalse("GangliaMetric.TemporalMetric is valid", tm.isIsInvalid());
-  }
-
-    /**
-   * Test of GangliaMetric.TemporalMetric constructor.
-   */
-  @Test
-  public void testTemporalMetricIsNaNValue() {
-    System.out.println("GangliaMetric.TemporalMetric");
-    GangliaMetric.TemporalMetric tm;
-    tm = new GangliaMetric.TemporalMetric("any string", new Long(1362440880));
-    assertTrue("GangliaMetric.TemporalMetric is invalid", tm.isIsInvalid());
-  }
-  
-
-  
-}

+ 0 - 32
ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProviderTest.java

@@ -97,38 +97,6 @@ public class AmbariLdapAuthenticationProviderTest{
     Assert.assertTrue(auth == null);
   }
 
-  @Test
-  public void testLdapAdminGroupToRolesMapping() throws Exception {
-
-    Authentication authentication;
-
-    authentication =
-        new UsernamePasswordAuthenticationToken("allowedAdmin", "password");
-    Authentication result = authenticationProvider.authenticate(authentication);
-    assertTrue(result.isAuthenticated());
-
-    UserEntity allowedAdminEntity = userDAO.findLdapUserByName("allowedAdmin");
-
-    authentication =
-        new UsernamePasswordAuthenticationToken("allowedUser", "password");
-    authenticationProvider.authenticate(authentication);
-    UserEntity allowedUserEntity = userDAO.findLdapUserByName("allowedUser");
-
-
-    RoleEntity adminRole = roleDAO.findByName(
-        configuration.getConfigsMap().get(Configuration.ADMIN_ROLE_NAME_KEY));
-    RoleEntity userRole = roleDAO.findByName(
-        configuration.getConfigsMap().get(Configuration.USER_ROLE_NAME_KEY));
-
-
-    assertTrue(allowedAdminEntity.getRoleEntities().contains(userRole));
-    assertTrue(allowedAdminEntity.getRoleEntities().contains(adminRole));
-
-    assertTrue(allowedUserEntity.getRoleEntities().contains(userRole));
-    assertFalse(allowedUserEntity.getRoleEntities().contains(adminRole));
-
-
-  }
 
 
   @AfterClass
   public static void afterClass() {

+ 12 - 13
ambari-server/src/test/resources/test_api.sh

@@ -21,18 +21,18 @@ curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/ZOOKEEPER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/NAGIOS
-
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "localhost:8020"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "core-site", "tag": "version2", "properties" : { "fs.default.name" : "localhost:8020"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "core-site", "tag": "version1"}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "localhost:50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "localhost:51111"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://localhost:8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "localhost", "zookeeper.session.timeout": "60000" }}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}}}' http://localhost:8080/api/v1/clusters/c1
-
+curl -i -X POST -d '{"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "localhost:8020"}}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "localhost:50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "localhost:51111"}}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://localhost:8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "localhost", "zookeeper.session.timeout": "60000" }}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}' http://localhost:8080/api/v1/clusters/c1/configurations
+curl -i -X PUT -d '{"config": {"core-site": "version1", "hdfs-site": "version1", "global" : "version1" }}'  http://localhost:8080/api/v1/clusters/c1/services/HDFS
+curl -i -X PUT -d '{"config": {"core-site": "version1", "mapred-site": "version1"}}'  http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE
+curl -i -X PUT -d '{"config": {"hbase-site": "version1", "hbase-env": "version1"}}'  http://localhost:8080/api/v1/clusters/c1/services/HBASE
+curl -i -X PUT -d '{"config": {"nagios-global": "version2" }}'  http://localhost:8080/api/v1/clusters/c1/services/NAGIOS
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/NAMENODE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/DATANODE
@@ -60,7 +60,6 @@ curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_REGIONSERVER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_CLIENT
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/NAGIOS_SERVER
-curl -i -X PUT -d '{"Hosts": {"desired_config": {"type": "core-site", "tag": "version3", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}}}' http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST
 curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INIT
 #curl -i -X PUT  -d '{"ServiceInfo": {"state" : "STARTED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INSTALLED
 # http://localhost:8080/api/v1/clusters/c1/requests/2

+ 3 - 3
ambari-web/app/config.js

@@ -24,8 +24,8 @@ App.skipBootstrap = false;
 App.alwaysGoToInstaller = false;
 App.testEnableSecurity = true; // By default enable security is tested; turning it false tests disable security
 App.apiPrefix = '/api/v1';
-App.defaultStackVersion = 'HDP-1.3.0';
-App.defaultLocalStackVersion = 'HDPLocal-1.3.0';
+App.defaultStackVersion = 'HDP-1.2.1';
+App.defaultLocalStackVersion = 'HDPLocal-1.2.1';
 App.defaultJavaHome = '/usr/jdk/jdk1.6.0_31';
 App.timeout = 180000; // default AJAX timeout
 App.maxRetries = 3; // max number of retries for certain AJAX calls
@@ -50,7 +50,7 @@ App.supports = {
   hiveOozieExtraDatabases: false,
   multipleHBaseMasters: false,
   addMasters: false,
-  customizeSmokeTestUser: true,
+  customizeSmokeTestUser: false,
   hue: false,
   ldapGroupMapping: false
 };

+ 2 - 1
ambari-web/app/controllers/main/admin/user.js

@@ -39,10 +39,11 @@ App.MainAdminUserController = Em.Controller.extend({
 
 
       return;
     }
+    ;
 
 
     App.ModalPopup.show({
       header:Em.I18n.t('admin.users.delete.header').format(event.context.get('userName')),
-      body:Em.I18n.t('question.sure').format(''),
+      body:Em.I18n.t('question.sure'),
       primary:Em.I18n.t('yes'),
       secondary:Em.I18n.t('no'),
 

+ 2 - 2
ambari-web/pom.xml

@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../ambari-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
@@ -27,7 +27,7 @@
   <artifactId>ambari-web</artifactId>
   <packaging>pom</packaging>
   <name>Ambari Web</name>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
   <description>Ambari Web</description>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

+ 3 - 3
contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php

@@ -381,11 +381,11 @@ function hdp_mon_generate_response( $response_data )
         $pieces[0] = "HIVE";
         $pieces[0] = "HIVE";
         break;
         break;
       case "ZKSERVERS":
       case "ZKSERVERS":
-	    $pieces[0] = "ZOOKEEPER";
+	      $pieces[0] = "ZOOKEEPER";
         break;
       case "AMBARI":
-	    $pieces[0] = "AMBARI";
-      break;      
+	      $pieces[0] = "AMBARI";
+        break;
       case "NAGIOS":
       case "NAGIOS":
       case "HDFS":
       case "HDFS":
       case "MAPREDUCE":
       case "MAPREDUCE":

+ 2 - 50
docs/pom.xml

@@ -28,7 +28,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <groupId>org.apache.ambari</groupId>
-    <version>1.2.2-SNAPSHOT</version>
+    <version>1.2.0-SNAPSHOT</version>
     <artifactId>ambari</artifactId>
     <packaging>pom</packaging>
 
@@ -118,18 +118,6 @@
                 <role>PMC</role>
             </roles>
         </developer>
-        <developer>
-            <id>billie</id>
-            <name>Billie Rinaldi</name>
-            <email>billie@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>Committer</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
         <developer>
             <id>ddas</id>
             <name>Devaraj Das</name>
@@ -237,19 +225,7 @@
             <organization>
                 Hortonworks
             </organization>
-        </developer>
-        <developer>
-            <id>ncole</id>
-            <name>Nate Cole</name>
-            <email>ncole@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>Committer</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
+        </developer>              
         <developer>
             <id>ramya</id>
             <name>Ramya Sunil</name>
@@ -274,30 +250,6 @@
                 Hortonworks
             </organization>
         </developer>
-        <developer>
-            <id>smohanty</id>
-            <name>Sumit Mohanty</name>
-            <email>smohanty@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>Committer</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>swagle</id>
-            <name>Siddharth Wagle</name>
-            <email>swagle@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>Committer</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
         <developer>
             <id>srimanth</id>
             <name>Srimanth Gunturi</name>

+ 52 - 25
docs/src/site/apt/index.apt

@@ -15,20 +15,26 @@
 ~~
 Introduction
 
-  The Apache Ambari project is aimed at making Hadoop management simpler by developing software for provisioning, managing, and monitoring Apache Hadoop clusters.
-  Ambari provides an intuitive, easy-to-use Hadoop management web UI backed by its RESTful APIs.
+  Apache Ambari is a web-based tool for provisioning, managing, and monitoring Apache Hadoop clusters. The set of
+  Hadoop components that are currently supported by Ambari includes:

-  The set of Hadoop components that are currently supported by Ambari includes:
+  * {{{http://hadoop.apache.org/docs/hdfs} Apache Hadoop - HDFS}}

-  {{{http://hadoop.apache.org/docs/hdfs} HDFS}},
-  {{{http://hadoop.apache.org/docs/mapreduce} MapReduce}}
-  {{{http://hive.apache.org} Hive}},
-  {{{http://incubator.apache.org/hcatalog} HCatalog}},
-  {{{http://hbase.apache.org} HBase}},
-  {{{http://zookeeper.apache.org} ZooKeeper}},
-  {{{http://incubator.apache.org/oozie/} Oozie}},
-  {{{http://pig.apache.org} Pig}},
-  {{{http://sqoop.apache.org} Sqoop}}
+  * {{{http://hadoop.apache.org/docs/mapreduce} Apache Hadoop - MapReduce}}
+
+  * {{{http://hive.apache.org} Apache Hive}}
+
+  * {{{http://incubator.apache.org/hcatalog} Apache HCatalog}}
+
+  * {{{http://hbase.apache.org} Apache HBase}}
+
+  * {{{http://zookeeper.apache.org} Apache Zookeeper}}
+
+  * {{{http://incubator.apache.org/oozie/} Apache Oozie}}
+
+  * {{{http://pig.apache.org} Apache Pig}}
+
+  * {{{http://sqoop.apache.org} Apache Sqoop}}

  []

@@ -36,7 +42,7 @@ Introduction

  * Provision a Hadoop Cluster

-    * Ambari provides a step-by-step wizard for installing Hadoop services across any number of hosts.
+    * Ambari provides an easy-to-use, step-by-step wizard for installing Hadoop services across any number of hosts.

    * Ambari handles configuration of Hadoop services for the cluster.

@@ -55,31 +61,52 @@ Introduction
    * Ambari leverages {{{http://ganglia.sourceforge.net/} Ganglia}} for metrics collection.

    * Ambari leverages {{{http://www.nagios.org/} Nagios}} for system alerting and will send emails when your attention is needed (e.g., a node goes down, remaining disk space is low, etc).
+  
+  []
+
+Ambari Source
+
+  Follow the  {{{./1.2.0/installing-hadoop-using-ambari/content/index.html} installation guide for 1.2.0 (stable)}} or check out the work going on in {{{./whats-new.html} trunk}} for the upcoming 1.2.1 release.
+
+Roadmap
+
+  * Support for additional Operating Systems
+
+    * Ambari currently supports 64-bit RHEL/CentOS 5 + 6 and SLES 11

  []

-  Ambari enables Application Developers and System Integrators to:
+  * RESTful API for integration
+  
+    * Ambari will expose a unified, RESTful API to enable third-party applications to integrate
+      Hadoop cluster management and monitoring capabilities (1.2.0)
+
+  []

-  * Easily integrate Hadoop provisioning, management, and monitoring capabilities to their own applications with the {{{https://github.com/apache/ambari/blob/trunk/ambari-server/docs/api/v1/index.md} Ambari REST APIs}}.
+  * Granular configurations

-Getting Started with Ambari
+      * Ambari currently applies service configurations at the cluster-level. For more
+        flexibility, Ambari will allow for configurations in a more granular manner, such as
+        applying a set of configurations to a specific group of hosts.

-  Follow the {{{./1.2.2/installing-hadoop-using-ambari/content/index.html} installation guide for Ambari 1.2.2}}.
+  []

-  Note: Ambari currently supports 64-bit RHEL/CentOS 5 + 6 and SLES 11.
+  * Security

-Get Involved
+      * Installation of secure Hadoop clusters (Kerberos-based)

-  Visit the {{{https://cwiki.apache.org/confluence/display/AMBARI/Ambari} Ambari Wiki}} for design documents, roadmap, development guidelines, etc.
+      * Role-based user authentication, authorization, and auditing

-  The first {{{http://www.meetup.com/Apache-Ambari-User-Group} Ambari User Group Meetup}} took place on April 2 in Palo Alto, California, USA.  {{{http://www.meetup.com/Apache-Ambari-User-Group/events/109316812/} See the slides and WebEx session from the Meetup}}.  
+      * Support for LDAP and Active Directory (1.2.0)

-What's New?
+  []

-  Check out the work going on for the {{{./whats-new.html} upcoming 1.3.0 release}}.
+  * Visualization

-Disclaimer
+      * Interactive visualization of the current state of the cluster for a number of key metrics (1.2.0)

-  Apache Ambari is an effort undergoing incubation at The Apache Software Foundation (ASF) sponsored by the Apache Incubator PMC. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
+      * Interactive visualization of historical states of the cluster for a number of key metrics

+      * Interactive visualization of Pig, Hive, and MapReduce jobs (1.2.0)

+  []

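The index.apt rewrite above relies on APT (Almost Plain Text) markup, the format Maven uses for these site pages: a bullet item starts with "*", a "{{{url} label}}" construct renders a hyperlink, and a list must be closed with a bare "[]" line — which is why "[]" markers appear after each bullet group in the new page. A minimal illustrative sketch of those conventions (the URL and wording here are placeholders, not part of the commit):

  * {{{http://example.org} An example link}}

  * A plain second item

  []
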
+ 0 - 20
docs/src/site/apt/irc.apt

@@ -1,20 +0,0 @@
-~~ Licensed to the Apache Software Foundation (ASF) under one or more
-~~ contributor license agreements.  See the NOTICE file distributed with
-~~ this work for additional information regarding copyright ownership.
-~~ The ASF licenses this file to You under the Apache License, Version 2.0
-~~ (the "License"); you may not use this file except in compliance with
-~~ the License.  You may obtain a copy of the License at
-~~
-~~     http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~
-
-Ambari IRC Channel
-
- There is an IRC channel dedicated to ambari at irc.freenode.org. The name of the channel is #apacheambari.
- The IRC channel can be used for online discussion about ambari related stuff, but developers should be careful to transfer all the official decisions or useful discussions to the issue tracking system.

+ 25 - 6
docs/src/site/apt/whats-new.apt

@@ -14,9 +14,28 @@
 ~~ limitations under the License.
 ~~

-What's New in Ambari 1.3.0?
+What's New?

- * Check back later for the new features in 1.3.0. 
+  There’s a lot of activity occurring in trunk. We’ve had a great response from the
+  community and received a lot of input into the direction. To date, we have added
+  many new features. To highlight a few...
+
+  * Added more options on provisioning so you can better target your cluster
+    setup for your needs
+
+  * More service controls, including the ability to run ad hoc smoke tests
+
+  * Exposed a RESTful API for cluster metrics (with management and
+    provisioning to follow)
+
+  * Moved to a JavaScript front-end for Ambari Web and refreshed the entire UI
+    experience
+
+  * Used that RESTful API and added more charts and heatmaps to the dashboard
+
+  * Reduced the number of dependencies on the Ambari Server + Ambari Agent
+
+  * Added support for SLES 11

 Getting Ambari Source

@@ -28,14 +47,14 @@ $ svn checkout http://svn.apache.org/repos/asf/incubator/ambari/trunk ambari

 JIRA

-  You can follow all the work going on by watching the Ambari JIRA project. You can see the
-  JIRA details for Ambari 1.3.0 here:
+  You can follow all the work going on by watching the Ambari JIRA project. The next release of Apache Ambari will be version 1.2.1. You can see the
+  JIRA details for this release here:
 
 
-  {{{https://issues.apache.org/jira/issues/?jql=fixVersion%20%3D%20%221.3.0%22%20AND%20project%20%3D%20AMBARI} https://issues.apache.org/jira/issues/?jql=fixVersion%20%3D%20%221.3.0%22%20AND%20project%20%3D%20AMBARI}}
+  {{{https://issues.apache.org/jira/issues/?jql=fixVersion%20%3D%20%221.2.1%22%20AND%20project%20%3D%20AMBARI} https://issues.apache.org/jira/issues/?jql=fixVersion%20%3D%20%221.2.1%22%20AND%20project%20%3D%20AMBARI}}

 User Guide

-  Take a look at {{{http://incubator.apache.org/ambari/1.2.2/installing-hadoop-using-ambari/content/index.html} how to install a Hadoop cluster using Ambari 1.2.2}}.
+  {{{http://incubator.apache.org/ambari/1.2.1/installing-hadoop-using-ambari/content/index.html} See how to install a Hadoop cluster using Ambari 1.2.1}}

 Stay Tuned


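For reference, the percent-encoded JIRA link in the whats-new.apt change above decodes to the following JQL query (%20 is a space, %3D is "=", and %22 is a double quote):

  fixVersion = "1.2.1" AND project = AMBARI
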
+ 7 - 33
docs/src/site/site.xml

@@ -28,9 +28,7 @@
   <custom>
     <fluidoSkin>
       <topBarEnabled>true</topBarEnabled>
-      <sideBarEnabled>true</sideBarEnabled>
-      <leftColumnClass>span2</leftColumnClass>
-      <bodyColumnClass>span10</bodyColumnClass>
+      <sideBarEnabled>false</sideBarEnabled>
     </fluidoSkin>
   </custom>
   <bannerLeft>
@@ -61,62 +59,38 @@
          })();
       </script>
       <!-- End of Google analytics -->
-       <style>
-           .well.sidebar-nav {
-             background-color: #fff;
-           }
-           a.externalLink[href^=http] {
-             background-image: none;
-             padding-right: 0;
-           }
-           body.topBarEnabled {
-             padding-top: 40px;
-           }
-           #leftColumn .nav-list .active a {
-             background-color: #a0a0a0;
-           }
-           .nav-list .active a:hover {
-             background-color: #a0a0a0;
-           }
-       </style>
    </head>

    <links>
      <item name="JIRA" href="https://issues.apache.org/jira/browse/AMBARI" />
      <item name="SVN" href="https://svn.apache.org/repos/asf/incubator/ambari/" />
-      <item name="Wiki" href="https://cwiki.apache.org/confluence/display/AMBARI/Ambari" />
+      <item name="WIKI" href="https://cwiki.apache.org/confluence/display/AMBARI/Ambari" />
    </links>

    <breadcrumbs>
-      <item name="Incubator" href="../"/>
-      <item name="Ambari" href="http://incubator.apache.org/ambari/"/>
+      <item name="Ambari" href="index.html"/>
    </breadcrumbs>

    <menu name="Ambari">
-      <item name="Overview" href="index.html"/>
       <item name="What's New?" href="whats-new.html"/>
       <item name="What's New?" href="whats-new.html"/>
+      <item name="About" href="index.html"/>
+      <item name="Wiki" href="https://cwiki.apache.org/confluence/display/AMBARI/Ambari"/>
       <item name="Project Team" href="team-list.html"/>
       <item name="Project Team" href="team-list.html"/>
-      <item name="IRC Channel" href="irc.html"/>
       <item name="Mailing Lists" href="mail-lists.html"/>
       <item name="Mailing Lists" href="mail-lists.html"/>
       <item name="Issue Tracking" href="issue-tracking.html"/>
       <item name="Issue Tracking" href="issue-tracking.html"/>
-      <item name="User Group" href="http://www.meetup.com/Apache-Ambari-User-Group"/>
-      <item name="Project License" href="http://www.apache.org/licenses/"/>
+      <item name="Project License" href="license.html"/>
    </menu>

    <menu name="Releases">
-        <item name="1.2.2" href="http://www.apache.org/dist/incubator/ambari/ambari-1.2.2/"/>
-        <item name="1.2.1" href="http://www.apache.org/dist/incubator/ambari/ambari-1.2.1/"/>
+        <item name="1.2.1 (coming soon)" href="whats-new.html"/>
         <item name="1.2.0" href="http://www.apache.org/dist/incubator/ambari/ambari-1.2.0/"/>
         <item name="1.2.0" href="http://www.apache.org/dist/incubator/ambari/ambari-1.2.0/"/>
         <item name="0.9" href="http://www.apache.org/dist/incubator/ambari/ambari-0.9-incubating/"/>
         <item name="0.9" href="http://www.apache.org/dist/incubator/ambari/ambari-0.9-incubating/"/>
     </menu>
     </menu>
 
 
     <menu name="Documentation">
     <menu name="Documentation">
-        <item name="Install Guide for 1.2.2" href="1.2.2/installing-hadoop-using-ambari/content/index.html"/>
         <item name="Install Guide for 1.2.1" href="1.2.1/installing-hadoop-using-ambari/content/index.html"/>
         <item name="Install Guide for 1.2.1" href="1.2.1/installing-hadoop-using-ambari/content/index.html"/>
         <item name="Install Guide for 1.2.0" href="1.2.0/installing-hadoop-using-ambari/content/index.html"/>
         <item name="Install Guide for 1.2.0" href="1.2.0/installing-hadoop-using-ambari/content/index.html"/>
         <item name="Install Guide for 0.9" href="install-0.9.html"/>
         <item name="Install Guide for 0.9" href="install-0.9.html"/>
-        <item name="API Reference" href="https://github.com/apache/ambari/blob/trunk/ambari-server/docs/api/v1/index.md"/>
-        <item name="Wiki" href="https://cwiki.apache.org/confluence/display/AMBARI/Ambari"/>
    </menu>

    <footer>

+ 1 - 58
pom.xml

@@ -21,11 +21,10 @@
  <artifactId>ambari</artifactId>
  <packaging>pom</packaging>
  <name>Ambari Main</name>
-  <version>1.3.0-SNAPSHOT</version>
+  <version>1.2.3-SNAPSHOT</version>
  <description>Ambari</description>
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <clover.license>${user.home}/clover.license</clover.license>
  </properties>
  <pluginRepositories>
    <pluginRepository>
@@ -34,12 +33,6 @@
      <url>http://download.java.net/maven/2/</url>
      <layout>default</layout>
    </pluginRepository>
-    <pluginRepository>
-       <id>maven2-repository.atlassian</id>
-       <name>Atlassian Maven Repository</name>
-       <url>https://maven.atlassian.com/repository/public</url>
-       <layout>default</layout>
-     </pluginRepository>
    <pluginRepository>
      <id>maven2-glassfish-repository.dev.java.net</id>
      <name>Java.net Repository for Maven</name>
@@ -157,54 +150,4 @@
      </plugin>
    </plugins>
  </build>
-  <profiles>
-    <profile>
-      <id>clover</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-        <property>
-          <name>clover</name>
-        </property>
-      </activation>
-      <properties>
-        <maven.clover.licenseLocation>${clover.license}</maven.clover.licenseLocation>
-        <clover.version>3.1.11</clover.version>
-      </properties>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>com.atlassian.maven.plugins</groupId>
-            <artifactId>maven-clover2-plugin</artifactId>
-            <version>${clover.version}</version>
-            <configuration>
-              <includesAllSourceRoots>true</includesAllSourceRoots>
-              <includesTestSourceRoots>true</includesTestSourceRoots>
-              <targetPercentage>50%</targetPercentage>
-              <generateHtml>true</generateHtml>
-              <generateXml>true</generateXml>
-              <excludes>
-                <exclude>**/generated/**</exclude>
-              </excludes>
-            </configuration>
-            <executions>
-              <execution>
-                <id>clover-setup</id>
-                <phase>process-sources</phase>
-                <goals>
-                  <goal>setup</goal>
-                </goals>
-              </execution>
-              <execution>
-                <id>clover</id>
-                <phase>test</phase>
-                <goals>
-                  <goal>clover</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
 </project>

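The removed Clover profile above used Maven's property-based activation (the <activation><property><name>clover</name></property></activation> element), so before this commit the instrumented build could presumably be triggered by defining a "clover" property on the command line, e.g. running mvn test -Dclover, with a valid license at the ${user.home}/clover.license default that the deleted <clover.license> property pointed to. The backport drops the profile, the property, and the Atlassian plugin repository together.
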
Some files were not shown because too many files changed in this diff