
AMBARI-974. Decommissioning of datanodes. (Jitendra Pandey via mahadev)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/AMBARI-666@1406498 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar 12 years ago
parent
commit
36d646ee25
100 changed files with 4866 additions and 2414 deletions
  1. + 2 - 0  AMBARI-666-CHANGES.txt
  2. + 1 - 1  ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
  3. + 0 - 191  ambari-agent/src/main/puppet/modules/hdp/manifests/lib/puppet/parser/functions/pkgName.rb
  4. + 1 - 1  ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
  5. + 5 - 2  ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
  6. + 8 - 4  ambari-agent/src/main/python/ambari_agent/puppetExecutor.py
  7. + 1 - 0  ambari-agent/src/main/python/ambari_agent/rolesToClass.dict
  8. + 2 - 1  ambari-server/src/main/java/org/apache/ambari/server/Role.java
  9. + 123 - 59  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  10. + 0 - 62  ambari-server/src/main/java/org/apache/ambari/server/controller/OperationRequest.java
  11. + 0 - 58  ambari-server/src/main/java/org/apache/ambari/server/controller/TrackActionResponse.java
  12. + 14 - 16  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ActionResourceProvider.java
  13. + 0 - 764  ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCManagementController.java
  14. + 5 - 3  ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java
  15. + 0 - 65  ambari-server/src/main/java/org/apache/ambari/server/state/StackVersion.java
  16. + 0 - 75  ambari-server/src/main/java/org/apache/ambari/server/state/job/Job.java
  17. + 0 - 39  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobCompletedEvent.java
  18. + 0 - 44  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobEvent.java
  19. + 0 - 39  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobFailedEvent.java
  20. + 0 - 38  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobId.java
  21. + 0 - 314  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobImpl.java
  22. + 0 - 37  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobProgressUpdateEvent.java
  23. + 0 - 39  ambari-server/src/main/java/org/apache/ambari/server/state/job/JobState.java
  24. + 0 - 37  ambari-server/src/main/java/org/apache/ambari/server/state/job/NewJobEvent.java
  25. + 17 - 0  ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
  26. + 1 - 1  ambari-server/src/main/resources/ca.config
  27. + 0 - 137  ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
  28. + 0 - 121  ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/PropertyIdImpl.java
  29. + 0 - 113  ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/ResourceImpl.java
  30. + 0 - 153  ambari-server/src/test/java/org/apache/ambari/server/state/job/JobTest.java
  31. + 1 - 0  contrib/addons/package/rpm/.gitignore
  32. + 84 - 0  contrib/addons/package/rpm/create_ganglia_addon_rpm.sh
  33. + 84 - 0  contrib/addons/package/rpm/create_nagios_addon_rpm.sh
  34. + 75 - 0  contrib/addons/package/rpm/hdp_mon_ganglia_addons.spec
  35. + 82 - 0  contrib/addons/package/rpm/hdp_mon_nagios_addons.spec
  36. + 0 - 0  contrib/addons/src/.gitignore
  37. + 4 - 0  contrib/addons/src/addOns/ganglia/conf/cluster_HDPJobTracker.json
  38. + 3 - 0  contrib/addons/src/addOns/ganglia/conf/cluster_HDPNameNode.json
  39. + 4 - 0  contrib/addons/src/addOns/ganglia/conf/cluster_HDPSlaves.json
  40. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_disk_report.json
  41. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_report.json
  42. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_split_size_report.json
  43. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_master_cluster_requests_report.json
  44. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_compaction_queue_size_report.json
  45. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_flush_queue_size_report.json
  46. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_read_latency_report.json
  47. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_write_latency_report.json
  48. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_read_requests_report.json
  49. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_regions_report.json
  50. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_write_requests_report.json
  51. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_capacity_remaining_report.json
  52. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
  53. + 14 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_ops_report.json
  54. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_pending_replication_blocks_report.json
  55. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_under_replicated_blocks_report.json
  56. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_heartbeats_report.json
  57. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_completed_report.json
  58. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_failed_report.json
  59. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_running_report.json
  60. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_submitted_report.json
  61. + 14 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_map_slot_report.json
  62. + 12 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_mapreduce_report.json
  63. + 14 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_reduce_slot_report.json
  64. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
  65. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
  66. + 16 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
  67. + 12 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
  68. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
  69. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
  70. + 10 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
  71. + 12 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_mapreduce_report.json
  72. + 14 - 0  contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_task_report.json
  73. + 195 - 0  contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
  74. + 91 - 0  contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
  75. + 86 - 0  contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
  76. + 72 - 0  contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
  77. + 68 - 0  contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
  78. + 32 - 0  contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
  79. + 59 - 0  contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
  80. + 35 - 0  contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
  81. + 67 - 0  contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
  82. + 73 - 0  contrib/addons/src/addOns/nagios/plugins/check_webui.sh
  83. + 101 - 0  contrib/addons/src/addOns/nagios/plugins/sys_logger.py
  84. + 450 - 0  contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
  85. + 93 - 0  contrib/addons/test/dataServices/jmx/data/cluster_configuration.json
  86. + 93 - 0  contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase
  87. + 866 - 0  contrib/addons/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
  88. + 44 - 0  contrib/addons/test/dataServices/jmx/data/sample_jobtracker_jmx.json
  89. + 376 - 0  contrib/addons/test/dataServices/jmx/data/sample_namenode_jmx.json
  90. + 120 - 0  contrib/addons/test/dataServices/jmx/test_config_load.php
  91. + 255 - 0  contrib/addons/test/dataServices/jmx/test_jmx_parsing.php
  92. + 398 - 0  contrib/addons/test/nagios/plugins/test_sys_logger.py
  93. + 122 - 0  contrib/addons/test/ui/json/alerts.json
  94. + 119 - 0  contrib/addons/test/ui/json/clusterSummary.json
  95. + 24 - 0  contrib/addons/test/ui/json/get_graph_info_all.json
  96. + 0 - 0  contrib/addons/test/ui/json/hbaseSummary.json
  97. + 24 - 0  contrib/addons/test/ui/json/hdfsSummary.json
  98. + 26 - 0  contrib/addons/test/ui/json/mrSummary.json
  99. + 36 - 0  contrib/addons/utils/dataServices/ganglia/generateAll.sh
  100. + 66 - 0  contrib/addons/utils/dataServices/ganglia/generate_dashboard_hdp_json.php

+ 2 - 0
AMBARI-666-CHANGES.txt

@@ -12,6 +12,8 @@ AMBARI-666 branch (unreleased changes)
 
   NEW FEATURES
 
+  AMBARI-974. Decommissioning of datanodes. (Jitendra Pandey via mahadev)
+
   AMBARI-975. Fix support for cascading updates to configs. (Hitesh Shah
   via mahadev)
 

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp

@@ -232,7 +232,7 @@ define hdp::exec(
   $user = undef,
   $creates = undef,
   $tries = 1,
-  $timeout = 900,
+  $timeout = 300,
   $try_sleep = undef,
   $initial_wait = undef,
   $logoutput = 'on_failure',

+ 0 - 191
ambari-agent/src/main/puppet/modules/hdp/manifests/lib/puppet/parser/functions/pkgName.rb

@@ -1,191 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# Returns package name(s) for the specific OS and architecture
-# Params:
-#
-# - name of the package
-# - OS name
-# - OS architecture
-#
-# If there are no approprite OS/architecture, it will search default entries (named as ALL)
-module Puppet::Parser::Functions
-  newfunction(:pkgName, :type => :rvalue) do |args|
-    packageName = args[0]
-    os = args[1]
-    arch = args[2]
-    ALL = 'ALL'
-
-    # First level (packages): packageName => OS hashmap
-    # Second level (OS hashmap): Architecture => real package name(s)
-    packages = {
-      'ganglia-monitor' => {
-        'ALL' => {
-          64 => 'ganglia-gmond-3.2.0'
-        }
-      },
-      'ganglia-server' => {
-        'ALL' => {
-          64 => 'ganglia-gmetad-3.2.0'
-        }
-      },
-      'ganglia-gweb' => {
-        'ALL' => {
-          64 => 'gweb'
-        }
-      },
-      'ganglia-hdp-gweb-addons' => {
-        ALL => {
-          64 => 'hdp_mon_ganglia_addons'
-        }
-      },
-      'glibc' => {
-        'rhel6' => {
-          ALL => ['glibc','glibc.i686']
-        }
-      },
-      'nagios-addons' => {
-        ALL => {
-          64 => 'hdp_mon_nagios_addons'
-        }
-      },
-      'nagios-server' => {
-        ALL => {
-          64 => 'nagios-3.2.3'
-        }
-      },
-      'nagios-plugins' => {
-        ALL => {
-          64 => 'nagios-plugins-1.4.9'
-        }
-      },
-      'nagios-fping' => {
-        ALL => {
-          64 =>'fping'
-        }
-      },
-      'nagios-php-pecl-json' => {
-        ALL => {
-          64 => 'php-pecl-json.x86_64'
-        }
-      },
-      'snmp' => {
-        ALL => {
-          64 => ['net-snmp','net-snmp-utils'],
-        }
-      },
-      'dashboard' => {
-        ALL => {
-          64 => 'hdp_mon_dashboard'
-        }
-      },
-      'templeton' => {
-        ALL => {
-          ALL => 'templeton'
-        }
-      },
-      'oozie-client' => {
-        ALL => {
-          64 => 'oozie-client.noarch'
-        }
-      },
-      'oozie-server' => {
-        ALL => {
-          64 => 'oozie.noarch'
-        }
-      },
-      'lzo' => {
-        'rhel5' => {
-          ALL => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386']
-        },
-        'rhel6' => {
-          ALL => ['lzo','lzo.i686','lzo-devel','lzo-devel.i686']
-        }
-      },
-      #TODO: make these two consistent on whether case of 64/32 bits
-      'snappy' => {
-        ALL => {
-          32 =>  ['snappy','snappy-devel'],
-          64 => ['snappy','snappy-devel']
-        }
-      },
-      'mysql' => {
-        ALL => {
-          32 =>  ['mysql','mysql-server']
-        }
-      },
-      'mysql-connector' => {
-        ALL => {
-          64 =>  ['mysql-connector-java']
-        }
-      },
-      'extjs' => {
-        ALL => {
-          64 =>  ['extjs-2.2-1']
-        }
-      },
-      'templeton-tar-hive' => {
-        ALL => {
-          64 => ['templeton-tar-hive-0.0.1.14-1']
-        }
-      },
-      'templeton-tar-pig' => {
-        ALL => {
-          64 => ['templeton-tar-pig-0.0.1.14-1']
-        }
-      }
-    }
-    ########################################################
-    ########################################################
-    # seeking package hashmap
-    pkgHash = nil
-    
-    if has_key(packages, packageName) 
-      pkgHash = packages[packageName]
-    else 
-      print "Wrong package name: " + packageName
-      return nil
-    end
-    
-    # seeking os hashmap
-    osHash = nil
-    
-    if has_key(pkgHash, os) 
-      osHash = pkgHash[os]
-    elsif has_key(pkgHash, ALL) 
-      osHash = pkgHash[ALL]
-    else 
-      print "Wrong package name: " + packageName
-      return nil
-    end
-    
-    #seeking arhitecture 
-    result = nil
-    
-    if has_key(osHash, arch) 
-      result = osHash[arch]
-    elsif has_key(osHash, ALL)
-      result = osHash[ALL]
-    end
-    
-    return result
-  end
-end

+ 1 - 1
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -39,7 +39,7 @@ serviceToPidMapFile=servicesToPidNames.dict
 pidLookupPath=/var/run/
 
 [stack]
-installprefix=/var/ambari/
+installprefix=/tmp
 
 [puppet]
 puppetmodules=/var/lib/ambari-agent/puppet/

+ 5 - 2
ambari-agent/src/main/python/ambari_agent/RepoInstaller.py

@@ -23,7 +23,8 @@ import os
 import json
 from shell import shellRunner
 from manifestGenerator import writeImports
-
+from pprint import pprint, pformat
+import ast
 
 PUPPET_EXT=".pp"
 
@@ -44,11 +45,13 @@ class RepoInstaller:
       params = self.parsedJson['hostLevelParams']
     if params.has_key('repo_info'):
       self.repoInfoList = params['repo_info']
+    self.repoInfoList = ast.literal_eval(self.repoInfoList)
 
   def generateFiles(self):
     repoPuppetFiles = []
     for repo in self.repoInfoList:
-      repoFile = open(self.path + os.sep + repo['repoId'] + '-' + str(self.taskId) + PUPPET_EXT, 'w+')
+      repoFile = open(self.path + os.sep + repo['repoId'] + '-' + 
+                      str(self.taskId) + PUPPET_EXT, 'w+')
       writeImports(repoFile, self.modulesdir, inputFileName='imports.txt')
       
       baseUrl = ''

+ 8 - 4
ambari-agent/src/main/python/ambari_agent/puppetExecutor.py

@@ -130,17 +130,21 @@ class puppetExecutor:
 
     if error == self.NO_ERROR:
       if result.has_key("stdout"):
-        result["stdout"] = result["stdout"] + os.linesep + str(grep.tail(puppetOutput, self.OUTPUT_LAST_LINES))
+        result["stdout"] = result["stdout"] + os.linesep + \
+          str(grep.tail(puppetOutput, self.OUTPUT_LAST_LINES))
       else:
         result["stdout"] = grep.tail(puppetOutput, self.OUTPUT_LAST_LINES)
     else:
       if result.has_key("stdout"):
-        result["stdout"] = result["stdout"] + os.linesep + str(grep.grep(puppetOutput, "err", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER))
+        result["stdout"] = result["stdout"] + os.linesep + \
+        str(grep.grep(puppetOutput, "err", self.ERROR_LAST_LINES_BEFORE, 
+                      self.ERROR_LAST_LINES_AFTER))
       else:
-        result["stdout"] = str(grep.grep(puppetOutput, "err", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER))
+        result["stdout"] = str(grep.grep(puppetOutput, "err", 
+                                         self.ERROR_LAST_LINES_BEFORE, 
+                                         self.ERROR_LAST_LINES_AFTER))
 	
     logger.info("ExitCode : "  + str(result["exitcode"]))
-
     return result
  
 def main():

+ 1 - 0
ambari-agent/src/main/python/ambari_agent/rolesToClass.dict

@@ -38,3 +38,4 @@ PIG_SERVICE_CHECK = hdp-pig::pig::service_check
 SQOOP_SERVICE_CHECK = hdp-sqoop::sqoop::service_check
 TEMPLETON_SERVICE_CHECK = hdp-templeton::templeton::service_check
 DASHBOARD_SERVICE_CHECK = hdp-dashboard::dashboard::service_check
+DECOMMISSION_DATANODE = hdp-hadoop::hdfs::decommission

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/Role.java

@@ -68,5 +68,6 @@ public enum Role {
   GANGLIA_MONITOR,
   GMOND_SERVICE_CHECK,
   GMETAD_SERVICE_CHECK,
-  MONTOR_WEBSERVER
+  MONTOR_WEBSERVER,
+  DECOMMISSION_DATANODE
 }
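
Side note (illustration only, not part of the patch): the controller resolves service-check action names reflectively via Role.valueOf, and the new decommission code path references this constant directly, so the name has to exist both in this enum and as the DECOMMISSION_DATANODE key added to the agent's rolesToClass.dict above. Assuming standard java.lang.Enum semantics:

    Role role = Role.valueOf("DECOMMISSION_DATANODE");  // resolves to the new constant
    // Role.valueOf("NOT_A_ROLE") would throw IllegalArgumentException instead.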

+ 123 - 59
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -2762,6 +2762,109 @@ public class AmbariManagementControllerImpl implements
     return null;
   }
 
+  private void addServiceCheckAction(ActionRequest actionRequest, Stage stage)
+      throws AmbariException {
+    String clusterName = actionRequest.getClusterName();
+    String componentName = actionMetadata.getClient(actionRequest
+        .getServiceName());
+
+    String hostName;
+    if (componentName != null) {
+      Map<String, ServiceComponentHost> components = clusters
+          .getCluster(clusterName).getService(actionRequest.getServiceName())
+          .getServiceComponent(componentName).getServiceComponentHosts();
+
+      if (components.isEmpty()) {
+        throw new AmbariException("Hosts not found, component="
+            + componentName + ", service=" + actionRequest.getServiceName()
+            + ", cluster=" + clusterName);
+      }
+
+      hostName = components.keySet().iterator().next();
+    } else {
+      Map<String, ServiceComponent> components = clusters
+          .getCluster(clusterName).getService(actionRequest.getServiceName())
+          .getServiceComponents();
+
+      if (components.isEmpty()) {
+        throw new AmbariException("Components not found, service="
+            + actionRequest.getServiceName() + ", cluster=" + clusterName);
+      }
+
+      ServiceComponent serviceComponent = components.values().iterator()
+          .next();
+
+      if (serviceComponent.getServiceComponentHosts().isEmpty()) {
+        throw new AmbariException("Hosts not found, component="
+            + serviceComponent.getName() + ", service="
+            + actionRequest.getServiceName() + ", cluster=" + clusterName);
+      }
+
+      hostName = serviceComponent.getServiceComponentHosts().keySet()
+          .iterator().next();
+    }
+
+    stage.addHostRoleExecutionCommand(hostName, Role.valueOf(actionRequest
+        .getActionName()), RoleCommand.EXECUTE,
+        new ServiceComponentHostOpInProgressEvent(componentName, hostName,
+            System.currentTimeMillis()), clusterName, actionRequest
+            .getServiceName());
+
+    stage.getExecutionCommand(hostName, actionRequest.getActionName())
+        .setRoleParams(actionRequest.getParameters());
+  }
+  
+  private void addDecommissionDatanodeAction(
+      ActionRequest decommissionRequest, Stage stage)
+      throws AmbariException {
+    // Find hdfs admin host, just decommission from namenode.
+    String clusterName = decommissionRequest.getClusterName();
+    String serviceName = decommissionRequest.getServiceName();
+    String namenodeHost = clusters.getCluster(clusterName)
+        .getService(serviceName).getServiceComponent(Role.NAMENODE.toString())
+        .getServiceComponentHosts().keySet().iterator().next();
+
+    Map<String, ServiceComponentHost> datanodeHostMap = clusters
+        .getCluster(clusterName).getService(serviceName)
+        .getServiceComponent(Role.DATANODE.toString())
+        .getServiceComponentHosts();
+
+    String excludeFileTag = null;
+    if (decommissionRequest.getParameters() != null
+        && (decommissionRequest.getParameters().get("excludeFileTag") != null)) {
+      excludeFileTag = decommissionRequest.getParameters()
+          .get("excludeFileTag");
+    }
+
+    if (excludeFileTag == null) {
+      throw new AmbariException("No exclude file specified");
+    }
+
+    Config config = clusters.getCluster(clusterName).getDesiredConfig(
+        "hdfs-exclude-file", excludeFileTag);
+    
+    Map<String, Map<String, String>> configurations =
+        new TreeMap<String, Map<String, String>>();
+    configurations.put(config.getType(), config.getProperties());
+
+    Config hdfsSiteConfig = clusters.getCluster(clusterName).getService("HDFS")
+        .getDesiredConfigs().get("hdfs-site");
+
+    configurations
+        .put(hdfsSiteConfig.getType(), hdfsSiteConfig.getProperties());
+
+    stage.addHostRoleExecutionCommand(
+        namenodeHost,
+        Role.DECOMMISSION_DATANODE,
+        RoleCommand.EXECUTE,
+        new ServiceComponentHostOpInProgressEvent(Role.DECOMMISSION_DATANODE
+            .toString(), namenodeHost, System.currentTimeMillis()),
+        clusterName, serviceName);
+    stage.getExecutionCommand(namenodeHost,
+        Role.DECOMMISSION_DATANODE.toString())
+        .setConfigurations(configurations);
+  }
+
   @Override
   public RequestStatusResponse createActions(Set<ActionRequest> request)
       throws AmbariException {
@@ -2769,8 +2872,6 @@ public class AmbariManagementControllerImpl implements
 
     String logDir = ""; //TODO empty for now
 
-    Stage stage = null;
-
     for (ActionRequest actionRequest : request) {
       if (actionRequest.getClusterName() == null
           || actionRequest.getClusterName().isEmpty()
@@ -2782,73 +2883,36 @@ public class AmbariManagementControllerImpl implements
             + actionRequest.getClusterName() + ", service="
             + actionRequest.getServiceName() + ", action="
             + actionRequest.getActionName());
+      } else if (clusterName == null) {
+        clusterName = actionRequest.getClusterName();
+      } else if (!clusterName.equals(actionRequest.getClusterName())) {
+        throw new AmbariException("Requests for different clusters found");
       }
     }
 
+    Stage stage = stageFactory.createNew(actionManager.getNextRequestId(),
+        logDir, clusterName);
+    stage.setStageId(0);
     for (ActionRequest actionRequest : request) {
-      if (clusterName == null) {
-        clusterName = actionRequest.getClusterName();
-        if (clusterName == null || clusterName.isEmpty()) {
-          throw new AmbariException("Empty cluster name in request");
-        }
-        stage = stageFactory.createNew(actionManager.getNextRequestId(), logDir, clusterName);
-        stage.setStageId(0L);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Creating Stage requestId={}, stageId={}, clusterName={}",
-            new Object[]{stage.getRequestId(), stage.getStageId(), stage.getClusterName()});
-        }
+      if (actionRequest.getActionName().contains("SERVICE_CHECK")) {
+        addServiceCheckAction(actionRequest, stage);
+      } else if (actionRequest.getActionName().equals("DECOMMISSION_DATANODE")) {
+        LOG.info("DEBUG : excludeFileTag :"+actionRequest.getParameters().get("excludeFileTag"));
+        addDecommissionDatanodeAction(actionRequest, stage);
       } else {
-        if (!clusterName.equals(actionRequest.getClusterName())) {
-          throw new AmbariException("Requests for different clusters found");
-        }
+        throw new AmbariException("Unsupported action");
       }
-
-      String componentName = actionMetadata.getClient(actionRequest.getServiceName());
-
-      String hostName;
-      if (componentName != null) {
-        Map<String, ServiceComponentHost> components = clusters.getCluster(clusterName).
-            getService(actionRequest.getServiceName()).getServiceComponent(componentName).getServiceComponentHosts();
-
-        if (components.isEmpty()) {
-          throw new AmbariException("Hosts not found, component=" + componentName +
-              ", service=" + actionRequest.getServiceName() + ", cluster=" + clusterName);
-        }
-
-        hostName = components.keySet().iterator().next();
-      } else {
-        Map<String, ServiceComponent> components = clusters.getCluster(clusterName).
-            getService(actionRequest.getServiceName()).getServiceComponents();
-
-        if (components.isEmpty()) {
-          throw new AmbariException("Components not found, service=" + actionRequest.getServiceName() +
-              ", cluster=" + clusterName);
-        }
-
-        ServiceComponent serviceComponent = components.values().iterator().next();
-
-        if (serviceComponent.getServiceComponentHosts().isEmpty()) {
-          throw new AmbariException("Hosts not found, component=" + serviceComponent.getName() +
-              ", service=" + actionRequest.getServiceName() + ", cluster=" + clusterName);
-        }
-
-        hostName = serviceComponent.getServiceComponentHosts().keySet().iterator().next();
-      }
-
-      stage.addHostRoleExecutionCommand(hostName, Role.valueOf(actionRequest.getActionName()), RoleCommand.EXECUTE,
-          new ServiceComponentHostOpInProgressEvent(componentName, hostName, System.currentTimeMillis()),
-          clusterName, actionRequest.getServiceName());
-
-      stage.getExecutionCommand(hostName, actionRequest.getActionName()).setRoleParams(actionRequest.getParameters());
     }
-
-    if (stage != null) {
-      actionManager.sendActions(Arrays.asList(stage));
+    
+    RoleGraph rg = new RoleGraph(rco);
+    rg.build(stage);
+    List<Stage> stages = rg.getStages();
+    
+    if (stages != null && !stages.isEmpty()) {
+      actionManager.sendActions(stages);
       return getRequestStatusResponse(stage.getRequestId());
     } else {
       throw new AmbariException("Stage was not created");
     }
-
   }
-
 }
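
For readers tracing the new createActions() flow above, here is a minimal sketch (not part of the patch) of the kind of request addDecommissionDatanodeAction handles, using only types that appear in this change set (ActionRequest, RequestStatusResponse, the controller's createActions). The cluster name "c1", the tag "exclude_tag_1", and the `controller` reference are illustrative placeholders.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: the action name must be DECOMMISSION_DATANODE and the
    // parameters must carry "excludeFileTag", the tag of an "hdfs-exclude-file"
    // desired config; otherwise the controller throws "No exclude file specified".
    Map<String, String> params = new HashMap<String, String>();
    params.put("excludeFileTag", "exclude_tag_1");     // hypothetical config tag

    ActionRequest decommission = new ActionRequest(
        "c1",                       // hypothetical cluster name
        "HDFS",                     // service owning the NAMENODE and DATANODE components
        "DECOMMISSION_DATANODE",    // routed to addDecommissionDatanodeAction(...)
        params);

    // controller is assumed to be an AmbariManagementController instance.
    RequestStatusResponse response =
        controller.createActions(Collections.singleton(decommission));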

+ 0 - 62
ambari-server/src/main/java/org/apache/ambari/server/controller/OperationRequest.java

@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class OperationRequest {
-  private String clusterName; 
-
-  private String serviceName;
-  
-  private String operationName; //for CREATE only
-
-  private Map<String, String> parameters; //for CREATE only
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getOperationName() {
-    return operationName;
-  }
-
-  public void setOperationName(String operationName) {
-    this.operationName = operationName;
-  }
-
-  public Map<String, String> getParameters() {
-    return parameters;
-  }
-
-  public void setParameters(Map<String, String> parameters) {
-    this.parameters = parameters;
-  }
-}

+ 0 - 58
ambari-server/src/main/java/org/apache/ambari/server/controller/TrackActionResponse.java

@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-public class TrackActionResponse {
-
-  // Request ID for tracking async operations
-  private final Long requestId;
-
-  // TODO how are logs to be sent back?
-  private String logs;
-
-  // TODO stage specific information
-
-  public TrackActionResponse(Long requestId) {
-    super();
-    this.requestId = requestId;
-  }
-
-  /**
-   * @return the logs
-   */
-  public String getLogs() {
-    return logs;
-  }
-
-  /**
-   * @param logs the logs to set
-   */
-  public void setLogs(String logs) {
-    this.logs = logs;
-  }
-
-  /**
-   * @return the requestId
-   */
-  public long getRequestId() {
-    return requestId;
-  }
-
-
-}

+ 14 - 16
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ActionResourceProvider.java

@@ -30,8 +30,11 @@ import org.apache.ambari.server.controller.utilities.PropertyHelper;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 /**
@@ -71,21 +74,7 @@ class ActionResourceProvider extends ResourceProviderImpl {
   @Override
   public Set<Resource> getResources(Request request, Predicate predicate)
       throws AmbariException {
-    ActionRequest actionRequest = getRequest(getProperties(predicate));
-
-    // TODO : handle multiple requests
-    Set<ActionResponse> responses = getManagementController().getActions(
-        Collections.singleton(actionRequest));
-
-    Set<Resource> resources = new HashSet<Resource>();
-    for (ActionResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Action);
-      resource.setProperty(ACTION_CLUSTER_NAME_PROPERTY_ID, response.getClusterName());
-      resource.setProperty(ACTION_SERVICE_NAME_PROPERTY_ID, response.getServiceName());
-      resource.setProperty(ACTION_ACTION_NAME_PROPERTY_ID, response.getActionName());
-      resources.add(resource);
-    }
-    return resources;
+    throw new UnsupportedOperationException("Not currently supported.");
   }
 
   @Override
@@ -105,10 +94,19 @@ class ActionResourceProvider extends ResourceProviderImpl {
   }
 
   private ActionRequest getRequest(Map<PropertyId, Object> properties) {
+    Map<String, String> params = new HashMap<String, String>();
+    Iterator<Entry<PropertyId, Object>> it1 = properties.entrySet().iterator();
+    while (it1.hasNext()) {
+      Entry<PropertyId, Object> entry = it1.next();
+      if (entry.getKey().getCategory().equals("parameters")
+          && null != entry.getValue()) {
+        params.put(entry.getKey().getName(), entry.getValue().toString());
+      }
+    }
     return new ActionRequest(
         (String)  properties.get(ACTION_CLUSTER_NAME_PROPERTY_ID),
         (String)  properties.get(ACTION_SERVICE_NAME_PROPERTY_ID),
         (String)  properties.get(ACTION_ACTION_NAME_PROPERTY_ID),
-        null);
+        params);
   }
 }

+ 0 - 764
ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCManagementController.java

@@ -1,764 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ClusterRequest;
-import org.apache.ambari.server.controller.ClusterResponse;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.ConfigurationResponse;
-import org.apache.ambari.server.controller.HostRequest;
-import org.apache.ambari.server.controller.HostResponse;
-import org.apache.ambari.server.controller.OperationRequest;
-import org.apache.ambari.server.controller.ServiceComponentHostRequest;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.controller.ServiceComponentRequest;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.controller.ServiceRequest;
-import org.apache.ambari.server.controller.ServiceResponse;
-import org.apache.ambari.server.controller.TrackActionResponse;
-import org.apache.ambari.server.controller.internal.PropertyIdImpl;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitorAcceptor;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.PropertyId;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * Generic JDBC implementation of a management controller.
- */
-public class JDBCManagementController implements AmbariManagementController {
-  /**
-   * The connection factory.
-   */
-  private final ConnectionFactory connectionFactory;
-
-  /**
-   * Mapping of resource type to the name of the primary table for the resource.
-   */
-  private final Map<Resource.Type, String> resourceTables;
-
-  /**
-   * Primary key mappings.
-   */
-  private final Map<String, Set<PropertyId>> primaryKeys = new HashMap<String, Set<PropertyId>>();
-
-  /**
-   * Key mappings used for joins.
-   */
-  private final Map<String, Map<PropertyId, PropertyId>> importedKeys = new HashMap<String, Map<PropertyId, PropertyId>>();
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a new JDBC management controller with the given JDBC connection.
-   *
-   * @param connectionFactory  the connection factory
-   */
-  public JDBCManagementController(ConnectionFactory connectionFactory, Map<Resource.Type, String> resourceTables) {
-    this.connectionFactory = connectionFactory;
-    this.resourceTables = resourceTables;
-  }
-
-  // ----- AmbariManagementController ----------------------------------------
-
-  @Override
-  public void createCluster(ClusterRequest request) throws AmbariException {
-//    createResources(Resource.Type.Cluster, request);
-  }
-
-  @Override
-  public TrackActionResponse createConfiguration(ConfigurationRequest request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<ClusterResponse> getClusters(ClusterRequest request) throws AmbariException {
-//    return getResources(Resource.Type.Cluster, request, predicate);
-    return null;
-  }
-
-  @Override
-  public Set<ServiceResponse> getServices(ServiceRequest request) throws AmbariException {
-    return null;
-  }
-
-  @Override
-  public Set<ServiceComponentResponse> getComponents(ServiceComponentRequest request) throws AmbariException {
-    return null;
-  }
-
-  @Override
-  public Set<HostResponse> getHosts(HostRequest request) throws AmbariException {
-    return null;
-  }
-
-  @Override
-  public Set<ServiceComponentHostResponse> getHostComponents(ServiceComponentHostRequest request) throws AmbariException {
-    return null;
-  }
-
-  @Override
-  public Set<ConfigurationResponse> getConfigurations(ConfigurationRequest request) throws AmbariException {
-    return null;
-  }
-
-  @Override
-  public TrackActionResponse updateCluster(ClusterRequest request) throws AmbariException {
-//    updateResources(Resource.Type.Cluster, request, predicate);
-    return null;
-  }
-
-
-  @Override
-  public void deleteCluster(ClusterRequest request) throws AmbariException {
-//    deleteResources(Resource.Type.Cluster, predicate);
-  }
-
-
-  // ----- Helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources defined by the properties in the given request object.
-   *
-   * @param type     the resource type
-   * @param request  the request object which defines the set of properties
-   *                 for the resource to be created
-   */
-  private void createResources(Resource.Type type, Request request) {
-    try {
-      Connection connection = connectionFactory.getConnection();
-
-      try {
-
-        Set<Map<PropertyId, Object>> propertySet = request.getProperties();
-
-        for (Map<PropertyId, Object> properties : propertySet) {
-          String sql = getInsertSQL(resourceTables.get(type), properties);
-
-          Statement statement = connection.createStatement();
-
-          statement.execute(sql);
-        }
-      } finally {
-        connection.close();
-      }
-
-    } catch (SQLException e) {
-      throw new IllegalStateException("DB error : ", e);
-    }
-  }
-
-  /**
-   * Get a set of {@link Resource resources} based on the given request and predicate
-   * information.
-   *
-   * @param type       the resource type
-   * @param request    the request object which defines the desired set of properties
-   * @param predicate  the predicate object which can be used to filter which
-   *                   resources are returned
-   * @return a set of resources based on the given request and predicate information
-   */
-  private Set<Resource> getResources(Resource.Type type, Request request, Predicate predicate) {
-
-    Set<Resource> resources = new HashSet<Resource>();
-    Set<PropertyId> propertyIds = new HashSet<PropertyId>(request.getPropertyIds());
-    if (predicate != null) {
-      propertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-    }
-
-    try {
-      Connection connection = connectionFactory.getConnection();
-
-      try {
-
-        for (String table : getTables(propertyIds)) {
-          getImportedKeys(connection, table);
-        }
-        String sql = getSelectSQL(propertyIds, predicate);
-        Statement statement = connection.createStatement();
-
-        ResultSet rs = statement.executeQuery(sql);
-        ResultSetMetaData metaData = rs.getMetaData();
-        int columnCount = metaData.getColumnCount();
-
-        while (rs.next()) {
-          final ResourceImpl resource = new ResourceImpl(type);
-          for (int i = 1; i <= columnCount; ++i) {
-            PropertyIdImpl propertyId = new PropertyIdImpl(metaData.getColumnName(i), metaData.getTableName(i), false);
-            if (propertyIds.contains(propertyId)) {
-              resource.setProperty(propertyId, rs.getString(i));
-            }
-          }
-          resources.add(resource);
-        }
-
-      } finally {
-        connection.close();
-      }
-
-    } catch (SQLException e) {
-      throw new IllegalStateException("DB error : ", e);
-    }
-
-    return resources;
-  }
-
-  /**
-   * Update the host resources selected by the given predicate with the properties
-   * from the given request object.
-   *
-   * @param type       the resource type
-   * @param request    the request object which defines the set of properties
-   *                   for the resources to be updated
-   * @param predicate  the predicate object which can be used to filter which
-   *                   host resources are updated
-   */
-  private void updateResources(Resource.Type type, Request request, Predicate predicate) {
-    try {
-      Connection connection = connectionFactory.getConnection();
-      try {
-        Set<Map<PropertyId, Object>> propertySet = request.getProperties();
-
-        Map<PropertyId, Object> properties = propertySet.iterator().next();
-
-        String resourceTable = resourceTables.get(type);
-
-        predicate = getPredicate(connection, resourceTable, predicate);
-
-        if (predicate == null) {
-          return;
-        }
-
-        String sql = getUpdateSQL(resourceTable, properties, predicate);
-
-        Statement statement = connection.createStatement();
-
-        statement.execute(sql);
-      } finally {
-        connection.close();
-      }
-
-    } catch (SQLException e) {
-      throw new IllegalStateException("DB error : ", e);
-    }
-  }
-
-  /**
-   * Delete the resources selected by the given predicate.
-   *
-   * @param type      the resource type
-   * @param predicate the predicate object which can be used to filter which
-   *                  resources are deleted
-   */
-  private void deleteResources(Resource.Type type, Predicate predicate) {
-    try {
-      Connection connection = connectionFactory.getConnection();
-      try {
-        String resourceTable = resourceTables.get(type);
-
-        predicate = getPredicate(connection, resourceTable, predicate);
-
-        if (predicate == null) {
-          return;
-        }
-
-        String sql = getDeleteSQL(resourceTable, predicate);
-
-        Statement statement = connection.createStatement();
-        statement.execute(sql);
-      } finally {
-        connection.close();
-      }
-
-    } catch (SQLException e) {
-      throw new IllegalStateException("DB error : ", e);
-    }
-  }
-
-  /**
-   * Lazily populate the imported key mappings for the given table.
-   *
-   * @param connection  the connection to use to obtain the database meta data
-   * @param table       the table
-   *
-   * @throws SQLException thrown if the meta data for the given connection cannot be obtained
-   */
-  private void getImportedKeys(Connection connection, String table) throws SQLException {
-    if (!this.importedKeys.containsKey(table)) {
-
-      Map<PropertyId, PropertyId> importedKeys = new HashMap<PropertyId, PropertyId>();
-      this.importedKeys.put(table, importedKeys);
-
-      DatabaseMetaData metaData = connection.getMetaData();
-
-      ResultSet rs = metaData.getImportedKeys(connection.getCatalog(), null, table);
-
-      while (rs.next()) {
-
-        PropertyId pkPropertyId = PropertyHelper.getPropertyId(
-            rs.getString("PKCOLUMN_NAME"), rs.getString("PKTABLE_NAME"));
-
-        PropertyId fkPropertyId = PropertyHelper.getPropertyId(
-            rs.getString("FKCOLUMN_NAME"), rs.getString("FKTABLE_NAME"));
-
-        importedKeys.put(pkPropertyId, fkPropertyId);
-      }
-    }
-  }
-
-  /**
-   * Lazily populate the primary key mappings for the given table.
-   *
-   * @param connection  the connection to use to obtain the database meta data
-   * @param table       the table
-   *
-   * @throws SQLException thrown if the meta data for the given connection cannot be obtained
-   */
-  private void getPrimaryKeys(Connection connection, String table) throws SQLException {
-
-    if (!this.primaryKeys.containsKey(table)) {
-
-      Set<PropertyId> primaryKeys = new HashSet<PropertyId>();
-      this.primaryKeys.put(table, primaryKeys);
-
-      DatabaseMetaData metaData = connection.getMetaData();
-
-      ResultSet rs = metaData.getPrimaryKeys(connection.getCatalog(), null, table);
-
-      while (rs.next()) {
-
-        PropertyId pkPropertyId = PropertyHelper.getPropertyId(
-            rs.getString("COLUMN_NAME"), rs.getString("TABLE_NAME"));
-
-        primaryKeys.add(pkPropertyId);
-      }
-    }
-  }
-
-  /**
-   * Create a new predicate if the given predicate doesn't work for the given table.  Use the
-   * given predicate and join to the given table to get the primary key values to create a new
-   * predicate. (Could probably do this with INNER JOIN???)
-   *
-   * @param connection  the JDBC connection
-   * @param table       the resource table
-   * @param predicate   the predicate
-   *
-   * @return the new predicate
-   *
-   * @throws SQLException thrown if an exception occurred operating on the given connection
-   */
-  private Predicate getPredicate(Connection connection, String table, Predicate predicate) throws SQLException {
-
-    Set<String> predicateTables = getTables(PredicateHelper.getPropertyIds(predicate));
-
-    if (predicateTables.size() > 1 || !predicateTables.contains(table)) {
-      for (String predicateTable : predicateTables){
-        getImportedKeys(connection, predicateTable);
-      }
-
-      getPrimaryKeys(connection, table);
-      getImportedKeys(connection, table);
-
-      Set<PropertyId>   pkPropertyIds = primaryKeys.get(table);
-      String            sql           = getSelectSQL(pkPropertyIds, predicate);
-      Statement         statement     = connection.createStatement();
-      ResultSet         rs            = statement.executeQuery(sql);
-      ResultSetMetaData metaData      = rs.getMetaData();
-      int               columnCount   = metaData.getColumnCount();
-
-      Set<BasePredicate> predicates = new HashSet<BasePredicate>();
-      while (rs.next()) {
-        for (int i = 1; i <= columnCount; ++i) {
-          PropertyIdImpl propertyId = new PropertyIdImpl(metaData.getColumnName(i), metaData.getTableName(i), false);
-          if (pkPropertyIds.contains(propertyId)) {
-            predicates.add(new EqualsPredicate(propertyId, rs.getString(i)));
-          }
-        }
-      }
-
-      predicate = predicates.size() == 0 ? null : predicates.size() > 1 ?
-          new AndPredicate(predicates.toArray(new BasePredicate[2])) :
-          predicates.iterator().next();
-    }
-    return predicate;
-  }
-
-  /**
-   * Get an insert SQL statement based on the given properties.
-   *
-   * @param table      the table
-   * @param properties  the properties
-   *
-   * @return the insert SQL
-   */
-  private String getInsertSQL(String table, Map<PropertyId, Object> properties) {
-
-    StringBuilder columns = new StringBuilder();
-    StringBuilder values = new StringBuilder();
-
-    for (Map.Entry<PropertyId, Object> entry : properties.entrySet()) {
-      PropertyId propertyId = entry.getKey();
-      Object propertyValue = entry.getValue();
-
-      if (columns.length() > 0) {
-        columns.append(", ");
-      }
-      columns.append(propertyId.getName());
-
-      if (values.length() > 0) {
-        values.append(", ");
-      }
-
-      if (propertyValue instanceof String) {
-        values.append("'");
-        values.append(propertyValue);
-        values.append("'");
-      } else {
-        values.append(propertyValue);
-      }
-    }
-
-    return "insert into " + table + " (" +
-        columns + ") values (" + values + ")";
-  }
-
-  /**
-   * Get a select SQL statement based on the given property ids and predicate.
-   *
-   * @param propertyIds  the property ids
-   * @param predicate    the predicate
-   *
-   * @return the select SQL
-   */
-  private String getSelectSQL(Set<PropertyId> propertyIds, Predicate predicate) {
-
-    StringBuilder columns = new StringBuilder();
-    Set<String> tableSet = new HashSet<String>();
-
-    for (PropertyId propertyId : propertyIds) {
-      if (columns.length() > 0) {
-        columns.append(", ");
-      }
-      columns.append(propertyId.getCategory()).append(".").append(propertyId.getName());
-      tableSet.add(propertyId.getCategory());
-    }
-
-    boolean haveWhereClause = false;
-    StringBuilder whereClause = new StringBuilder();
-    if (predicate != null && predicate instanceof PredicateVisitorAcceptor) {
-
-      SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-      PredicateHelper.visit(predicate, visitor);
-      whereClause.append(visitor.getSQL());
-
-      for (PropertyId propertyId : PredicateHelper.getPropertyIds(predicate)) {
-        tableSet.add(propertyId.getCategory());
-      }
-      haveWhereClause = true;
-    }
-
-    StringBuilder joinClause = new StringBuilder();
-
-    if (tableSet.size() > 1) {
-
-      for (String table : tableSet) {
-        Map<PropertyId, PropertyId> joinKeys = importedKeys.get(table);
-        if (joinKeys != null) {
-          for (Map.Entry<PropertyId, PropertyId> entry : joinKeys.entrySet()) {
-            String category1 = entry.getKey().getCategory();
-            String category2 = entry.getValue().getCategory();
-            if (tableSet.contains(category1) && tableSet.contains(category2)) {
-              if (haveWhereClause) {
-                joinClause.append(" AND ");
-              }
-              joinClause.append(category1).append(".").append(entry.getKey().getName());
-              joinClause.append(" = ");
-              joinClause.append(category2).append(".").append(entry.getValue().getName());
-              tableSet.add(category1);
-              tableSet.add(category2);
-
-              haveWhereClause = true;
-            }
-          }
-        }
-      }
-    }
-
-    StringBuilder tables = new StringBuilder();
-
-    for (String table : tableSet) {
-      if (tables.length() > 0) {
-        tables.append(", ");
-      }
-      tables.append(table);
-    }
-
-    String sql = "select " + columns + " from " + tables;
-
-    if (haveWhereClause) {
-      sql = sql + " where " + whereClause + joinClause;
-    }
-    return sql;
-  }
-
-  /**
-   * Get a delete SQL statement based on the given predicate.
-   *
-   * @param table      the table
-   * @param predicate  the predicate
-   *
-   * @return the delete SQL statement
-   */
-  private String getDeleteSQL(String table, Predicate predicate) {
-
-    StringBuilder whereClause = new StringBuilder();
-    if (predicate instanceof BasePredicate) {
-
-      BasePredicate basePredicate = (BasePredicate) predicate;
-
-      SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-      basePredicate.accept(visitor);
-      whereClause.append(visitor.getSQL());
-
-      return "delete from " + table + " where " + whereClause;
-    }
-    throw new IllegalStateException("Can't generate SQL.");
-  }
-
-  /**
-   * Get an update SQL statement based on the given properties and predicate.
-   *
-   * @param table       the table
-   * @param properties  the properties
-   * @param predicate   the predicate
-   *
-   * @return the update SQL statement
-   */
-  private String getUpdateSQL(String table, Map<PropertyId, Object> properties, Predicate predicate) {
-
-    if (predicate instanceof BasePredicate) {
-
-      StringBuilder whereClause = new StringBuilder();
-
-      BasePredicate basePredicate = (BasePredicate) predicate;
-
-      SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-      basePredicate.accept(visitor);
-      whereClause.append(visitor.getSQL());
-
-      StringBuilder setClause = new StringBuilder();
-      for (Map.Entry<PropertyId, Object> entry : properties.entrySet()) {
-
-        if (setClause.length() > 0) {
-          setClause.append(", ");
-        }
-        setClause.append(entry.getKey().getName());
-        setClause.append(" = ");
-        Object propertyValue = entry.getValue();
-
-        if (propertyValue instanceof String) {
-          setClause.append("'");
-          setClause.append(propertyValue);
-          setClause.append("'");
-        } else {
-          setClause.append(propertyValue);
-        }
-      }
-
-      return "update " + table + " set " + setClause + " where " + whereClause;
-    }
-    throw new IllegalStateException("Can't generate SQL.");
-  }
-
-  /**
-   * Get the set of tables associated with the given property ids.
-   *
-   * @param propertyIds  the property ids
-   *
-   * @return the set of tables
-   */
-  private static Set<String> getTables(Set<PropertyId> propertyIds) {
-    Set<String> tables = new HashSet<String>();
-    for (PropertyId propertyId : propertyIds) {
-      tables.add(propertyId.getCategory());
-    }
-    return tables;
-  }
-
-  @Override
-  public void createServices(Set<ServiceRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-
-  }
-
-  @Override
-  public void createComponents(Set<ServiceComponentRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-
-  }
-
-  @Override
-  public void createHosts(Set<HostRequest> request) throws AmbariException {
-    // TODO Auto-generated method stub
-
-  }
-
-  @Override
-  public void createHostComponents(Set<ServiceComponentHostRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-
-  }
-
-  @Override
-  public TrackActionResponse updateServices(Set<ServiceRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public TrackActionResponse updateComponents(
-      Set<ServiceComponentRequest> request) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public void updateHosts(Set<HostRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-  }
-
-  @Override
-  public TrackActionResponse updateHostComponents(
-      Set<ServiceComponentHostRequest> request) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public TrackActionResponse deleteServices(Set<ServiceRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public TrackActionResponse deleteComponents(
-      Set<ServiceComponentRequest> request) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public void deleteHosts(Set<HostRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-  }
-
-  @Override
-  public TrackActionResponse deleteHostComponents(
-      Set<ServiceComponentHostRequest> request) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public TrackActionResponse createOperations(Set<OperationRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public void getOperations(Set<OperationRequest> request)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-
-  }
-
-  @Override
-  public Set<ClusterResponse> getClusters(Set<ClusterRequest> requests)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<ServiceComponentResponse> getComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<HostResponse> getHosts(Set<HostRequest> requests)
-      throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<ServiceComponentHostResponse> getHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public Set<ConfigurationResponse> getConfigurations(
-      Set<ConfigurationRequest> requests) throws AmbariException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-}
-

+ 5 - 3
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java

@@ -111,7 +111,7 @@ public class HostEntity {
   private String disksInfo = "";
 
   @javax.persistence.Column(name = "disks_info", nullable = false, insertable = true, 
-		  updatable = true, length = 2000)
+		  updatable = true, length = 20000)
   @Basic
   public String getDisksInfo() {
     return disksInfo;
@@ -123,7 +123,8 @@ public class HostEntity {
 
   private String osInfo = "";
 
-  @javax.persistence.Column(name = "os_info", nullable = false, insertable = true, updatable = true)
+  @javax.persistence.Column(name = "os_info", nullable = false, insertable = true, updatable = true,
+      length = 20000)
   @Basic
   public String getOsInfo() {
     return osInfo;
@@ -183,7 +184,8 @@ public class HostEntity {
 
   private String hostAttributes = "";
 
-  @javax.persistence.Column(name = "host_attributes", nullable = false, insertable = true, updatable = true)
+  @javax.persistence.Column(name = "host_attributes", nullable = false, insertable = true, updatable = true,
+      length = 20000)
   @Basic
   public String getHostAttributes() {
     return hostAttributes;

+ 0 - 65
ambari-server/src/main/java/org/apache/ambari/server/state/StackVersion.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class StackVersion {
-  private String stackVersion;
-
-  public StackVersion(String stackVersion) {
-    super();
-    this.stackVersion = stackVersion;
-  }
-
-  /**
-   * @return the stackVersion
-   */
-  public String getStackVersion() {
-    return stackVersion;
-  }
-
-  /**
-   * @param stackVersion the stackVersion to set
-   */
-  public void setStackVersion(String stackVersion) {
-    this.stackVersion = stackVersion;
-  }
-
-  @Override
-  public boolean equals(Object object) {
-    if (!(object instanceof StackVersion)) {
-      return false;
-    }
-    if (this == object) {
-      return true;
-    }
-    StackVersion s = (StackVersion) object;
-    return stackVersion.equals(s.stackVersion);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = stackVersion != null ? stackVersion.hashCode() : 0;
-    return result;
-  }
-
-  public String toString() {
-    return this.stackVersion;
-  }
-
-}

+ 0 - 75
ambari-server/src/main/java/org/apache/ambari/server/state/job/Job.java

@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-
-public interface Job {
-
-  /**
-   * Get the Job ID for the action
-   * @return JobId
-   */
-  public JobId getId();
-
-  // TODO requires some form of JobType to ensure only one running
-  // job per job type
-  // There may be gotchas such as de-commissioning should be allowed to happen
-  // on more than one host at a time
-
-
-  /**
-   * Get Start Time of the job
-   * @return Start time as a unix timestamp
-   */
-  public long getStartTime();
-
-  /**
-   * Get the last update time of the Job when its progress status
-   * was updated
-   * @return Last Update Time as a unix timestamp
-   */
-  public long getLastUpdateTime();
-
-  /**
-   * Time when the Job completed
-   * @return Completion Time as a unix timestamp
-   */
-  public long getCompletionTime();
-
-  /**
-   * Get the current state of the Job
-   * @return JobState
-   */
-  public JobState getState();
-
-  /**
-   * Set the State of the Job
-   * @param state JobState
-   */
-  public void setState(JobState state);
-
-  /**
-   * Send a JobEvent to the Job's StateMachine
-   * @param event JobEvent
-   * @throws InvalidStateTransitionException
-   */
-  public void handleEvent(JobEvent event)
-      throws InvalidStateTransitionException;
-}

+ 0 - 39
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobCompletedEvent.java

@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-public class JobCompletedEvent extends JobEvent {
-
-  private final long completionTime;
-
-  // TODO
-  // need to add job report
-
-  public JobCompletedEvent(JobId jobId, long completionTime) {
-    super(JobEventType.JOB_COMPLETED, jobId);
-    this.completionTime = completionTime;
-  }
-
-  /**
-   * @return the completionTime
-   */
-  public long getCompletionTime() {
-    return completionTime;
-  }
-}

+ 0 - 44
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobEvent.java

@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-import org.apache.ambari.server.state.fsm.event.AbstractEvent;
-
-/**
- * Base class for all events that affect the Job FSM
- */
-public abstract class JobEvent extends AbstractEvent<JobEventType> {
-
-  /**
-   * JobId identifying the job
-   */
-  private final JobId jobId;
-
-  public JobEvent(JobEventType type, JobId jobId) {
-    super(type);
-    this.jobId = jobId;
-  }
-
-  /**
-   * @return the jobId
-   */
-  public JobId getJobId() {
-    return jobId;
-  }
-}

+ 0 - 39
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobFailedEvent.java

@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-public class JobFailedEvent extends JobEvent {
-
-  private final long completionTime;
-
-  // TODO
-  // need to add job report
-
-  public JobFailedEvent(JobId jobId, long completionTime) {
-    super(JobEventType.JOB_FAILED, jobId);
-    this.completionTime = completionTime;
-  }
-
-  /**
-   * @return the completionTime
-   */
-  public long getCompletionTime() {
-    return completionTime;
-  }
-}

+ 0 - 38
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobId.java

@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-// TODO
-public class JobId {
-
-  final long jobId;
-
-  final JobType jobType;
-
-  public JobId(long jobId, JobType jobType) {
-    super();
-    this.jobId = jobId;
-    this.jobType = jobType;
-  }
-
-  public String toString() {
-    return "[ jobId=" + jobId
-        + ", jobType=" + jobType + "]";
-  }
-}

+ 0 - 314
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobImpl.java

@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.fsm.SingleArcTransition;
-import org.apache.ambari.server.state.fsm.StateMachine;
-import org.apache.ambari.server.state.fsm.StateMachineFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class JobImpl implements Job {
-
-  private static final Log LOG = LogFactory.getLog(JobImpl.class);
-
-  private final Lock readLock;
-  private final Lock writeLock;
-
-  private JobId id;
-
-  private long startTime;
-  private long lastUpdateTime;
-  private long completionTime;
-
-  // TODO
-  // need to add job report
-
-  private static final StateMachineFactory
-    <JobImpl, JobState, JobEventType, JobEvent>
-      stateMachineFactory
-        = new StateMachineFactory<JobImpl, JobState,
-          JobEventType, JobEvent>
-            (JobState.INIT)
-
-    // define the state machine of a Job
-
-    .addTransition(JobState.INIT, JobState.IN_PROGRESS,
-        JobEventType.JOB_IN_PROGRESS, new JobProgressUpdateTransition())
-    .addTransition(JobState.INIT, JobState.COMPLETED,
-        JobEventType.JOB_COMPLETED, new JobCompletedTransition())
-    .addTransition(JobState.INIT, JobState.FAILED,
-        JobEventType.JOB_FAILED, new JobFailedTransition())
-    .addTransition(JobState.INIT, JobState.IN_PROGRESS,
-        JobEventType.JOB_IN_PROGRESS, new JobProgressUpdateTransition())
-    .addTransition(JobState.IN_PROGRESS, JobState.IN_PROGRESS,
-        JobEventType.JOB_IN_PROGRESS, new JobProgressUpdateTransition())
-    .addTransition(JobState.IN_PROGRESS, JobState.COMPLETED,
-        JobEventType.JOB_COMPLETED, new JobCompletedTransition())
-    .addTransition(JobState.IN_PROGRESS, JobState.FAILED,
-        JobEventType.JOB_FAILED, new JobFailedTransition())
-    .addTransition(JobState.COMPLETED, JobState.INIT,
-        JobEventType.JOB_INIT, new NewJobTransition())
-    .addTransition(JobState.FAILED, JobState.INIT,
-        JobEventType.JOB_INIT, new NewJobTransition())
-    .installTopology();
-
-  private final StateMachine<JobState, JobEventType, JobEvent>
-      stateMachine;
-
-  public JobImpl(JobId id, long startTime) {
-    super();
-    this.id = id;
-    this.stateMachine = stateMachineFactory.make(this);
-    ReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.readLock = rwLock.readLock();
-    this.writeLock = rwLock.writeLock();
-    this.startTime = startTime;
-    this.lastUpdateTime = -1;
-    this.completionTime = -1;
-  }
-
-  private void reset() {
-    try {
-      writeLock.lock();
-      this.startTime = -1;
-      this.lastUpdateTime = -1;
-      this.completionTime = -1;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  static class NewJobTransition
-     implements SingleArcTransition<JobImpl, JobEvent> {
-
-    @Override
-    public void transition(JobImpl job, JobEvent event) {
-      NewJobEvent e = (NewJobEvent) event;
-      // TODO audit logs
-      job.reset();
-      job.setId(e.getJobId());
-      job.setStartTime(e.getStartTime());
-      LOG.info("Launching a new Job"
-          + ", jobId=" + job.getId()
-          + ", startTime=" + job.getStartTime());
-    }
-  }
-
-  static class JobProgressUpdateTransition
-      implements SingleArcTransition<JobImpl, JobEvent> {
-
-    @Override
-    public void transition(JobImpl job, JobEvent event) {
-      JobProgressUpdateEvent e = (JobProgressUpdateEvent) event;
-      job.setLastUpdateTime(e.getProgressUpdateTime());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Progress update for Job"
-            + ", jobId=" + job.getId()
-            + ", startTime=" + job.getStartTime()
-            + ", lastUpdateTime=" + job.getLastUpdateTime());
-      }
-    }
-  }
-
-  static class JobCompletedTransition
-     implements SingleArcTransition<JobImpl, JobEvent> {
-
-    @Override
-    public void transition(JobImpl job, JobEvent event) {
-      // TODO audit logs
-      JobCompletedEvent e = (JobCompletedEvent) event;
-      job.setCompletionTime(e.getCompletionTime());
-      job.setLastUpdateTime(e.getCompletionTime());
-
-      LOG.info("Job completed successfully"
-          + ", jobId=" + job.getId()
-          + ", startTime=" + job.getStartTime()
-          + ", completionTime=" + job.getCompletionTime());
-    }
-  }
-
-  static class JobFailedTransition
-      implements SingleArcTransition<JobImpl, JobEvent> {
-
-    @Override
-    public void transition(JobImpl job, JobEvent event) {
-      // TODO audit logs
-      JobFailedEvent e = (JobFailedEvent) event;
-      job.setCompletionTime(e.getCompletionTime());
-      job.setLastUpdateTime(e.getCompletionTime());
-      LOG.info("Job failed to complete"
-          + ", jobId=" + job.getId()
-          + ", startTime=" + job.getStartTime()
-          + ", completionTime=" + job.getCompletionTime());
-    }
-  }
-
-
-  @Override
-  public JobState getState() {
-    try {
-      readLock.lock();
-      return stateMachine.getCurrentState();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setState(JobState state) {
-    try {
-      writeLock.lock();
-      stateMachine.setCurrentState(state);
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public void handleEvent(JobEvent event)
-      throws InvalidStateTransitionException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handling Job event, eventType=" + event.getType().name()
-          + ", event=" + event.toString());
-    }
-    JobState oldState = getState();
-    try {
-      writeLock.lock();
-      try {
-        stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitionException e) {
-        LOG.error("Can't handle Job event at current state"
-            + ", jobId=" + this.getId()
-            + ", currentState=" + oldState
-            + ", eventType=" + event.getType()
-            + ", event=" + event);
-        throw e;
-      }
-    }
-    finally {
-      writeLock.unlock();
-    }
-    if (oldState != getState()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Job transitioned to a new state"
-            + ", jobId=" + this.getId()
-            + ", oldState=" + oldState
-            + ", currentState=" + getState()
-            + ", eventType=" + event.getType().name()
-            + ", event=" + event);
-      }
-    }
-  }
-
-  @Override
-  public JobId getId() {
-    try {
-      readLock.lock();
-      return id;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  private void setId(JobId id) {
-    try {
-      writeLock.lock();
-      this.id = id;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getStartTime() {
-    try {
-      readLock.lock();
-      return startTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setStartTime(long startTime) {
-    try {
-      writeLock.lock();
-      this.startTime = startTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getLastUpdateTime() {
-    try {
-      readLock.lock();
-      return lastUpdateTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setLastUpdateTime(long lastUpdateTime) {
-    try {
-      writeLock.lock();
-      this.lastUpdateTime = lastUpdateTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-
-  }
-
-  @Override
-  public long getCompletionTime() {
-    try {
-      readLock.lock();
-      return completionTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setCompletionTime(long completionTime) {
-    try {
-      writeLock.lock();
-      this.completionTime = completionTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-
-}

+ 0 - 37
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobProgressUpdateEvent.java

@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-public class JobProgressUpdateEvent extends JobEvent {
-
-  private final long progressUpdateTime;
-
-  public JobProgressUpdateEvent(JobId jobId, long progressUpdateTime) {
-    super(JobEventType.JOB_IN_PROGRESS, jobId);
-    this.progressUpdateTime = progressUpdateTime;
-  }
-
-  /**
-   * @return the progressUpdateTime
-   */
-  public long getProgressUpdateTime() {
-    return progressUpdateTime;
-  }
-
-}

+ 0 - 39
ambari-server/src/main/java/org/apache/ambari/server/state/job/JobState.java

@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-public enum JobState {
-  /**
-   * Initial state for the Job.
-   * When a new action is triggered or set in motion.
-   */
-  INIT,
-  /**
-   * State when the job is triggered on the cluster,
-   */
-  IN_PROGRESS,
-  /**
-   * State of successful completion
-   */
-  COMPLETED,
-  /**
-   * Job failed to complete successfully
-   */
-  FAILED
-}

+ 0 - 37
ambari-server/src/main/java/org/apache/ambari/server/state/job/NewJobEvent.java

@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-public class NewJobEvent extends JobEvent {
-
-  private final long startTime;
-
-  public NewJobEvent(JobId jobId, long startTime) {
-    super(JobEventType.JOB_INIT, jobId);
-    this.startTime = startTime;
-  }
-
-  /**
-   * @return the start time of the Job
-   */
-  public long getStartTime() {
-    return startTime;
-  }
-
-}

+ 17 - 0
ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java

@@ -184,4 +184,21 @@ public class StageUtils {
     }
     return info;
   }
+
+  public static String getHostsToDecommission(List<String> hosts) {
+    StringBuilder builder = new StringBuilder();
+    builder.append("[");
+    boolean first = true;
+    for (String host : hosts) {
+      if (!first) {
+        builder.append(",");
+      } else {
+        first = false;
+      }
+      builder.append("'");
+      builder.append(host);
+      builder.append("'");
+    }
+    return builder.append("]").toString();
+  }
 }
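
A minimal usage sketch for the new helper (hypothetical, not part of this commit): assuming the ambari-server classes are on the classpath, it shows how the list of hosts to decommission is rendered as a quoted, comma-separated list literal.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.ambari.server.utils.StageUtils;

    public class DecommissionListDemo {
      public static void main(String[] args) {
        // Hypothetical hostnames, for illustration only.
        List<String> hosts = Arrays.asList("dn1.example.com", "dn2.example.com");
        // Prints ['dn1.example.com','dn2.example.com']
        System.out.println(StageUtils.getHostsToDecommission(hosts));
      }
    }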

+ 1 - 1
ambari-server/src/main/resources/ca.config

@@ -1,7 +1,7 @@
 [ ca ]
 default_ca             = CA_CLIENT
 [ CA_CLIENT ]
-dir		       = /var/lib/ambari-server/keys/db
+dir		                 = keystore/db
 certs                  = $dir/certs
 new_certs_dir          = $dir/newcerts
 

+ 0 - 137
ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml

@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers
-    Same property is used by the HMaster for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase master web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase regionserver web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>

+ 0 - 121
ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/PropertyIdImpl.java

@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.PropertyId;
-
-/**
- * Simple PropertyId implementation.
- */
-public class PropertyIdImpl implements PropertyId {
-  /**
-   * The property name.
-   */
-  private String name;
-
-  /**
-   * The category name.
-   */
-  private String category;
-
-  /**
-   * Indicates whether or not this property is temporal.
-   */
-  private boolean temporal;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a property id.  Required for JSON serialization.
-   */
-  public PropertyIdImpl() {
-  }
-
-  /**
-   * Create a property id.
-   *
-   * @param name      the property name.
-   * @param category  the property category.
-   * @param temporal  a temporal indicator
-   */
-  public PropertyIdImpl(String name, String category, boolean temporal) {
-    this.name     = name;
-    this.category = category;
-    this.temporal = temporal;
-  }
-
-
-  // ----- PropertyId --------------------------------------------------------
-
-  public String getName() {
-    return name;
-  }
-
-  public String getCategory() {
-    return category;
-  }
-
-  public boolean isTemporal() {
-    return temporal;
-  }
-
-
-  // ----- Object overrides --------------------------------------------------
-
-  @Override
-  public int hashCode() {
-    return name.hashCode() +
-        (category == null ? 0 : category.hashCode()) +
-        (temporal ? 1 : 0);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-
-    if (this == o) {
-      return true;
-    }
-
-    if (!(o.getClass().equals(PropertyIdImpl.class))) {
-      return false;
-    }
-
-    PropertyIdImpl that = (PropertyIdImpl) o;
-
-    return this.name.equals(that.getName()) &&
-        equals(this.category, that.getCategory()) &&
-        this.isTemporal() == that.isTemporal();
-  }
-
-  @Override
-  public String toString() {
-    return "PropertyId[" + category + ", " + name + "]";
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  private static boolean equals(Object o1, Object o2) {
-    if (o1 == null) {
-      return o2 == null;
-    }
-    return o2 != null && o1.equals(o2);
-  }
-}

+ 0 - 113
ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/ResourceImpl.java

@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.PropertyId;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Simple resources implementation.
- */
-public class ResourceImpl implements Resource {
-
-  /**
-   * The resources type.
-   */
-  private final Type type;
-
-  /**
-   * The map of categories/properties for this resources.
-   */
-  private final Map<String, Map<String, Object>> categories = new HashMap<String, Map<String, Object>>();
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a resources of the given type.
-   *
-   * @param type  the resources type
-   */
-  public ResourceImpl(Type type) {
-    this.type = type;
-  }
-
-
-  // ----- Resource ----------------------------------------------------------
-
-  @Override
-  public Type getType() {
-    return type;
-  }
-
-  @Override
-  public Map<String, Map<String, Object>> getPropertiesMap() {
-    return categories;
-  }
-
-  @Override
-  public TreeNode<Map<String, Object>> getProperties() {
-    return null;
-  }
-
-  @Override
-  public void setProperty(PropertyId id, Object value) {
-    String category = id.getCategory();
-
-    Map<String, Object> properties = categories.get(category);
-
-    if (properties == null) {
-      properties = new HashMap<String, Object>();
-      categories.put(category, properties);
-    }
-
-    properties.put(id.getName(), value);
-  }
-
-  @Override
-  public Object getPropertyValue(PropertyId id) {
-
-    Map<String, Object> properties = categories.get(id.getCategory());
-
-    if (properties != null) {
-      return properties.get(id.getName());
-    }
-    return null;
-  }
-
-
-  // ----- Object overrides --------------------------------------------------
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-
-    sb.append("Resource : ").append(type).append("\n");
-    for (Map.Entry<String, Map<String, Object>> catEntry : categories.entrySet()) {
-      for (Map.Entry<String, Object> propEntry : catEntry.getValue().entrySet()) {
-        sb.append("    ").append(catEntry.getKey()).append(".").append(propEntry.getKey()).append(" : ").append(propEntry.getValue()).append("\n");
-      }
-    }
-    return sb.toString();
-  }
-}

+ 0 - 153
ambari-server/src/test/java/org/apache/ambari/server/state/job/JobTest.java

@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.job;
-
-import org.apache.ambari.server.state.job.Job;
-import org.apache.ambari.server.state.job.JobCompletedEvent;
-import org.apache.ambari.server.state.job.JobEvent;
-import org.apache.ambari.server.state.job.JobFailedEvent;
-import org.apache.ambari.server.state.job.JobId;
-import org.apache.ambari.server.state.job.JobImpl;
-import org.apache.ambari.server.state.job.JobProgressUpdateEvent;
-import org.apache.ambari.server.state.job.JobState;
-import org.apache.ambari.server.state.job.JobType;
-import org.apache.ambari.server.state.job.NewJobEvent;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class JobTest {
-
-  private Job createNewJob(long id, String jobName, long startTime) {
-    JobId jId = new JobId(id, new JobType(jobName));
-    Job job = new JobImpl(jId, startTime);
-    return job;
-  }
-
-  private Job getRunningJob(long id, String jobName, long startTime)
-      throws Exception {
-    Job job = createNewJob(id, jobName, startTime);
-    verifyProgressUpdate(job, ++startTime);
-    return job;
-  }
-
-  private Job getCompletedJob(long id, String jobName, long startTime,
-      boolean failedJob) throws Exception {
-    Job job = getRunningJob(1, "JobNameFoo", startTime);
-    completeJob(job, failedJob, ++startTime);
-    return job;
-  }
-
-  private void verifyNewJob(Job job, long startTime) {
-    Assert.assertEquals(JobState.INIT, job.getState());
-    Assert.assertEquals(startTime, job.getStartTime());
-  }
-
-
-  @Test
-  public void testNewJob() {
-    long currentTime = System.currentTimeMillis();
-    Job job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-  }
-
-  private void verifyProgressUpdate(Job job, long updateTime)
-      throws Exception {
-    JobProgressUpdateEvent e = new JobProgressUpdateEvent(job.getId(),
-        updateTime);
-    job.handleEvent(e);
-    Assert.assertEquals(JobState.IN_PROGRESS, job.getState());
-    Assert.assertEquals(updateTime, job.getLastUpdateTime());
-  }
-
-
-  @Test
-  public void testJobProgressUpdates() throws Exception {
-    long currentTime = 1;
-    Job job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-
-    verifyProgressUpdate(job, ++currentTime);
-    verifyProgressUpdate(job, ++currentTime);
-    verifyProgressUpdate(job, ++currentTime);
-
-  }
-
-  private void completeJob(Job job, boolean failJob, long endTime)
-      throws Exception {
-    JobEvent e = null;
-    JobState endState = null;
-    if (failJob) {
-      e = new JobFailedEvent(job.getId(), endTime);
-      endState = JobState.FAILED;
-    } else {
-      e = new JobCompletedEvent(job.getId(), endTime);
-      endState = JobState.COMPLETED;
-    }
-    job.handleEvent(e);
-    Assert.assertEquals(endState, job.getState());
-    Assert.assertEquals(endTime, job.getLastUpdateTime());
-    Assert.assertEquals(endTime, job.getCompletionTime());
-  }
-
-
-  @Test
-  public void testJobSuccessfulCompletion() throws Exception {
-    long currentTime = 1;
-    Job job = getRunningJob(1, "JobNameFoo", currentTime);
-    completeJob(job, false, ++currentTime);
-  }
-
-  @Test
-  public void testJobFailedCompletion() throws Exception {
-    long currentTime = 1;
-    Job job = getRunningJob(1, "JobNameFoo", currentTime);
-    completeJob(job, true, ++currentTime);
-  }
-
-  @Test
-  public void completeNewJob() throws Exception {
-    long currentTime = 1;
-    Job job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-    completeJob(job, false, ++currentTime);
-  }
-
-  @Test
-  public void failNewJob() throws Exception {
-    long currentTime = 1;
-    Job job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-    completeJob(job, true, ++currentTime);
-  }
-
-  @Test
-  public void reInitCompletedJob() throws Exception {
-    Job job = getCompletedJob(1, "JobNameFoo", 1, false);
-    JobId jId = new JobId(2, new JobType("JobNameFoo"));
-    NewJobEvent e = new NewJobEvent(jId, 100);
-    job.handleEvent(e);
-    Assert.assertEquals(JobState.INIT, job.getState());
-    Assert.assertEquals(100, job.getStartTime());
-    Assert.assertEquals(-1, job.getLastUpdateTime());
-    Assert.assertEquals(-1, job.getCompletionTime());
-    Assert.assertEquals(2, job.getId().jobId);
-  }
-
-
-}

+ 1 - 0
contrib/addons/package/rpm/.gitignore

@@ -0,0 +1 @@
+build

+ 84 - 0
contrib/addons/package/rpm/create_ganglia_addon_rpm.sh

@@ -0,0 +1,84 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+CUR_DIR=`pwd`
+
+BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
+
+if [[ -z "${BUILD_DIR}" ]]; then
+  BUILD_DIR="${BASEDIR}/build/"
+fi
+
+if [[ -z "${VERSION}" ]]; then
+  VERSION="0.0.2.15"
+fi
+
+if [[ -z "${RELEASE}" ]]; then
+  RELEASE="1"
+fi
+
+#rm -rf ${BUILD_DIR}/*
+
+PKG_NAME="hdp_mon_ganglia_addons"
+
+MON_TAR_DIR="${BUILD_DIR}/${PKG_NAME}-$VERSION/"
+
+mkdir -p "${MON_TAR_DIR}"
+cp -r ${BASEDIR}/../../src/addOns/ganglia/* ${MON_TAR_DIR}
+
+TAR_DEST="${BUILD_DIR}/${PKG_NAME}-$VERSION.tar.gz"
+
+cd ${BUILD_DIR};
+tar -zcf "${TAR_DEST}" "${PKG_NAME}-$VERSION/"
+
+RPM_BUILDDIR=${BUILD_DIR}/rpmbuild/
+
+mkdir -p ${RPM_BUILDDIR}
+mkdir -p ${RPM_BUILDDIR}/SOURCES/
+mkdir -p ${RPM_BUILDDIR}/SPECS/
+mkdir -p ${RPM_BUILDDIR}/BUILD/
+mkdir -p ${RPM_BUILDDIR}/RPMS/
+mkdir -p ${RPM_BUILDDIR}/SRPMS/
+
+cp -f ${BASEDIR}/${PKG_NAME}.spec ${RPM_BUILDDIR}/SPECS/
+cp -f ${TAR_DEST} ${RPM_BUILDDIR}/SOURCES/
+
+cd ${RPM_BUILDDIR}
+
+cmd="rpmbuild --define \"_topdir ${RPM_BUILDDIR}\" \
+    -bb ${RPM_BUILDDIR}/SPECS/${PKG_NAME}.spec"
+
+echo $cmd
+eval $cmd
+ret=$?
+if [[ "$ret" != "0" ]]; then
+  echo "Error: rpmbuild failed, error=$ret"
+  exit 1
+fi
+
+cd ${CUR_DIR}
+
+RPM_DEST="${RPM_BUILDDIR}/RPMS/noarch/${PKG_NAME}-$VERSION-$RELEASE.noarch.rpm"
+if [[ ! -f "${RPM_DEST}" ]]; then
+  echo "Error: ${RPM_DEST} does not exist"
+  exit 1
+fi
+
+exit 0

+ 84 - 0
contrib/addons/package/rpm/create_nagios_addon_rpm.sh

@@ -0,0 +1,84 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+CUR_DIR=`pwd`
+
+BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
+
+if [[ -z "${BUILD_DIR}" ]]; then
+  BUILD_DIR="${BASEDIR}/build/"
+fi
+
+if [[ -z "${VERSION}" ]]; then
+  VERSION="0.0.2.15"
+fi
+
+if [[ -z "${RELEASE}" ]]; then
+  RELEASE="1"
+fi
+
+#rm -rf ${BUILD_DIR}/*
+
+PKG_NAME="hdp_mon_nagios_addons"
+
+MON_TAR_DIR="${BUILD_DIR}/${PKG_NAME}-$VERSION/"
+
+mkdir -p "${MON_TAR_DIR}"
+cp -r ${BASEDIR}/../../src/addOns/nagios/* ${MON_TAR_DIR}
+
+TAR_DEST="${BUILD_DIR}/${PKG_NAME}-$VERSION.tar.gz"
+
+cd ${BUILD_DIR};
+tar -zcf "${TAR_DEST}" "${PKG_NAME}-$VERSION/"
+
+RPM_BUILDDIR=${BUILD_DIR}/rpmbuild/
+
+mkdir -p ${RPM_BUILDDIR}
+mkdir -p ${RPM_BUILDDIR}/SOURCES/
+mkdir -p ${RPM_BUILDDIR}/BUILD/
+mkdir -p ${RPM_BUILDDIR}/SPECS/
+mkdir -p ${RPM_BUILDDIR}/RPMS/
+mkdir -p ${RPM_BUILDDIR}/SRPMS/
+
+cp -f ${BASEDIR}/${PKG_NAME}.spec ${RPM_BUILDDIR}/SPECS/
+cp -f ${TAR_DEST} ${RPM_BUILDDIR}/SOURCES/
+
+cd ${RPM_BUILDDIR}
+
+cmd="rpmbuild --define \"_topdir ${RPM_BUILDDIR}\" \
+  -bb ${RPM_BUILDDIR}/SPECS/${PKG_NAME}.spec"
+
+echo $cmd
+eval $cmd
+ret=$?
+if [[ "$ret" != "0" ]]; then
+  echo "Error: rpmbuild failed, error=$ret"
+  exit 1
+fi
+
+cd ${CUR_DIR}
+
+RPM_DEST="${RPM_BUILDDIR}/RPMS/noarch/${PKG_NAME}-$VERSION-$RELEASE.noarch.rpm"
+if [[ ! -f "${RPM_DEST}" ]]; then
+  echo "Error: ${RPM_DEST} does not exist"
+  exit 1
+fi
+
+exit 0

+ 75 - 0
contrib/addons/package/rpm/hdp_mon_ganglia_addons.spec

@@ -0,0 +1,75 @@
+##
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#
+# RPM Spec file for Ganglia Add-ons for HDP Monitoring Dashboard
+#
+
+Summary: Ganglia Add-ons for HDP Monitoring Dashboard
+Name: hdp_mon_ganglia_addons
+Version: 0.0.2.15
+URL: http://hortonworks.com
+Release: 1
+License: Apache License, Version 2.0
+Vendor: Hortonworks <ambari-group@hortonworks.com>
+Group: System Environment/Base
+Source: %{name}-%{version}.tar.gz
+Buildroot: %{_tmppath}/%{name}-%{version}-buildroot
+Requires: gweb >= 2.2
+
+%if 0%{?suse_version}
+%define graphd_dir /srv/www/htdocs/ganglia/graph.d/
+%else
+%define graphd_dir /var/www/html/ganglia/graph.d/
+%endif
+%define gconf_dir /var/lib/ganglia/conf/
+
+BuildArchitectures: noarch
+
+%description
+This package provides add-on graphs and configurations for ganglia to provide
+better monitoring integration with a Hadoop Cluster
+
+%prep
+%setup -q -n %{name}-%{version}
+%build
+
+%install
+# Flush any old RPM build root
+%__rm -rf $RPM_BUILD_ROOT
+
+%__mkdir -p $RPM_BUILD_ROOT/%{graphd_dir}/
+%__mkdir -p $RPM_BUILD_ROOT/%{gconf_dir}/
+
+%__cp -rf conf/* $RPM_BUILD_ROOT/%{gconf_dir}/
+%__cp -rf graph.d/* $RPM_BUILD_ROOT/%{graphd_dir}/
+
+
+%files
+%defattr(-,root,root)
+%{graphd_dir}/*
+%{gconf_dir}/*
+
+%clean
+%__rm -rf $RPM_BUILD_ROOT
+
+%changelog
+* Thu Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
+- Initial version

+ 82 - 0
contrib/addons/package/rpm/hdp_mon_nagios_addons.spec

@@ -0,0 +1,82 @@
+##
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#
+# RPM Spec file for Nagios Add-ons for HDP Monitoring Dashboard
+#
+
+Summary: Nagios Add-ons for HDP Monitoring Dashboard
+Name: hdp_mon_nagios_addons
+Version: 0.0.2.15
+URL: http://hortonworks.com
+Release: 1
+License: Apache License, Version 2.0
+Vendor: Hortonworks <ambari-group@hortonworks.com>
+Group: System Environment/Base
+Source: %{name}-%{version}.tar.gz
+Buildroot: %{_tmppath}/%{name}-%{version}-buildroot
+Requires: nagios, nagios-plugins, php >= 5
+%define nagioshdpscripts_dir %{_prefix}/share/hdp/nagios
+%define nagiosplugin_dir %{_libdir}/nagios/plugins
+%if 0%{?suse_version}
+%define httpd_confdir %{_sysconfdir}/apache2/conf.d
+%else
+%define httpd_confdir %{_sysconfdir}/httpd/conf.d
+%endif
+BuildArchitectures: noarch
+
+%description
+This package provides add-on helper scripts and plugins for nagios for
+monitoring a Hadoop Cluster
+
+%prep
+%setup -q -n %{name}-%{version}
+%build
+
+%install
+# Flush any old RPM build root
+%__rm -rf $RPM_BUILD_ROOT
+
+%__mkdir -p $RPM_BUILD_ROOT/%{nagioshdpscripts_dir}/
+%__mkdir -p $RPM_BUILD_ROOT/%{nagiosplugin_dir}/
+%__mkdir -p $RPM_BUILD_ROOT/%{httpd_confdir}/
+
+%__cp -rf scripts/* $RPM_BUILD_ROOT/%{nagioshdpscripts_dir}/
+%__cp -rf plugins/* $RPM_BUILD_ROOT/%{nagiosplugin_dir}/
+echo "Alias /hdp %{_prefix}/share/hdp" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "<Directory /usr/share/hdp>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Options None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  AllowOverride None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Order allow,deny" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Allow from all" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "</Directory>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+
+%files
+%defattr(-,root,root)
+%{nagioshdpscripts_dir}/*
+%attr(0755,root,root)%{nagiosplugin_dir}/*
+%{httpd_confdir}/hdp_mon_nagios_addons.conf
+
+%clean
+%__rm -rf $RPM_BUILD_ROOT
+
+%changelog
+* Thu Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
+- Initial version

+ 0 - 0
contrib/addons/src/.gitignore


+ 4 - 0
contrib/addons/src/addOns/ganglia/conf/cluster_HDPJobTracker.json

@@ -0,0 +1,4 @@
+{
+  "included_reports": 
+    ["hdp_mon_jobtracker_map_slot_report","hdp_mon_jobtracker_reduce_slot_report","hdp_mon_jobtracker_mapreduce_report","hdp_mon_rpc_latency_report","hdp_mon_jvm_gc_report"]
+}

+ 3 - 0
contrib/addons/src/addOns/ganglia/conf/cluster_HDPNameNode.json

@@ -0,0 +1,3 @@
+{
+  "included_reports": ["hdp_mon_hdfs_ops_report","hdp_mon_rpc_latency_report","hdp_mon_jvm_gc_report","hdp_mon_jvm_threads_report"]
+}

+ 4 - 0
contrib/addons/src/addOns/ganglia/conf/cluster_HDPSlaves.json

@@ -0,0 +1,4 @@
+{
+	"included_reports": 
+    ["hdp_mon_hdfs_io_report","hdp_mon_tasktracker_task_report","hdp_mon_tasktracker_mapreduce_report"]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_disk_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "disk_report",
+   "report_type" : "standard",
+   "title" : "Disk Report",
+   "vertical_label" : "GB",
+   "series" : [
+      { "metric": "disk_total", "color": "ffea00", "label": "Total Disk Space", "line_width": "2", "type": "line" },
+      { "metric": "disk_free", "color": "3333bb", "label": "Disk Space Available", "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_hlog_report",
+   "report_type" : "standard",
+   "title" : "Avg Time in HLog file split",
+   "vertical_label" : "Milliseconds",
+   "series" : [
+      { "metric": "hbase.master.splitTime_avg_time", "color": "ff0000", "label": "Average Time", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_split_size_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hlog_split_size_report",
+   "report_type" : "standard",
+   "title" : "Avg HLog split file size",
+   "vertical_label" : "Bytes",
+   "series" : [
+      { "metric": "hbase.master.splitSize_avg_time", "color": "ff0000", "label": "Avg Split Size", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_master_cluster_requests_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_master_cluster_requests_report",
+   "report_type" : "standard",
+   "title" : "Cluster Requests",
+   "vertical_label" : "Request count",
+   "series" : [
+      { "metric": "hbase.master.cluster_requests", "color": "ff0000", "label": "Cluster Requests", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_compaction_queue_size_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_compaction_queue_size_report",
+   "report_type" : "standard",
+   "title" : "Total Compaction Queue Size",
+   "vertical_label" : "Queue Size",
+   "series" : [
+      { "metric": "hbase.regionserver.compactionQueueSize", "color": "ff0000", "label": "Compaction Queue Size", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_flush_queue_size_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_flush_queue_size_report",
+   "report_type" : "standard",
+   "title" : "Total flush Queue size",
+   "vertical_label" : "Queue Size",
+   "series" : [
+      { "metric": "hbase.regionserver.flushQueueSize", "color": "ff0000", "label": "flushQueueSize", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_read_latency_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_fs_read_latency_report",
+   "report_type" : "standard",
+   "title" : "Region Server FS Read Latency",
+   "vertical_label" : "Milliseconds",
+   "series" : [
+      { "metric": "hbase.regionserver.fsReadLatency_avg_time", "color": "ff0000", "label": "Read Latency", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_write_latency_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_fs_write_latency_report",
+   "report_type" : "standard",
+   "title" : "Region Server FS Write Latency",
+   "vertical_label" : "Milliseconds",
+   "series" : [
+      { "metric": "hbase.regionserver.fsWriteLatency_avg_time", "color": "ff0000", "label": "Write Latency", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_read_requests_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_read_requests_report",
+   "report_type" : "standard",
+   "title" : "Region Server Read Requests",
+   "vertical_label" : "Request count",
+   "series" : [
+      { "metric": "hbase.regionserver.readRequestsCount", "color": "ff0000", "label": "Read Requests", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_regions_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_regions_report",
+   "report_type" : "standard",
+   "title" : "Total Cluster Regions",
+   "vertical_label" : "Region count",
+   "series" : [
+      { "metric": "hbase.regionserver.regions", "color": "ff0000", "label": "Regions", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_write_requests_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hbase_regionserver_write_requests_report",
+   "report_type" : "standard",
+   "title" : "Region Server Write Requests",
+   "vertical_label" : "Request count",
+   "series" : [
+      { "metric": "hbase.regionserver.writeRequestsCount", "color": "ff0000", "label": "Write Requests", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_capacity_remaining_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hdfs_capacity_remaining_report",
+   "report_type" : "standard",
+   "title" : "HDFS Capacity Remaining",
+   "vertical_label" : "GB",
+   "series" : [
+      { "metric": "dfs.FSNamesystem.CapacityRemainingGB", "color": "ff0000", "label": "Capacity Remaining", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hdfs_io_report",
+   "report_type" : "standard",
+   "title" : "HDFS I/O",
+   "vertical_label" : "Bytes/Sec",
+   "series" : [
+      { "metric": "dfs.datanode.bytes_written", "color": "ff0000", "label": "Bytes Written/Sec", "line_width": "2", "type": "line" },
+      { "metric": "dfs.datanode.bytes_read", "color": "0000ff", "label": "Bytes Read/Sec", "line_width": "2", "type": "line" }
+   ]
+}

+ 14 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_ops_report.json

@@ -0,0 +1,14 @@
+{
+   "report_name" : "hdfs_ops_report",
+   "report_type" : "standard",
+   "title" : "HDFS Operations",
+   "vertical_label" : "Operations/Sec",
+   "series" : [
+      { "metric": "dfs.namenode.CreateFileOps", "color": "00ff00", "label": "File Creation", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "dfs.namenode.DeleteFileOps", "color": "ff0000", "label": "File Deletion", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "dfs.namenode.FileInfoOps", "color": "0000ff", "label": "File Info", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_pending_replication_blocks_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hdfs_pending_replication_blocks_report",
+   "report_type" : "standard",
+   "title" : "HDFS Blocks Pending Replication",
+   "vertical_label" : "Block count",
+   "series" : [
+      { "metric": "dfs.FSNamesystem.PendingReplicationBlocks", "color": "ff0000", "label": "Blocks Pending Replication", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_under_replicated_blocks_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "hdfs_under_replicated_blocks_report",
+   "report_type" : "standard",
+   "title" : "HDFS Under-Replicated Blocks",
+   "vertical_label" : "Block Count",
+   "series" : [
+      { "metric": "dfs.FSNamesystem.UnderReplicatedBlocks", "color": "ff0000", "label": "Under-Replicated Blocks", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_heartbeats_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jobtracker_heartbeats_report",
+   "report_type" : "standard",
+   "title" : "JobTracker Heartbeats",
+   "vertical_label" : "Heartbeats/Sec",
+   "series" : [
+      { "metric": "mapred.jobtracker.heartbeats", "color": "ff0000", "label": "Heartbeats/Sec", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_completed_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jobtracker_jobs_completed_report",
+   "report_type" : "standard",
+   "title" : "Jobs Completion rate",
+   "vertical_label" : "Jobs/Sec",
+   "series" : [
+      { "metric": "mapred.jobtracker.jobs_completed", "color": "ff0000", "label": "Jobs Completed", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_failed_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jobtracker_jobs_failed_report",
+   "report_type" : "standard",
+   "title" : "Jobs Failure rate",
+   "vertical_label" : "Jobs/Sec",
+   "series" : [
+      { "metric": "mapred.jobtracker.jobs_failed", "color": "ff0000", "label": "Failed Jobs/Sec", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_running_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jobtracker_jobs_running_report",
+   "report_type" : "standard",
+   "title" : "Jobs Running",
+   "vertical_label" : "Number Of Jobs",
+   "series" : [
+      { "metric": "mapred.jobtracker.jobs_running", "color": "ff0000", "label": "Jobs Running", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_submitted_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jobtracker_jobs_submitted_report",
+   "report_type" : "standard",
+   "title" : "Jobs Submission rate",
+   "vertical_label" : "Jobs/Sec",
+   "series" : [
+      { "metric": "mapred.jobtracker.jobs_submitted", "color": "ff0000", "label": "Submitted Jobs/Sec", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 14 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_map_slot_report.json

@@ -0,0 +1,14 @@
+{
+   "report_name" : "jobtracker_map_slot_report",
+   "report_type" : "standard",
+   "title" : "Map Slot Utilization",
+   "vertical_label" : "Slots",
+   "series" : [
+      { "metric": "mapred.jobtracker.map_slots", "color": "ff0000", "label": "Total", 
+        "line_width": "2", "type": "line" },
+      { "metric": "mapred.jobtracker.occupied_map_slots", "color": "ff6ca9", "label": "Occupied",
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.jobtracker.reserved_map_slots", "color": "ff6600", "label": "Reserved",
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 12 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_mapreduce_report.json

@@ -0,0 +1,12 @@
+{
+   "report_name" : "jobtracker_mapreduce_report",
+   "report_type" : "standard",
+   "title" : "Waiting Map/Reduce tasks",
+   "vertical_label" : "Tasks",
+   "series" : [
+      { "metric": "mapred.jobtracker.waiting_maps", "color": "ff0000", "label": "Waiting Maps", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.jobtracker.waiting_reduces", "color": "0000ff", "label": "Waiting Reduces", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 14 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_reduce_slot_report.json

@@ -0,0 +1,14 @@
+{
+   "report_name" : "jobtracker_reduce_slot_report",
+   "report_type" : "standard",
+   "title" : "Reduce Slot Utilization",
+   "vertical_label" : "Slots",
+   "series" : [
+      { "metric": "mapred.jobtracker.reduce_slots", "color": "0000ff", "label": "Total", 
+        "line_width": "2", "type": "line" },
+      { "metric": "mapred.jobtracker.occupied_reduce_slots", "color": "06f7ff", "label": "Occupied", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.jobtracker.reserved_reduce_slots", "color": "009999", "label": "Reserved",
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jvm_gc_report",
+   "report_type" : "standard",
+   "title" : "Time spent in Garbage Collection",
+   "vertical_label" : "Milliseconds",
+   "series" : [
+      { "metric": "jvm.metrics.gcTimeMillis", "color": "ff0000", "label": "Time Spent", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "jvm_mem_heap_used_report",
+   "report_type" : "standard",
+   "title" : "JVM Heap Memory Used",
+   "vertical_label" : "MB",
+   "series" : [
+      { "metric": "jvm.metrics.memHeapUsedM", "color": "ff0000", "label": "Heap Memory Used", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 16 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json

@@ -0,0 +1,16 @@
+{
+   "report_name" : "jvm_threads_report",
+   "report_type" : "standard",
+   "title" : "JVM Threads Status",
+   "vertical_label" : "Number Of Threads",
+   "series" : [
+      { "metric": "jvm.metrics.threadsBlocked", "color": "ff0000", "label": "Blocked", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "jvm.metrics.threadsWaiting", "color": "ff6600", "label": "Waiting",
+        "line_width": "2", "type": "stack" },
+      { "metric": "jvm.metrics.threadsTimedWaiting", "color": "ffff00", "label": "Timed Waiting",
+        "line_width": "2", "type": "stack" },
+      { "metric": "jvm.metrics.threadsRunnable", "color": "00ff00", "label": "Runnable", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 12 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json

@@ -0,0 +1,12 @@
+{
+   "report_name" : "rpc_latency_report",
+   "report_type" : "standard",
+   "title" : "Average RPC Latencies",
+   "vertical_label" : "Seconds",
+   "series" : [
+      { "metric": "rpc.rpc.RpcProcessingTime_avg_time", "color": "0000ff", "label": "Average Processing Time", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Average Queue Time", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "rpc_queue_time_avg_time_report",
+   "report_type" : "standard",
+   "title" : "Average RPC Wait Time",
+   "vertical_label" : "Seconds",
+   "series" : [
+      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Avg RPC Wait Time", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "rpc_queue_time_num_ops_report",
+   "report_type" : "standard",
+   "title" : "Average RPC Operations",
+   "vertical_label" : "Operations/Sec",
+   "series" : [
+      { "metric": "rpc.rpc.RpcQueueTime_num_ops", "color": "ff0000", "label": "Avg RPC Ops", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 10 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json

@@ -0,0 +1,10 @@
+{
+   "report_name" : "rpcdetailed_heartbeat_num_ops_report",
+   "report_type" : "standard",
+   "title" : "Heartbeats",
+   "vertical_label" : "Heartbeats/Sec",
+   "series" : [
+      { "metric": "rpcdetailed.rpcdetailed.sendHeartbeat_num_ops", "color": "ff0000", "label": "Heartbeats/Sec", 
+        "line_width": "2", "type": "line" }
+   ]
+}

+ 12 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_mapreduce_report.json

@@ -0,0 +1,12 @@
+{
+   "report_name" : "tasktracker_mapreduce_report",
+   "report_type" : "standard",
+   "title" : "Running Maps and Reduces",
+   "vertical_label" : "Number of Maps/Reduces",
+   "series" : [
+      { "metric": "mapred.tasktracker.maps_running", "color": "ff0000", "label": "Running Maps", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.tasktracker.reduces_running", "color": "0000ff", "label": "Running Reduces", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 14 - 0
contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_task_report.json

@@ -0,0 +1,14 @@
+{
+   "report_name" : "tasktracker_task_report",
+   "report_type" : "standard",
+   "title" : "Task Status",
+   "vertical_label" : "Number Of Tasks",
+   "series" : [
+      { "metric": "mapred.tasktracker.tasks_completed", "color": "00ff00", "label": "Completed", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.tasktracker.tasks_failed_timeout", "color": "ffff00", "label": "Failed Timeout", 
+        "line_width": "2", "type": "stack" },
+      { "metric": "mapred.tasktracker.tasks_failed_ping", "color": "ff0000", "label": "Failed Ping", 
+        "line_width": "2", "type": "stack" }
+   ]
+}

+ 195 - 0
contrib/addons/src/addOns/nagios/plugins/check_aggregate.php

@@ -0,0 +1,195 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  } 
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+  
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions 
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      } 
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        } 
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break; 
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break; 
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break; 
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break; 
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+?>
+

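For illustration, a hypothetical invocation of the aggregate plugin based on its usage() string; the status-file path, service description, and thresholds are examples only:

  # warn if >=10% and go critical if >=30% of these checks are at state 2 (CRITICAL) or worse
  php check_aggregate.php -f /var/nagios/status.dat -t service \
      -n "DATANODE::Process down" -s 2 -w 10% -c 30%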
+ 91 - 0
contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh

@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+user=""
+secure="false"
+keytab=""
+while getopts ":u:k:s" opt; do
+  case $opt in
+    u)
+      user=$OPTARG;
+      ;;
+    k)
+      keytab=$OPTARG;
+      ;;
+    s)
+      secure="true";
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 3
+      ;;
+    :)
+      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
+      exit 3
+      ;;
+  esac
+done
+
+outfile="/tmp/nagios-hadoop-check.out"
+curtime=`date +"%F-%H-%M-%S"`
+fname="nagios-hadoop-check-${curtime}"
+
+if [[ "$user" == "" ]]; then
+  echo "INVALID: user argument not specified";
+  exit 3;
+fi
+if [[ "$keytab" == "" ]]; then 
+  keytab="/homes/$user/$user.headless.keytab"
+fi
+
+if [[ "$secure" == "true" ]]; then
+  sudo -u $user -i "/usr/kerberos/bin/kinit -kt $keytab $user" > ${outfile} 2>&1
+fi
+
+sudo -u $user -i "hadoop dfs -copyFromLocal /etc/passwd ${fname}.input " > ${outfile} 2>&1
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: Error copying file to HDFS. See error output in ${outfile} on nagios server";
+  exit 2; 
+fi
+sudo -u $user -i "hadoop dfs -ls" > ${outfile} 2>&1
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: Error listing HDFS files. See error output in ${outfile} on nagios server";
+  exit 2; 
+fi
+sudo -u $user -i "hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount ${fname}.input ${fname}.out" >> ${outfile} 2>&1
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: Error running M/R job. See error output in ${outfile} on nagios server";
+  exit 2; 
+fi
+sudo -u $user -i "hadoop fs -rmr -skipTrash ${fname}.out" >> ${outfile} 2>&1
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: Error removing M/R job output. See error output in ${outfile} on nagios server";
+  exit 2; 
+fi
+sudo -u $user -i "hadoop fs -rm -skipTrash ${fname}.input" >> ${outfile} 2>&1
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: Error removing M/R job input. See error output in ${outfile} on nagios server";
+  exit 2; 
+fi
+
+echo "OK: M/R WordCount Job ran successfully"
+exit 0;

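A sketch of how the HDFS/MapReduce smoke-test script might be run; the user and keytab path are examples, and -s enables the kinit step for secure clusters:

  # non-secure cluster; the keytab path defaults to /homes/<user>/<user>.headless.keytab
  ./check_hadoop.sh -u hdfs
  # secure cluster with an explicit keytab (hypothetical path)
  ./check_hadoop.sh -s -u hdfs -k /homes/hdfs/hdfs.headless.keytab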
+ 86 - 0
contrib/addons/src/addOns/nagios/plugins/check_hbase.sh

@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+user=""
+secure="false"
+keytab=""
+while getopts ":u:k:s" opt; do
+  case $opt in
+    u)
+      user=$OPTARG;
+      ;;
+    k)
+      keytab=$OPTARG;
+      ;;
+    s)
+      secure="true";
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 3
+      ;;
+    :)
+      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
+      exit 3
+      ;;
+  esac
+done
+
+outfile="/tmp/nagios-hbase-check.out"
+curtime=`date +"%F-%H-%M-%S"`
+fname="nagios-hbase-check-${curtime}"
+
+if [[ "$user" == "" ]]; then
+  echo "INVALID: user argument not specified";
+  exit 3;
+fi
+if [[ "$keytab" == "" ]]; then 
+  keytab="/homes/$user/$user.headless.keytab"
+fi
+
+if [[ "$secure" == "true" ]]; then
+  sudo -u $user -i "/usr/kerberos/bin/kinit -kt $keytab $user" > ${outfile} 2>&1
+fi
+
+output=`sudo -u $user -i "echo status | /usr/bin/hbase --config /etc/hbase shell"`
+(IFS='')
+tmpOutput=$(echo $output | grep -v '0 servers')
+if [[ "$?" -ne "0" ]]; then 
+  echo "CRITICAL: No region servers are running";
+  exit 2; 
+fi
+sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+sudo -u $user -i "echo create \'nagios_test_table\', \'family\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+sudo -u $user -i "echo put \'nagios_test_table\', \'row01\', \'family:col01\', \'value1\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+output=`sudo -u $user -i "echo scan \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell"`
+(IFS='')
+tmpOutput=$(echo $output | grep -v '1 row(s) in')
+if [[ "$?" -ne "1" ]]; then 
+  echo "CRITICAL: Error populating HBase table";
+  exit 2; 
+fi
+sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
+
+echo "OK: HBase transaction completed successfully"
+exit 0;

+ 72 - 0
contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php

@@ -0,0 +1,72 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks whether the corrupt or missing blocks % is > threshold
+ * check_jmx -h hostaddress -p port -w 1% -c 1%
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics");
+  $json_array = json_decode($json_string, true);
+  $m_percent = 0;
+  $c_percent = 0;
+  $object = $json_array['beans'][0];
+  $missing_blocks = $object['MissingBlocks'];
+  $corrupt_blocks = $object['CorruptBlocks'];
+  $total_blocks = $object['BlocksTotal'];
+  if($total_blocks == 0) {
+    $m_percent = 0;
+    $c_percent = 0;
+  } else {
+    $m_percent = ($missing_blocks/$total_blocks)*100;
+    $c_percent = ($corrupt_blocks/$total_blocks)*100;
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks . 
+             ">, missing_blocks:<" . $missing_blocks . 
+             ">, total_blocks:<" . $total_blocks . ">";
+  
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>

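A hypothetical run of the blocks plugin against a NameNode JMX endpoint; the host name is an example, and 50070 is the default NameNode HTTP port used elsewhere in this commit:

  php check_hdfs_blocks.php -h nn.example.com -p 50070 -w 1% -c 1%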
+ 68 - 0
contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php

@@ -0,0 +1,68 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks whether the % HDFS capacity used is >= the warn and critical limits.
+ * check_jmx -h hostaddress -p port -w 1 -c 1
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState");
+  $json_array = json_decode($json_string, true);
+  $percent = 0;
+  $object = $json_array['beans'][0];
+  $CapacityUsed = $object['CapacityUsed'];
+  $CapacityRemaining = $object['CapacityRemaining'];
+  $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+  if($CapacityTotal == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($CapacityUsed/$CapacityTotal)*100;
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) . 
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+  
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>

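Likewise, a hypothetical capacity check against the same JMX endpoint, warning at 80% used and going critical at 90% (host, port, and thresholds are examples):

  php check_hdfs_capacity.php -h nn.example.com -p 50070 -w 80 -c 90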
+ 32 - 0
contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh

@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# The URI is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing hive-metaserver status [$out]";
+  exit 2;
+fi
+echo "OK: Hive metaserver status OK";
+exit 0;

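The metastore check takes the host and Thrift port as positional arguments; 9083 below is only an assumed example port, not something this commit configures:

  ./check_hive_metastore_status.sh metastore.example.com 9083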
+ 59 - 0
contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php

@@ -0,0 +1,59 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the namenode, gets the jmx-json document,
+ * and checks NameDirStatuses to find any offline (failed) directories
+ * check_jmx -h hostaddress -p port
+ */
+
+  $options = getopt ("h:p:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "UNKNOWN: Namenode directory status not available via http://<nn_host>:port/jmx url" . "\n";
+    exit(3);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline Namenode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All Namenode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port\n";
+  }
+?>

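A hypothetical run of the name-directory check, again against the NameNode HTTP/JMX port (example host):

  php check_name_dir_status.php -h nn.example.com -p 50070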
+ 35 - 0
contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh

@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
+# OOZIE_URL: http://hortonworks-sandbox.localdomain:11000/oozie
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing oozie server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie server status [$out]";
+exit 0;

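The Oozie check takes host, port, and JAVA_HOME positionally; 11000 matches the example URL in the header comment, while the host and JAVA_HOME path are assumptions:

  ./check_oozie_status.sh oozie.example.com 11000 /usr/java/default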
+ 67 - 0
contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php

@@ -0,0 +1,67 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks the RPC wait time in the queue, RpcQueueTime_avg_time
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * Service Name = JobTracker, NameNode
+ */
+
+  $options = getopt ("h:p:w:c:n:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w']; 
+  $crit=$options['c']; 
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  
+  $RpcQueueTime_avg_time = $object['RpcQueueTime_avg_time'];
+  $RpcProcessingTime_avg_time = $object['RpcProcessingTime_avg_time'];
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time . 
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time . 
+             "> Secs";
+  
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
+  }
+?>

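A hypothetical latency check for the JobTracker RPC queue, with warning/critical thresholds in seconds as the header comment describes; the host is an example, and 50030 is assumed here to also serve /jmx alongside the JobTracker web UI:

  php check_rpcq_latency.php -h jt.example.com -p 50030 -n JobTracker -w 1 -c 3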
+ 73 - 0
contrib/addons/src/addOns/nagios/plugins/check_webui.sh

@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+checkurl () {
+  url=$1
+  /usr/bin/wget -q $url -O /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+
+if [[ -z "$service" || -z "$host" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
+  exit 3;
+fi
+
+case "$service" in
+
+jobtracker) 
+    jtweburl="http://$host:50030"
+    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:50070"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistory)
+    jhweburl="http://$host:51111/jobhistoryhome.jsp"
+    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobhistory web UI not accessible : $jhweburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:60010/master-status"
+    jhweburl="http://domU-12-31-39-16-DC-FB.compute-1.internal:51111/jobhistoryhome.jsp"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: Hbase Master web UI not accessible : $hbaseweburl"; 
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;

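The web-UI check takes a service name and a host, for example (host names are illustrative):

  ./check_webui.sh namenode nn.example.com     # probes http://nn.example.com:50070
  ./check_webui.sh jobtracker jt.example.com   # probes http://jt.example.com:50030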
+ 101 - 0
contrib/addons/src/addOns/nagios/plugins/sys_logger.py

@@ -0,0 +1,101 @@
+#!/usr/bin/python
+import sys
+import syslog
+
+# dictionary of state->severity mappings
+severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
+              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
+
+# List of services which can result in events at the Degraded severity
+degraded_alert_services = ['HBASEMASTER::HBaseMaster CPU utilization',
+                           'HDFS::Namenode RPC Latency',
+                           'MAPREDUCE::JobTracker RPC Latency',
+                           'JOBTRACKER::Jobtracker CPU utilization']
+
+# List of services which can result in events at the Fatal severity
+fatal_alert_services = ['NAMENODE::Namenode Process down']
+
+# dictionary of service->msg_id mappings
+msg_ids = {'Host::Ping':'host_down', 'HBASEMASTER::HBaseMaster CPU utilization':'master_cpu_utilization',
+           'HDFS::HDFS Capacity utilization':'hdfs_percent_capacity', 'HDFS::Corrupt/Missing blocks':'hdfs_block',
+           'NAMENODE::Namenode Edit logs directory status':'namenode_edit_log_write', 'HDFS::Percent DataNodes down':'datanode_down',
+           'DATANODE::Process down':'datanode_process_down', 'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
+           'NAMENODE::Namenode Process down':'namenode_process_down', 'HDFS::Namenode RPC Latency':'namenode_rpc_latency',
+           'DATANODE::Storage full':'datanodes_storage_full', 'JOBTRACKER::Jobtracker Process down':'jobtracker_process_down',
+           'MAPREDUCE::JobTracker RPC Latency':'jobtracker_rpc_latency', 'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
+           'TASKTRACKER::Process down':'tasktracker_process_down', 'HBASEMASTER::HBaseMaster Process down':'hbasemaster_process_down',
+           'REGIONSERVER::Process down':'regionserver_process_down', 'HBASE::Percent region servers down':'regionservers_down',
+           'HIVE-METASTORE::HIVE-METASTORE status check':'hive_metastore_process_down', 'ZOOKEEPER::Percent zookeeper servers down':'zookeepers_down',
+           'ZKSERVERS::ZKSERVERS Process down':'zookeeper_process_down', 'OOZIE::Oozie status check':'oozie_down',
+           'TEMPLETON::Templeton status check':'templeton_down', 'PUPPET::Puppet agent down':'puppet_down',
+           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale', 'GANGLIA::Ganglia [gmetad] Process down':'ganglia_process_down',
+           'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster':'ganglia_collector_process_down',
+           'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker':'ganglia_collector_process_down',
+           'GANGLIA::Ganglia collector [gmond] Process down alert for namenode':'ganglia_collector_process_down',
+           'GANGLIA::Ganglia collector [gmond] Process down alert for slaves':'ganglia_collector_process_down',
+           'NAMENODE::Secondary Namenode Process down':'secondary_namenode_process_down',
+           'JOBTRACKER::Jobtracker CPU utilization':'jobtracker_cpu_utilization',
+           'HBASEMASTER::HBase Web UI down':'hbase_ui_down', 'NAMENODE::Namenode Web UI down':'namenode_ui_down',
+           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down', 'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down'}
+
+
+# Determine the severity of the TVI alert based on the Nagios alert state.
+def determine_severity(state, service):
+    if severities.has_key(state):
+        severity = severities[state]
+    else: severity = 'Warning'
+
+    # For some alerts, warning should be converted to Degraded
+    if severity == 'Warning' and service in degraded_alert_services:
+        severity = 'Degraded'
+    elif severity != 'OK' and service in fatal_alert_services:
+        severity = 'Fatal'
+
+    return severity
+
+
+# Determine the msg id for the TVI alert based on the service which generates the Nagios alert.
+# The msg id is used to correlate a log msg to a TVI rule.
+def determine_msg_id(service, severity):
+    if msg_ids.has_key(service):
+        msg_id = msg_ids[service]
+        if severity == 'OK':
+            msg_id = '{0}_ok'.format(msg_id)
+
+        return msg_id
+    else: return 'HADOOP_UNKNOWN_MSG'
+
+
+# Determine the domain.  Currently the domain is always 'Hadoop'.
+def determine_domain():
+    return 'Hadoop'
+
+
+# log the TVI msg to the syslog
+def log_tvi_msg(msg):
+    syslog.openlog('Hadoop', syslog.LOG_PID)
+    syslog.syslog(msg)
+
+
+# generate a tvi log msg from a Hadoop alert
+def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
+    # Determine the TVI msg contents
+    severity = determine_severity(state, service)  # The TVI alert severity.
+    domain   = determine_domain()                  # The domain specified in the TVI alert.
+    msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
+
+    # Only log HARD alerts
+    if alert_type == 'HARD':
+        # Format and log msg
+        log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
+
+
+# main method which is called when invoked on the command line
+def main():
+    generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
+
+
+# run the main method
+if __name__ == '__main__':
+    main()
+    sys.exit(0)

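Nagios would typically invoke the logger with the alert type, attempt count, state, service description, and message as positional arguments; a hypothetical manual test:

  # logs "Fatal: Hadoop: namenode_process_down# Connection refused" to syslog for a HARD CRITICAL alert
  python sys_logger.py HARD 1 CRITICAL "NAMENODE::Namenode Process down" "Connection refused"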
+ 450 - 0
contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php

@@ -0,0 +1,450 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Constants. */
+define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
+define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
+
+define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
+define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
+define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
+
+define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
+
+/** Spits out appropriate response headers, as per the options passed in. */
+function hdp_mon_generate_response_headers( $response_options )
+{
+  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
+  {
+    // Make the response uncache-able.
+    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
+    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
+    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
+    header("Pragma: no-cache"); // HTTP/1.0
+  }
+
+  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
+  {
+    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
+      {
+        header('Content-type: application/json');
+      }
+      break;
+
+    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
+      {
+        header('Content-type: application/javascript');
+      }
+      break;
+  }
+}
+
+/** Given $response_data (which we expect to be a JSON string), generate an
+ *  HTTP response, which includes emitting the necessary HTTP response headers
+ *  followed by the response body (that is either plain ol' $response_data,
+ *  or a JSONP wrapper around it).
+ */
+function hdp_mon_generate_response( $response_data )
+{
+  $jsonpFunctionName = NULL;
+  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
+    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
+  }
+
+  hdp_mon_generate_response_headers( array
+  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
+  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
+  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
+  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
+  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
+
+  if( isset( $jsonpFunctionName ) )
+  {
+    echo "$jsonpFunctionName( $response_data );";
+  }
+  else
+  {
+    echo $response_data;
+  }
+}
+
+  /* alert_type { ok, non-ok, warning, critical, all } */
+  define ("all", "-2");
+  define ("nok", "-1");
+  define ("ok", "0");
+  define ("warn", "1");
+  define ("critical", "2");
+
+  define ("HDFS_SERVICE_CHECK", "NAMENODE::Namenode Process down");
+  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::Jobtracker Process down");
+  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster Process down");
+  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent zookeeper servers down");
+  define ("HIVE_METASTORE_SERVICE_CHECK", "HIVE-METASTORE::HIVE-METASTORE status check");
+  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie status check");
+  define ("TEMPLETON_SERVICE_CHECK", "TEMPLETON::Templeton status check");
+  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
+
+  /* If SUSE, status file is under /var/lib/nagios */
+  if (file_exists("/etc/SuSE-release")) {
+    $status_file="/var/lib/nagios/status.dat";
+  } else {
+    $status_file="/var/nagios/status.dat";
+  }
+
+  $q1="";
+  if (array_key_exists('q1', $_GET)) {
+    $q1=$_GET["q1"];
+  }
+  $q2="";
+  if (array_key_exists('q2', $_GET)) {
+    $q2=$_GET["q2"];
+  }
+  $alert_type="";
+  if (array_key_exists('alert_type', $_GET)) {
+    $alert_type=$_GET["alert_type"];
+  }
+  $host="";
+  if (array_key_exists('host_name', $_GET)) {
+    $host=$_GET["host_name"];
+  }
+  $indent="";
+  if (array_key_exists('indent', $_GET)) {
+    $indent=$_GET["indent"];
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  if ($q1 == "alerts") {
+    /* Add the service status object to result array */
+    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
+  }
+
+  if ($q2 == "hosts") {
+    /* Add the service status object to result array */
+    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
+  }
+
+  /* Add host count object to the results */
+  $result['hostcounts'] = query_host_count ($status_file_content);
+
+  /* Add services runtime states */
+  $result['servicestates'] = query_service_states ($status_file_content);
+
+  /* Return results */
+  if ($indent == "true") {
+    hdp_mon_generate_response(indent(json_encode($result)));
+  } else {
+    hdp_mon_generate_response(json_encode($result));
+  }
+
+  # Functions
+  /* Query service states */
+  function query_service_states ($status_file_content) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $services_object = array ();
+    $services_object["PUPPET"] = 0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
+        $services_object["HDFS"] = getParameter($object, "last_hard_state");
+        if ($services_object["HDFS"] >= 1) {
+          $services_object["HDFS"] = 1;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
+        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
+        if ($services_object["MAPREDUCE"] >= 1) {
+          $services_object["MAPREDUCE"] = 1;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
+        $services_object["HBASE"] = getParameter($object, "last_hard_state");
+        if ($services_object["HBASE"] >= 1) {
+          $services_object["HBASE"] = 1;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == HIVE_METASTORE_SERVICE_CHECK) {
+        $services_object["HIVE-METASTORE"] = getParameter($object, "last_hard_state");
+        if ($services_object["HIVE-METASTORE"] >= 1) {
+          $services_object["HIVE-METASTORE"] = 1;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
+        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
+        if ($services_object["OOZIE"] >= 1) {
+          $services_object["OOZIE"] = 1;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == TEMPLETON_SERVICE_CHECK) {
+        $services_object["TEMPLETON"] = getParameter($object, "last_hard_state");
+        if ($services_object["TEMPLETON"] >= 1) {
+          $services_object["TEMPLETON"] = 1;
+        }
+        continue;
+      }
+      /* In the case of zookeeper, the service is treated as running if the alert is ok or warning
+       * (i.e., at least some zookeeper instances are running).
+       */
+      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
+        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
+        if ($services_object["ZOOKEEPER"] <= 1) {
+          $services_object["ZOOKEEPER"] = 0;
+        }
+        continue;
+      }
+      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
+        $state = getParameter($object, "last_hard_state");
+        if ($state >= 1) {
+          $services_object["PUPPET"]++;
+        }
+        continue;
+      }
+    }
+    if ($services_object["PUPPET"] >= 1) {
+      $services_object["PUPPET"] = 1;
+    }
+    return $services_object;
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $up_hosts = 0;
+    $down_hosts = 0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "last_hard_state") != ok) {
+        $down_hosts++;
+      } else {
+        $up_hosts++;
+      }
+    }
+    $hostcounts_object['up_hosts'] = $up_hosts;
+    $hostcounts_object['down_hosts'] = $down_hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Hosts */
+  function query_hosts ($status_file_content, $alert_type, $host) {
+    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
+                              "plugin_output", "last_check", "current_attempt",
+                              "last_hard_state_change", "last_time_up", "last_time_down",
+                              "last_time_unreachable", "is_flapping", "last_check");
+
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hosts_objects = array ();
+    $i = 0;
+    foreach ($matches[0] as $object) {
+      $hoststatus = array ();
+      $chost = getParameter($object, "host_name");
+      if (empty($host) || $chost == $host) {
+        foreach ($hoststatus_attributes as $attrib) {
+          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+        }
+        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
+        if (!empty($host)) {
+          $hosts_objects[$i] = $hoststatus;
+          $i++;
+          break;
+        }
+      }
+      if (!empty($hoststatus)) {
+        $hosts_objects[$i] = $hoststatus;
+        $i++;
+      }
+    }
+    /* echo "COUNT : " . count ($services_objects) . "\n"; */
+    return $hosts_objects;
+  }
+
+  /* Query Alerts */
+  function query_alerts ($status_file_content, $alert_type, $host) {
+
+    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
+                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
+                                       "last_time_ok", "last_time_warning", "last_time_unknown",
+                                       "last_time_critical", "is_flapping", "last_check");
+
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
+    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
+    $services_objects = array ();
+    $i = 0;
+    foreach ($matches[0] as $object) {
+      $servicestatus = array ();
+      switch ($alert_type) {
+      case "all":
+        if (empty($host) || getParameter($object, "host_name") == $host) {
+          foreach ($servicestatus_attributes as $attrib) {
+            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+          }
+          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
+          $srv_desc = explode ("::",$servicestatus['service_description'],2);
+          $servicestatus['service_description'] = $srv_desc[1];
+        }
+        break;
+      case "nok":
+        if (getParameter($object, "last_hard_state") != ok &&
+           (empty($host) || getParameter($object, "host_name") == $host)) {
+          foreach ($servicestatus_attributes as $attrib) {
+            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+          }
+          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
+          $srv_desc = explode ("::",$servicestatus['service_description'],2);
+          $servicestatus['service_description'] = $srv_desc[1];
+        }
+        break;
+      case "ok":
+        if (getParameter($object, "last_hard_state") == ok &&
+           (empty($host) || getParameter($object, "host_name") == $host)) {
+          foreach ($servicestatus_attributes as $attrib) {
+            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+          }
+          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
+          $srv_desc = explode ("::",$servicestatus['service_description'],2);
+          $servicestatus['service_description'] = $srv_desc[1];
+        }
+        break;
+      case "warn":
+        if (getParameter($object, "last_hard_state") == warn &&
+           (empty($host) || getParameter($object, "host_name") == $host)) {
+          foreach ($servicestatus_attributes as $attrib) {
+            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+          }
+          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
+          $srv_desc = explode ("::",$servicestatus['service_description'],2);
+          $servicestatus['service_description'] = $srv_desc[1];
+        }
+        break;
+      case "critical":
+        if (getParameter($object, "last_hard_state") == critical &&
+           (empty($host) || getParameter($object, "host_name") == $host)) {
+          foreach ($servicestatus_attributes as $attrib) {
+            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
+          }
+          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
+          $srv_desc = explode ("::",$servicestatus['service_description'],2);
+          $servicestatus['service_description'] = $srv_desc[1];
+        }
+        break;
+      }
+      if (!empty($servicestatus)) {
+        $services_objects[$i] = $servicestatus;
+        $i++;
+      }
+    }
+    /* echo "COUNT : " . count ($services_objects) . "\n"; */
+    return $services_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+      case "ZOOKEEPER":
+      case "HIVE-METASTORE":
+      case "OOZIE":
+      case "TEMPLETON":
+      case "PUPPET":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+?>
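
As a rough illustration of how the helpers above consume Nagios status.dat entries, the sketch below runs a hand-written "servicestatus" fragment through getParameter() and get_service_type(). The fragment text, host name, and the expected outputs are assumptions made for illustration, not data produced by Ambari or Nagios; the sketch assumes the functions defined above are already loaded.

<?php
// Minimal sketch: parse one fabricated servicestatus block.
$fragment = "servicestatus {\n" .
            " host_name=host1.example.com\n" .
            " service_description=NAMENODE::Namenode Process down\n" .
            " last_hard_state=2\n" .
            "}\n";

$desc  = getParameter($fragment, "service_description"); // "NAMENODE::Namenode Process down"
$state = getParameter($fragment, "last_hard_state");     // "2"
echo get_service_type($desc) . "\n";                      // prints "HDFS" (NAMENODE maps to HDFS)
?>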

+ 93 - 0
contrib/addons/test/dataServices/jmx/data/cluster_configuration.json

@@ -0,0 +1,93 @@
+{
+  "config_version": 1,
+  "stack_version": "1.0.2",
+  "overall": {
+    "cluster_name": "MyHDPCluster",
+    "dashboard_host": "dashboard_host",
+    "dashboard_port": 80, 
+    "ganglia" : {
+      "web_host": "gangliaweb_host",
+      "web_port": 80,
+      "web_root": "/var/www/ganglia2",
+      "grid_name": "HDP_GRID"
+    },
+    "nagios": {
+      "nagiosserver_host": "nagiosserver_host",
+      "nagiosserver_port": 80,
+      "web_root": "/nagios"
+    },
+    "jmx": {
+      "timeout": 1
+    },
+    "services": {
+	  "HDFS" : [
+        {
+          "installed": true,
+          "name": "HDFS",
+          "namenode_host": "namenode",
+          "namenode_port": 50070,
+          "snamenode_host": "snamenode",
+          "snamenode_port": 50071,
+          "total_datanodes": 10,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "namenode": "HDPNameNode"      
+          }
+        }
+      ],
+      "MAPREDUCE" : [        
+        {
+          "installed": true,
+          "name": "MAPREDUCE",
+          "jobtracker_host": "jobtracker",
+          "jobtracker_port": 50030,
+          "total_tasktrackers": 20,
+          "jobhistory_host": "jobhistory_host",
+          "jobhistory_port": 52890,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "jobtracker": "HDPJobTracker"
+          },
+          "scheduler_type": "org.foo.CapacityTaskScheduler"
+        }
+      ],
+      "HBASE" : [  
+        {
+          "installed": true,
+          "name": "HBASE",
+          "hbasemaster_host": "hbasemaster",
+          "hbasemaster_port": 60010,
+          "total_regionservers": 30,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "hbasemaster": "HDPHBaseMaster"
+          }
+        }
+      ],
+      "ZOOKEEPER" : [
+        {
+          "installed": false,
+          "name": "ZOOKEEPER"
+        }
+      ],
+      "HIVE-METASTORE" : [
+        {
+          "installed": true,
+          "name": "HIVE-METASTORE"
+        }
+      ],
+      "TEMPLETON" : [
+        {
+          "installed": true,
+          "name": "TEMPLETON"
+        }
+      ],
+      "OOZIE" : [
+        {
+          "installed": true,
+          "name": "OOZIE"
+        }
+      ]
+    }
+  }
+}
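
To make the shape of this fixture concrete, here is a small sketch that reads it with plain json_decode() rather than the hdp_mon_load_cluster_configuration() loader exercised by test_config_load.php further below; the relative path assumes the script is run from the contrib/addons/test/dataServices/jmx directory.

<?php
// Sketch: sanity-check a few fields of the test fixture above.
$cfg = json_decode(file_get_contents("data/cluster_configuration.json"), true);
echo $cfg["overall"]["cluster_name"] . "\n";                          // "MyHDPCluster"
echo $cfg["overall"]["services"]["HDFS"][0]["namenode_host"] . "\n";  // "namenode"
echo $cfg["overall"]["services"]["HDFS"][0]["namenode_port"] . "\n";  // 50070
?>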

+ 93 - 0
contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase

@@ -0,0 +1,93 @@
+{
+  "config_version": 1,
+  "stack_version": "1.0.2",
+  "overall": {
+    "cluster_name": "MyHDPCluster",
+    "dashboard_host": "dashboard_host",
+    "dashboard_port": 80, 
+    "ganglia" : {
+      "web_host": "gangliaweb_host",
+      "web_port": 80,
+      "web_root": "/var/www/ganglia2",
+      "grid_name": "HDP_GRID"
+    },
+    "nagios": {
+      "nagiosserver_host": "nagiosserver_host",
+      "nagiosserver_port": 80,
+      "web_root": "/nagios"
+    },
+    "jmx": {
+      "timeout": 1
+    },
+    "services": {
+	  "HDFS" : [
+        {
+          "installed": true,
+          "name": "HDFS",
+          "namenode_host": "namenode",
+          "namenode_port": 50070,
+          "snamenode_host": "snamenode",
+          "snamenode_port": 50071,
+          "total_datanodes": 10,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "namenode": "HDPNameNode"      
+          }
+        }
+      ],
+      "MAPREDUCE" : [        
+        {
+          "installed": true,
+          "name": "MAPREDUCE",
+          "jobtracker_host": "jobtracker",
+          "jobtracker_port": 50030,
+          "total_tasktrackers": 20,
+          "jobhistory_host": "jobhistory_host",
+          "jobhistory_port": 52890,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "jobtracker": "HDPJobTracker"
+          },
+          "scheduler_type": "org.foo.CapacityTaskScheduler"
+        }
+      ],
+      "HBASE" : [  
+        {
+          "installed": false,
+          "name": "HBASE",
+          "hbasemaster_host": "hbasemaster",
+          "hbasemaster_port": 60010,
+          "total_regionservers": 30,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "hbasemaster": "HDPHBaseMaster"
+          }
+        }
+      ],
+      "ZOOKEEPER" : [
+        {
+          "installed": true,
+          "name": "ZOOKEEPER"
+        }
+      ],
+      "HIVE-METASTORE" : [
+        {
+          "installed": false,
+          "name": "HIVE-METASTORE"
+        }
+      ],
+      "TEMPLETON" : [
+        {
+          "installed": false,
+          "name": "TEMPLETON"
+        }
+      ],
+      "OOZIE" : [
+        {
+          "installed": true,
+          "name": "OOZIE"
+        }
+      ]
+    }
+  }
+}

File diff not shown because this file is too large
+ 866 - 0
contrib/addons/test/dataServices/jmx/data/sample_hbasemaster_jmx.json


File diff not shown because this file is too large
+ 44 - 0
contrib/addons/test/dataServices/jmx/data/sample_jobtracker_jmx.json


File diff not shown because this file is too large
+ 376 - 0
contrib/addons/test/dataServices/jmx/data/sample_namenode_jmx.json


+ 120 - 0
contrib/addons/test/dataServices/jmx/test_config_load.php

@@ -0,0 +1,120 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$GLOBALS["HDP_MON_DEBUG_MODE"] = FALSE;
+$pwd = exec("pwd");
+$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
+    ."/data/cluster_configuration.json";
+
+include_once("../../../src/dataServices/common/common.inc");
+include_once("../../../src/dataServices/common/cluster_configuration.inc");
+
+hdp_mon_load_cluster_configuration();
+
+if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
+  error_log("global CONFIG is still not set");
+  exit(1);
+}
+
+assert($GLOBALS["HDP_MON_CONFIG"]["STACK_VERSION"] === "1.0.2");
+assert($GLOBALS["HDP_MON_CONFIG"]["CLUSTER_NAME"] === "MyHDPCluster");
+
+assert($GLOBALS["HDP_MON_CONFIG"]["HDP_MON"]["DASHBOARD_HOST"] ===
+    "dashboard_host");
+assert($GLOBALS["HDP_MON_CONFIG"]["HDP_MON"]["DASHBOARD_PORT"] === 80);
+
+assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_HOST"] === "gangliaweb_host");
+assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_PORT"] === 80);
+assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_ROOT"] ===
+    "/var/www/ganglia2");
+assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["GRID_NAME"] === "HDP_GRID");
+
+assert($GLOBALS["HDP_MON_CONFIG"]["NAGIOS"]["NAGIOSSERVER_HOST"] ===
+    "nagiosserver_host");
+assert($GLOBALS["HDP_MON_CONFIG"]["NAGIOS"]["NAGIOSSERVER_PORT"] === 80);
+
+assert($GLOBALS["HDP_MON_CONFIG"]["JMX"]["TIMEOUT"] === 1);
+
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_HOST"] ===
+    "namenode");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_PORT"] ===
+    50070);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_ADDR"] ===
+    "namenode:50070");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["SECONDARY_NAMENODE_ADDR"]
+    === "snamenode:50071");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["TOTAL_DATANODES"] === 10);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]
+    ["GANGLIA_CLUSTERS"]["NAMENODE"] === "HDPNameNode");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]
+    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
+
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_HOST"]
+    === "jobtracker");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_PORT"]
+    === 50030);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_ADDR"]
+    === "jobtracker:50030");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["TOTAL_TASKTRACKERS"]
+    === 20);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBHISTORY_HOST"]
+    === "jobhistory_host");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBHISTORY_PORT"]
+    === 52890);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]
+    ["GANGLIA_CLUSTERS"]["JOBTRACKER"] === "HDPJobTracker");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]
+    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
+
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_HOST"]
+    === "hbasemaster");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_PORT"]
+    === 60010);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_ADDR"]
+    === "hbasemaster:60010");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["TOTAL_REGIONSERVERS"]
+    === 30);
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]
+    ["GANGLIA_CLUSTERS"]["HBASEMASTER"] === "HDPHBaseMaster");
+assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]
+    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
+assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["ZOOKEEPER"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HIVE-METASTORE"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["TEMPLETON"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["OOZIE"]));
+
+$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
+    ."/data/cluster_configuration.json.nohbase";
+
+unset($GLOBALS["HDP_MON_CONFIG_INITIALIZED"]);
+hdp_mon_load_cluster_configuration();
+
+if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
+  error_log("global CONFIG is still not set");
+  exit(1);
+}
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]));
+assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["ZOOKEEPER"]));
+assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HIVE-METASTORE"]));
+assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["TEMPLETON"]));
+assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["OOZIE"]));
+
+?>
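
The checks above (and in test_jmx_parsing.php below) rely on PHP's built-in assert(), so whether a failed assertion actually stops the script depends on the interpreter's assert settings. The prelude below is only a sketch of how one might run these tests with failures made fatal; it is not part of the commit.

<?php
// Optional test prelude (sketch): make assert() failures loud and fatal.
assert_options(ASSERT_ACTIVE, 1);   // evaluate assertions
assert_options(ASSERT_WARNING, 1);  // emit a PHP warning on failure
assert_options(ASSERT_BAIL, 1);     // stop execution at the first failed assertion
?>

With that in place, each script can presumably be run directly (e.g. php test_config_load.php) from the contrib/addons/test/dataServices/jmx directory, since the data and include paths are resolved relative to the current working directory.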

+ 255 - 0
contrib/addons/test/dataServices/jmx/test_jmx_parsing.php

@@ -0,0 +1,255 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$GLOBALS["HDP_MON_DEBUG_MODE"] = FALSE;
+$pwd = exec("pwd");
+$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
+    ."/data/cluster_configuration.json";
+
+include_once("../../../src/dataServices/common/common.inc");
+include_once("../../../src/dataServices/common/cluster_configuration.inc");
+include_once("../../../src/dataServices/common/response.inc");
+include_once("../../../src/dataServices/jmx/hdp_mon_jmx_helpers.inc");
+
+function verify_hdfs_info($info) {
+  assert(is_array($info));
+  assert($info["service_type"] === "HDFS");
+  assert($info["installed"]);
+
+  assert($info["namenode_addr"] == "namenode:50070");
+  assert($info["secondary_namenode_addr"] == "snamenode:50071");
+  assert($info["total_nodes"] == 10);
+  assert($info["memory_heap_used"] == 529321952);
+  assert($info["memory_heap_max"] == 1006632960);
+  assert($info["dfs_dirfiles_count"] == 554);
+  assert($info["dfs_blocks_total"] == 458);
+  assert($info["dfs_blocks_underreplicated"] == 0);
+  assert($info["dfs_blocks_missing"] == 0);
+  assert($info["dfs_blocks_corrupt"] == 0);
+  assert($info["dfs_state"] == "Operational");
+  assert($info["start_time"] == 1327557522);
+  assert($info["live_nodes"] == 10);
+  assert($info["dead_nodes"] == 1);
+  assert($info["decommissioning_nodes"] == 0);
+  assert($info["version"] == "1.0.0");
+  assert($info["safemode"] == TRUE);
+  assert($info["pending_upgrades"] == "");
+  assert($info["dfs_configured_capacity"] == 36336891658240);
+  assert($info["dfs_percent_used"] == 0);
+  assert($info["dfs_percent_remaining"] == 99.08);
+  assert($info["dfs_total_bytes"] == 36336891658240);
+  assert($info["dfs_used_bytes"] == 1750237184);
+  assert($info["nondfs_used_bytes"] == 331691536384);
+  assert($info["dfs_free_bytes"] == 36003449884672);
+  assert($info["safemode_reason"] != "");
+}
+
+function verify_mr_info($info) {
+  assert(is_array($info));
+  assert($info["service_type"] === "MAPREDUCE");
+  assert($info["installed"]);
+
+  assert($info["jobtracker_addr"] == "jobtracker:50030");
+  assert($info["trackers_total"] == 20);
+  assert($info["jobhistory_addr"] == "jobhistory_host:52890");
+  assert($info["memory_heap_used"] == 158277552);
+  assert($info["memory_heap_max"] == 1052770304);
+  assert($info["trackers_live"] == 10);
+  assert($info["trackers_graylisted"] == 0);
+  assert($info["trackers_blacklisted"] == 0);
+  assert($info["version"] == "1.0.0, r1224962");
+
+  assert(is_array($info["queue_info"])
+         && $info["queue_info"]["type"] == "CapacityTaskScheduler"
+         && count($info["queue_info"]["queues"]) == 1);
+
+  assert($info["queue_info"]["queues"]["default"]["state"] == "running"
+    && $info["queue_info"]["queues"]["default"]["capacity_percentage"]
+        == 100.0
+    && $info["queue_info"]["queues"]["default"]["user_limit"] ==  100
+    && $info["queue_info"]["queues"]["default"]["priority_supported"] == 1
+    && $info["queue_info"]["queues"]["default"]["map_capacity"] == 40
+    && $info["queue_info"]["queues"]["default"]["map_running_tasks"] ==  0
+    && $info["queue_info"]["queues"]["default"]["reduce_capacity"] == 20
+    && $info["queue_info"]["queues"]["default"]["reduce_running_tasks"] == 0
+    && $info["queue_info"]["queues"]["default"]["waiting_jobs"] == 3
+    && $info["queue_info"]["queues"]["default"]["initializing_jobs"] ==  0
+    && $info["queue_info"]["queues"]["default"]["users_with_submitted_jobs"]
+        == 0);
+
+  assert($info["trackers_excluded"] == 0);
+  assert($info["map_task_capacity"] == 40);
+  assert($info["reduce_task_capacity"] == 20);
+  assert($info["job_total_submissions"] == 105);
+  assert($info["job_total_completions"] == 104);
+  assert($info["running_jobs"] == 0);
+  assert($info["waiting_jobs"] == 3);
+  assert($info["running_map_tasks"] == 0);
+  assert($info["running_reduce_tasks"] == 0);
+  assert($info["occupied_map_slots"] == 0);
+  assert($info["occupied_reduce_slots"] == 0);
+  assert($info["reserved_map_slots"] == 0);
+  assert($info["reserved_reduce_slots"] == 0);
+  assert($info["waiting_maps"] == 1);
+  assert($info["waiting_reduces"] == 0);
+  assert($info["start_time"] == 1327557546);
+  assert($info["average_node_capacity"] == 6);
+}
+
+function verify_hbase_info($info) {
+  assert(is_array($info));
+  assert($info["service_type"] === "HBASE");
+  assert($info["installed"]);
+  assert($info["total_regionservers"] === 30);
+  assert($info["memory_heap_used"] === 32946880);
+  assert($info["memory_heap_max"] === 1035468800);
+  assert($info["cluster_id"] === "d24914d7-75d3-4dcc-9e6f-0d7770833993");
+  assert($info["start_time"] == 1329244267);
+  assert($info["active_time"] == 1329244269);
+  assert(is_array($info["coprocessors"])
+         && count($info["coprocessors"]) == 0);
+  assert($info["average_load"] == 2);
+  assert($info["regions_in_transition_count"] === 0);
+  assert($info["live_regionservers"] === 1);
+  assert($info["dead_regionservers"] === 0);
+  assert(is_array($info["zookeeper_quorum"])
+         && count($info["zookeeper_quorum"]) == 1
+         && $info["zookeeper_quorum"][0] === "localhost:2181");
+  assert($info["version"] ===
+      "0.92.1-SNAPSHOT, ra23f8636efd6dd9d37f3a15d83f2396819509502");
+}
+
+function verify_overall_info($info) {
+  assert(is_array($info));
+  assert(is_array($info["overall"]));
+  assert(is_array($info["hbase"]));
+  assert(is_array($info["hdfs"]));
+  assert(is_array($info["mapreduce"]));
+
+  assert($info["overall"]["ganglia_url"] ==
+      "http://gangliaweb_host:80/var/www/ganglia2");
+  assert($info["overall"]["nagios_url"] == "http://nagiosserver_host:80/nagios");
+  assert($info["overall"]["hdfs_installed"] == 1);
+  assert($info["overall"]["mapreduce_installed"] == 1);
+  assert($info["overall"]["hbase_installed"] == 1);
+  assert($info["overall"]["namenode_addr"] == "namenode:50070");
+  assert($info["overall"]["secondary_namenode_addr"] == "snamenode:50071");
+  assert($info["overall"]["namenode_starttime"] == 1327557522);
+  assert($info["overall"]["total_nodes"] == 10);
+  assert($info["overall"]["live_nodes"] == 10);
+  assert($info["overall"]["dead_nodes"] == 1);
+  assert($info["overall"]["decommissioning_nodes"] == 0);
+  assert($info["overall"]["dfs_blocks_underreplicated"] == 0);
+  assert($info["overall"]["safemode"] == TRUE);
+  assert($info["overall"]["pending_upgrades"] == "");
+  assert($info["overall"]["dfs_configured_capacity"] == 36336891658240);
+  assert($info["overall"]["dfs_percent_used"] == 0);
+  assert($info["overall"]["dfs_percent_remaining"] == 99.08);
+  assert($info["overall"]["dfs_total_bytes"] == 36336891658240);
+  assert($info["overall"]["dfs_used_bytes"] == 1750237184);
+  assert($info["overall"]["nondfs_used_bytes"] == 331691536384);
+  assert($info["overall"]["dfs_free_bytes"] == 36003449884672);
+  assert($info["overall"]["jobtracker_addr"] == "jobtracker:50030");
+  assert($info["overall"]["jobtracker_starttime"] == 1327557546);
+  assert($info["overall"]["running_jobs"] == 0);
+  assert($info["overall"]["waiting_jobs"] == 3);
+  assert($info["overall"]["trackers_total"] == 20);
+  assert($info["overall"]["trackers_live"] == 10);
+  assert($info["overall"]["trackers_graylisted"] == 0);
+  assert($info["overall"]["trackers_blacklisted"] == 0);
+  assert($info["overall"]["hbasemaster_addr"] == "hbasemaster:60010");
+  assert($info["overall"]["total_regionservers"] == 30);
+  assert($info["overall"]["hbasemaster_starttime"] == 1329244267);
+  assert($info["overall"]["live_regionservers"] == 1);
+  assert($info["overall"]["dead_regionservers"] == 0);
+  assert($info["overall"]["regions_in_transition_count"] == 0);
+
+  assert($info["hdfs"]["namenode_addr"] == "namenode:50070");
+  assert($info["hdfs"]["secondary_namenode_addr"] == "snamenode:50071");
+  assert($info["hdfs"]["namenode_starttime"] == 1327557522);
+  assert($info["hdfs"]["total_nodes"] == 10);
+  assert($info["hdfs"]["live_nodes"] == 10);
+  assert($info["hdfs"]["dead_nodes"] == 1);
+  assert($info["hdfs"]["decommissioning_nodes"] == 0);
+  assert($info["hdfs"]["dfs_blocks_underreplicated"] == 0);
+  assert($info["hdfs"]["safemode"] == TRUE);
+  assert($info["hdfs"]["pending_upgrades"] == "");
+  assert($info["hdfs"]["dfs_configured_capacity"] == 36336891658240);
+  assert($info["hdfs"]["dfs_percent_used"] == 0);
+  assert($info["hdfs"]["dfs_percent_remaining"] == 99.08);
+  assert($info["hdfs"]["dfs_total_bytes"] == 36336891658240);
+  assert($info["hdfs"]["dfs_used_bytes"] == 1750237184);
+  assert($info["hdfs"]["nondfs_used_bytes"] == 331691536384);
+  assert($info["hdfs"]["dfs_free_bytes"] == 36003449884672);
+
+  assert($info["mapreduce"]["jobtracker_addr"] == "jobtracker:50030");
+  assert($info["mapreduce"]["jobtracker_starttime"] == 1327557546);
+  assert($info["mapreduce"]["running_jobs"] == 0);
+  assert($info["mapreduce"]["waiting_jobs"] == 3);
+  assert($info["mapreduce"]["trackers_total"] == 20);
+  assert($info["mapreduce"]["trackers_live"] == 10);
+  assert($info["mapreduce"]["trackers_graylisted"] == 0);
+  assert($info["mapreduce"]["trackers_blacklisted"] == 0);
+
+  assert($info["hbase"]["hbasemaster_addr"] == "hbasemaster:60010");
+  assert($info["hbase"]["total_regionservers"] == 30);
+  assert($info["hbase"]["hbasemaster_starttime"] == 1329244267);
+  assert($info["hbase"]["live_regionservers"] == 1);
+  assert($info["hbase"]["dead_regionservers"] == 0);
+  assert($info["hbase"]["regions_in_transition_count"] == 0);
+}
+
+hdp_mon_load_cluster_configuration();
+if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
+  error_log("global CONFIG is still not set");
+  exit(1);
+}
+
+$hdfs_jmx_json = file_get_contents("./data/sample_namenode_jmx.json");
+if (!$hdfs_jmx_json || $hdfs_jmx_json == "") {
+  error_log("Invalid json data for namenode jmx");
+  exit(1);
+}
+
+$hdfsinfo = hdp_mon_jmx_parse_hdfs_info(json_decode($hdfs_jmx_json, true));
+verify_hdfs_info($hdfsinfo);
+
+$mr_jmx_json = file_get_contents("./data/sample_jobtracker_jmx.json");
+if (!$mr_jmx_json || $mr_jmx_json == "") {
+  error_log("Invalid json data for jobtracker jmx");
+  exit(1);
+}
+
+$mrinfo = hdp_mon_jmx_parse_mapreduce_info(json_decode($mr_jmx_json, true));
+verify_mr_info($mrinfo);
+
+$hbase_jmx_json = file_get_contents("./data/sample_hbasemaster_jmx.json");
+if (!$hbase_jmx_json || $hbase_jmx_json == "") {
+  error_log("Invalid json data for hbase master jmx");
+  exit(1);
+}
+
+$hbaseinfo = hdp_mon_jmx_parse_hbase_info(json_decode($hbase_jmx_json, true));
+verify_hbase_info($hbaseinfo);
+
+$overallinfo = hdp_mon_helper_get_cluster_info($hdfsinfo,
+    $mrinfo, $hbaseinfo);
+verify_overall_info($overallinfo);
+
+?>

+ 398 - 0
contrib/addons/test/nagios/plugins/test_sys_logger.py

@@ -0,0 +1,398 @@
+#!/usr/bin/python
+
+import sys
+sys.path.append('../src')
+
+import sys_logger
+
+tests_passed = 0
+tests_failed = 0
+def test_log_tvi_msg(msg):
+    global tests_passed, tests_failed
+    if msg == expected_log_msg:
+        print 'Test Passed'
+        tests_passed += 1
+    else:
+        print '*** TEST FAILED ***'
+        print 'Expected MSG: {0}'.format(expected_log_msg)
+        print 'Actual MSG  : {0}'.format(msg)
+        tests_failed += 1
+
+sys_logger.log_tvi_msg = test_log_tvi_msg
+
+def test(tvi_rule, expected_msg, arg1, arg2, arg3, arg4, arg5):
+    sys.stdout.write(tvi_rule + ': ')
+    global expected_log_msg
+    expected_log_msg = expected_msg
+    sys_logger.generate_tvi_log_msg(arg1, arg2, arg3, arg4, arg5)
+
+def summary():
+    total_tests = tests_passed + tests_failed
+    print '\nTests Run: {0}'.format(total_tests)
+    print 'Passed: {0}, Failed: {1}'.format(tests_passed, tests_failed)
+    if not tests_failed:
+        print 'SUCCESS! All tests pass.'
+
+
+# Hadoop_Host_Down
+test('Hadoop_Host_Down',
+     'Critical: Hadoop: host_down# Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms',
+     'HARD', '1', 'CRITICAL', 'Host::Ping', 'Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms')
+
+test('Hadoop_Host_Down:OK',
+    'OK: Hadoop: host_down_ok# Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms',
+    'HARD', '1', 'OK', 'Host::Ping', 'Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms')
+
+# Hadoop_Master_Daemon_CPU_Utilization
+test('Hadoop_Master_Daemon_CPU_Utilization',
+     'Critical: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
+     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster CPU utilization',
+     'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
+
+test('Hadoop_Master_Daemon_CPU_Utilization:Degraded',
+    'Degraded: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
+    'HARD', '1', 'WARNING', 'HBASEMASTER::HBaseMaster CPU utilization',
+    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
+
+test('Hadoop_Master_Daemon_CPU_Utilization:OK',
+    'OK: Hadoop: master_cpu_utilization_ok# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster CPU utilization',
+    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%')
+
+# Hadoop_HDFS_Percent_Capacity
+test('Hadoop_HDFS_Percent_Capacity',
+     'Critical: Hadoop: hdfs_percent_capacity# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7',
+     'HARD', '1', 'CRITICAL', 'HDFS::HDFS Capacity utilization',
+     'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7')
+
+test('Hadoop_HDFS_Percent_Capacity:OK',
+    'OK: Hadoop: hdfs_percent_capacity_ok# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7',
+    'HARD', '1', 'OK', 'HDFS::HDFS Capacity utilization',
+    'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7')
+
+# Hadoop_HDFS_Corrupt_Missing_Blocks
+test('Hadoop_HDFS_Corrupt_Missing_Blocks',
+     'Critical: Hadoop: hdfs_block# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
+     'HARD', '1', 'CRITICAL', 'HDFS::Corrupt/Missing blocks',
+     'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
+
+test('Hadoop_HDFS_Corrupt_Missing_Blocks:OK',
+    'OK: Hadoop: hdfs_block_ok# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
+    'HARD', '1', 'OK', 'HDFS::Corrupt/Missing blocks',
+    'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
+
+# Hadoop_NameNode_Edit_Log_Dir_Write
+test('Hadoop_NameNode_Edit_Log_Dir_Write',
+     'Critical: Hadoop: namenode_edit_log_write# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Edit_Log_Dir_Write:OK',
+    'OK: Hadoop: namenode_edit_log_write_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
+
+# Hadoop_DataNode_Down
+test('Hadoop_DataNode_Down',
+     'Critical: Hadoop: datanode_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes down','SERVICE MSG')
+
+test('Hadoop_DataNode_Down:OK',
+    'OK: Hadoop: datanode_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Percent DataNodes down','SERVICE MSG')
+
+# Hadoop_DataNode_Process_Down
+test('Hadoop_DataNode_Process_Down',
+     'Critical: Hadoop: datanode_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'DATANODE::Process down', 'SERVICE MSG')
+
+test('Hadoop_DataNode_Process_Down:OK',
+    'OK: Hadoop: datanode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'DATANODE::Process down', 'SERVICE MSG')
+
+# Hadoop_Percent_DataNodes_Storage_Full
+test('Hadoop_Percent_DataNodes_Storage_Full',
+     'Critical: Hadoop: datanodes_percent_storage_full# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
+
+test('Hadoop_Percent_DataNodes_Storage_Full:OK',
+    'OK: Hadoop: datanodes_percent_storage_full_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
+
+# Hadoop_NameNode_Process_Down
+test('Hadoop_NameNode_Process_Down:CRITICAL',
+     'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:WARNING',
+    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:UNKNOWN',
+    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'UNKNOWN', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:OK',
+    'OK: Hadoop: namenode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+# Hadoop_Secondary_NameNode_Process_Down
+test('Hadoop_Secondary_NameNode_Process_Down',
+    'Critical: Hadoop: secondary_namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_Secondary_NameNode_Process_Down:OK',
+    'OK: Hadoop: secondary_namenode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
+
+# Hadoop_NameNode_RPC_Latency
+test('Hadoop_NameNode_RPC_Latency',
+     'Critical: Hadoop: namenode_rpc_latency# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_NameNode_RPC_Latency:Degraded',
+    'Degraded: Hadoop: namenode_rpc_latency# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_NameNode_RPC_Latency:OK',
+    'OK: Hadoop: namenode_rpc_latency_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+# Hadoop_DataNodes_Storage_Full
+test('Hadoop_DataNodes_Storage_Full',
+     'Critical: Hadoop: datanodes_storage_full# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'DATANODE::Storage full', 'SERVICE MSG')
+
+test('Hadoop_DataNodes_Storage_Full:OK',
+    'OK: Hadoop: datanodes_storage_full_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'DATANODE::Storage full', 'SERVICE MSG')
+
+# Hadoop_JobTracker_Process_Down
+test('Hadoop_JobTracker_Process_Down',
+     'Critical: Hadoop: jobtracker_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_Process_Down:OK',
+    'OK: Hadoop: jobtracker_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
+
+# Hadoop_JobTracker_RPC_Latency
+test('Hadoop_JobTracker_RPC_Latency',
+     'Critical: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_RPC_Latency:Degraded',
+    'Degraded: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_RPC_Latency:OK',
+    'OK: Hadoop: jobtracker_rpc_latency_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+# Hadoop_JobTracker_CPU_Utilization
+test('Hadoop_JobTracker_CPU_Utilization',
+    'Critical: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_CPU_Utilization:Degraded',
+    'Degraded: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_CPU_Utilization:OK',
+    'OK: Hadoop: jobtracker_cpu_utilization_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+# Hadoop_TaskTracker_Down
+test('Hadoop_TaskTracker_Down',
+     'Critical: Hadoop: tasktrackers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
+
+test('Hadoop_TaskTracker_Down:OK',
+    'OK: Hadoop: tasktrackers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
+
+# Hadoop_TaskTracker_Process_Down
+test('Hadoop_TaskTracker_Process_Down',
+     'Critical: Hadoop: tasktracker_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'TASKTRACKER::Process down', 'SERVICE MSG')
+
+test('Hadoop_TaskTracker_Process_Down:OK',
+    'OK: Hadoop: tasktracker_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'TASKTRACKER::Process down', 'SERVICE MSG')
+
+# Hadoop_HBaseMaster_Process_Down
+test('Hadoop_HBaseMaster_Process_Down',
+     'Critical: Hadoop: hbasemaster_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
+
+test('Hadoop_HBaseMaster_Process_Down:OK',
+    'OK: Hadoop: hbasemaster_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
+
+# Hadoop_RegionServer_Process_Down
+test('Hadoop_RegionServer_Process_Down',
+     'Critical: Hadoop: regionserver_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'REGIONSERVER::Process down', 'SERVICE MSG')
+
+test('Hadoop_RegionServer_Process_Down:OK',
+    'OK: Hadoop: regionserver_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'REGIONSERVER::Process down', 'SERVICE MSG')
+
+# Hadoop_RegionServer_Down
+test('Hadoop_RegionServer_Down',
+     'Critical: Hadoop: regionservers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HBASE::Percent region servers down', 'SERVICE MSG')
+
+test('Hadoop_RegionServer_Down:OK',
+    'OK: Hadoop: regionservers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASE::Percent region servers down', 'SERVICE MSG')
+
+# Hadoop_Hive_Metastore_Process_Down
+test('Hadoop_Hive_Metastore_Process_Down',
+     'Critical: Hadoop: hive_metastore_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
+
+test('Hadoop_Hive_Metastore_Process_Down:OK',
+    'OK: Hadoop: hive_metastore_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
+
+# Hadoop_Zookeeper_Down
+test('Hadoop_Zookeeper_Down',
+     'Critical: Hadoop: zookeepers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
+
+test('Hadoop_Zookeeper_Down:OK',
+    'OK: Hadoop: zookeepers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
+
+# Hadoop_Zookeeper_Process_Down
+test('Hadoop_Zookeeper_Process_Down',
+     'Critical: Hadoop: zookeeper_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
+
+test('Hadoop_Zookeeper_Process_Down:OK',
+    'OK: Hadoop: zookeeper_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
+
+# Hadoop_Oozie_Down
+test('Hadoop_Oozie_Down',
+     'Critical: Hadoop: oozie_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'OOZIE::Oozie status check', 'SERVICE MSG')
+
+test('Hadoop_Oozie_Down:OK',
+    'OK: Hadoop: oozie_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'OOZIE::Oozie status check', 'SERVICE MSG')
+
+# Hadoop_Templeton_Down
+test('Hadoop_Templeton_Down',
+     'Critical: Hadoop: templeton_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
+
+test('Hadoop_Templeton_Down:OK',
+    'OK: Hadoop: templeton_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
+
+# Hadoop_Puppet_Down
+test('Hadoop_Puppet_Down',
+     'Critical: Hadoop: puppet_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'PUPPET::Puppet agent down', 'SERVICE MSG')
+
+test('Hadoop_Puppet_Down:OK',
+    'OK: Hadoop: puppet_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'PUPPET::Puppet agent down', 'SERVICE MSG')
+
+# Hadoop_Nagios_Status_Log_Stale
+test('Hadoop_Nagios_Status_Log_Stale',
+     'Critical: Hadoop: nagios_status_log_stale# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
+
+test('Hadoop_Nagios_Status_Log_Stale:OK',
+    'OK: Hadoop: nagios_status_log_stale_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Process_Down
+test('Hadoop_Ganglia_Process_Down',
+     'Critical: Hadoop: ganglia_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Process_Down:OK',
+    'OK: Hadoop: ganglia_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
+
+# Hadoop_UNKNOWN_MSG
+test('Hadoop_UNKNOWN_MSG',
+     'Critical: Hadoop: HADOOP_UNKNOWN_MSG# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ANY UNKNOWN SERVICE', 'SERVICE MSG')
+
+# HBase UI Down
+test('Hadoop_HBase_UI_Down',
+    'Critical: Hadoop: hbase_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
+
+test('Hadoop_HBase_UI_Down:OK',
+    'OK: Hadoop: hbase_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
+
+# Namenode UI Down
+test('Hadoop_NameNode_UI_Down',
+    'Critical: Hadoop: namenode_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_UI_Down:OK',
+    'OK: Hadoop: namenode_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
+
+# JobHistory UI Down
+test('Hadoop_JobHistory_UI_Down',
+    'Critical: Hadoop: jobhistory_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
+
+test('Hadoop_JobHistory_UI_Down:OK',
+    'OK: Hadoop: jobhistory_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
+
+# JobTracker UI Down
+test('Hadoop_JobTracker_UI_Down',
+    'Critical: Hadoop: jobtracker_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_UI_Down:OK',
+    'OK: Hadoop: jobtracker_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
+
+summary()
+

+ 122 - 0
contrib/addons/test/ui/json/alerts.json

@@ -0,0 +1,122 @@
+{"alerts":
+	[
+		{
+			"service_type":"SYSTEM",
+			"service_description":"0 SYSTEM Load-critical",
+			"host_name":"ip-10-242-191-48.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"2",
+			"plugin_output":"OK - load average: 0.12, 0.11, 0.08 99999999999999999999",
+			"last_hard_state_change":"1327362079",
+			"last_time_ok":"1327385479",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"MAPREDUCE",
+			"service_description":"1 MAPRED Current Users - 222",
+			"host_name":"ip-10-242-191-48.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"1",
+			"plugin_output":"USERS OK - 1 users currently logged in",
+			"last_hard_state_change":"1327362154",
+			"last_time_ok":"1327385554",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"HBASE",
+			"service_description":"2 HBASE",
+			"host_name":"ip-10-242-191-48.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"0",
+			"plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+			"last_hard_state_change":"1327362229",
+			"last_time_ok":"1327385629",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"HDFS",
+			"service_description":"3 HDFS warning",
+			"host_name":"ip-10-242-191-48.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"1",
+			"plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):",
+			"last_hard_state_change":"1327362304",
+			"last_time_ok":"1327385704",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"HDFS",
+			"service_description":"4 HDFS critical",
+			"host_name":"ip-10-40-199-111.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"2",
+			"plugin_output":"4:HDFS critical alert",
+			"last_hard_state_change":"1327362104",
+			"last_time_ok":"1327385504",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"MAPREDUCE",
+			"service_description":"5 MAPREDUCE ok",
+			"host_name":"ip-10-40-199-111.ec2.internal",
+			"current_attempt":"1",
+			"last_hard_state":"0",
+			"plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):",
+			"last_hard_state_change":"1327362254",
+			"last_time_ok":"1327385654",
+			"last_time_warning":"0",
+			"last_time_unknown":"0",
+			"last_time_critical":"0",
+			"is_flapping":"0",
+			"last_check":"1328827745"
+		},
+		{
+			"service_type":"ZOOKEEPER","service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"1","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
+		{
+			"service_type":"HIVE-METASTORE","service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"1","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
+		{
+			"service_type":"HBASE","service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
+		{
+			"service_type":"HBASE","service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
+		{
+			"service_type":"HBASE","service_description":"Root Partition","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
+		{
+			"service_type":"HBASE","service_description":"Root Partition","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"
+		}
+	],
+"hosts":
+	[
+		{"host_name":"ip-10-242-191-48.ec2.internal","last_hard_state":"0","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms","last_check":"1327385564","current_attempt":"1","last_hard_state_change":"1327362079","last_time_up":"1327385574","last_time_down":"0","last_time_unreachable":"0","is_flapping":"0","alerts":[{"service_description":"HDFS Current Load","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"OK - load average: 0.12, 0.11, 0.08","last_hard_state_change":"1327362079","last_time_ok":"1327385479","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
+		{"service_description":"MAPRED Current Users","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"USERS OK - 1 users currently logged in","last_hard_state_change":"1327362154","last_time_ok":"1327385554","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
+		{"service_description":"PING","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms","last_hard_state_change":"1327362229","last_time_ok":"1327385629","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
+		{"service_description":"Root Partition","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362304","last_time_ok":"1327385704","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]},
+		{"host_name":"ip-10-40-199-111.ec2.internal","last_hard_state":"1","last_hard_state":"0","plugin_output":"(Host Check Timed Out)","last_check":"1327385664","current_attempt":"2","last_hard_state_change":"1327383724","last_time_up":"1327385574","last_time_down":"1327385694","last_time_unreachable":"0","is_flapping":"0",
+			"alerts":[{"service_description":"DATANODE Total Processes","host_name":"ip-10-40-199-111.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362104","last_time_ok":"1327385504","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
+		{"service_description":"Root Partition","host_name":"ip-10-40-199-111.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362254","last_time_ok":"1327385654","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]},
+		{"host_name":"ip-10-80-119-243.ec2.internal","last_hard_state":"0","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.44 ms","last_check":"1327385464","current_attempt":"1","last_hard_state_change":"1327362129","last_time_up":"1327385474","last_time_down":"0","last_time_unreachable":"0","is_flapping":"0",
+			"alerts":[{"service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
+		{"service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},{"service_description":"Root Partition",
+		"host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]}],
+"hostcounts":{"up_hosts":9,"down_hosts":0},
+"servicestates":{"HIVE-METASTORE":1,"ZOOKEEPER":0,"HBASE":"0","HDFS":"0","MAPREDUCE":"0"}
+}

+ 119 - 0
contrib/addons/test/ui/json/clusterSummary.json

@@ -0,0 +1,119 @@
+{
+  "overall" : {
+     "dfs_configured_capacity": 1234546666,
+     "dfs_percent_used": 25,
+     "dfs_percent_remaining": 75,
+     "dfs_total_bytes": 36336891658240,
+     "dfs_used_bytes": 1750237184,
+     "nondfs_used_bytes": 331691536384,
+     "dfs_free_bytes": 36003449884672,
+     "live_nodes": 5,
+     "dead_nodes": 1,
+     "decommissioning_nodes": 1,
+     "dfs_blocks_underreplicated": 23,
+     "jobtracker_starttime": 1243236234,
+     "namenode_starttime": 1243234234,
+     "trackers_live": 4,
+     "trackers_blacklisted": 1,
+      "running_jobs": 10,
+      "waiting_jobs": 1,
+      "namenode_addr": "1.1.1.1:50070",
+      "jobtracker_addr": "2.2.2.2:50030",
+      "hbasemaster_addr": "3.3.3.3:60010",
+      "hbasemaster_starttime": 1243234234,
+      "live_regionservers": 4,
+      "dead_regionservers": 3,
+      "regions_in_transition_count": 3,
+      "ganglia_url": "http://gangliahost/ganglia",
+      "nagios_url": "http://nagioshost/nagios",
+      "hdfs_installed": true,
+      "mapreduce_installed": true,
+      "hbase_installed": true
+   },
+   "hdfs": {
+       "service_type": "HDFS",
+       "namenode_addr": "nnhost.amazon.seattle.firstserver.com:50070",
+       "total_nodes": 10,
+       "live_nodes": 8,
+       "dead_nodes": 1,
+       "decommissioning_nodes": 1,
+       "start_time": 23235351,
+       "pending_upgrades": false,
+       "version": "1.0.0, r12345",
+       "safemode": true,
+       "memory_heap_used": 1001312,
+       "memory_heap_max": 800212312,
+       "dfs_configured_capacity": 1234456545644,
+       "dfs_percent_used": 24,
+       "dfs_percent_remaining": 76,
+       "dfs_blocks_total": 100113,
+       "dfs_blocks_underreplicated": 0,
+       "dfs_blocks_missing": 0,
+       "dfs_blocks_corrupt": 0,
+       "dfs_dirfiles_count": 1045
+   },
+   "mapreduce": {
+        "service_type": "MAPREDUCE",
+        "jobtracker_addr": "jthost.amazon.seattle.firstserver.com:50069",
+        "jobhistory_addr": "jthost:51111",
+        "trackers_total": 10,
+        "trackers_live": 7,
+        "trackers_graylisted": 1,
+        "trackers_blacklisted": 1,
+        "trackers_excluded": 1,
+        "start_time": 23235351,
+        "version": "1.0.0, r12345",
+        "memory_heap_used": 10042424,
+        "memory_heap_max": 8003242420,
+        "map_task_capacity": 32,
+        "reduce_task_capacity": 8,
+        "average_node_capacity": 5,
+        "job_total_submissions": 6,
+        "job_total_completions": 3,
+        "running_map_tasks": 5,
+        "running_reduce_tasks": 5,
+        "occupied_map_slots": 3,
+        "occupied_reduce_slots": 4,
+        "reserved_map_slots": 3,
+        "reserved_reduce_slots": 5,
+        "waiting_maps": 3,
+        "waiting_reduces": 3,
+        "queue_info": {
+            "type": "capacity_scheduler",
+            "queues": [
+                {
+                    "default": {
+                        "capacity_percentage": 50,
+                        "user_limit": 100,
+                        "map_capacity": 2,
+                        "map_used_capacity": 1,
+                        "map_running_tasks": 1,
+                        "reduce_capacity": 2,
+                        "reduce_used_capacity": 1,
+                        "reduce_running_tasks": 1,
+                        "waiting_jobs": 1,
+                        "initializing_jobs": 1,
+                        "users_with_submitted_jobs": 1
+                    }
+                }
+            ]
+        }
+    },
+    "hbase": {
+        "version": "0.92.0",
+        "hbasemaster_addr": "1.1.1.1.amazon.seattle.com:60011",
+        "live_regionservers": 5,
+        "dead_regionservers": 1,
+        "regions_in_transition_count": 1,
+        "cluster_id": "ddd-ddd-dddd",
+        "zookeeper_quorum": [
+            "zkhost1: 2181",
+            "zkhost2: 2181"
+        ],
+        "start_time": 1327179894,
+        "active_time": 1327179894,
+        "average_load": 3,
+        "memory_heap_used":1004242423,
+        "memory_heap_max": 8003242423
+    }
+}

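For readers checking the clusterSummary.json fixture above, a minimal sketch (assuming Python and a local checkout; the path and field names are taken from the fixture, nothing else is implied by this change) that loads the summary and cross-checks the reported free-capacity figure against one derived from the other fields:

    import json

    # Illustrative only: load the test fixture added above and sanity-check that
    # dfs_free_bytes equals total capacity minus DFS and non-DFS usage.
    with open("contrib/addons/test/ui/json/clusterSummary.json") as f:
        overall = json.load(f)["overall"]

    derived_free = (overall["dfs_total_bytes"]
                    - overall["dfs_used_bytes"]
                    - overall["nondfs_used_bytes"])
    print("reported dfs_free_bytes: %d" % overall["dfs_free_bytes"])
    print("derived  dfs_free_bytes: %d" % derived_free)
    print("dfs_percent_used: %d%%" % overall["dfs_percent_used"])
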
+ 24 - 0
contrib/addons/test/ui/json/get_graph_info_all.json

@@ -0,0 +1,24 @@
+{
+    "Global": [
+        {
+            "description": "Key load metrics, aggregated across the entire grid", 
+            "title": "Load Report", 
+            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=load_report"
+        }, 
+        {
+            "description": "Key memory metrics, aggregated across the entire grid", 
+            "title": "Memory Report", 
+            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=mem_report"
+        }, 
+        {
+            "description": "Key CPU metrics, aggregated across the entire grid", 
+            "title": "CPU Report", 
+            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=cpu_report"
+        }, 
+        {
+            "description": "Key network I/O metrics, aggregated across the entire grid", 
+            "title": "Network I/O Report", 
+            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=network_report"
+        }
+    ] 
+}

+ 0 - 0
contrib/addons/test/ui/json/hbaseSummary.json


+ 24 - 0
contrib/addons/test/ui/json/hdfsSummary.json

@@ -0,0 +1,24 @@
+{
+    "hdfs": {
+        "service_type": "HDFS",
+        "namenode_addr": "nnhost:50070",
+        "total_nodes": 10,
+        "live_nodes": 8,
+        "dead_nodes": 1,
+        "decommissioned_nodes": 1,
+        "start_time": 23235351,
+        "pending_upgrades": false,
+        "version": "1.0.0, r1234555555555555555555555555555555555555333333333",
+        "safemode": false,
+        "memory_heap_committed": 100131233,
+        "memory_heap_total": 800212312,
+        "dfs_configured_capacity": 1234456545644,
+        "dfs_percent_used": 24,
+        "dfs_percent_remaining": 76,
+        "dfs_blocks_total": 100113,
+        "dfs_blocks_underreplicated": 0,
+        "dfs_blocks_missing": 0,
+        "dfs_blocks_corrupt": 0,
+        "dfs_dirfiles_count": 1045
+    }
+}

+ 26 - 0
contrib/addons/test/ui/json/mrSummary.json

@@ -0,0 +1,26 @@
+{
+    "mapreduce": {
+        "service_type": "MAPREDUCE",
+        "jobtracker_addr": "jthost:50030",
+        "trackers_total": 10,
+        "trackers_live": 7,
+        "trackers_graylisted": 1,
+        "trackers_blacklisted": 1,
+        "trackers_excluded": 1,
+        "start_time": 23235351,
+        "version": "1.0.0, r12345",
+        "memory_heap_committed": 1004242423,
+        "memory_heap_total": 8003242423,
+        "map_task_capacity": 32,
+        "reduce_task_capacity": 8,
+        "average_node_capacity": 5,
+        "job_total_submissions": 6,
+        "job_total_completions": 3,
+        "running_map_tasks": 5,
+        "running_reduce_tasks": 5,
+        "occupied map slots": 3,
+        "occupied reduce slots": 4,
+        "reserved map slots": 3,
+        "reserved reduce slots": 5
+    }
+}

+ 36 - 0
contrib/addons/utils/dataServices/ganglia/generateAll.sh

@@ -0,0 +1,36 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+GRAPH_INFO_JSON_PATH="../../../src/dataServices/ganglia/graph_info";
+
+JSON_PRETTY_PRINT="python -mjson.tool"
+
+### WARNING: These PHP definitions have diverged from the actual JSON definitions
+###          (which I started to modify directly), so running the scripts below
+###          will result in data loss!
+
+### php ./generate_dashboard_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/dashboard/all.json;
+### php ./generate_dashboard_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/dashboard/custom/hdp.json;
+### php ./generate_mapreduce_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/mapreduce/all.json;
+### php ./generate_mapreduce_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/mapreduce/custom/hdp.json;
+### php ./generate_hdfs_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/hdfs/all.json;
+### php ./generate_hdfs_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/hdfs/custom/hdp.json;

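Given the data-loss warning in generateAll.sh above, a hedged sketch of how one of the commented-out generators could be re-run into a scratch location for review instead of overwriting the checked-in graph_info tree (the generator name comes from the script; the /tmp path is only illustrative):

    import json
    import subprocess

    # Illustrative only: run one generator, pretty-print its output, and write it
    # to a scratch file so it can be diffed against the checked-in JSON before
    # anything under graph_info/ is replaced.
    raw = subprocess.check_output(["php", "./generate_dashboard_hdp_json.php"])
    candidate = json.dumps(json.loads(raw.decode("utf-8")), indent=4, sort_keys=True)
    with open("/tmp/dashboard_hdp_candidate.json", "w") as out:
        out.write(candidate + "\n")
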
+ 66 - 0
contrib/addons/utils/dataServices/ganglia/generate_dashboard_hdp_json.php

@@ -0,0 +1,66 @@
+<?php
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+*/
+
+
+$data = array
+  (
+     'Global' => array
+     (
+        array
+        (
+           'url' =>
+           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%HDFSSlavesClusterName%&g=hdp_mon_hdfs_io_report',
+           'title' => 'HDFS I/O',
+           'description' => 'Bytes written to and read from HDFS, aggregated across all the DataNodes',
+           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
+        ),
+        array
+        (
+           'url' =>
+           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_map_slot_report',
+           'title' => 'Map Slot Utilization',
+           'description' => 'Utilized Map slots (occupied + reserved) vs. Total Map slots',
+           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
+        ),
+        array
+        (
+           'url' =>
+           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_reduce_slot_report',
+           'title' => 'Reduce Slot Utilization',
+           'description' => 'Utilized Reduce slots (occupied + reserved) vs. Total Reduce slots',
+           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
+        ),
+        array
+        (
+           'url' =>
+           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_mapreduce_report',
+           'title' => 'MapReduce Backlog',
+           'description' => 'Waiting Maps and Reduces, to give a feel for a combined MapReduce backlog',
+           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
+        )
+     )
+  );
+
+echo json_encode($data);
+
+?>
+

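The generator above leaves %GangliaWebHostName%-style tokens in the URLs; how those are filled in is not part of this diff, but a minimal sketch of the kind of substitution a consumer would need (all parameter values and the input path below are hypothetical):

    import json

    # Hypothetical deployment values; the real substitution happens outside this diff.
    params = {
        "%GangliaWebHostName%": "ganglia.example.com",
        "%GangliaWebPort%": "80",
        "%HDFSSlavesClusterName%": "HDPSlaves",
        "%NameNodeClusterName%": "HDPNameNode",
        "%JobTrackerClusterName%": "HDPJobTracker",
    }

    with open("hdp.json") as f:  # output of the generator above; path is illustrative
        text = f.read()
    for token, value in params.items():
        text = text.replace(token, value)

    for graph in json.loads(text)["Global"]:
        print("%s -> %s" % (graph["title"], graph["url"]))
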
Some files were not shown in this diff because too many files have changed