
AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)

Lisnichenko Dmitro, 8 years ago
commit 984d46056b
100 changed files with 1790 additions and 1471 deletions
  1. + 9 - 10  ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
  2. + 27 - 2  ambari-common/src/main/python/resource_management/libraries/script/dummy.py
  3. + 2 - 0  ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
  4. + 11 - 18  ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/conf-select.py
  5. + 145 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/distro-select.py
  6. + 32 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/hook.py
  7. + 23 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/params.py
  8. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/metainfo.xml
  9. + 7 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/repos/repoinfo.xml
  10. + 8 - 8  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/alerts.json
  11. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-alert-config.xml
  12. + 39 - 39  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-env.xml
  13. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-log4j.xml
  14. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-logsearch-conf.xml
  15. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-policy.xml
  16. + 25 - 43  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-site.xml
  17. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-audit.xml
  18. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-policymgr-ssl.xml
  19. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-security.xml
  20. + 6 - 6  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/kerberos.json
  21. + 23 - 23  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/metainfo.xml
  22. + 231 - 231  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/metrics.json
  23. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/alerts/hbase_master_process.py
  24. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/alerts/hbase_regionserver_process.py
  25. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_client.py
  26. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_master.py
  27. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_regionserver.py
  28. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/phoenix_queryserver.py
  29. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/service_check.py
  30. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/quicklinks/quicklinks.json
  31. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/themes/theme.json
  32. + 85 - 85  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/widgets.json
  33. + 17 - 17  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/alerts.json
  34. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/core-site.xml
  35. + 34 - 34  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-env.xml
  36. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-metrics2.properties.xml
  37. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-policy.xml
  38. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-alert-config.xml
  39. + 5 - 5  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-log4j.xml
  40. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-logsearch-conf.xml
  41. + 30 - 30  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-site.xml
  42. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-audit.xml
  43. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-plugin-properties.xml
  44. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-policymgr-ssl.xml
  45. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-security.xml
  46. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ssl-client.xml
  47. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ssl-server.xml
  48. + 9 - 9  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json
  49. + 28 - 28  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml
  50. + 191 - 191  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metrics.json
  51. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py
  52. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py
  53. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py
  54. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py
  55. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py
  56. + 57 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py
  57. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py
  58. + 58 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py
  59. + 27 - 6  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py
  60. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py
  61. + 9 - 18  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py
  62. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py
  63. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py
  64. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py
  65. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json
  66. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json
  67. + 124 - 124  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json
  68. + 176 - 176  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/YARN_metrics.json
  69. + 139 - 139  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/YARN_widgets.json
  70. + 14 - 14  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/alerts.json
  71. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration-mapred/mapred-env.xml
  72. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration-mapred/mapred-site.xml
  73. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/capacity-scheduler.xml
  74. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-audit.xml
  75. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-plugin-properties.xml
  76. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-policymgr-ssl.xml
  77. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-security.xml
  78. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-alert-config.xml
  79. + 56 - 56  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-env.xml
  80. + 5 - 5  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-log4j.xml
  81. + 21 - 37  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-site.xml
  82. + 9 - 9  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/kerberos.json
  83. + 35 - 33  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/metainfo.xml
  84. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_history_process.py
  85. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_nodemanager_health.py
  86. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_resourcemanager_process.py
  87. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_timeline_process.py
  88. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/application_timeline_server.py
  89. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/historyserver.py
  90. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/mapred_service_check.py
  91. + 4 - 4  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/mapreduce2_client.py
  92. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/nodemanager.py
  93. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/resourcemanager.py
  94. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/service_check.py
  95. + 1 - 1  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/yarn_client.py
  96. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/quicklinks-mapred/quicklinks.json
  97. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/quicklinks/quicklinks.json
  98. + 0 - 0  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/themes-mapred/theme.json
  99. + 2 - 2  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/themes/theme.json
  100. + 3 - 3  ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEZOOKEEPER/alerts.json

+ 9 - 10
ambari-agent/src/main/python/ambari_agent/PythonExecutor.py

@@ -91,10 +91,9 @@ class PythonExecutor(object):
     recreated or appended.
     The structured out file, however, is preserved during multiple invocations that use the same file.
     """
-
     pythonCommand = self.python_command(script, script_params)
     logger.debug("Running command " + pprint.pformat(pythonCommand))
-    
+
     if handle is None:
       tmpout, tmperr = self.open_subprocess_files(tmpoutfile, tmperrfile, override_output_files, backup_log_files)
 
@@ -111,10 +110,10 @@ class PythonExecutor(object):
       self.event.set()
       thread.join()
       result = self.prepare_process_result(process.returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
-      
+
       if log_info_on_failure and result['exitcode']:
         self.on_failure(pythonCommand, result)
-      
+
       return result
     else:
       holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
@@ -122,7 +121,7 @@ class PythonExecutor(object):
       background = BackgroundThread(holder, self)
       background.start()
       return {"exitcode": 777}
-    
+
   def on_failure(self, pythonCommand, result):
     """
     Log some useful information after task failure.
@@ -134,11 +133,11 @@ class PythonExecutor(object):
       cmd_list = ["ps faux", "netstat -tulpn"]
 
     shell_runner = shellRunner()
-    
+
     for cmd in cmd_list:
       ret = shell_runner.run(cmd)
       logger.info("Command '{0}' returned {1}. {2}{3}".format(cmd, ret["exitCode"], ret["error"], ret["output"]))
-    
+
   def prepare_process_result(self, returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=None):
     out, error, structured_out = self.read_result_from_files(tmpoutfile, tmperrfile, tmpstructedoutfile)
 
@@ -166,7 +165,7 @@ class PythonExecutor(object):
       else:
         structured_out = {}
     return out, error, structured_out
-  
+
   def preexec_fn(self):
     os.setpgid(0, 0)
 
@@ -197,14 +196,14 @@ class PythonExecutor(object):
 
   def condenseOutput(self, stdout, stderr, retcode, structured_out):
     log_lines_count = self.config.get('heartbeat', 'log_lines_count')
-    
+
     result = {
       "exitcode": retcode,
       "stdout": self.grep.tail(stdout, log_lines_count) if log_lines_count else stdout,
       "stderr": self.grep.tail(stderr, log_lines_count) if log_lines_count else stderr,
       "structuredOut" : structured_out
     }
-    
+
     return result
 
   def python_watchdog_func(self, python, timeout):

+ 27 - 2
ambari-common/src/main/python/resource_management/libraries/script/dummy.py

@@ -23,16 +23,21 @@ __all__ = ["Dummy"]
 
 # Python Imports
 import os
+import re
 
 # Local Imports
 from resource_management.libraries.script.script import Script
 from resource_management.core.resources.system import Directory, File, Execute
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
 
 
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
 
 class Dummy(Script):
   """
@@ -69,12 +74,25 @@ class Dummy(Script):
   def install(self, env):
     print "Install"
     self.prepare()
+    component_name = self.get_component_name()
+    repo_info = str(default("/hostLevelParams/repo_info", "1.1.1.1-1"))
+    matches = re.findall(r"([\d\.]+\-\d+)", repo_info)
+    version = matches[0] if matches and len(matches) > 0 else "1.1.1.1-1"
+
+    from resource_management.libraries.functions import stack_tools
+    (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
+    command = 'ambari-python-wrap {0} install {1}'.format(stack_selector_path, version)
+    Execute(command)
+
+    if component_name:
+      conf_select.select("PERF", component_name, version)
+      stack_select.select(component_name, version)
 
   def configure(self, env):
     print "Configure"
     self.prepare()
 
-  def start(self, env):
+  def start(self, env, upgrade_type=None):
     print "Start"
     self.prepare()
 
@@ -101,7 +119,7 @@ class Dummy(Script):
            content=""
            )
 
-  def stop(self, env):
+  def stop(self, env, upgrade_type=None):
     print "Stop"
     self.prepare()
 
@@ -115,3 +133,10 @@ class Dummy(Script):
 
     if not os.path.isfile(self.pid_file):
       raise ComponentIsNotRunning()
+
+  def get_component_name(self):
+    """
+    To be overridden by subclasses.
+     Returns a string with the component name used in selecting the version.
+    """
+    pass
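
For illustration, a minimal sketch of the two pieces the new install() flow relies on: the version parsed out of repo_info (the URL here mirrors the repoinfo.xml change later in this commit, and the fallback is the patch's own default), and a subclass opting in to selection by overriding get_component_name(), which the base class leaves returning None. FakeDataNode and FAKEHDFS_DATANODE are illustrative names, not part of this commit.

import re

repo_info = "http://foo-1.0.1.0-1"                    # assumed /hostLevelParams/repo_info payload
matches = re.findall(r"([\d\.]+\-\d+)", repo_info)
version = matches[0] if matches else "1.1.1.1-1"      # same fallback as the patch
print(version)                                        # -> 1.0.1.0-1

class FakeDataNode(object):                           # stands in for a Dummy subclass
    def get_component_name(self):
        # Returning a name makes install() run conf-select/stack-select;
        # the base implementation returns None and selection is skipped.
        return "FAKEHDFS_DATANODE"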

+ 2 - 0
ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py

@@ -61,6 +61,8 @@ class UpgradeSetAll(Script):
       stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
       cmd = ('ambari-python-wrap', stack_selector_path, 'set', 'all', version)
       code, out = shell.call(cmd, sudo=True)
+      if code != 0:
+        raise Exception("Command '{0}' exit code is nonzero".format(cmd))
 
     if real_ver and check_stack_feature(StackFeature.CONFIG_VERSIONING, real_ver):
       # backup the old and symlink /etc/[component]/conf to <stack-root>/current/[component]
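
The guard added above makes the upgrade task fail fast instead of carrying on after a failed selector call. A standalone sketch of the same fail-fast pattern using plain subprocess (the selector path and version string are illustrative; the real code goes through resource_management's shell.call with sudo):

import subprocess

cmd = ("ambari-python-wrap", "/usr/bin/distro-select", "set", "all", "1.0.1.0-1")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate()
if proc.returncode != 0:
    raise Exception("Command '{0}' exit code is nonzero".format(cmd))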

+ 11 - 18
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py → ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/conf-select.py

@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 """
 Licensed to the Apache Software Foundation (ASF) under one
 or more contributor license agreements.  See the NOTICE file
@@ -14,29 +15,21 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+"""
 
-Ambari Agent
+import sys
 
-"""
 
-# Python Imports
 
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
+# main method to parse arguments from user and start work
+def do_work(args):
+  pass
 
 
-class DataNode(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
+def main():
 
-  def __init__(self):
-    super(DataNode, self).__init__()
-    self.component_name = "DATANODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.datanode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.datanode.keytab.file"
+  if len(sys.argv) <= 1:
+    sys.exit(-1)
 
-if __name__ == "__main__":
-  DataNode().execute()
+  args = sys.argv[1:]
+  do_work(args)
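
The rewritten conf-select.py is deliberately a stub: do_work() accepts whatever the agent passes and does nothing, which lets the PERF stack simulate configuration selection without touching real conf directories. A hedged sketch of invoking the copy that the before-INSTALL hook installs (the arguments shown are illustrative; the stub ignores them):

import subprocess

# hook.py below copies this script to /usr/bin/conf-select and marks it executable;
# the subcommand and flags here are illustrative, since do_work() is a no-op.
rc = subprocess.call(["ambari-python-wrap", "/usr/bin/conf-select",
                      "set-conf-dir", "--package", "hadoop"])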

+ 145 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/distro-select.py

@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+
+AMBARI_AGENT_HOST_DIR = "AMBARI_AGENT_HOST_DIR"
+
+SYMLINKS_TXT = "symlinks.txt"
+VERSIONS_TXT = "versions.txt"
+
+# main method to parse arguments from user and start work
+def main():
+  if len(sys.argv) <= 1:
+    sys.exit(-1)
+
+  args = sys.argv[1:]
+
+  do_work(args)
+
+
+def extrakt_var_from_pythonpath(name):
+  PATH = os.environ['PATH']
+  paths = PATH.split(':')
+  var = ''
+  for item in paths:
+    if item.startswith(name):
+      var = item.replace(name, '')
+      break
+  return var
+
+
+def print_versions(args):
+  dest = versions_file_destination()
+
+  with open(dest, 'r') as f:
+    for line in f:
+      print line
+
+
+def versions_file_destination():
+  agent_host_dir = extrakt_var_from_pythonpath(AMBARI_AGENT_HOST_DIR)
+  dest = os.path.join(agent_host_dir, VERSIONS_TXT)
+  if not os.path.exists(dest):
+    open(dest, 'w').close()
+  return dest
+
+
+def print_status(args):
+  dest = symlinks_file_destination()
+
+  with open(dest, 'r') as f:
+    if len(args) >= 2:
+      for line in f:
+        if args[1] in line:
+          print line
+          pass
+
+    for line in f:
+      print line
+
+
+def set_version(args):
+  dest = symlinks_file_destination()
+
+  line_template = "{0} - {1}\n"
+  result = ""
+  with open(dest, 'r') as f:
+
+    if len(args) >= 3:
+      if args[1] != "all":
+        seted = False
+        for line in f:
+          if args[1] in line:
+            compinfo = str.split(line)
+            result += line_template.format(compinfo[0], args[2])
+            seted = True
+          else:
+            result += line
+        if seted != True:
+          result += line_template.format(args[1], args[2])
+      else:
+        for line in f:
+          compinfo = str.split(line)
+          result += line_template.format(compinfo[0], args[2])
+
+  with open(dest, 'w') as f:
+    f.write(result)
+
+
+def symlinks_file_destination():
+  agent_host_dir = extrakt_var_from_pythonpath(AMBARI_AGENT_HOST_DIR)
+  dest = os.path.join(agent_host_dir, SYMLINKS_TXT)
+  if not os.path.exists(dest):
+    open(dest, 'w').close()
+  return dest
+
+
+def install_version(args):
+  dest = versions_file_destination()
+  installed = False
+  with open(dest, 'r') as f:
+    for line in f:
+      if args[1] in line:
+        installed = True
+        break
+  if not installed:
+    with open(dest, 'a') as f:
+      if args[1]:
+        f.write(args[1] + "\n")
+
+def do_work(args):
+  """
+  Check that all required args are passed in. If so, perform required action.
+  :param args:
+  """
+  if not args[0] or args[0] == "status":
+    print_status(args)
+  elif args[0] == "versions":
+    print_versions(args)
+  elif args[0] == "set":
+    set_version(args)
+  elif args[0] == "install":
+    install_version(args)
+
+
+
+if __name__ == "__main__":
+  main()
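
A hedged usage sketch of the contract distro-select.py implements, going through the copy the before-INSTALL hook places at /usr/bin/distro-select; the component and version strings are illustrative:

import subprocess

def distro_select(*args):
    # hook.py copies distro-select.py to /usr/bin/distro-select
    return subprocess.call(["ambari-python-wrap", "/usr/bin/distro-select"] + list(args))

distro_select("install", "1.0.1.0-1")                   # append to versions.txt if absent
distro_select("versions")                               # print every recorded version
distro_select("set", "FAKEHDFS_DATANODE", "1.0.1.0-1")  # record or update one component line
distro_select("set", "all", "1.0.1.0-1")                # repoint every recorded component
distro_select("status")                                 # print "component - version" lines

Both versions.txt and symlinks.txt live in the directory encoded as the AMBARI_AGENT_HOST_DIR entry in PATH and are created empty on first use.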

+ 32 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/hook.py

@@ -16,13 +16,44 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
+import os
+from resource_management.core.resources.system import Directory, File, Execute
 from resource_management.libraries.script import Hook
 
+AMBARI_AGENT_CACHE_DIR = 'AMBARI_AGENT_CACHE_DIR'
+
+BEFORE_INSTALL_SCRIPTS = "hooks/before-INSTALL/scripts"
+STACK = "PERF/1.0"
+STACKS = "stacks"
+DISTRO_SELECT_PY = os.path.join(STACKS, STACK, BEFORE_INSTALL_SCRIPTS, "distro-select.py")
+CONF_SELECT_PY = os.path.join(STACKS, STACK, BEFORE_INSTALL_SCRIPTS, "conf-select.py")
+DISTRO_SELECT_DEST = "/usr/bin/distro-select"
+CONF_SELECT_DEST = "/usr/bin/conf-select"
+
 class BeforeInstallHook(Hook):
 
   def hook(self, env):
     print "Before Install Hook"
+    cache_dir = self.extrakt_var_from_pythonpath(AMBARI_AGENT_CACHE_DIR)
+    conf_select = os.path.join(cache_dir, CONF_SELECT_PY)
+    dist_select = os.path.join(cache_dir, DISTRO_SELECT_PY)
+    if not os.path.exists(CONF_SELECT_DEST):
+      Execute("cp %s %s" % (conf_select, CONF_SELECT_DEST), user="root")
+      Execute("chmod a+x %s" % (CONF_SELECT_DEST), user="root")
+    if not os.path.exists(DISTRO_SELECT_DEST):
+      Execute("cp %s %s" % (dist_select, DISTRO_SELECT_DEST), user="root")
+      Execute("chmod a+x %s" % (DISTRO_SELECT_DEST), user="root")
+
+  def extrakt_var_from_pythonpath(self, name):
+
+    PATH = os.environ['PATH']
+    paths = PATH.split(':')
+    var = ''
+    for item in paths:
+      if item.startswith(name):
+        var = item.replace(name, '')
+        break
+    return var
 
 if __name__ == "__main__":
   BeforeInstallHook().execute()
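
The hook locates the agent cache through a PATH-encoding convention: a minimal sketch, assuming the PERF agent prepends entries of the form NAME<value> to PATH (the cache path shown is illustrative):

import os

os.environ['PATH'] = 'AMBARI_AGENT_CACHE_DIR/var/lib/ambari-agent/cache:/usr/bin'

def extract_var_from_path(name):
    # Mirror of extrakt_var_from_pythonpath: strip the marker, keep the value.
    for item in os.environ['PATH'].split(':'):
        if item.startswith(name):
            return item.replace(name, '')
    return ''

print(extract_var_from_path('AMBARI_AGENT_CACHE_DIR'))  # -> /var/lib/ambari-agent/cache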

+ 23 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-INSTALL/scripts/params.py

@@ -0,0 +1,23 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import default, format
+
+
+version = default("/commandParams/version", None)
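
A hedged sketch of a consumer: /commandParams/version is typically populated only on upgrade-related commands, so code importing this module should expect None on fresh installs and plain restarts (the import follows Ambari's params convention; the consumer itself is hypothetical):

import params

if params.version is None:
    print("no upgrade in progress")
else:
    print("target stack version: " + params.version)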

+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/metainfo.xml

@@ -16,7 +16,7 @@
    limitations under the License.
 -->
 <metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
+  <versions>
+    <active>false</active>
+  </versions>
 </metainfo>

+ 7 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/repos/repoinfo.xml

@@ -18,10 +18,16 @@
 <reposinfo>
   <os family="redhat6">
     <repo>
-      <baseurl>http://foo</baseurl>
+      <baseurl>http://foo-1.0.1.0-1</baseurl>
       <repoid>PERF-1.0</repoid>
       <reponame>PERF</reponame>
       <unique>true</unique>
     </repo>
+    <repo>
+      <baseurl>http://foo2</baseurl>
+      <repoid>PERF-UTILS-1.0</repoid>
+      <reponame>PERF-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
   </os>
 </reposinfo>

+ 8 - 8
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/alerts.json

@@ -1,32 +1,32 @@
 {
-  "HBASE": {
+  "FAKEHBASE": {
 
-    "HBASE_MASTER": [
+    "FAKEHBASE_MASTER": [
       {
         "name": "hbase_master_process",
-        "label": "HBase Master Process",
+        "label": "FAKEHBase Master Process",
         "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py",
+          "path": "PERF/1.0/services/FAKEHBASE/package/alerts/hbase_master_process.py",
           "parameters": []
         }
       }
     ],
-    "HBASE_REGIONSERVER": [
+    "FAKEHBASE_REGIONSERVER": [
       {
         "name": "hbase_regionserver_process",
-        "label": "HBase RegionServer Process",
-        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "label": "HBase FAKERegionServer Process",
+        "description": "This host-level alert is triggered if the FAKERegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py",
+          "path": "PERF/1.0/services/FAKEHBASE/package/alerts/hbase_regionserver_process.py",
           "parameters": []
         }
       }

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-alert-config.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-alert-config.xml


+ 39 - 39
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-env.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-env.xml

@@ -35,8 +35,8 @@
   <property>
     <name>hbase_regionserver_heapsize</name>
     <value>4096</value>
-    <description>Maximum amount of memory each HBase RegionServer can use.</description>
-    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <description>Maximum amount of memory each HBase FAKERegionServer can use.</description>
+    <display-name>HBase FAKERegionServer Maximum Memory</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -49,8 +49,8 @@
   <property>
     <name>hbase_master_heapsize</name>
     <value>4096</value>
-    <description>Maximum amount of memory each HBase Master can use.</description>
-    <display-name>HBase Master Maximum Memory</display-name>
+    <description>Maximum amount of memory each FAKEHBase Master can use.</description>
+    <display-name>FAKEHBase Master Maximum Memory</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -63,19 +63,19 @@
   <property>
     <name>hbase_user_nofile_limit</name>
     <value>32000</value>
-    <description>Max open files limit setting for HBASE user.</description>
+    <description>Max open files limit setting for FAKEHBASE user.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hbase_user_nproc_limit</name>
     <value>16000</value>
-    <description>Max number of processes limit setting for HBASE user.</description>
+    <description>Max number of processes limit setting for FAKEHBASE user.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hbase_java_io_tmpdir</name>
     <value>/tmp</value>
-    <description>Used in hbase-env.sh as HBASE_OPTS=-Djava.io.tmpdir=java_io_tmpdir</description>
+    <description>Used in hbase-env.sh as FAKEHBASE_OPTS=-Djava.io.tmpdir=java_io_tmpdir</description>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
@@ -94,10 +94,10 @@
   <property>
     <name>hbase_regionserver_shutdown_timeout</name>
     <value>30</value>
-    <display-name>HBase RegionServer shutdown timeout</display-name>
+    <display-name>HBase FAKERegionServer shutdown timeout</display-name>
     <description>
-      After this number of seconds waiting for graceful stop of HBase Master it will be forced to exit with SIGKILL.
-      The timeout is introduced because there is a known bug when from time to time HBase RegionServer hangs forever on stop if NN safemode is on.
+      After this number of seconds waiting for graceful stop of FAKEHBase Master it will be forced to exit with SIGKILL.
+      The timeout is introduced because there is a known bug when from time to time HBase FAKERegionServer hangs forever on stop if NN safemode is on.
     </description>
     <value-attributes>
       <type>int</type>
@@ -135,11 +135,11 @@
     <name>hbase_regionserver_xmn_max</name>
     <value>512</value>
     <description>
-      Sets the upper bound on HBase RegionServers' young generation size.
+      Sets the upper bound on HBase FAKEFAKERegionServers' young generation size.
       This value is used in case the young generation size (-Xmn) calculated based on the max heapsize (hbase_regionserver_heapsize)
       and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
     </description>
-    <display-name>RegionServers maximum value for -Xmn</display-name>
+    <display-name>FAKEFAKERegionServers maximum value for -Xmn</display-name>
     <value-attributes>
       <type>int</type>
       <unit>MB</unit>
@@ -149,7 +149,7 @@
   <property>
     <name>hbase_regionserver_xmn_ratio</name>
     <value>0.2</value>
-    <display-name>RegionServers -Xmn in -Xmx ratio</display-name>
+    <display-name>FAKEFAKERegionServers -Xmn in -Xmx ratio</display-name>
     <description>Percentage of max heap size (-Xmx) which used for young generation heap (-Xmn).</description>
     <value-attributes>
       <type>float</type>
@@ -172,7 +172,7 @@
     <name>hbase_max_direct_memory_size</name>
     <value/>
     <display-name>HBase off-heap MaxDirectMemorySize</display-name>
-    <description>If not empty, adds '-XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m' to HBASE_REGIONSERVER_OPTS.</description>
+    <description>If not empty, adds '-XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m' to FAKEHBASE_REGIONSERVER_OPTS.</description>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -213,14 +213,14 @@
 export JAVA_HOME={{java64_home}}
 
 # HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+export FAKEHBASE_CONF_DIR=${FAKEHBASE_CONF_DIR:-{{hbase_conf_dir}}}
 
 # Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+export FAKEHBASE_CLASSPATH=${FAKEHBASE_CLASSPATH}
 
 
 # The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
+# export FAKEHBASE_HEAPSIZE=1000
 
 # Extra Java runtime options.
 # Below are what we set by default. May only work with SUN JVM.
@@ -228,61 +228,61 @@ export HBASE_CLASSPATH=${HBASE_CLASSPATH}
 # see http://wiki.apache.org/hadoop/PerformanceTuning
 export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
 # Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+# export FAKEHBASE_OPTS="$FAKEHBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$FAKEHBASE_HOME/logs/gc-hbase.log"
 
 # Uncomment and adjust to enable JMX exporting
 # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
 # More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
 #
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export FAKEHBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
 # If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+# export FAKEHBASE_THRIFT_OPTS="$FAKEHBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export FAKEHBASE_FAKEZOOKEEPER_OPTS="$FAKEHBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
 
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+# File naming hosts on which HFAKEFAKERegionServers will run. $FAKEHBASE_HOME/conf/regionservers by default.
+export FAKEHBASE_REGIONSERVERS=${FAKEHBASE_CONF_DIR}/regionservers
 
 # Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+# export FAKEHBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=FAKEHBASE_CONF_DIR"
 
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
+# Where log files are stored. $FAKEHBASE_HOME/logs by default.
+export FAKEHBASE_LOG_DIR={{log_dir}}
 
 # A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
+# export FAKEHBASE_IDENT_STRING=$USER
 
 # The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
+# export FAKEHBASE_NICENESS=10
 
 # The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
+export FAKEHBASE_PID_DIR={{pid_dir}}
 
 # Seconds to sleep between slave commands. Unset by default. This
 # can be useful in large clusters, where, e.g., slave rsyncs can
 # otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
+# export FAKEHBASE_SLAVE_SLEEP=0.1
 
 # Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
+export FAKEHBASE_MANAGES_ZK=false
 
 {% if java_version &lt; 8 %}
 JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m"
 {% endif %}
       
 {% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export FAKEHBASE_OPTS="$FAKEHBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}"
+export FAKEHBASE_MASTER_OPTS="$FAKEHBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export FAKEHBASE_REGIONSERVER_OPTS="$FAKEHBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
 export PHOENIX_QUERYSERVER_OPTS="$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}"
 {% else %}
-export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS"
+export FAKEHBASE_OPTS="$FAKEHBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}"
+export FAKEHBASE_MASTER_OPTS="$FAKEHBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
+export FAKEHBASE_REGIONSERVER_OPTS="$FAKEHBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS"
 {% endif %}
 
 # HBase off-heap MaxDirectMemorySize
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+export FAKEHBASE_REGIONSERVER_OPTS="$FAKEHBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+export FAKEHBASE_MASTER_OPTS="$FAKEHBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
     </value>
     <value-attributes>
       <type>content</type>

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-log4j.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-log4j.xml

@@ -121,7 +121,7 @@ log4j.logger.org.apache.zookeeper=INFO
 log4j.logger.org.apache.hadoop.hbase=INFO
 # Make these two classes INFO-level. Make them DEBUG to see more zk debug.
 log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.FAKEZooKeeperWatcher=INFO
 #log4j.logger.org.apache.hadoop.dfs=DEBUG
 # Set this class to log INFO only otherwise its OTT
 # Enable this to get detailed connection error/retry logging.

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-logsearch-conf.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-logsearch-conf.xml

@@ -31,7 +31,7 @@
     <name>component_mappings</name>
     <display-name>Component mapping</display-name>
     <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>HBASE_MASTER:hbase_master;HBASE_REGIONSERVER:hbase_regionserver;PHOENIX_QUERY_SERVER:hbase_phoenix_server</value>
+    <value>FAKEHBASE_MASTER:hbase_master;FAKEHBASE_REGIONSERVER:hbase_regionserver;FAKEPHOENIX_QUERY_SERVER:hbase_phoenix_server</value>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>

+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-policy.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-policy.xml

@@ -24,7 +24,7 @@
     <name>security.client.protocol.acl</name>
     <value>*</value>
     <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
+    clients talking to HFAKEFAKERegionServers)
     The ACL is a comma-separated list of user and group names. The user and 
     group list is separated by a blank. For e.g. "alice,bob users,wheel". 
     A special value of "*" means all users are allowed.</description>
@@ -44,7 +44,7 @@
     <name>security.masterregion.protocol.acl</name>
     <value>*</value>
     <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
+    (for HFAKEFAKERegionServers communicating with HMaster)
     The ACL is a comma-separated list of user and group names. The user and 
     group list is separated by a blank. For e.g. "alice,bob users,wheel". 
     A special value of "*" means all users are allowed.</description>

+ 25 - 43
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-site.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-site.xml

@@ -26,7 +26,7 @@
     <description>The directory shared by region servers and into
       which HBase persists.  The URL should be 'fully-qualified'
       to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
+      FAKEHDFS directory '/hbase' where the FAKEHDFS instance's namenode is
       running at namenode.example.org on port 9000, set this value to:
       hdfs://namenode.example.org:9000/hbase.  By default HBase writes
       into /tmp.  Change this configuration else all data will be lost
@@ -39,7 +39,7 @@
     <value>true</value>
     <description>The mode the cluster will be in. Possible values are
       false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
+      false, startup will run all HBase and FAKEZooKeeper daemons together
       in the one JVM.
     </description>
     <on-ambari-upgrade add="false"/>
@@ -68,7 +68,7 @@
   <property>
     <name>hbase.master.info.bindAddress</name>
     <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
+    <description>The bind address for the FAKEHBase Master web UI
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -76,10 +76,10 @@
     <name>hbase.regionserver.handler.count</name>
     <value>30</value>
     <description>
-      Count of RPC Listener instances spun up on RegionServers.
+      Count of RPC Listener instances spun up on FAKEFAKERegionServers.
       Same property is used by the Master for count of master handlers.
     </description>
-    <display-name>Number of Handlers per RegionServer</display-name>
+    <display-name>Number of Handlers per FAKERegionServer</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>5</minimum>
@@ -199,8 +199,8 @@
   <property>
     <name>zookeeper.session.timeout</name>
     <value>90000</value>
-    <description>ZooKeeper session timeout.
-      ZooKeeper session timeout in milliseconds. It is used in two different ways.
+    <description>FAKEZooKeeper session timeout.
+      FAKEZooKeeper session timeout in milliseconds. It is used in two different ways.
       First, this value is used in the ZK client that HBase uses to connect to the ensemble.
       It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See
       http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
@@ -294,8 +294,8 @@
   <property>
     <name>hfile.block.cache.size</name>
     <value>0.40</value>
-    <description>Percentage of RegionServer memory to allocate to read buffers.</description>
-    <display-name>% of RegionServer Allocated to Read Buffers</display-name>
+    <description>Percentage of FAKERegionServer memory to allocate to read buffers.</description>
+    <display-name>% of FAKERegionServer Allocated to Read Buffers</display-name>
     <value-attributes>
       <type>float</type>
       <minimum>0</minimum>
@@ -412,49 +412,31 @@
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
     <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
+    <description>Property from FAKEZooKeeper's config zoo.cfg.
       The port at which the clients will connect.
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-    <value-attributes>
-      <type>multiLine</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <!-- End of properties used to generate FAKEZooKeeper host:port quorum list. -->
   <property>
     <name>hbase.zookeeper.useMulti</name>
     <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).&#xB7;
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    <description>Instructs HBase to make use of FAKEZooKeeper's multi-update functionality.
+      This allows certain FAKEZooKeeper operations to complete more quickly and prevents some issues
+      with rare Replication failure scenarios (see the release note of FAKEHBASE-2611 for an example).&#xB7;
+      IMPORTANT: only set this to true if all FAKEZooKeeper servers in the cluster are on version 3.4+
+      and will not be downgraded.  FAKEZooKeeper versions before 3.4 do not support multi-update and will
+      not fail gracefully if multi-update is invoked (see FAKEZOOKEEPER-1495).
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>zookeeper.znode.parent</name>
     <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+    <description>Root ZNode for HBase in FAKEZooKeeper. All of HBase's FAKEZooKeeper
       files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
+      By default, all of HBase's FAKEZooKeeper file path are configured with a
       relative path, so they will all go under this directory unless changed.
     </description>
     <on-ambari-upgrade add="false"/>
@@ -531,8 +513,8 @@
   <property>
     <name>hbase.master.port</name>
     <value>16000</value>
-    <display-name>HBase Master Port</display-name>
-    <description>The port the HBase Master should bind to.</description>
+    <display-name>FAKEHBase Master Port</display-name>
+    <description>The port the FAKEHBase Master should bind to.</description>
     <value-attributes>
       <overridable>false</overridable>
       <type>int</type>
@@ -542,19 +524,19 @@
   <property>
     <name>hbase.master.info.port</name>
     <value>16010</value>
-    <description>The port for the HBase Master web UI.</description>
+    <description>The port for the FAKEHBase Master web UI.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hbase.regionserver.port</name>
     <value>16020</value>
-    <description>The port the HBase RegionServer binds to.</description>
+    <description>The port the HBase FAKERegionServer binds to.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
     <value>16030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
+    <description>The port for the HBase FAKERegionServer web UI.</description>
     <on-ambari-upgrade add="false"/>
   </property>
 

+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/ranger-hbase-audit.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-audit.xml

@@ -29,8 +29,8 @@
   <property>
     <name>xasecure.audit.destination.hdfs</name>
     <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
+    <display-name>Audit to FAKEHDFS</display-name>
+    <description>Is Audit to FAKEHDFS enabled?</description>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -44,8 +44,8 @@
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
+    <value>hdfs://FAKENAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>FAKEHDFS folder to write audit to, make sure the service user has requried permissions</description>
     <depends-on>
       <property>
         <type>ranger-env</type>

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-policymgr-ssl.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/ranger-hbase-security.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/ranger-hbase-security.xml


+ 6 - 6
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/kerberos.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/kerberos.json

@@ -1,7 +1,7 @@
 {
   "services": [
     {
-      "name": "HBASE",
+      "name": "FAKEHBASE",
       "identities": [
         {
           "name": "/spnego"
@@ -57,10 +57,10 @@
       ],
       "components": [
         {
-          "name": "HBASE_MASTER",
+          "name": "FAKEHBASE_MASTER",
           "identities": [
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             },
             {
               "name": "hbase_master_hbase",
@@ -93,7 +93,7 @@
               }
             },
             {
-              "name": "/HBASE/HBASE_MASTER/hbase_master_hbase",
+              "name": "/FAKEHBASE/FAKEHBASE_MASTER/hbase_master_hbase",
               "principal": {
                 "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.principal"
               },
@@ -104,7 +104,7 @@
           ]
         },
         {
-          "name": "HBASE_REGIONSERVER",
+          "name": "FAKEHBASE_REGIONSERVER",
           "identities": [
             {
               "name": "hbase_regionserver_hbase",
@@ -139,7 +139,7 @@
           ]
         },
         {
-          "name": "PHOENIX_QUERY_SERVER",
+          "name": "FAKEPHOENIX_QUERY_SERVER",
           "identities": [
             {
               "name": "phoenix_spnego",

+ 23 - 23
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/metainfo.xml

@@ -19,33 +19,33 @@
   <schemaVersion>2.0</schemaVersion>
   <services>
     <service>
-      <name>HBASE</name>
-      <displayName>HBase</displayName>
+      <name>FAKEHBASE</name>
+      <displayName>FAKEHBASE</displayName>
       <version>1.1.1.2.3</version>
       <comment>A Non-relational distributed database, plus Phoenix, a high performance SQL layer for low latency applications.</comment>
 
       <components>
         <component>
-          <name>HBASE_MASTER</name>
-          <displayName>HBase Master</displayName>
+          <name>FAKEHBASE_MASTER</name>
+          <displayName>FAKEHBase Master</displayName>
           <category>MASTER</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>HBASE</timelineAppid>
+          <versionAdvertised>false</versionAdvertised>
+          <timelineAppid>FAKEHBASE</timelineAppid>
           <dependencies>
             <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
             <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <name>FAKEZOOKEEPER/FAKEZOOKEEPER_SERVER</name>
               <scope>cluster</scope>
               <auto-deploy>
                 <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
+                <co-locate>FAKEHBASE/FAKEHBASE_MASTER</co-locate>
               </auto-deploy>
             </dependency>
           </dependencies>
@@ -73,11 +73,11 @@
         </component>
 
         <component>
-          <name>PHOENIX_QUERY_SERVER</name>
-          <displayName>Phoenix Query Server</displayName>
+          <name>FAKEPHOENIX_QUERY_SERVER</name>
+          <displayName>FAKEPhoenix Query Server</displayName>
           <category>SLAVE</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/phoenix_queryserver.py</script>
             <scriptType>PYTHON</scriptType>
@@ -91,21 +91,21 @@
         </component>
 
         <component>
-          <name>HBASE_REGIONSERVER</name>
-          <displayName>RegionServer</displayName>
+          <name>FAKEHBASE_REGIONSERVER</name>
+          <displayName>FAKERegionServer</displayName>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <decommissionAllowed>true</decommissionAllowed>
-          <timelineAppid>HBASE</timelineAppid>
+          <timelineAppid>FAKEHBASE</timelineAppid>
           <commandScript>
             <script>scripts/hbase_regionserver.py</script>
             <scriptType>PYTHON</scriptType>
           </commandScript>
           <bulkCommands>
-            <displayName>RegionServers</displayName>
+            <displayName>FAKEFAKERegionServers</displayName>
             <!-- Used by decommission and recommission -->
-            <masterComponent>HBASE_MASTER</masterComponent>
+            <masterComponent>FAKEHBASE_MASTER</masterComponent>
           </bulkCommands>
           <logs>
             <log>
@@ -116,11 +116,11 @@
         </component>
 
         <component>
-          <name>HBASE_CLIENT</name>
-          <displayName>HBase Client</displayName>
+          <name>FAKEHBASE_CLIENT</name>
+          <displayName>FAKEHBase Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/hbase_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -188,8 +188,8 @@
       </commandScript>
 
       <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
+        <service>FAKEZOOKEEPER</service>
+        <service>FAKEHDFS</service>
       </requiredServices>
 
     </service>

File diff suppressed because it is too large
+ 231 - 231
ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/metrics.json


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/alerts/hbase_master_process.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/alerts/hbase_regionserver_process.py


+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_client.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_client.py

@@ -32,7 +32,7 @@ class HBaseClient(Dummy):
 
   def __init__(self):
     super(HBaseClient, self).__init__()
-    self.component_name = "HBASE_CLIENT"
+    self.component_name = "FAKEHBASE_CLIENT"
 
 if __name__ == "__main__":
   HBaseClient().execute()
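
Each of these package scripts follows the same shape: subclass Dummy from resource_management, set component_name in the constructor, and call execute() under __main__. A minimal sketch of a hypothetical client script in that style (MyFakeClient and FAKEMY_CLIENT are invented names for illustration):

# Sketch of the PERF dummy-script pattern; MyFakeClient and FAKEMY_CLIENT are
# hypothetical. Dummy is the simulation base class this commit's scripts use.
from resource_management.libraries.script.dummy import Dummy


class MyFakeClient(Dummy):
  """
  Dummy script that simulates a client component.
  """

  def __init__(self):
    super(MyFakeClient, self).__init__()
    self.component_name = "FAKEMY_CLIENT"

if __name__ == "__main__":
  MyFakeClient().execute()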

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_master.py

@@ -32,7 +32,7 @@ class HBaseMaster(Dummy):
 
   def __init__(self):
     super(HBaseMaster, self).__init__()
-    self.component_name = "HBASE_MASTER"
+    self.component_name = "FAKEHBASE_MASTER"
     self.principal_conf_name = "hbase-site"
     self.principal_name = "hbase.master.kerberos.principal"
     self.keytab_conf_name = "hbase-site"

+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/hbase_regionserver.py

@@ -25,14 +25,14 @@ Ambari Agent
 from resource_management.libraries.script.dummy import Dummy
 
 
-class HBaseRegionServer(Dummy):
+class HBaseFAKERegionServer(Dummy):
   """
   Dummy script that simulates a slave component.
   """
 
   def __init__(self):
-    super(HBaseRegionServer, self).__init__()
-    self.component_name = "HBASE_REGIONSERVER"
+    super(HBaseFAKERegionServer, self).__init__()
+    self.component_name = "FAKEHBASE_REGIONSERVER"
     self.principal_conf_name = "hbase-site"
     self.principal_name = "hbase.regionserver.kerberos.principal"
     self.keytab_conf_name = "hbase-site"
@@ -42,4 +42,4 @@ class HBaseRegionServer(Dummy):
     print "Decommission"
 
 if __name__ == "__main__":
-  HBaseRegionServer().execute()
+  HBaseFAKERegionServer().execute()
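
The decommission() method above pairs with the <bulkCommands> block in metainfo.xml; the framework's Script.execute() appears to dispatch the incoming command name to a same-named method on the script class, which would be how a DECOMMISSION command reaches that print statement. A standalone simulation of that dispatch, with the class and command invented for illustration:

# Standalone sketch, assuming execute() resolves the agent's command to a
# same-named method on the script object; FakeSlave is a hypothetical name.
class FakeSlave(object):
  def decommission(self):
    print("Decommission")

  def execute(self, command):
    # look up the bound method matching the command name and invoke it
    getattr(self, command.lower())()

if __name__ == "__main__":
  FakeSlave().execute("DECOMMISSION")  # prints: Decommission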

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/phoenix_queryserver.py

@@ -32,7 +32,7 @@ class PhoenixQueryServer(Dummy):
 
   def __init__(self):
     super(PhoenixQueryServer, self).__init__()
-    self.component_name = "PHOENIX_QUERY_SERVER"
+    self.component_name = "FAKEPHOENIX_QUERY_SERVER"
     self.principal_conf_name = "hbase-site"
     self.principal_name = "phoenix.queryserver.kerberos.principal"
     self.keytab_conf_name = "hbase-site"

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/service_check.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/scripts/service_check.py


+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/quicklinks/quicklinks.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/quicklinks/quicklinks.json

@@ -10,7 +10,7 @@
     "links": [
       {
         "name": "hbase_master_ui",
-        "label": "HBase Master UI",
+        "label": "FAKEHBase Master UI",
         "url":"%@://%@:%@/master-status",
         "requires_user_name": "false",
         "port":{
@@ -52,7 +52,7 @@
       },
       {
         "name": "hbase_master_jmx",
-        "label": "HBase Master JMX",
+        "label": "FAKEHBase Master JMX",
         "url":"%@://%@:%@/jmx",
         "requires_user_name": "false",
         "port":{

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/themes/theme.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/themes/theme.json

@@ -1,6 +1,6 @@
 {
   "name": "default",
-  "description": "Default theme for HBASE service",
+  "description": "Default theme for FAKEHBASE service",
   "configuration": {
     "layouts": [
       {

+ 85 - 85
ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/widgets.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/widgets.json

@@ -3,7 +3,7 @@
     {
       "layout_name": "default_hbase_dashboard",
       "display_name": "Standard HBase Dashboard",
-      "section_name": "HBASE_SUMMARY",
+      "section_name": "FAKEHBASE_SUMMARY",
       "widgetLayoutInfo": [
         {
           "widget_name": "Reads and Writes",
@@ -14,38 +14,38 @@
             {
               "name": "regionserver.Server.Get_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/Get_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.ScanNext_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/ScanNext_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Append_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/Append_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Delete_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/Delete_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Increment_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/Increment_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Mutate_num_ops._rate",
               "metric_path": "metrics/hbase/regionserver/Server/Mutate_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -72,14 +72,14 @@
             {
               "name": "regionserver.Server.Get_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/Get_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.ScanNext_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/ScanNext_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -107,26 +107,26 @@
             {
               "name": "regionserver.Server.Mutate_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/Mutate_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Increment_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/Increment_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Append_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/Append_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "regionserver.Server.Delete_95th_percentile._max",
               "metric_path": "metrics/hbase/regionserver/Server/Delete_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -155,22 +155,22 @@
         },
         {
           "widget_name": "Open Connections",
-          "description": "Count of open connections across all RegionServer. This is indicative of RegionServer load in the cluster.",
+          "description": "Count of open connections across all FAKERegionServer. This is indicative of FAKERegionServer load in the cluster.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
-              "name": "regionserver.RegionServer.numOpenConnections._sum",
+              "name": "regionserver.FAKERegionServer.numOpenConnections._sum",
               "metric_path": "metrics/hbase/ipc/IPC/numOpenConnections._sum",
               "category": "",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "Open Connections",
-              "value": "${regionserver.RegionServer.numOpenConnections._sum}"
+              "value": "${regionserver.FAKERegionServer.numOpenConnections._sum}"
             }
           ],
           "properties": {
@@ -185,26 +185,26 @@
           "is_visible": true,
           "metrics": [
             {
-              "name": "regionserver.RegionServer.numActiveHandler._sum",
+              "name": "regionserver.FAKERegionServer.numActiveHandler._sum",
               "metric_path": "metrics/hbase/ipc/IPC/numActiveHandler._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
-              "name": "regionserver.RegionServer.numCallsInGeneralQueue._sum",
+              "name": "regionserver.FAKERegionServer.numCallsInGeneralQueue._sum",
               "metric_path": "metrics/hbase/ipc/IPC/numCallsInGeneralQueue._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "Active Handlers",
-              "value": "${regionserver.RegionServer.numActiveHandler._sum}"
+              "value": "${regionserver.FAKERegionServer.numActiveHandler._sum}"
             },
             {
               "name": "Calls in General Queue",
-              "value": "${regionserver.RegionServer.numCallsInGeneralQueue._sum}"
+              "value": "${regionserver.FAKERegionServer.numCallsInGeneralQueue._sum}"
             }
           ],
           "properties": {
@@ -214,15 +214,15 @@
         },
         {
           "widget_name": "Files Local",
-          "description": "Average percentage of local files to RegionServer in the cluster.",
+          "description": "Average percentage of local files to FAKERegionServer in the cluster.",
           "widget_type": "NUMBER",
           "is_visible": true,
           "metrics": [
             {
               "name": "regionserver.Server.percentFilesLocal",
               "metric_path": "metrics/hbase/regionserver/Server/percentFilesLocal",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -246,8 +246,8 @@
             {
               "name": "regionserver.Server.updatesBlockedTime._rate",
               "metric_path": "metrics/hbase/regionserver/Server/updatesBlockedTime._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -264,39 +264,39 @@
         },
         {
           "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all RegionServer hosts.",
+          "description": "Percentage of CPU utilized across all FAKERegionServer hosts.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "cpu_system._sum",
               "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "cpu_user._sum",
               "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "cpu_nice._sum",
               "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "cpu_idle._sum",
               "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "cpu_wio._sum",
               "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -313,21 +313,21 @@
         },
         {
           "widget_name": "Cluster Network",
-          "description": "Average of Network IO utilized across all RegionServer hosts.",
+          "description": "Average of Network IO utilized across all FAKERegionServer hosts.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "pkts_in._avg",
               "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "pkts_out._avg",
               "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -347,21 +347,21 @@
         },
         {
           "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all RegionServer hosts.",
+          "description": "Sum of disk throughput for all FAKERegionServer hosts.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "read_bps._sum",
               "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             },
             {
               "name": "write_bps._sum",
               "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
@@ -385,7 +385,7 @@
     {
       "layout_name": "default_hbase_heatmap",
       "display_name": "HBase Heatmaps",
-      "section_name": "HBASE_HEATMAPS",
+      "section_name": "FAKEHBASE_HEATMAPS",
       "widgetLayoutInfo": [
         {
           "widget_name": "HBase Compaction Queue Size",
@@ -394,16 +394,16 @@
           "is_visible": true,
           "metrics": [
             {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "name": "Hadoop:service=HBase,name=FAKERegionServer,sub=Server.compactionQueueLength",
               "metric_path": "metrics/hbase/regionserver/compactionQueueSize",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "HBase Compaction Queue Size",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength} "
+              "value": "${Hadoop:service=HBase,name=FAKERegionServer,sub=Server.compactionQueueLength} "
             }
           ],
           "properties": {
@@ -418,16 +418,16 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+              "name": "Hadoop:service=HBase,name=FAKERegionServer,sub=Server.memStoreSize",
               "metric_path": "metrics/hbase/regionserver/memstoreSize",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "HBase Memstore Sizes",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize}"
+              "value": "${Hadoop:service=HBase,name=FAKERegionServer,sub=Server.memStoreSize}"
             }
           ],
           "properties": {
@@ -442,16 +442,16 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+              "name": "Hadoop:service=HBase,name=FAKERegionServer,sub=Server.readRequestCount",
               "metric_path": "metrics/hbase/regionserver/readRequestsCount",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "HBase Read Request Count",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount}"
+              "value": "${Hadoop:service=HBase,name=FAKERegionServer,sub=Server.readRequestCount}"
             }
           ],
           "properties": {
@@ -465,16 +465,16 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+              "name": "Hadoop:service=HBase,name=FAKERegionServer,sub=Server.writeRequestCount",
               "metric_path": "metrics/hbase/regionserver/writeRequestsCount",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "HBase Write Request Count",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount}"
+              "value": "${Hadoop:service=HBase,name=FAKERegionServer,sub=Server.writeRequestCount}"
             }
           ],
           "properties": {
@@ -488,16 +488,16 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+              "name": "Hadoop:service=HBase,name=FAKERegionServer,sub=Server.regionCount",
               "metric_path": "metrics/hbase/regionserver/regions",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
+              "service_name": "FAKEHBASE",
+              "component_name": "FAKEHBASE_REGIONSERVER"
             }
           ],
           "values": [
             {
               "name": "HBase Regions",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount}"
+              "value": "${Hadoop:service=HBase,name=FAKERegionServer,sub=Server.regionCount}"
             }
           ],
           "properties": {

+ 17 - 17
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/alerts.json

@@ -1,31 +1,31 @@
 {
-  "HDFS":{
-    "NAMENODE": [
+  "FAKEHDFS":{
+    "FAKENAMENODE": [
 
       {
         "name": "upgrade_finalized_state",
-        "label": "HDFS Upgrade Finalized State",
-        "description": "This service-level alert is triggered if HDFS is not in the finalized state",
+        "label": "FAKEHDFS Upgrade Finalized State",
+        "description": "This service-level alert is triggered if FAKEHDFS is not in the finalized state",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py",
+          "path": "PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py",
           "parameters": []
         }
       },
 
       {
         "name": "namenode_last_checkpoint",
-        "label": "NameNode Last Checkpoint",
-        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
+        "label": "FAKEHNameNode Last Checkpoint",
+        "description": "This service-level alert will trigger if the last time that the FAKEHNameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
         "interval": 1,
         "scope": "ANY",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py",
+          "path": "PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py",
           "parameters": [
             {
               "name": "connection.timeout",
@@ -74,21 +74,21 @@
         }
       }
     ],
-    "SECONDARY_NAMENODE": [
+    "SECONDARY_FAKENAMENODE": [
       {
         "name": "secondary_namenode_process",
-        "label": "Secondary NameNode Process",
-        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
+        "label": "Secondary FAKEHNameNode Process",
+        "description": "This host-level alert is triggered if the Secondary FAKEHNameNode process cannot be confirmed to be up and listening on the network.",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py"
+          "path": "PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py"
         }
       }
     ],
-    "NFS_GATEWAY": [
+    "FAKENFS_GATEWAY": [
       {
         "name": "nfsgateway_process",
         "label": "NFS Gateway Process",
@@ -98,21 +98,21 @@
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py"
+          "path": "PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py"
         }
       }
     ],
-    "DATANODE": [
+    "FAKEDATANODE": [
       {
         "name": "datanode_unmounted_data_dir",
-        "label": "DataNode Unmounted Data Dir",
+        "label": "FAKEDataNode Unmounted Data Dir",
         "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py"
+          "path": "PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py"
         }
       }
     ]
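
All of these alerts are SCRIPT-sourced, so each "path" above points at a Python module the agent loads. A minimal sketch of the contract such a module is expected to fulfil, a module-level execute() returning a (state, [text]) tuple, with the probe and messages invented for illustration:

# Sketch of a SCRIPT-type alert module; the probe logic and messages here
# are hypothetical, only the execute()/get_tokens() shape mirrors the
# alert_*.py files referenced above.
OK = 'OK'
CRITICAL = 'CRITICAL'

def get_tokens():
  # config keys the agent should pass in via `configurations`
  return ()

def execute(configurations={}, parameters=[], host_name=None):
  process_up = True  # a real alert would probe the process or port here
  if process_up:
    return (OK, ['Process is up and listening'])
  return (CRITICAL, ['Process cannot be confirmed to be up'])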

+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/core-site.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/core-site.xml

@@ -22,7 +22,7 @@
   <property>
     <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
     <value>120</value>
-    <description>ZooKeeper Failover Controller retries setting for your environment</description>
+    <description>FAKEZooKeeper Failover Controller retries setting for your environment</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <!-- i/o properties -->
@@ -55,7 +55,7 @@
     <!-- cluster variant -->
     <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for HDFS.</description>
+      literal string "local" or a host:port for FAKEHDFS.</description>
     <final>true</final>
     <on-ambari-upgrade add="false"/>
   </property>

+ 34 - 34
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-env.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-env.xml

@@ -69,8 +69,8 @@
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
-    <description>NameNode Java heap size</description>
-    <display-name>NameNode Java heap size</display-name>
+    <description>FAKEHNameNode Java heap size</description>
+    <display-name>FAKEHNameNode Java heap size</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -90,8 +90,8 @@
   <property>
     <name>namenode_opt_newsize</name>
     <value>200</value>
-    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
-    <display-name>NameNode new generation size</display-name>
+    <description>Default size of the Java new generation for FAKEHNameNode (Java option -XX:NewSize). Note: the FAKEHNameNode new generation size should be 1/8 of the maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of the maximum heap size (-Xmx).</description>
+    <display-name>FAKEHNameNode new generation size</display-name>
     <depends-on>
       <property>
         <type>hadoop-env</type>
@@ -111,8 +111,8 @@
   <property>
     <name>namenode_opt_maxnewsize</name>
     <value>200</value>
-    <description>NameNode maximum new generation size</description>
-    <display-name>NameNode maximum new generation size</display-name>
+    <description>FAKEHNameNode maximum new generation size</description>
+    <display-name>FAKEHNameNode maximum new generation size</display-name>
     <depends-on>
       <property>
         <type>hadoop-env</type>
@@ -132,8 +132,8 @@
   <property>
     <name>namenode_opt_permsize</name>
     <value>128</value>
-    <description>NameNode permanent generation size</description>
-    <display-name>NameNode permanent generation size</display-name>
+    <description>FAKEHNameNode permanent generation size</description>
+    <display-name>FAKEHNameNode permanent generation size</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -147,8 +147,8 @@
   <property>
     <name>namenode_opt_maxpermsize</name>
     <value>256</value>
-    <description>NameNode maximum permanent generation size</description>
-    <display-name>NameNode maximum permanent generation size</display-name>
+    <description>FAKEHNameNode maximum permanent generation size</description>
+    <display-name>FAKEHNameNode maximum permanent generation size</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -162,8 +162,8 @@
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-    <display-name>DataNode maximum Java heap size</display-name>
+    <description>FAKEDataNode maximum Java heap size</description>
+    <display-name>FAKEDataNode maximum Java heap size</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -187,10 +187,10 @@
   </property>
   <property>
     <name>hdfs_user</name>
-    <display-name>HDFS User</display-name>
+    <display-name>FAKEHDFS User</display-name>
     <value>hdfs</value>
     <property-type>USER</property-type>
-    <description>User to run HDFS as</description>
+    <description>User to run FAKEHDFS as</description>
     <value-attributes>
       <type>user</type>
       <overridable>false</overridable>
@@ -200,9 +200,9 @@
   <property>
     <name>hdfs_tmp_dir</name>
     <value>/tmp</value>
-    <description>HDFS tmp Dir</description>
-    <display-name>HDFS tmp Dir</display-name>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <description>FAKEHDFS tmp Dir</description>
+    <display-name>FAKEHDFS tmp Dir</display-name>
+    <property-type>NOT_MANAGED_FAKEHDFS_PATH</property-type>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
@@ -213,29 +213,29 @@
   <property>
     <name>hdfs_user_nofile_limit</name>
     <value>128000</value>
-    <description>Max open files limit setting for HDFS user.</description>
+    <description>Max open files limit setting for FAKEHDFS user.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_user_nproc_limit</name>
     <value>65536</value>
-    <description>Max number of processes limit setting for HDFS user.</description>
+    <description>Max number of processes limit setting for FAKEHDFS user.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_backup_dir</name>
-    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
+    <description>Local directory for storing backup copy of FAKEHNameNode images during upgrade</description>
     <value>/tmp/upgrades</value>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_user_keytab</name>
-    <description>HDFS keytab path</description>
+    <description>FAKEHDFS keytab path</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_principal_name</name>
-    <description>HDFS principal name</description>
+    <description>FAKEHDFS principal name</description>
     <on-ambari-upgrade add="false"/>
   </property>
 
@@ -293,7 +293,7 @@ export JSVC_HOME={{jsvc_path}}
 # The maximum amount of heap to use, in MB. Default is 1000.
 export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
 
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+export HADOOP_FAKENAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 
 # Extra Java runtime options.  Empty by default.
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
@@ -304,21 +304,21 @@ HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 
 {% if java_version &lt; 8 %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+SHARED_HADOOP_FAKENAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_FAKENAMENODE_OPTS="${SHARED_HADOOP_FAKENAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_FAKENAMENODE_OPTS}"
+export HADOOP_FAKEDATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_FAKEDATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYFAKENAMENODE_OPTS="${SHARED_HADOOP_FAKENAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYFAKENAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 
 {% else %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+SHARED_HADOOP_FAKENAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_FAKENAMENODE_OPTS="${SHARED_HADOOP_FAKENAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_FAKENAMENODE_OPTS}"
+export HADOOP_FAKEDATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_FAKEDATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYFAKENAMENODE_OPTS="${SHARED_HADOOP_FAKENAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYFAKENAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
@@ -361,7 +361,7 @@ export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
 # History server pid
 export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
 
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+FAKEYARN_FAKERESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
 
 # A string representing this instance of hadoop. $USER by default.
 export HADOOP_IDENT_STRING=$USER
@@ -407,9 +407,9 @@ fi
   </property>
   <property>
     <name>nfsgateway_heapsize</name>
-    <display-name>NFSGateway maximum Java heap size</display-name>
+    <display-name>FAKENFSGateway maximum Java heap size</display-name>
     <value>1024</value>
-    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <description>Maximum Java heap size for FAKENFSGateway (Java option -Xmx)</description>
     <value-attributes>
       <type>int</type>
       <unit>MB</unit>
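
As a worked example of the sizing note on namenode_opt_newsize above: with this file's default namenode_heapsize of 1024 MB, the 1/8 rule gives a new generation of 128 MB (the shipped default here is 200 MB):

# Arithmetic behind the 1/8 sizing note in hadoop-env.xml above.
namenode_heapsize = 1024                       # MB, -Xmx default in this file
namenode_opt_newsize = namenode_heapsize // 8  # -XX:NewSize per the note
print(namenode_opt_newsize)                    # -> 128 (MB)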

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-metrics2.properties.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-metrics2.properties.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-policy.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hadoop-policy.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-alert-config.xml


+ 5 - 5
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-log4j.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-log4j.xml

@@ -142,12 +142,12 @@ log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
 
 #
-# NameNode metrics logging.
+# FAKEHNameNode metrics logging.
 # The default is to retain two namenode-metrics.log files up to 64MB each.
 #
 namenode.metrics.logger=INFO,NullAppender
-log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-log4j.additivity.NameNodeMetricsLog=false
+log4j.logger.FAKEHNameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.FAKEHNameNodeMetricsLog=false
 log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
 log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
 log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
@@ -210,10 +210,10 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
 
 #
-# HDFS block state change log from block manager
+# FAKEHDFS block state change log from block manager
 #
 # Uncomment the following to suppress normal block state change
-# messages from BlockManager in NameNode.
+# messages from BlockManager in FAKEHNameNode.
 #log4j.logger.BlockStateChange=WARN
     </value>
     <value-attributes>

+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-logsearch-conf.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-logsearch-conf.xml

@@ -24,14 +24,14 @@
     <name>service_name</name>
     <display-name>Service name</display-name>
     <description>Service name for Logsearch Portal (label)</description>
-    <value>HDFS</value>
+    <value>FAKEHDFS</value>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>component_mappings</name>
     <display-name>Component mapping</display-name>
     <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
+    <value>FAKENAMENODE:hdfs_namenode;FAKEDATANODE:hdfs_datanode;SECONDARY_FAKENAMENODE:hdfs_secondarynamenode;FAKEJOURNALNODE:hdfs_journalnode;FAKEZKFC:hdfs_zkfc;FAKENFS_GATEWAY:hdfs_nfs3</value>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -76,7 +76,7 @@
       "rowtype":"audit",
       "is_enabled":"true",
       "add_fields":{
-        "logType":"HDFSAudit",
+        "logType":"FAKEHDFSAudit",
         "enforcer":"hadoop-acl",
         "repoType":"1",
         "repo":"hdfs"

+ 30 - 30
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-site.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-site.xml

@@ -24,7 +24,7 @@
     <name>dfs.namenode.name.dir</name>
     <!-- cluster variant -->
     <value>/hadoop/hdfs/namenode</value>
-    <display-name>NameNode directories</display-name>
+    <display-name>FAKEHNameNode directories</display-name>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table.  If this is a comma-delimited list
       of directories then the name table is replicated in all of the
@@ -46,8 +46,8 @@
   <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
-    <display-name>WebHDFS enabled</display-name>
-    <description>Whether to enable WebHDFS feature</description>
+    <display-name>WebFAKEHDFS enabled</display-name>
+    <description>Whether to enable WebFAKEHDFS feature</description>
     <final>true</final>
     <value-attributes>
       <type>boolean</type>
@@ -58,9 +58,9 @@
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <description> Number of failed disks a FAKEDataNode would tolerate before it stops offering service</description>
     <final>true</final>
-    <display-name>DataNode failed disk tolerance</display-name>
+    <display-name>FAKEDataNode failed disk tolerance</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -78,7 +78,7 @@
   <property>
     <name>dfs.datanode.data.dir</name>
     <value>/hadoop/hdfs/data</value>
-    <display-name>DataNode directories</display-name>
+    <display-name>FAKEDataNode directories</display-name>
     <description>Determines where on the local filesystem an DFS data node
       should store its blocks.  If this is a comma-delimited
       list of directories, then data will be stored in all named
@@ -113,7 +113,7 @@
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
-    <display-name>SecondaryNameNode Checkpoint directories</display-name>
+    <display-name>SecondaryFAKEHNameNode Checkpoint directories</display-name>
     <description>Determines where on the local filesystem the DFS secondary
       name node should store the temporary images to merge.
       If this is a comma-delimited list of directories then the image is
@@ -139,7 +139,7 @@
   <property>
     <name>dfs.namenode.checkpoint.period</name>
     <value>21600</value>
-    <display-name>HDFS Maximum Checkpoint Delay</display-name>
+    <display-name>FAKEHDFS Maximum Checkpoint Delay</display-name>
     <description>The number of seconds between two periodic checkpoints.</description>
     <value-attributes>
       <type>int</type>
@@ -150,7 +150,7 @@
   <property>
     <name>dfs.namenode.checkpoint.txns</name>
     <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+    <description>The Secondary FAKEHNameNode or CheckpointNode will create a checkpoint
       of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
       regardless of whether 'dfs.namenode.checkpoint.period' has expired.
     </description>
@@ -250,7 +250,7 @@
     <name>dfs.namenode.http-address</name>
     <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for HDFS.</description>
+      literal string "local" or a host:port for FAKEHDFS.</description>
     <final>true</final>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -264,7 +264,7 @@
     <name>dfs.datanode.du.reserved</name>
     <!-- cluster variant -->
     <value>1073741824</value>
-    <display-name>Reserved space for HDFS</display-name>
+    <display-name>Reserved space for FAKEHDFS</display-name>
     <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
     </description>
     <value-attributes>
@@ -298,7 +298,7 @@
     <name>dfs.datanode.max.transfer.threads</name>
     <value>1024</value>
     <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
-    <display-name>DataNode max data transfer threads</display-name>
+    <display-name>FAKEDataNode max data transfer threads</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -319,7 +319,7 @@
     <name>dfs.permissions.enabled</name>
     <value>true</value>
     <description>
-      If "true", enable permission checking in HDFS.
+      If "true", enable permission checking in FAKEHDFS.
       If "false", permission checking is turned off,
       but all other behavior is unchanged.
       Switching from one parameter value to the other does not change the mode,
@@ -337,7 +337,7 @@
     <name>dfs.namenode.handler.count</name>
     <value>100</value>
     <description>Added to grow Queue size so that more client connections are allowed</description>
-    <display-name>NameNode Server threads</display-name>
+    <display-name>FAKEHNameNode Server threads</display-name>
     <value-attributes>
       <type>int</type>
       <minimum>1</minimum>
@@ -370,7 +370,7 @@
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-    <display-name>DataNode directories permission</display-name>
+    <display-name>FAKEDataNode directories permission</display-name>
     <description>The permissions that should be there on dfs.datanode.data.dir
       directories. The datanode will not come up if the permissions are
       different on existing dfs.datanode.data.dir directories. If the directories
@@ -384,9 +384,9 @@
     <name>dfs.namenode.accesstime.precision</name>
     <value>0</value>
     <display-name>Access time precision</display-name>
-    <description>The access time for HDFS file is precise up to this value.
+    <description>The access time for a FAKEHDFS file is precise up to this value.
       The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
+      access times for FAKEHDFS.
     </description>
     <value-attributes>
       <type>int</type>
@@ -396,7 +396,7 @@
   <property>
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <description>ACL specifying who can view the default servlets in FAKEHDFS</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -436,22 +436,22 @@
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
+    <description>The address and port the FAKEJournalNode web UI listens on.
       If the port is 0 then the server will start on a free port. </description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.journalnode.https-address</name>
     <value>0.0.0.0:8481</value>
-    <description>The address and port the JournalNode HTTPS server listens on.
+    <description>The address and port the FAKEJournalNode HTTPS server listens on.
       If the port is 0 then the server will start on a free port. </description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <!-- HDFS Short-Circuit Local Reads -->
+  <!-- FAKEHDFS Short-Circuit Local Reads -->
   <property>
     <name>dfs.client.read.shortcircuit</name>
     <value>true</value>
-    <display-name>HDFS Short-circuit read</display-name>
+    <display-name>FAKEHDFS Short-circuit read</display-name>
     <description>
       This configuration parameter turns on short-circuit local reads.
     </description>
@@ -464,8 +464,8 @@
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
     <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+      This is a path to a UNIX domain socket that will be used for communication between the FAKEDataNode and local FAKEHDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the FAKEDataNode.
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -483,7 +483,7 @@
   <property>
     <name>dfs.namenode.name.dir.restore</name>
     <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+    <description>Set to true to enable FAKEHNameNode to attempt recovering a previously failed dfs.namenode.name.dir.
       When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -491,7 +491,7 @@
     <name>dfs.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>
-      Decide if HTTPS(SSL) is supported on HDFS This configures the HTTP endpoint for HDFS daemons:
+      Decide if HTTPS(SSL) is supported on FAKEHDFS. This configures the HTTP endpoint for FAKEHDFS daemons:
       The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY :
       Service is provided only on https - HTTP_AND_HTTPS : Service is provided both on http and https
     </description>
@@ -529,13 +529,13 @@
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/hadoop/hdfs/journalnode</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
+    <description>The path where the FAKEJournalNode daemon will store its local state. </description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.client.retry.policy.enabled</name>
     <value>false</value>
-    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
+    <description>Enables FAKEHDFS client retry in the event of a FAKEHNameNode failure.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -579,10 +579,10 @@
   <property>
     <name>nfs.file.dump.dir</name>
     <value>/tmp/.hdfs-nfs</value>
-    <display-name>NFSGateway dump directory</display-name>
+    <display-name>FAKENFSGateway dump directory</display-name>
     <description>
       This directory is used to temporarily save out-of-order writes before
-      writing to HDFS. For each file, the out-of-order writes are dumped after
+      writing to FAKEHDFS. For each file, the out-of-order writes are dumped after
       they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
       One needs to make sure the directory has enough space.
     </description>

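For reference, the "_PORT" substitution described for dfs.domain.socket.path above amounts to a simple string replacement against the FAKEDataNode's TCP port. A minimal sketch of that documented behavior; resolve_domain_socket_path is a hypothetical helper for illustration, not stack code:

# Minimal sketch of the "_PORT" substitution described in the
# dfs.domain.socket.path property above. Hypothetical helper; the real
# substitution happens inside the HDFS client/DataNode code.
def resolve_domain_socket_path(configured_path, datanode_tcp_port):
    return configured_path.replace("_PORT", str(datanode_tcp_port))

# "/var/lib/hadoop-hdfs/dn_socket" has no placeholder, so it is unchanged:
assert resolve_domain_socket_path("/var/lib/hadoop-hdfs/dn_socket", 50010) == "/var/lib/hadoop-hdfs/dn_socket"
# With a placeholder, the TCP port is spliced in:
assert resolve_domain_socket_path("/run/dn_socket._PORT", 50010) == "/run/dn_socket.50010"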
+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-audit.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-audit.xml

@@ -30,8 +30,8 @@
   <property>
     <name>xasecure.audit.destination.hdfs</name>
     <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
+    <display-name>Audit to FAKEHDFS</display-name>
+    <description>Is Audit to FAKEHDFS enabled?</description>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -45,8 +45,8 @@
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
+    <value>hdfs://FAKENAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>FAKEHDFS folder to write audit to; make sure the service user has required permissions</description>
     <depends-on>
       <property>
         <type>ranger-env</type>

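The stack default above embeds FAKENAMENODE_HOSTNAME as a placeholder that is replaced with the actual FAKEHNameNode host when the configuration is rendered for a cluster. A minimal sketch of how the audit destination is composed; audit_destination is a hypothetical helper, and the host is an example value:

# Illustrative composition of xasecure.audit.destination.hdfs.dir from its
# parts; host, port, and path here are example assumptions.
def audit_destination(namenode_host, port=8020, path="/ranger/audit"):
    return "hdfs://%s:%d%s" % (namenode_host, port, path)

assert audit_destination("nn-1.example.com") == "hdfs://nn-1.example.com:8020/ranger/audit"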
+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-plugin-properties.xml

@@ -23,7 +23,7 @@
   <property>
     <name>policy_user</name>
     <value>ambari-qa</value>
-    <display-name>Policy user for HDFS</display-name>
+    <display-name>Policy user for FAKEHDFS</display-name>
     <description>This user must be system user and also present at Ranger
       admin portal</description>
     <on-ambari-upgrade add="false"/>
@@ -40,7 +40,7 @@
   <property>
     <name>ranger-hdfs-plugin-enabled</name>
     <value>No</value>
-    <display-name>Enable Ranger for HDFS</display-name>
+    <display-name>Enable Ranger for FAKEHDFS</display-name>
     <description>Enable ranger hdfs plugin</description>
     <depends-on>
       <property>

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-policymgr-ssl.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-security.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ranger-hdfs-security.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-client.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ssl-client.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-server.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/ssl-server.xml


+ 9 - 9
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/kerberos.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json

@@ -1,7 +1,7 @@
 {
   "services": [
     {
-      "name": "HDFS",
+      "name": "FAKEHDFS",
       "identities": [
         {
           "name": "/spnego",
@@ -40,15 +40,15 @@
       ],
       "components": [
         {
-          "name":  "HDFS_CLIENT",
+          "name":  "FAKEHDFS_CLIENT",
           "identities": [
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             }
           ]
         },
         {
-          "name": "NAMENODE",
+          "name": "FAKENAMENODE",
           "identities": [
             {
               "name": "hdfs",
@@ -99,7 +99,7 @@
               }
             },
             {
-              "name": "/HDFS/NAMENODE/namenode_nn",
+              "name": "/FAKEHDFS/FAKENAMENODE/namenode_nn",
               "principal": {
                 "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"                
               },
@@ -117,7 +117,7 @@
           ]
         },
         {
-          "name": "DATANODE",
+          "name": "FAKEDATANODE",
           "identities": [
             {
               "name": "datanode_dn",
@@ -151,7 +151,7 @@
           ]
         },
         {
-          "name": "SECONDARY_NAMENODE",
+          "name": "SECONDARY_FAKENAMENODE",
           "identities": [
             {
               "name": "secondary_namenode_nn",
@@ -183,7 +183,7 @@
           ]
         },
         {
-          "name": "NFS_GATEWAY",
+          "name": "FAKENFS_GATEWAY",
           "identities": [
             {
               "name": "nfsgateway",
@@ -209,7 +209,7 @@
           ]
         },
         {
-          "name": "JOURNALNODE",
+          "name": "FAKEJOURNALNODE",
           "identities": [
             {
               "name": "journalnode_jn",

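In this descriptor, identity names that start with a slash (for example "/FAKEHDFS/FAKENAMENODE/hdfs" under FAKEHDFS_CLIENT) reference an identity defined elsewhere, so the client component reuses the hdfs identity declared on FAKENAMENODE instead of declaring its own principal and keytab. A minimal sketch of that lookup, assuming the descriptor is loaded as a plain dict; resolve_identity is a hypothetical helper, and Ambari's real resolver lives in the server:

# Sketch: resolve a "/SERVICE/COMPONENT/identity" reference against a
# kerberos.json descriptor loaded with json.load(). Illustrative only.
def resolve_identity(descriptor, ref):
    service_name, component_name, identity_name = ref.strip("/").split("/")
    for service in descriptor["services"]:
        if service["name"] != service_name:
            continue
        for component in service.get("components", []):
            if component["name"] != component_name:
                continue
            for identity in component.get("identities", []):
                if identity["name"] == identity_name:
                    return identity
    raise KeyError("unresolved identity reference: %s" % ref)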
+ 28 - 28
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml

@@ -19,18 +19,18 @@
   <schemaVersion>2.0</schemaVersion>
   <services>
     <service>
-      <name>HDFS</name>
-      <displayName>HDFS</displayName>
+      <name>FAKEHDFS</name>
+      <displayName>FAKEHDFS</displayName>
       <comment>Apache Hadoop Distributed File System</comment>
       <version>2.7.1.2.3</version>
 
       <components>
         <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
+          <name>FAKENAMENODE</name>
+          <displayName>FAKEHNameNode</displayName>
           <category>MASTER</category>
           <cardinality>1-2</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <commandScript>
             <script>scripts/namenode.py</script>
@@ -56,7 +56,7 @@
               </commandScript>
             </customCommand>
             <customCommand>
-              <name>REBALANCEHDFS</name>
+              <name>REBALANCEFAKEHDFS</name>
               <background>true</background>
               <commandScript>
                 <script>scripts/namenode.py</script>
@@ -67,8 +67,8 @@
         </component>
 
         <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
+          <name>FAKEDATANODE</name>
+          <displayName>FAKEDataNode</displayName>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
           <versionAdvertised>true</versionAdvertised>
@@ -79,9 +79,9 @@
             <timeout>1200</timeout>
           </commandScript>
           <bulkCommands>
-            <displayName>DataNodes</displayName>
+            <displayName>FAKEDataNodes</displayName>
             <!-- Used by decommission and recommission -->
-            <masterComponent>NAMENODE</masterComponent>
+            <masterComponent>FAKENAMENODE</masterComponent>
           </bulkCommands>
           <logs>
             <log>
@@ -92,10 +92,10 @@
         </component>
 
         <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
+          <name>SECONDARY_FAKENAMENODE</name>
+          <displayName>SFAKEHNameNode</displayName>
           <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <category>MASTER</category>
           <commandScript>
@@ -112,11 +112,11 @@
         </component>
 
         <component>
-          <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
+          <name>FAKEHDFS_CLIENT</name>
+          <displayName>FAKEHDFS Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/hdfs_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -147,11 +147,11 @@
         </component>
 
         <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
+          <name>FAKEJOURNALNODE</name>
+          <displayName>FAKEJournalNode</displayName>
           <category>SLAVE</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/journalnode.py</script>
             <scriptType>PYTHON</scriptType>
@@ -165,7 +165,7 @@
           </logs>
           <dependencies>
             <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
@@ -175,11 +175,11 @@
         </component>
 
         <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
+          <name>FAKEZKFC</name>
+          <displayName>FAKEZKFailoverController</displayName>
           <category>SLAVE</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/zkfc_slave.py</script>
             <scriptType>PYTHON</scriptType>
@@ -194,10 +194,10 @@
         </component>
 
         <component>
-          <name>NFS_GATEWAY</name>
-          <displayName>NFSGateway</displayName>
+          <name>FAKENFS_GATEWAY</name>
+          <displayName>FAKENFSGateway</displayName>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <category>SLAVE</category>
           <commandScript>
             <script>scripts/nfsgateway.py</script>
@@ -206,7 +206,7 @@
           </commandScript>
           <dependencies>
             <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
@@ -223,7 +223,7 @@
       </commandScript>
 
       <requiredServices>
-        <service>ZOOKEEPER</service>
+        <service>FAKEZOOKEEPER</service>
       </requiredServices>
 
       <configuration-dependencies>

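The <cardinality> values above follow the usual stack convention: "1" means exactly one host, "1-2" an inclusive range, and "1+"/"0+" an open-ended minimum. A small sketch of that convention; parse_cardinality is a hypothetical helper, and Ambari's own validation is server-side:

# Parse metainfo <cardinality> strings into an inclusive (min, max) pair,
# where max=None means unbounded. Illustrative helper.
def parse_cardinality(value):
    if value.endswith("+"):
        return int(value[:-1]), None
    if "-" in value:
        lo, hi = value.split("-")
        return int(lo), int(hi)
    n = int(value)
    return n, n

assert parse_cardinality("1") == (1, 1)      # SECONDARY_FAKENAMENODE
assert parse_cardinality("1-2") == (1, 2)    # FAKENAMENODE
assert parse_cardinality("0+") == (0, None)  # FAKEJOURNALNODE, FAKEZKFC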
File diff suppressed because it is too large
+ 191 - 191
ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metrics.json


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py


+ 57 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py

@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+
+
+class FAKEDataNode(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(FAKEDataNode, self).__init__()
+    self.component_name = "FAKEDATANODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.datanode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.datanode.keytab.file"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-datanode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEDataNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
+
+if __name__ == "__main__":
+  FAKEDataNode().execute()

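The pre_upgrade_restart hook above is the pattern shared by the dummy FAKEHDFS components: the distro symlink is switched only when the RESTART command carries a target version and that version's stack declares rolling-upgrade support. A condensed restatement of that gating, assuming the ambari-agent resource_management library is importable (as it is for the script itself):

# Condensed restatement of the gating in pre_upgrade_restart above.
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature

def select_if_rolling_upgrade(component_name, version):
    # version is None on ordinary restarts; it is populated from
    # /commandParams/version only during a stack upgrade (see params.py).
    if version and check_stack_feature(StackFeature.ROLLING_UPGRADE, version):
        stack_select.select(component_name, version)

# e.g. select_if_rolling_upgrade("hadoop-hdfs-datanode", "2.5.0.0-1234")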
+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py

@@ -32,7 +32,7 @@ class HdfsClient(Dummy):
 
   def __init__(self):
     super(HdfsClient, self).__init__()
-    self.component_name = "HDFS_CLIENT"
+    self.component_name = "FAKEHDFS_CLIENT"
 
 if __name__ == "__main__":
   HdfsClient().execute()

+ 58 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py

@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+
+
+class FAKEJournalNode(Dummy):
+  """
+  Dummy script that simulates a master component.
+  """
+
+  def __init__(self):
+    super(FAKEJournalNode, self).__init__()
+    self.component_name = "FAKEJOURNALNODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.journalnode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.journalnode.keytab.file"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-journalnode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEJournalNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
+
+if __name__ == "__main__":
+  FAKEJournalNode().execute()

+ 27 - 6
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py

@@ -24,23 +24,28 @@ import json
 
 # Local Imports
 from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
 
 
-class NameNode(Dummy):
+class FAKEHNameNode(Dummy):
   """
   Dummy script that simulates a master component.
   """
 
   def __init__(self):
-    super(NameNode, self).__init__()
-    self.component_name = "NAMENODE"
+    super(FAKEHNameNode, self).__init__()
+    self.component_name = "FAKENAMENODE"
     self.principal_conf_name = "hdfs-site"
     self.principal_name = "dfs.namenode.kerberos.principal"
     self.keytab_conf_name = "hdfs-site"
     self.keytab_name = "dfs.namenode.keytab.file"
 
   def rebalancehdfs(self, env):
-    print "Rebalance HDFS"
+    print "Rebalance FAKEHDFS"
 
     threshold = 10
     if "namenode" in self.config["commandParams"]:
@@ -52,7 +57,23 @@ class NameNode(Dummy):
     print "Threshold: %s" % str(threshold)
 
   def decommission(self):
-    print "Rebalance HDFS"
+    print "Rebalance FAKEHDFS"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-namenode"
+
+  def finalize_non_rolling_upgrade(self, env):
+    pass
+
+  def finalize_rolling_upgrade(self, env):
+    pass
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEHNameNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
 
 if __name__ == "__main__":
-  NameNode().execute()
+  FAKEHNameNode().execute()

+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py

@@ -25,18 +25,18 @@ Ambari Agent
 from resource_management.libraries.script.dummy import Dummy
 
 
-class NFSGateway(Dummy):
+class FAKENFSGateway(Dummy):
   """
   Dummy script that simulates a slave component.
   """
 
   def __init__(self):
-    super(NFSGateway, self).__init__()
-    self.component_name = "NFS_GATEWAY"
+    super(FAKENFSGateway, self).__init__()
+    self.component_name = "FAKENFS_GATEWAY"
     self.principal_conf_name = "hdfs-site"
     self.principal_name = "nfs.kerberos.principal"
     self.keytab_conf_name = "hdfs-site"
     self.keytab_name = "nfs.keytab.file"
 
 if __name__ == "__main__":
-  NFSGateway().execute()
+  FAKENFSGateway().execute()

+ 9 - 18
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py

@@ -15,28 +15,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 
-Ambari Agent
-
 """
 
-# Python Imports
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_architecture import get_architecture
 
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
 
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
 
-class JournalNode(Dummy):
-  """
-  Dummy script that simulates a master component.
-  """
+architecture = get_architecture()
 
-  def __init__(self):
-    super(JournalNode, self).__init__()
-    self.component_name = "JOURNALNODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.journalnode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.journalnode.keytab.file"
+stack_name = default("/hostLevelParams/stack_name", None)
 
-if __name__ == "__main__":
-  JournalNode().execute()
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)

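The default() helper used above takes a slash-separated path into the command's JSON configuration plus a fallback for when any segment is missing. A minimal re-implementation of that lookup for illustration; the real helper reads Script.get_config() itself rather than taking the dict as an argument:

# Minimal sketch of the default(path, fallback) lookup semantics.
def default_lookup(config, path, fallback=None):
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

cmd = {"commandParams": {"version": "2.5.0.0-1234"}}  # example command JSON
assert default_lookup(cmd, "/commandParams/version") == "2.5.0.0-1234"
assert default_lookup(cmd, "/hostLevelParams/stack_name", None) is None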
+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py


+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py

@@ -25,18 +25,18 @@ Ambari Agent
 from resource_management.libraries.script.dummy import Dummy
 
 
-class SNameNode(Dummy):
+class SFAKEHNameNode(Dummy):
   """
   Dummy script that simulates a slave component.
   """
 
   def __init__(self):
-    super(SNameNode, self).__init__()
-    self.component_name = "SECONDARY_NAMENODE"
+    super(SFAKEHNameNode, self).__init__()
+    self.component_name = "SECONDARY_FAKENAMENODE"
     self.principal_conf_name = "hdfs-site"
     self.principal_name = "dfs.secondary.namenode.kerberos.principal"
     self.keytab_conf_name = "hdfs-site"
     self.keytab_name = "dfs.secondary.namenode.keytab.file"
 
 if __name__ == "__main__":
-  SNameNode().execute()
+  SFAKEHNameNode().execute()

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py

@@ -32,7 +32,7 @@ class ZkfcSlave(Dummy):
 
   def __init__(self):
     super(ZkfcSlave, self).__init__()
-    self.component_name = "ZKFC"
+    self.component_name = "FAKEZKFC"
 
 if __name__ == "__main__":
   ZkfcSlave().execute()

+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json

@@ -17,7 +17,7 @@
     "links": [
       {
         "name": "namenode_ui",
-        "label": "NameNode UI",
+        "label": "FAKEHNameNode UI",
         "url":"%@://%@:%@",
         "requires_user_name": "false",
         "port":{
@@ -31,7 +31,7 @@
       },
       {
         "name": "namenode_logs",
-        "label": "NameNode Logs",
+        "label": "FAKEHNameNode Logs",
         "url":"%@://%@:%@/logs",
         "requires_user_name": "false",
         "port":{
@@ -45,7 +45,7 @@
       },
       {
         "name": "namenode_jmx",
-        "label": "NameNode JMX",
+        "label": "FAKEHNameNode JMX",
         "url":"%@://%@:%@/jmx",
         "requires_user_name": "false",
         "port":{

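The "%@" tokens in the quicklink URLs above are positional placeholders that the Ambari web UI fills, in order, with the scheme, host, and port resolved for the link. A sketch of that substitution with illustrative values; render_quicklink is a hypothetical helper:

# Fill "%@" placeholders left-to-right with scheme, host, and port.
def render_quicklink(template, scheme, host, port):
    for value in (scheme, host, str(port)):
        template = template.replace("%@", value, 1)
    return template

assert render_quicklink("%@://%@:%@/jmx", "http", "nn-1.example.com", 50070) == \
    "http://nn-1.example.com:50070/jmx"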
+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json

@@ -1,6 +1,6 @@
 {
   "name": "default",
-  "description": "Default theme for HDFS service",
+  "description": "Default theme for FAKEHDFS service",
   "configuration": {
     "layouts": [
       {
@@ -15,7 +15,7 @@
               "sections": [
                 {
                   "name": "section-namenode",
-                  "display-name": "NameNode",
+                  "display-name": "FAKEHNameNode",
                   "row-index": "0",
                   "column-index": "0",
                   "row-span": "1",
@@ -34,7 +34,7 @@
                 },
                 {
                   "name": "section-datanode",
-                  "display-name": "DataNode",
+                  "display-name": "FAKEDataNode",
                   "row-index": "0",
                   "column-index": "1",
                   "row-span": "1",

+ 124 - 124
ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json

@@ -2,11 +2,11 @@
   "layouts": [
     {
       "layout_name": "default_hdfs_dashboard",
-      "display_name": "Standard HDFS Dashboard",
-      "section_name": "HDFS_SUMMARY",
+      "display_name": "Standard FAKEHDFS Dashboard",
+      "section_name": "FAKEHDFS_SUMMARY",
       "widgetLayoutInfo": [
         {
-          "widget_name": "NameNode GC count",
+          "widget_name": "FAKEHNameNode GC count",
           "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
           "widget_type": "GRAPH",
           "is_visible": true,
@@ -14,15 +14,15 @@
             {
               "name": "jvm.JvmMetrics.GcCount._rate",
               "metric_path": "metrics/jvm/gcCount._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
               "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -42,7 +42,7 @@
           }
         },
         {
-          "widget_name": "NameNode GC time",
+          "widget_name": "FAKEHNameNode GC time",
           "description": "Total time taken by major type garbage collections in milliseconds.",
           "widget_type": "GRAPH",
           "is_visible": true,
@@ -50,8 +50,8 @@
             {
               "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
               "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -69,7 +69,7 @@
         },
         {
           "widget_name": "NN Connection Load",
-          "description": "Number of open RPC connections being managed by NameNode.",
+          "description": "Number of open RPC connections being managed by FAKEHNameNode.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
@@ -77,16 +77,16 @@
               "name": "rpc.rpc.client.NumOpenConnections",
               "metric_path": "metrics/rpc/client/NumOpenConnections",
               "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "rpc.rpc.datanode.NumOpenConnections",
               "metric_path": "metrics/rpc/datanode/NumOpenConnections",
               "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -106,7 +106,7 @@
           }
         },
         {
-          "widget_name": "NameNode Heap",
+          "widget_name": "FAKEHNameNode Heap",
           "description": "Heap memory committed and Heap memory used with respect to time.",
           "widget_type": "GRAPH",
           "is_visible": true,
@@ -114,15 +114,15 @@
             {
               "name": "jvm.JvmMetrics.MemHeapCommittedM",
               "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "jvm.JvmMetrics.MemHeapUsedM",
               "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -143,58 +143,58 @@
           }
         },
         {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_name": "FAKEHNameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on FAKEHNameNode host.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "cpu_system",
               "metric_path": "metrics/cpu/cpu_system",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "cpu_user",
               "metric_path": "metrics/cpu/cpu_user",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "cpu_nice",
               "metric_path": "metrics/cpu/cpu_nice",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "cpu_idle",
               "metric_path": "metrics/cpu/cpu_idle",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "cpu_wio",
               "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "mem_total",
               "metric_path": "metrics/memory/mem_total",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "mem_free",
               "metric_path": "metrics/memory/mem_free",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -215,7 +215,7 @@
           }
         },
         {
-          "widget_name": "NameNode RPC",
+          "widget_name": "FAKEHNameNode RPC",
           "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
           "widget_type": "GRAPH",
           "is_visible": true,
@@ -223,29 +223,29 @@
             {
               "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
               "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
               "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
               "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             },
             {
               "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
               "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
@@ -274,7 +274,7 @@
           }
         },
         {
-          "widget_name": "NameNode Operations",
+          "widget_name": "FAKEHNameNode Operations",
           "description": "Rate per second of number of file operation over time.",
           "widget_type": "GRAPH",
           "is_visible": false,
@@ -282,14 +282,14 @@
             {
               "name": "dfs.namenode.TotalFileOps._rate",
               "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
           "values": [
             {
-              "name": "NameNode File Operations",
+              "name": "FAKEHNameNode File Operations",
               "value": "${dfs.namenode.TotalFileOps._rate}"
             }
           ],
@@ -300,15 +300,15 @@
         },
         {
           "widget_name": "Failed disk volumes",
-          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "description": "Number of Failed disk volumes across all FAKEDataNodes. Its indicative of FAKEHDFS bad health.",
           "widget_type": "NUMBER",
           "is_visible": true,
           "metrics": [
             {
               "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
               "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
@@ -323,22 +323,22 @@
         },
         {
           "widget_name": "Blocks With Corrupted Replicas",
-          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
+          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of FAKEHDFS bad health.",
           "widget_type": "NUMBER",
           "is_visible": true,
           "metrics": [
             {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "name": "Hadoop:service=FAKEHNameNode,name=FSNamesystem.CorruptBlocks",
               "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
           "values": [
             {
               "name": "Blocks With Corrupted Replicas",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+              "value": "${Hadoop:service=FAKEHNameNode,name=FSNamesystem.CorruptBlocks}"
             }
           ],
           "properties": {
@@ -348,22 +348,22 @@
         },
         {
           "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of FAKEHDFS bad health.",
           "widget_type": "NUMBER",
           "is_visible": true,
           "metrics": [
             {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "name": "Hadoop:service=FAKEHNameNode,name=FSNamesystem.UnderReplicatedBlocks",
               "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
               "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
           "values": [
             {
               "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+              "value": "${Hadoop:service=FAKEHNameNode,name=FSNamesystem.UnderReplicatedBlocks}"
             }
           ],
           "properties": {
@@ -372,7 +372,7 @@
           }
         },
         {
-          "widget_name": "HDFS Space Utilization",
+          "widget_name": "FAKEHDFS Space Utilization",
           "description": "Percentage of available space used in the DFS.",
           "widget_type": "GAUGE",
           "is_visible": true,
@@ -380,19 +380,19 @@
             {
               "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
               "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
               "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "HDFS Space Utilization",
+              "name": "FAKEHDFS Space Utilization",
               "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
             }
           ],
@@ -405,12 +405,12 @@
     },
     {
       "layout_name": "default_hdfs_heatmap",
-      "section_name": "HDFS_HEATMAPS",
-      "display_name": "HDFS Heatmaps",
+      "section_name": "FAKEHDFS_HEATMAPS",
+      "display_name": "FAKEHDFS Heatmaps",
       "widgetLayoutInfo": [
         {
-          "widget_name": "HDFS Bytes Read",
-          "default_section_name": "HDFS_HEATMAPS",
+          "widget_name": "FAKEHDFS Bytes Read",
+          "default_section_name": "FAKEHDFS_HEATMAPS",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": true,
@@ -418,13 +418,13 @@
             {
               "name": "dfs.datanode.BytesRead._rate",
               "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "HDFS Bytes Read",
+              "name": "FAKEHDFS Bytes Read",
               "value": "${dfs.datanode.BytesRead._rate}"
             }
           ],
@@ -434,7 +434,7 @@
           }
         },
         {
-          "widget_name": "HDFS Bytes Written",
+          "widget_name": "FAKEHDFS Bytes Written",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
@@ -442,13 +442,13 @@
             {
               "name": "dfs.datanode.BytesWritten._rate",
               "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "HDFS Bytes Written",
+              "name": "FAKEHDFS Bytes Written",
               "value": "${dfs.datanode.BytesWritten._rate}"
             }
           ],
@@ -458,22 +458,22 @@
           }
         },
         {
-          "widget_name": "DataNode Garbage Collection Time",
+          "widget_name": "FAKEDataNode Garbage Collection Time",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.GcTimeMillis",
               "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "DataNode Garbage Collection Time",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+              "name": "FAKEDataNode Garbage Collection Time",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.GcTimeMillis}"
             }
           ],
           "properties": {
@@ -482,22 +482,22 @@
           }
         },
         {
-          "widget_name": "DataNode JVM Heap Memory Used",
+          "widget_name": "FAKEDataNode JVM Heap Memory Used",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapUsedM",
               "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "DataNode JVM Heap Memory Used",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+              "name": "FAKEDataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapUsedM}"
             }
           ],
           "properties": {
@@ -506,22 +506,22 @@
           }
         },
         {
-          "widget_name": "DataNode JVM Heap Memory Committed",
+          "widget_name": "FAKEDataNode JVM Heap Memory Committed",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapCommittedM",
               "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "DataNode JVM Heap Memory Committed",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+              "name": "FAKEDataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapCommittedM}"
             }
           ],
           "properties": {
@@ -530,8 +530,8 @@
           }
         },
         {
-          "widget_name": "DataNode Process Disk I/O Utilization",
-          "default_section_name": "HDFS_HEATMAPS",
+          "widget_name": "FAKEDataNode Process Disk I/O Utilization",
+          "default_section_name": "FAKEHDFS_HEATMAPS",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
@@ -539,31 +539,31 @@
             {
               "name": "dfs.datanode.BytesRead._rate",
               "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.BytesWritten._rate",
               "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.TotalReadTime._rate",
               "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.TotalWriteTime._rate",
               "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "DataNode Process Disk I/O Utilization",
+              "name": "FAKEDataNode Process Disk I/O Utilization",
               "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
             }
           ],
@@ -573,7 +573,7 @@
           }
         },
         {
-          "widget_name": "DataNode Process Network I/O Utilization",
+          "widget_name": "FAKEDataNode Process Network I/O Utilization",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
@@ -581,31 +581,31 @@
             {
               "name": "dfs.datanode.RemoteBytesRead._rate",
               "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.ReadsFromRemoteClient._rate",
               "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.RemoteBytesWritten._rate",
               "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "dfs.datanode.WritesFromRemoteClient._rate",
               "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "DataNode Process Network I/O Utilization",
+              "name": "FAKEDataNode Process Network I/O Utilization",
               "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
             }
           ],
@@ -615,26 +615,26 @@
           }
         },
         {
-          "widget_name": "HDFS Space Utilization",
+          "widget_name": "FAKEHDFS Space Utilization",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
               "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
               "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             },
             {
               "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
               "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
             }
           ],
           "values": [
             {
-              "name": "HDFS Space Utilization",
+              "name": "FAKEHDFS Space Utilization",
               "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
             }
           ],

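Both FAKEHDFS Space Utilization widgets above compute the same used/capacity ratio from FAKEDATANODE metrics; the GAUGE variant leaves it as a 0-1 fraction while the HEATMAP variant multiplies by 100. A worked check with illustrative numbers:

# Worked example of the Space Utilization expressions (values are made up).
capacity = 1000.0   # FsDatasetImpl.Capacity, e.g. in GB
remaining = 250.0   # FsDatasetImpl.Remaining

gauge_fraction = (capacity - remaining) / capacity  # 0.75 -> GAUGE widget
heatmap_percent = gauge_fraction * 100              # 75.0 -> HEATMAP widget
assert heatmap_percent == 75.0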
File diff suppressed because it is too large
+ 176 - 176
ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/YARN_metrics.json


+ 139 - 139
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/YARN_widgets.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/YARN_widgets.json

@@ -2,8 +2,8 @@
   "layouts": [
     {
       "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
+      "display_name": "Standard FAKEYARN Dashboard",
+      "section_name": "FAKEYARN_SUMMARY",
       "widgetLayoutInfo": [
         {
           "widget_name": "Memory Utilization",
@@ -14,15 +14,15 @@
             {
               "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
               "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
               "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             }
           ],
@@ -47,21 +47,21 @@
             {
               "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
               "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
               "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             }
           ],
           "values": [
             {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "name": "Total Allocatable CPU Utilized across FAKENodeManager",
               "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
             }
           ],
@@ -78,46 +78,46 @@
           "is_visible": true,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersFailed._rate",
               "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersCompleted._rate",
               "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersLaunched._rate",
               "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersIniting._sum",
               "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersKilled._rate",
               "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersRunning._sum",
               "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
               "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+              "value": "${(yarn.FAKENodeManagerMetrics.ContainersFailed._rate/(yarn.FAKENodeManagerMetrics.ContainersFailed._rate + yarn.FAKENodeManagerMetrics.ContainersCompleted._rate + yarn.FAKENodeManagerMetrics.ContainersLaunched._rate + yarn.FAKENodeManagerMetrics.ContainersIniting._sum + yarn.FAKENodeManagerMetrics.ContainersKilled._rate + yarn.FAKENodeManagerMetrics.ContainersRunning._sum)) * 100}"
             }
           ],
           "properties": {
@@ -135,43 +135,43 @@
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
               "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
               "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsPending",
               "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
               "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
               "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             },
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
               "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             }
           ],
@@ -196,8 +196,8 @@
             {
               "name": "yarn.QueueMetrics.Queue=root.AppsPending",
               "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
+              "service_name": "FAKEYARN",
+              "component_name": "FAKERESOURCEMANAGER",
               "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
             }
           ],
@@ -215,21 +215,21 @@
         },
         {
           "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
+          "description": "Percentage of memory used across all FAKENodeManager hosts.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "mem_total._sum",
               "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "mem_free._sum",
               "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
@@ -246,21 +246,21 @@
         },
         {
           "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "description": "Sum of disk throughput for all FAKENodeManager hosts.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "read_bps._sum",
               "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "write_bps._sum",
               "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
@@ -281,22 +281,22 @@
         },
         {
           "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
+          "description": "Average of Network utilized across all FAKENodeManager hosts.",
+          "default_section_name": "FAKEYARN_SUMMARY",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "pkts_in._avg",
               "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "pkts_out._avg",
               "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
@@ -316,40 +316,40 @@
         },
         {
           "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
+          "description": "Percentage of CPU utilized across all FAKENodeManager hosts.",
+          "default_section_name": "FAKEYARN_SUMMARY",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
             {
               "name": "cpu_system._sum",
               "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "cpu_user._sum",
               "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "cpu_nice._sum",
               "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "cpu_idle._sum",
               "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
               "name": "cpu_wio._sum",
               "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
@@ -368,32 +368,32 @@
     },
     {
       "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
+      "display_name": "FAKEYARN Heatmaps",
+      "section_name": "FAKEYARN_HEATMAPS",
       "widgetLayoutInfo": [
         {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "widget_name": "Total Allocatable RAM Utilized per FAKENodeManager",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": true,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "name": "yarn.FAKENodeManagerMetrics.AllocatedGB",
               "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "name": "yarn.FAKENodeManagerMetrics.AvailableGB",
               "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+              "name": "Total Allocatable RAM Utilized per FAKENodeManager",
+              "value": "${(yarn.FAKENodeManagerMetrics.AllocatedGB/(yarn.FAKENodeManagerMetrics.AvailableGB + yarn.FAKENodeManagerMetrics.AllocatedGB)) * 100}"
             }
           ],
           "properties": {
@@ -402,28 +402,28 @@
           }
         },
         {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "widget_name": "Total Allocatable CPU Utilized per FAKENodeManager",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "name": "yarn.FAKENodeManagerMetrics.AllocatedVCores",
               "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "name": "yarn.FAKENodeManagerMetrics.AvailableVCores",
               "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+              "name": "Total Allocatable CPU Utilized per FAKENodeManager",
+              "value": "${(yarn.FAKENodeManagerMetrics.AllocatedVCores/(yarn.FAKENodeManagerMetrics.AllocatedVCores + yarn.FAKENodeManagerMetrics.AvailableVCores)) * 100}"
             }
           ],
           "properties": {
@@ -438,46 +438,46 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersFailed._rate",
               "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersCompleted._rate",
               "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersLaunched._rate",
               "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersIniting",
               "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersKilled._rate",
               "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "name": "yarn.FAKENodeManagerMetrics.ContainersRunning",
               "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
               "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+              "value": "${(yarn.FAKENodeManagerMetrics.ContainersFailed._rate/(yarn.FAKENodeManagerMetrics.ContainersFailed._rate + yarn.FAKENodeManagerMetrics.ContainersCompleted._rate + yarn.FAKENodeManagerMetrics.ContainersLaunched._rate + yarn.FAKENodeManagerMetrics.ContainersIniting + yarn.FAKENodeManagerMetrics.ContainersKilled._rate + yarn.FAKENodeManagerMetrics.ContainersRunning)) * 100}"
             }
           ],
           "properties": {
@@ -486,22 +486,22 @@
           }
         },
         {
-          "widget_name": "NodeManager GC Time",
+          "widget_name": "FAKENodeManager GC Time",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "name": "Hadoop:service=FAKENodeManager,name=JvmMetrics.GcTimeMillis",
               "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+              "name": "FAKENodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=FAKENodeManager,name=JvmMetrics.GcTimeMillis}"
             }
           ],
           "properties": {
@@ -510,22 +510,22 @@
           }
         },
         {
-          "widget_name": "NodeManager JVM Heap Memory Used",
+          "widget_name": "FAKENodeManager JVM Heap Memory Used",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "name": "Hadoop:service=FAKENodeManager,name=JvmMetrics.MemHeapUsedM",
               "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+              "name": "FAKENodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=FAKENodeManager,name=JvmMetrics.MemHeapUsedM}"
             }
           ],
           "properties": {
@@ -540,16 +540,16 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "name": "yarn.FAKENodeManagerMetrics.AllocatedContainers",
               "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
               "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+              "value": "${yarn.FAKENodeManagerMetrics.AllocatedContainers}"
             }
           ],
           "properties": {
@@ -558,22 +558,22 @@
           }
         },
         {
-          "widget_name": "NodeManager RAM Utilized",
+          "widget_name": "FAKENodeManager RAM Utilized",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "name": "yarn.FAKENodeManagerMetrics.AllocatedGB",
               "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+              "name": "FAKENodeManager RAM Utilized",
+              "value": "${yarn.FAKENodeManagerMetrics.AllocatedGB}"
             }
           ],
           "properties": {
@@ -582,22 +582,22 @@
           }
         },
         {
-          "widget_name": "NodeManager CPU Utilized",
+          "widget_name": "FAKENodeManager CPU Utilized",
           "description": "",
           "widget_type": "HEATMAP",
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "name": "yarn.FAKENodeManagerMetrics.AllocatedVCores",
               "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
+              "service_name": "FAKEYARN",
+              "component_name": "FAKENODEMANAGER"
             }
           ],
           "values": [
             {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+              "name": "FAKENodeManager CPU Utilized",
+              "value": "${yarn.FAKENodeManagerMetrics.AllocatedVCores}"
             }
           ],
           "properties": {

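The renamed FAKEYARN widget definitions above keep the same evaluation model: each "value" is a template whose ${...} placeholders are substituted with the metrics listed in the sibling "metrics" array before the arithmetic is applied. A rough illustration of that substitution step (not Ambari's actual evaluator; the metric values are invented and eval is used purely for the demo):

    import re

    metrics = {
        "yarn.FAKENodeManagerMetrics.AllocatedGB": 24.0,
        "yarn.FAKENodeManagerMetrics.AvailableGB": 8.0,
    }

    expr = ("${(yarn.FAKENodeManagerMetrics.AllocatedGB/"
            "(yarn.FAKENodeManagerMetrics.AvailableGB + "
            "yarn.FAKENodeManagerMetrics.AllocatedGB)) * 100}")

    body = re.match(r"^\$\{(.*)\}$", expr).group(1)
    # Substitute longest names first so one metric name cannot clobber another.
    for name, value in sorted(metrics.items(), key=lambda kv: -len(kv[0])):
        body = body.replace(name, repr(value))
    print(eval(body))  # 75.0 -- RAM utilized per FAKENodeManager, in percent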
+ 14 - 14
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/alerts.json

@@ -1,34 +1,34 @@
 {
-  "MAPREDUCE2": {
-    "HISTORYSERVER": [
+  "FAKEMAPREDUCE2": {
+    "FAKEHISTORYSERVER": [
       {
         "name": "mapreduce_history_process",
-        "label": "History Server process",
+        "label": "FAKEHistory Server process",
         "description": "Alert for history server process status",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_history_process.py",
+          "path": "PERF/1.0/services/FAKEYARN/package/alerts/alert_history_process.py",
           "parameters": []
         }
       }
     ]
   },
-  "YARN": {
+  "FAKEYARN": {
 
-    "NODEMANAGER": [
+    "FAKENODEMANAGER": [
       {
         "name": "yarn_nodemanager_health",
-        "label": "NodeManager Health",
-        "description": "This host-level alert checks the node health property available from the NodeManager component.",
+        "label": "FAKENodeManager Health",
+        "description": "This host-level alert checks the node health property available from the FAKENodeManager component.",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py",
+          "path": "PERF/1.0/services/FAKEYARN/package/alerts/alert_nodemanager_health.py",
           "parameters": [
             {
               "name": "connection.timeout",
@@ -43,22 +43,22 @@
         }
       }
     ],
-    "RESOURCEMANAGER": [
+    "FAKERESOURCEMANAGER": [
       {
         "name": "yarn_resourcemanager_process",
-        "label": "ResourceManager process",
+        "label": "FAKEResourceManager process",
         "description": "Alert for resourcemanager process status",
         "interval": 1,
         "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py",
+          "path": "PERF/1.0/services/FAKEYARN/package/alerts/alert_resourcemanager_process.py",
           "parameters": []
         }
       }
     ],
-    "APP_TIMELINE_SERVER": [
+    "FAKEAPP_TIMELINE_SERVER": [
       {
         "name": "yarn_app_timeline_server_process",
         "label": "App Timeline process",
@@ -68,7 +68,7 @@
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py",
+          "path": "PERF/1.0/services/FAKEYARN/package/alerts/alert_timeline_process.py",
           "parameters": []
         }
       }

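The alert definitions above only reference script paths under PERF/1.0/services/FAKEYARN/package/alerts/; the scripts themselves are not part of this hunk. For orientation, a skeleton of the interface an Ambari SCRIPT-type alert is generally expected to expose (a hypothetical stand-in, not the actual alert_resourcemanager_process.py):

    OK = 'OK'
    CRITICAL = 'CRITICAL'

    def get_tokens():
        """Config keys Ambari should resolve and pass into execute()."""
        return ()

    def execute(configurations=None, parameters=None, host_name=None):
        """Return a (result_code, [label]) tuple."""
        process_running = True  # a real script would check a PID file or port
        if process_running:
            return (OK, ['FAKERESOURCEMANAGER process is running'])
        return (CRITICAL, ['FAKERESOURCEMANAGER process is not running'])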
+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-env.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration-mapred/mapred-env.xml

@@ -28,11 +28,11 @@
     <value>
 # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
 
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+export HADOOP_JOB_FAKEHISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
 
 export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
 
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_JOB_FAKEHISTORYSERVER_OPTS=
 #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
 #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
 #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.

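The mapred-env content above is a Jinja-style template: Ambari fills placeholders such as {{jobhistory_heapsize}} from stack parameters before writing the file to disk. A standalone approximation using the jinja2 library (the heap value is invented):

    from jinja2 import Template

    template = Template(
        "export HADOOP_JOB_FAKEHISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
        "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n"
    )
    print(template.render(jobhistory_heapsize=900))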
+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-site.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration-mapred/mapred-site.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/capacity-scheduler.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/capacity-scheduler.xml


+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-audit.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-audit.xml

@@ -28,8 +28,8 @@
   <property>
     <name>xasecure.audit.destination.hdfs</name>
     <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
+    <display-name>Audit to FAKEHDFS</display-name>
+    <description>Is Audit to FAKEHDFS enabled?</description>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -43,8 +43,8 @@
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
+    <value>hdfs://FAKENAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>FAKEHDFS folder to write audit to; make sure the service user has required permissions</description>
     <depends-on>
       <property>
         <type>ranger-env</type>

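These configuration files are plain XML with repeated <property> blocks, so the renamed audit settings can be read back with the standard library. A quick sketch (the local file path is assumed):

    import xml.etree.ElementTree as ET

    tree = ET.parse('ranger-yarn-audit.xml')  # assumed local copy
    for prop in tree.getroot().iter('property'):
        name = prop.findtext('name')
        value = prop.findtext('value')
        print('%s = %s' % (name, value))
    # e.g. xasecure.audit.destination.hdfs = true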
+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-plugin-properties.xml

@@ -22,7 +22,7 @@
   <property>
     <name>policy_user</name>
     <value>ambari-qa</value>
-    <display-name>Policy user for YARN</display-name>
+    <display-name>Policy user for FAKEYARN</display-name>
     <description>This user must be system user and also present at Ranger admin portal</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -47,7 +47,7 @@
   <property>
     <name>ranger-yarn-plugin-enabled</name>
     <value>No</value>
-    <display-name>Enable Ranger for YARN</display-name>
+    <display-name>Enable Ranger for FAKEYARN</display-name>
     <description>Enable ranger yarn plugin ?</description>
     <depends-on>
       <property>

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-policymgr-ssl.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-security.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/ranger-yarn-security.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-alert-config.xml


+ 56 - 56
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-env.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-env.xml

@@ -70,26 +70,26 @@
     <display-name>yarn-env template</display-name>
     <description>This is the jinja template for yarn-env.sh file</description>
     <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+      export HADOOP_FAKEYARN_HOME={{hadoop_yarn_home}}
+      export FAKEYARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+      export FAKEYARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
       export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
       export JAVA_HOME={{java64_home}}
       export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
 
       # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
+      # however, FAKEYARN_ROOT_LOGGER is shared by the yarn client and the
       # daemons. This is restrict the EWMA appender to daemons only.
       INVOKER="${0##*/}"
       if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+        export FAKEYARN_ROOT_LOGGER=${FAKEYARN_ROOT_LOGGER:-INFO,EWMA,RFA}
       fi
 
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+      # User for FAKEYARN daemons
+      export HADOOP_FAKEYARN_USER=${HADOOP_FAKEYARN_USER:-yarn}
 
       # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+      export FAKEYARN_CONF_DIR="${FAKEYARN_CONF_DIR:-$HADOOP_FAKEYARN_HOME/conf}"
 
       # some Java parameters
       # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
@@ -106,92 +106,92 @@
       JAVA=$JAVA_HOME/bin/java
       JAVA_HEAP_MAX=-Xmx1000m
 
-      # For setting YARN specific HEAP sizes please use this
+      # For setting FAKEYARN specific HEAP sizes please use this
       # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
+      FAKEYARN_HEAPSIZE={{yarn_heapsize}}
 
       # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+      if [ "$FAKEYARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$FAKEYARN_HEAPSIZE""m"
       fi
 
       # Resource Manager specific parameters
 
-      # Specify the max Heapsize for the ResourceManager using a numerical value
+      # Specify the max Heapsize for the FAKEResourceManager using a numerical value
       # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
       # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+      # This value will be overridden by an Xmx setting specified in either FAKEYARN_OPTS
+      # and/or FAKEYARN_FAKERESOURCEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either FAKEYARN_HEAPMAX
+      # or JAVA_HEAP_MAX with FAKEYARN_HEAPMAX as the preferred option of the two.
+      export FAKEYARN_FAKERESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
 
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
+      # Specify the JVM options to be used when starting the FAKEResourceManager.
+      # These options will be appended to the options specified as FAKEYARN_OPTS
+      # and therefore may override any similar flags set in FAKEYARN_OPTS
+      #export FAKEYARN_FAKERESOURCEMANAGER_OPTS=
 
       # Node Manager specific parameters
 
-      # Specify the max Heapsize for the NodeManager using a numerical value
+      # Specify the max Heapsize for the FAKENodeManager using a numerical value
       # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
       # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+      # This value will be overridden by an Xmx setting specified in either FAKEYARN_OPTS
+      # and/or FAKEYARN_FAKENODEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either FAKEYARN_HEAPMAX
+      # or JAVA_HEAP_MAX with FAKEYARN_HEAPMAX as the preferred option of the two.
+      export FAKEYARN_FAKENODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
 
       # Specify the max Heapsize for the timeline server using a numerical value
       # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
       # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+      # This value will be overridden by an Xmx setting specified in either FAKEYARN_OPTS
+      # and/or FAKEYARN_TIMELINESERVER_OPTS.
+      # If not specified, the default value will be picked from either FAKEYARN_HEAPMAX
+      # or JAVA_HEAP_MAX with FAKEYARN_HEAPMAX as the preferred option of the two.
+      export FAKEYARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
 
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
+      # Specify the JVM options to be used when starting the FAKENodeManager.
+      # These options will be appended to the options specified as FAKEYARN_OPTS
+      # and therefore may override any similar flags set in FAKEYARN_OPTS
+      #export FAKEYARN_FAKENODEMANAGER_OPTS=
 
       # so that filenames w/ spaces are handled correctly in loops below
       IFS=
 
 
       # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+      if [ "$FAKEYARN_LOG_DIR" = "" ]; then
+      FAKEYARN_LOG_DIR="$HADOOP_FAKEYARN_HOME/logs"
       fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
+      if [ "$FAKEYARN_LOGFILE" = "" ]; then
+      FAKEYARN_LOGFILE='yarn.log'
       fi
 
       # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
+      if [ "$FAKEYARN_POLICYFILE" = "" ]; then
+      FAKEYARN_POLICYFILE="hadoop-policy.xml"
       fi
 
       # restore ordinary behaviour
       unset IFS
 
 
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dhadoop.log.dir=$FAKEYARN_LOG_DIR"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.log.dir=$FAKEYARN_LOG_DIR"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dhadoop.log.file=$FAKEYARN_LOGFILE"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.log.file=$FAKEYARN_LOGFILE"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.home.dir=$FAKEYARN_COMMON_HOME"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.id.str=$FAKEYARN_IDENT_STRING"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dhadoop.root.logger=${FAKEYARN_ROOT_LOGGER:-INFO,console}"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.root.logger=${FAKEYARN_ROOT_LOGGER:-INFO,console}"
+      export FAKEYARN_FAKENODEMANAGER_OPTS="$FAKEYARN_FAKENODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+      export FAKEYARN_FAKERESOURCEMANAGER_OPTS="$FAKEYARN_FAKERESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
       if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
       fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Dyarn.policy.file=$FAKEYARN_POLICYFILE"
+      FAKEYARN_OPTS="$FAKEYARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
     </value>
     <value-attributes>
       <type>content</type>

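The renamed yarn-env template keeps Hadoop's usual heap-size precedence: FAKEYARN_HEAPSIZE, when set, overrides the JAVA_HEAP_MAX default, and an explicit -Xmx in the *_OPTS variables would override both. The core of that shell logic, transliterated (values invented):

    def resolve_heap_flag(yarn_heapsize=None, default_mb=1000):
        # Mirrors: if [ "$FAKEYARN_HEAPSIZE" != "" ]; then
        #            JAVA_HEAP_MAX="-Xmx""$FAKEYARN_HEAPSIZE""m"; fi
        if yarn_heapsize:
            return '-Xmx%sm' % yarn_heapsize
        return '-Xmx%sm' % default_mb

    print(resolve_heap_flag(2048))  # -Xmx2048m
    print(resolve_heap_flag())      # -Xmx1000m (JAVA_HEAP_MAX default)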
+ 5 - 5
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-log4j.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-log4j.xml

@@ -37,9 +37,9 @@ yarn.log.dir=.
 hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
 hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
 log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
+# Set the FAKEResourceManager summary log filename
 yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
+# Set the FAKEResourceManager summary log level and appender
 yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
 #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
@@ -47,7 +47,7 @@ yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
 # set yarn.server.resourcemanager.appsummary.logger to
 # LEVEL,RMSUMMARY in hadoop-env.sh
 
-# Appender for ResourceManager Application Summary Log
+# Appender for FAKEResourceManager Application Summary Log
 # Requires the following properties to be set
 #    - hadoop.log.dir (Hadoop Log directory)
 #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -74,7 +74,7 @@ log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
-# Audit logging for ResourceManager
+# Audit logging for FAKEResourceManager
 rm.audit.logger=${hadoop.root.logger}
 log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
 log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
@@ -84,7 +84,7 @@ log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
 log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
 
-# Audit logging for NodeManager
+# Audit logging for FAKENodeManager
 nm.audit.logger=${hadoop.root.logger}
 log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
 log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false

+ 21 - 37
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/configuration/yarn-site.xml

@@ -96,14 +96,6 @@
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <property>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-    <description>
-      List of hostname:port pairs defining the zookeeper quorum binding for the registry
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
   <property>
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
@@ -144,7 +136,7 @@
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
     <value>true</value>
     <description>
-      Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature.
+      Enable RM work preserving recovery. This configuration is private to FAKEYARN for experimenting with the feature.
     </description>
     <display-name>Enable Work Preserving Restart</display-name>
     <value-attributes>
@@ -158,29 +150,21 @@
     <description>
       The class to use as the persistent store.
       If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-      the store is implicitly fenced; meaning a single ResourceManager
+      the store is implicitly fenced; meaning a single FAKEResourceManager
       is able to use the store at any point in time.
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <property>
-    <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
-    <description>
-      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
   <property>
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
-    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <description>Full path of the FAKEZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda</value>
-    <description>ACL's to be used for ZooKeeper znodes.</description>
+    <description>ACLs to be used for FAKEZooKeeper znodes.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -192,19 +176,19 @@
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
-    <description>How often to try connecting to the ResourceManager.</description>
+    <description>How often to try connecting to the FAKEResourceManager.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
-    <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <description>Maximum time to wait to establish connection to FAKEResourceManager</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
     <value>1000</value>
-    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+    <description>"Retry interval in milliseconds when connecting to FAKEZooKeeper.
       When HA is enabled, the value here is NOT used. It is generated
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
@@ -214,13 +198,13 @@
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
-    <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <description>Number of times RM tries to connect to FAKEZooKeeper.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
-    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <description>FAKEZooKeeper session timeout in milliseconds. Session expiration is managed by the FAKEZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -262,7 +246,7 @@
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>hadoop-yarn</value>
-    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <description>Name of the Cgroups hierarchy under which all FAKEYARN jobs will be launched</description>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -274,7 +258,7 @@
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
+    <description>If true, FAKEYARN will automount the CGroup; however, the directory needs to already exist, else the cgroup should be mounted by the admin</description>
+    <description>If true, FAKEYARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -286,7 +270,7 @@
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
     <value>/cgroup</value>
-    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
+    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the FAKENodeManager is launched.</description>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -307,7 +291,7 @@
     <description>Number of vcores that can be allocated
       for containers. This is used by the RM scheduler when allocating
       resources for containers. This is not used to limit the number of
-      CPUs used by YARN containers. If it is set to -1 and
+      CPUs used by FAKEYARN containers. If it is set to -1 and
       yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
       automatically determined from the hardware in case of Windows and Linux.
       In other cases, number of vcores is 8 by default.
@@ -329,7 +313,7 @@
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
     <value>80</value>
-    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <description>The amount of CPU allocated for FAKEYARN containers - only effective when used with CGroups</description>
     <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
     <value-attributes>
       <type>int</type>
@@ -553,7 +537,7 @@
     <name>yarn.node-labels.enabled</name>
     <value>false</value>
     <description>
-      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+      Enable node labels to restrict FAKEYARN applications so that they run only on cluster nodes that have a specified node label.
     </description>
     <display-name>Node Labels</display-name>
     <value-attributes>
@@ -591,7 +575,7 @@
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
     <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
+    <description>Unix group of the FAKENodeManager</description>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -666,7 +650,7 @@
   <property>
     <name>yarn.admin.acl</name>
     <value>yarn</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
+    <description> ACL of who can be admin of the FAKEYARN cluster. </description>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -682,7 +666,7 @@
   <property>
     <name>yarn.timeline-service.store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
-    <description>Main storage class for YARN timeline server.</description>
+    <description>Main storage class for FAKEYARN timeline server.</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -718,7 +702,7 @@
     <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
     <description>
       Scan interval for ATS v1.5 entity group file system storage reader.This
-      value controls how frequent the reader will scan the HDFS active directory
+      value controls how frequently the reader will scan the FAKEHDFS active directory
       for application status.
     </description>
     <!-- Default is 60 seconds, advanced -->
@@ -729,7 +713,7 @@
     <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
     <description>
       Scan interval for ATS v1.5 entity group file system storage cleaner.This
-      value controls how frequent the reader will scan the HDFS done directory
+      value controls how frequently the reader will scan the FAKEHDFS done directory
       for stale application data.
     </description>
     <!-- 3600 is default, advanced -->
@@ -752,7 +736,7 @@
   <property>
     <name>yarn.nodemanager.aux-services</name>
     <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-    <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
+    <description>Auxiliary services of FAKENodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>

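One detail worth noting in the yarn-site descriptions above: under HA, yarn.resourcemanager.zk-retry-interval-ms is ignored and derived from the session timeout and retry count instead. With the defaults in this hunk, the documented relationship works out as below (a sketch; the exact derivation lives in the FAKEResourceManager code):

    zk_timeout_ms = 10000.0  # yarn.resourcemanager.zk-timeout-ms
    zk_num_retries = 1000    # yarn.resourcemanager.zk-num-retries

    # Pick an interval so that all retries fit within one session timeout.
    retry_interval_ms = zk_timeout_ms / zk_num_retries
    print(retry_interval_ms)  # 10.0 ms between FAKEZooKeeper connection attempts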
+ 9 - 9
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/kerberos.json

@@ -1,7 +1,7 @@
 {
   "services": [
     {
-      "name": "YARN",
+      "name": "FAKEYARN",
       "identities": [
         {
           "name": "/spnego"
@@ -63,7 +63,7 @@
       ],
       "components": [
         {
-          "name": "NODEMANAGER",
+          "name": "FAKENODEMANAGER",
           "identities": [
             {
               "name": "nodemanager_nm",
@@ -140,7 +140,7 @@
           ]
         },
         {
-          "name": "RESOURCEMANAGER",
+          "name": "FAKERESOURCEMANAGER",
           "identities": [
             {
               "name": "resource_manager_rm",
@@ -173,7 +173,7 @@
               }
             },
             {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "name": "/FAKEYARN/FAKERESOURCEMANAGER/resource_manager_rm",
               "principal": {
                 "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
               },
@@ -184,7 +184,7 @@
           ]
         },
         {
-          "name": "APP_TIMELINE_SERVER",
+          "name": "FAKEAPP_TIMELINE_SERVER",
           "identities": [
             {
               "name": "app_timeline_server_yarn",
@@ -217,14 +217,14 @@
               }
             },
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             }
           ]
         }
       ]
     },
     {
-      "name": "MAPREDUCE2",
+      "name": "FAKEMAPREDUCE2",
       "identities": [
         {
           "name": "/spnego"
@@ -235,10 +235,10 @@
       ],
       "components": [
         {
-          "name": "HISTORYSERVER",
+          "name": "FAKEHISTORYSERVER",
           "identities": [
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             },
             {
               "name": "history_server_jhs",

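The kerberos.json rename follows the same pattern as the other descriptors: service and component names change, and cross-service identity references such as /FAKEHDFS/FAKENAMENODE/hdfs are updated to match. A small sketch that lists component-level identities from such a file (the path is assumed):

    import json

    with open('kerberos.json') as fh:
        doc = json.load(fh)

    for service in doc['services']:
        for component in service.get('components', []):
            names = [i['name'] for i in component.get('identities', [])]
            print('%s / %s: %s' % (service['name'], component['name'], names))
    # e.g. FAKEYARN / FAKENODEMANAGER: ['nodemanager_nm', ...]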
+ 35 - 33
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/metainfo.xml

@@ -20,9 +20,9 @@
   <schemaVersion>2.0</schemaVersion>
   <services>
     <service>
-      <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <name>FAKEYARN</name>
+      <displayName>FAKEYARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (FAKEYARN)</comment>
       <version>2.7.1.2.5</version>
 
       <quickLinksConfigurations>
@@ -35,11 +35,12 @@
       <components>
 
         <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
+          <name>FAKEAPP_TIMELINE_SERVER</name>
+          <displayName>FAKEApp Timeline Server</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
           <reassignAllowed>true</reassignAllowed>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/application_timeline_server.py</script>
             <scriptType>PYTHON</scriptType>
@@ -67,10 +68,11 @@
         </component>
 
         <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
+          <name>FAKERESOURCEMANAGER</name>
+          <displayName>FAKEResourceManager</displayName>
           <category>MASTER</category>
           <cardinality>1-2</cardinality>
+          <versionAdvertised>false</versionAdvertised>
 
           <commandScript>
             <script>scripts/resourcemanager.py</script>
@@ -129,11 +131,11 @@
         </component>
 
         <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
+          <name>FAKENODEMANAGER</name>
+          <displayName>FAKENodeManager</displayName>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <decommissionAllowed>true</decommissionAllowed>
           <commandScript>
             <script>scripts/nodemanager.py</script>
@@ -141,9 +143,9 @@
             <timeout>1200</timeout>
           </commandScript>
           <bulkCommands>
-            <displayName>NodeManagers</displayName>
+            <displayName>FAKENodeManagers</displayName>
             <!-- Used by decommission and recommission -->
-            <masterComponent>RESOURCEMANAGER</masterComponent>
+            <masterComponent>FAKERESOURCEMANAGER</masterComponent>
           </bulkCommands>
           <logs>
             <log>
@@ -153,11 +155,11 @@
         </component>
 
         <component>
-          <name>YARN_CLIENT</name>
-          <displayName>YARN Client</displayName>
+          <name>FAKEYARN_CLIENT</name>
+          <displayName>FAKEYARN Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/yarn_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -208,8 +210,8 @@
         <config-type>ranger-yarn-policymgr-ssl</config-type>
         <config-type>ranger-yarn-security</config-type>
       </configuration-dependencies>
-      <widgetsFileName>YARN_widgets.json</widgetsFileName>
-      <metricsFileName>YARN_metrics.json</metricsFileName>
+      <widgetsFileName>FAKEYARN_widgets.json</widgetsFileName>
+      <metricsFileName>FAKEYARN_metrics.json</metricsFileName>
 
       <commandScript>
         <script>scripts/service_check.py</script>
@@ -218,8 +220,8 @@
       </commandScript>
 
       <requiredServices>
-        <service>HDFS</service>
-        <service>MAPREDUCE2</service>
+        <service>FAKEHDFS</service>
+        <service>FAKEMAPREDUCE2</service>
       </requiredServices>
 
       <!-- No packages to install. -->
@@ -227,27 +229,27 @@
     </service>
 
     <service>
-      <name>MAPREDUCE2</name>
+      <name>FAKEMAPREDUCE2</name>
       <version>2.7.1.2.5</version>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <displayName>FAKEMapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (FAKEYARN)</comment>
       <configuration-dir>configuration-mapred</configuration-dir>
 
       <components>
         <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
+          <name>FAKEHISTORYSERVER</name>
+          <displayName>FAKEHistory Server</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <auto-deploy>
             <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+            <co-locate>FAKEYARN/FAKERESOURCEMANAGER</co-locate>
           </auto-deploy>
           <dependencies>
             <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
@@ -285,11 +287,11 @@
         </component>
 
         <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
+          <name>FAKEMAPREDUCE2_CLIENT</name>
+          <displayName>FAKEMapReduce2 Client</displayName>
           <category>CLIENT</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/mapreduce2_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -322,7 +324,7 @@
       </commandScript>
 
       <requiredServices>
-        <service>YARN</service>
+        <service>FAKEYARN</service>
       </requiredServices>
 
       <configuration-dependencies>
@@ -337,8 +339,8 @@
         <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
-      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
-      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
+      <widgetsFileName>FAKEMAPREDUCE2_widgets.json</widgetsFileName>
+      <metricsFileName>FAKEMAPREDUCE2_metrics.json</metricsFileName>
 
       <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
       <quickLinksConfigurations>

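Besides the renames, the substantive metainfo.xml change is the repeated flip of `versionAdvertised` from `true` to `false`, presumably because the dummy components install no packages (note the "No packages to install" comment) and so cannot report a real version during rolling or express upgrades. A small audit sketch; `components_advertising_versions` is a hypothetical helper, not part of this commit:

```python
import xml.etree.ElementTree as ET

# Hypothetical audit: every PERF component should declare
# <versionAdvertised>false</versionAdvertised>, since the dummy scripts
# install nothing and cannot report a real version to upgrade logic.
def components_advertising_versions(metainfo_path):
    offenders = []
    for component in ET.parse(metainfo_path).getroot().iter("component"):
        if component.findtext("versionAdvertised", "false").strip() == "true":
            offenders.append(component.findtext("name"))
    return offenders

# Usage: components_advertising_versions("metainfo.xml") should return [].
```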
+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_history_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_history_process.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_nodemanager_health.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_resourcemanager_process.py


+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/alerts/alert_timeline_process.py


+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/application_timeline_server.py

@@ -32,7 +32,7 @@ class ApplicationTimelineServer(Dummy):
 
   def __init__(self):
     super(ApplicationTimelineServer, self).__init__()
-    self.component_name = "APP_TIMELINE_SERVER"
+    self.component_name = "FAKEAPP_TIMELINE_SERVER"
     self.principal_conf_name = "yarn-site"
     self.principal_name = "yarn.timeline-service.principal"
     self.keytab_conf_name = "yarn-site"

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/historyserver.py

@@ -32,7 +32,7 @@ class HistoryServer(Dummy):
 
   def __init__(self):
     super(HistoryServer, self).__init__()
-    self.component_name = "HISTORYSERVER"
+    self.component_name = "FAKEHISTORYSERVER"
     self.principal_conf_name = "mapred-site"
     self.principal_name = "mapreduce.jobhistory.principal"
     self.keytab_conf_name = "mapred-site"

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/mapred_service_check.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/mapred_service_check.py


+ 4 - 4
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/mapreduce2_client.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/mapreduce2_client.py

@@ -25,14 +25,14 @@ Ambari Agent
 from resource_management.libraries.script.dummy import Dummy
 
 
-class MapReduce2Client(Dummy):
+class FAKEMapReduce2Client(Dummy):
   """
   Dummy script that simulates a client component.
   """
 
   def __init__(self):
-    super(MapReduce2Client, self).__init__()
-    self.component_name = "MAPREDUCE2_CLIENT"
+    super(FAKEMapReduce2Client, self).__init__()
+    self.component_name = "FAKEMAPREDUCE2_CLIENT"
 
 if __name__ == "__main__":
-  MapReduce2Client().execute()
+  FAKEMapReduce2Client().execute()

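Every script in this package follows the same shape: subclass `Dummy` (imported above from `resource_management.libraries.script.dummy`), set `component_name`, and call `execute()`. The stand-in below is illustrative only, a guess at the contract the subclasses rely on; the real base class ships with this commit and is not shown here:

```python
import sys

# A minimal, illustrative stand-in for the Dummy base class: no-op lifecycle
# commands that merely log, dispatched by execute().
class Dummy(object):
    def __init__(self):
        self.component_name = None        # set by every subclass
        self.principal_conf_name = None   # optional Kerberos wiring, set by
        self.principal_name = None        # master/slave components only
        self.keytab_conf_name = None

    def install(self):
        print("INSTALL %s (no packages to install)" % self.component_name)

    def start(self):
        print("START %s" % self.component_name)

    def stop(self):
        print("STOP %s" % self.component_name)

    def status(self):
        print("STATUS %s" % self.component_name)

    def execute(self):
        # Dispatch the command named on the command line, e.g. `script.py start`.
        command = sys.argv[1] if len(sys.argv) > 1 else "status"
        getattr(self, command)()
```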
+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/nodemanager.py

@@ -32,7 +32,7 @@ class Nodemanager(Dummy):
 
   def __init__(self):
     super(Nodemanager, self).__init__()
-    self.component_name = "NODEMANAGER"
+    self.component_name = "FAKENODEMANAGER"
     self.principal_conf_name = "yarn-site"
     self.principal_name = "yarn.nodemanager.principal"
     self.keytab_conf_name = "yarn-site"

+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/resourcemanager.py

@@ -32,7 +32,7 @@ class Resourcemanager(Dummy):
 
   def __init__(self):
     super(Resourcemanager, self).__init__()
-    self.component_name = "RESOURCEMANAGER"
+    self.component_name = "FAKERESOURCEMANAGER"
     self.principal_conf_name = "yarn-site"
     self.principal_name = "yarn.resourcemanager.principal"
     self.keytab_conf_name = "yarn-site"

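The master and slave scripts additionally point a Kerberos principal at a property inside a config type via `principal_conf_name` / `principal_name`; the hunks above are cut off right after `keytab_conf_name`, so the keytab side is not shown. A hedged sketch of how such attributes could be resolved, assuming configurations parsed into a `{config-type: {property: value}}` dict (the shape is an assumption, not taken from this commit):

```python
# Hypothetical lookup against configurations shaped as
# {config-type: {property: value}}, the way *-site files are usually exposed.
def resolve_principal(component, configurations):
    return configurations[component.principal_conf_name][component.principal_name]

class _RM(object):  # stands in for the Resourcemanager class above
    principal_conf_name = "yarn-site"
    principal_name = "yarn.resourcemanager.principal"

configs = {"yarn-site": {"yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM"}}
print(resolve_principal(_RM(), configs))  # rm/_HOST@EXAMPLE.COM
```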
+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/service_check.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/service_check.py


+ 1 - 1
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/yarn_client.py → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/yarn_client.py

@@ -32,7 +32,7 @@ class YarnClient(Dummy):
 
   def __init__(self):
     super(YarnClient, self).__init__()
-    self.component_name = "YARN_CLIENT"
+    self.component_name = "FAKEYARN_CLIENT"
 
 if __name__ == "__main__":
   YarnClient().execute()

+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/quicklinks-mapred/quicklinks.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/quicklinks-mapred/quicklinks.json


+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/quicklinks/quicklinks.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/quicklinks/quicklinks.json

@@ -17,7 +17,7 @@
     "links": [
       {
         "name": "resourcemanager_ui",
-        "label": "ResourceManager UI",
+        "label": "FAKEResourceManager UI",
         "requires_user_name": "false",
         "url": "%@://%@:%@",
         "port":{
@@ -31,7 +31,7 @@
       },
       {
         "name": "resourcemanager_logs",
-        "label": "ResourceManager logs",
+        "label": "FAKEResourceManager logs",
         "requires_user_name": "false",
         "url": "%@://%@:%@/logs",
         "port":{
@@ -45,7 +45,7 @@
       },
       {
         "name": "resourcemanager_jmx",
-        "label":"ResourceManager JMX",
+        "label":"FAKEResourceManager JMX",
         "requires_user_name": "false",
         "url":"%@://%@:%@/jmx",
         "port":{

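The quicklink URLs use the template `"%@://%@:%@"`, presumably filled positionally with protocol, host, and port by the Ambari web UI. A minimal sketch of that substitution; `render_quicklink` is illustrative, not an Ambari API:

```python
# Fill each "%@" placeholder in order with protocol, host, and port.
def render_quicklink(template, protocol, host, port):
    result = template
    for value in (protocol, host, str(port)):
        result = result.replace("%@", value, 1)
    return result

print(render_quicklink("%@://%@:%@/jmx", "http", "perf-host-1", 8088))
# http://perf-host-1:8088/jmx
```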
+ 0 - 0
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/themes-mapred/theme.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/themes-mapred/theme.json


+ 2 - 2
ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/themes/theme.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/themes/theme.json

@@ -1,6 +1,6 @@
 {
   "name": "default",
-  "description": "Default theme for YARN service",
+  "description": "Default theme for FAKEYARN service",
   "configuration": {
     "layouts": [
       {
@@ -43,7 +43,7 @@
                 },
                 {
                   "name": "section-yarn-platform-features",
-                  "display-name": "YARN Features",
+                  "display-name": "FAKEYARN Features",
                   "row-index": "0",
                   "column-index": "2",
                   "row-span": "1",

+ 3 - 3
ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/alerts.json → ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEZOOKEEPER/alerts.json

@@ -1,7 +1,7 @@
 {
-    "ZOOKEEPER": {
+    "FAKEZOOKEEPER": {
 
-        "ZOOKEEPER_SERVER": [
+        "FAKEZOOKEEPER_SERVER": [
             {
                 "name": "zookeeper_server_process",
                 "label": "Zookeeper server Process",
@@ -11,7 +11,7 @@
                 "enabled": true,
                 "source": {
                     "type": "SCRIPT",
-                    "path": "PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py",
+                    "path": "PERF/1.0/services/FAKEZOOKEEPER/package/alerts/alert_zk_server_process.py",
                     "parameters": []
                 }
             }

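The alert definition points at a SCRIPT source, `alert_zk_server_process.py`. A minimal sketch of the interface such alert scripts implement, where Ambari calls `execute()` and expects a `(result_code, [text])` tuple with a code of OK, WARNING, CRITICAL, or UNKNOWN; the trivially healthy body is illustrative only:

```python
# Sketch of the SCRIPT-type alert contract assumed by the definition above.
def execute(configurations={}, parameters=[], host_name=None):
    """Return the alert state for the simulated FAKEZOOKEEPER server."""
    return ("OK", ["Zookeeper server process is running (simulated)"])
```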