
Merge trunk to ambari-rest-api-explorer branch. (jaimin)

Jaimin Jetly, 8 years ago
commit 3acd2e6da4
100 changed files with 895 additions and 506 deletions
  1. 2 2
      ambari-admin/src/main/resources/ui/admin-web/app/index.html
  2. 9 1
      ambari-agent/pom.xml
  3. 95 114
      ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
  4. 3 2
      ambari-agent/src/main/python/ambari_agent/main.py
  5. 39 0
      ambari-agent/src/test/python/resource_management/TestUtils.py
  6. 2 0
      ambari-common/src/main/python/ambari_commons/network.py
  7. 1 0
      ambari-common/src/main/python/ambari_commons/resources/os_family.json
  8. 44 14
      ambari-common/src/main/python/resource_management/core/sudo.py
  9. 55 1
      ambari-common/src/main/python/resource_management/core/utils.py
  10. 23 1
      ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
  11. 14 2
      ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
  12. 4 3
      ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
  13. 15 1
      ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
  14. 5 3
      ambari-common/src/main/python/resource_management/libraries/script/script.py
  15. 5 0
      ambari-infra/ambari-infra-solr-client/pom.xml
  16. 16 1
      ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
  17. 9 0
      ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
  18. 44 0
      ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/UnsecureZNodeZkCommand.java
  19. 0 1
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/converter/FieldAuditLogRequestQueryConverter.java
  20. 54 0
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/converter/TopFieldAuditLogRequestQueryConverter.java
  21. 2 1
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
  22. 1 16
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/request/impl/FieldAuditLogRequest.java
  23. 40 0
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/request/impl/TopFieldAuditLogRequest.java
  24. 2 2
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
  25. 1 1
      ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
  26. 1 2
      ambari-logsearch/ambari-logsearch-server/src/test/java/org/apache/ambari/logsearch/converter/FieldAuditLogRequestQueryConverterTest.java
  27. 61 0
      ambari-logsearch/ambari-logsearch-server/src/test/java/org/apache/ambari/logsearch/converter/TopFieldAuditLogRequestQueryConverterTest.java
  28. 9 8
      ambari-logsearch/ambari-logsearch-web/package.json
  29. 3 3
      ambari-logsearch/ambari-logsearch-web/src/main/webapp/scripts/views/common/Header.js
  30. 2 1
      ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
  31. 15 16
      ambari-server/src/main/java/org/apache/ambari/server/KdcServerConnectionVerification.java
  32. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/Role.java
  33. 9 7
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
  34. 16 11
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
  35. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
  36. 77 101
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
  37. 26 0
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
  38. 5 5
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
  39. 22 22
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
  40. 6 6
      ambari-server/src/main/java/org/apache/ambari/server/agent/ActionQueue.java
  41. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
  42. 5 5
      ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
  43. 3 3
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeat.java
  44. 7 7
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
  45. 5 5
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
  46. 3 3
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
  47. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
  48. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/agent/HostInfo.java
  49. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/agent/RecoveryReport.java
  50. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationResponse.java
  51. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
  52. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java
  53. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/alerts/StaleAlertRunnable.java
  54. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/AmbariCsrfProtectionFilter.java
  55. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/AmbariErrorHandler.java
  56. 3 3
      ambari-server/src/main/java/org/apache/ambari/server/api/handlers/QueryCreateHandler.java
  57. 12 12
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java
  58. 4 4
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java
  59. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java
  60. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java
  61. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java
  62. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java
  63. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java
  64. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
  65. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java
  66. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java
  67. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java
  68. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/query/ExtendedResourcePredicateVisitor.java
  69. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
  70. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
  71. 3 3
      ambari-server/src/main/java/org/apache/ambari/server/api/query/ProcessingPredicateVisitor.java
  72. 30 30
      ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
  73. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryInfo.java
  74. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/query/SubResourcePredicateVisitor.java
  75. 3 3
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/AlertSummaryGroupedRenderer.java
  76. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/AlertSummaryRenderer.java
  77. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/BaseRenderer.java
  78. 18 18
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
  79. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/DefaultRenderer.java
  80. 5 5
      ambari-server/src/main/java/org/apache/ambari/server/api/query/render/MinimalRenderer.java
  81. 4 4
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
  82. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
  83. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ComponentStackVersionResourceDefinition.java
  84. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionLinkResourceDefinition.java
  85. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionResourceDefinition.java
  86. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionVersionResourceDefinition.java
  87. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/FeedResourceDefinition.java
  88. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/GroupResourceDefinition.java
  89. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostComponentResourceDefinition.java
  90. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/JobResourceDefinition.java
  91. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/PermissionResourceDefinition.java
  92. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
  93. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/RootServiceHostComponentResourceDefinition.java
  94. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/RootServiceResourceDefinition.java
  95. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ServiceResourceDefinition.java
  96. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackConfigurationResourceDefinition.java
  97. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackLevelConfigurationResourceDefinition.java
  98. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackResourceDefinition.java
  99. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackServiceResourceDefinition.java
  100. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinition.java

+ 2 - 2
ambari-admin/src/main/resources/ui/admin-web/app/index.html

@@ -48,8 +48,8 @@
   <header class="navbar navbar-static-top navbar-inverse">
     <div class="navbar-inner">
       <div class="container">
-        <a href="{{fromSiteRoot('/#/main/dashboard')}}" class="logo"><img src="/img/ambari-logo.png" alt="{{'common.apacheAmbari' | translate}}" title="{{'common.apacheAmbari' | translate}}"></a>
-        <a href="{{fromSiteRoot('/#/main/dashboard')}}" class="brand" title="{{'common.apacheAmbari' | translate}}">{{'common.ambari' | translate}}</a>
+        <a href="{{fromSiteRoot('/#/main/dashboard')}}" class="logo"><img src="/img/ambari-logo.png" alt="{{'common.apacheAmbari' | translate}}" title="{{'common.apacheAmbari' | translate}}" data-qa="ambari-logo"></a>
+        <a href="{{fromSiteRoot('/#/main/dashboard')}}" class="brand" title="{{'common.apacheAmbari' | translate}}" data-qa="ambari-title">{{'common.ambari' | translate}}</a>
         <ul class="nav navbar-nav navbar-right">
           <li>
             <div class="btn-group navbar-views-dropdown" dropdown is-open="viewsdropdown.isopen" ng-mouseover="viewsdropdown.isopen=true" ng-mouseout="viewsdropdown.isopen=false">

+ 9 - 1
ambari-agent/pom.xml

@@ -327,7 +327,7 @@
                   <location>${project.build.directory}${dirsep}${project.artifactId}-${project.version}/var/lib/ambari-agent</location>
                   <includes>
                     <include>/cred/lib/*.jar</include>
-                    <include>/tools/zkmigrator.jar</include>
+                    <include>/tools/*.jar</include>
                     <include>/cache/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
                     <include>/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
                     <include>/cache/common-services/STORM/0.9.1/package/files/wordCount.jar</include>
@@ -424,6 +424,14 @@
               <goal>shade</goal>
             </goals>
             <configuration>
+              <minimizeJar>true</minimizeJar>
+              <artifactSet>
+                <includes>
+                  <include>commons-cli:commons-cli</include>
+                  <include>org.slf4j:*</include>
+                  <include>log4j:*</include>
+                </includes>
+              </artifactSet>
               <transformers>
                 <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                   <mainClass>org.apache.ambari.tools.jce.JcePolicyInfo</mainClass>

+ 95 - 114
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py

@@ -49,7 +49,7 @@ class SingleProcessStatusCommandsExecutor(StatusCommandsExecutor):
     self.config = config
     self.actionQueue = actionQueue
     self.statusCommandQueue = Queue.Queue()
-    self.need_relaunch = False
+    self.need_relaunch = (False, None)  # tuple (bool, str|None): relaunch flag and the reason for the relaunch
 
   def put_commands(self, commands):
     with self.statusCommandQueue.mutex:
@@ -88,12 +88,13 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
     self.config = config
     self.actionQueue = actionQueue
 
-    self._can_relaunch_lock = threading.RLock()
-    self._can_relaunch = True
+    self.can_relaunch = True
 
    # used to prevent the queues from being used while new ones are created, so that threads do not mix
    # old and new queues
     self.usage_lock = threading.RLock()
+    # protects against simultaneous killing/creating from different threads.
+    self.kill_lock = threading.RLock()
 
     self.status_command_timeout = int(self.config.get('agent', 'status_command_timeout', 5))
     self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
@@ -107,42 +108,32 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
     self.mp_result_logs = multiprocessing.Queue()
     self.mp_task_queue = multiprocessing.Queue()
 
-  @property
-  def can_relaunch(self):
-    with self._can_relaunch_lock:
-      return self._can_relaunch
-
-  @can_relaunch.setter
-  def can_relaunch(self, value):
-    with self._can_relaunch_lock:
-      self._can_relaunch = value
-
-  def _log_message(self, level, message, exception=None):
-    """
-    Put log message to logging queue. Must be used only for logging from child process(in _worker_process_target).
-
-    :param level:
-    :param message:
-    :param exception:
-    :return:
+  def _drain_queue(self, target_queue, max_time=5, max_empty_count=15, read_break=.001):
     """
-    result_message = "StatusCommandExecutor reporting at {0}: ".format(time.time()) + message
-    self.mp_result_logs.put((level, result_message, exception))
-
-  def _get_log_messages(self):
-    """
-    Returns list of (level, message, exception) log messages.
-
-    :return: list of (level, message, exception)
+    Read everything currently available in the queue. Relies on the unreliable multiprocessing.Queue methods (qsize,
+    empty), so it contains a crude guard against blocking in this method for too long: it will try to get all available
+    items for no more than ``max_time`` seconds, and will return after ``max_empty_count`` consecutive calls of
+    ``target_queue.get(False)`` that raised ``Queue.Empty``. Note the ``read_break`` argument: with the default values
+    this method can read ~4500 ``range(1,10000)`` objects in 5 seconds, so don't fill the queue too fast.
+
+    :param target_queue: queue to read from
+    :param max_time: maximum time to spend in this method call
+    :param max_empty_count: maximum allowed ``Queue.Empty`` in a row
+    :param read_break: time to wait before next read cycle iteration
+    :return: list of resulting objects
     """
     results = []
+    _empty = 0
+    _start = time.time()
     with self.usage_lock:
       try:
-        while not self.mp_result_logs.empty():
+        while (not target_queue.empty() or target_queue.qsize() > 0) and time.time() - _start < max_time and _empty < max_empty_count:
           try:
-            results.append(self.mp_result_logs.get(False))
+            results.append(target_queue.get(False))
+            _empty = 0
+            time.sleep(read_break) # sleep a little to get more accurate empty and qsize results
           except Queue.Empty:
-            pass
+            _empty += 1
           except IOError:
             pass
           except UnicodeDecodeError:
@@ -151,11 +142,23 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
         pass
     return results
 
+  def _log_message(self, level, message, exception=None):
+    """
+    Put a log message on the logging queue. Must only be used for logging from the child process (in _worker_process_target).
+
+    :param level:
+    :param message:
+    :param exception:
+    :return:
+    """
+    result_message = "StatusCommandExecutor reporting at {0}: ".format(time.time()) + message
+    self.mp_result_logs.put((level, result_message, exception))
+
   def _process_logs(self):
     """
    Fetch all log messages available at this moment and print them to the logger.
     """
-    for level, message, exception in self._get_log_messages():
+    for level, message, exception in self._drain_queue(self.mp_result_logs):
       if level == logging.ERROR:
         logger.debug(message, exc_info=exception)
       if level == logging.WARN:
@@ -256,16 +259,6 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
     :return:
     """
     with self.usage_lock:
-      if not self.mp_task_queue.empty():
-        status_command_queue_size = 0
-        try:
-          while not self.mp_task_queue.empty():
-            self.mp_task_queue.get(False)
-            status_command_queue_size += 1
-        except Queue.Empty:
-          pass
-
-        logger.info("Number of status commands removed from queue : " + str(status_command_queue_size))
       for command in commands:
         logger.info("Adding " + command['commandType'] + " for component " + \
                     command['componentName'] + " of service " + \
@@ -276,43 +269,29 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
 
   def process_results(self):
     """
-    Process all the results from the internal worker
+    Process all the results from the SCE worker process.
     """
     self._process_logs()
-    for result in self._get_results():
+    results = self._drain_queue(self.mp_result_queue)
+    logger.debug("Drained %s status commands results, ~%s remains in queue", len(results), self.mp_result_queue.qsize())
+    for result in results:
       try:
         self.actionQueue.process_status_command_result(result)
       except UnicodeDecodeError:
         pass
 
-  def _get_results(self):
-    """
-    Get all available results for status commands.
-
-    :return: list of results
-    """
-    results = []
-    with self.usage_lock:
-      try:
-        while not self.mp_result_queue.empty():
-          try:
-            results.append(self.mp_result_queue.get(False))
-          except Queue.Empty:
-            pass
-          except IOError:
-            pass
-          except UnicodeDecodeError:
-            pass
-      except IOError:
-        pass
-    return results
-
   @property
   def need_relaunch(self):
     """
    Indicates whether the process needs to be relaunched because it timed out, is dead, or was never created.
+
+    :return: tuple (bool, str|None) with the relaunch flag and the reason for the relaunch
     """
-    return self.timedOutEvent.is_set() or not self.worker_process or not self.worker_process.is_alive()
+    if not self.worker_process or not self.worker_process.is_alive():
+      return True, "WORKER_DEAD"
+    elif self.timedOutEvent.is_set():
+      return True, "COMMAND_TIMEOUT"
+    return False, None
 
   def relaunch(self, reason=None):
     """
@@ -321,13 +300,15 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
     :param reason: reason of restart
     :return:
     """
-    if self.can_relaunch:
-      self.kill(reason)
-      self.worker_process = multiprocessing.Process(target=self._worker_process_target)
-      self.worker_process.start()
-      logger.info("Started process with pid {0}".format(self.worker_process.pid))
-    else:
-      logger.debug("Relaunch does not allowed, can not relaunch")
+    with self.kill_lock:
+      logger.info("Relaunching child process reason:" + str(reason))
+      if self.can_relaunch:
+        self.kill(reason)
+        self.worker_process = multiprocessing.Process(target=self._worker_process_target)
+        self.worker_process.start()
+        logger.info("Started process with pid {0}".format(self.worker_process.pid))
+      else:
+        logger.debug("Relaunch does not allowed, can not relaunch")
 
   def kill(self, reason=None, can_relaunch=True):
     """
@@ -339,43 +320,43 @@ class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
     :param reason: reason of killing
     :return:
     """
-    logger.info("Killing child process reason:" + str(reason))
-    self.can_relaunch = can_relaunch
-
-    if not self.can_relaunch:
-      logger.info("Killing without possibility to relaunch...")
-
-    # try graceful stop, otherwise hard-kill
-    if self.worker_process and self.worker_process.is_alive():
-      self.mustDieEvent.set()
-      self.worker_process.join(timeout=3)
-      if self.worker_process.is_alive():
-        os.kill(self.worker_process.pid, signal.SIGKILL)
-        logger.info("Child process killed by -9")
+    with self.kill_lock:
+      self.can_relaunch = can_relaunch
+
+      if not self.can_relaunch:
+        logger.info("Killing without possibility to relaunch...")
+
+      # try graceful stop, otherwise hard-kill
+      if self.worker_process and self.worker_process.is_alive():
+        self.mustDieEvent.set()
+        self.worker_process.join(timeout=3)
+        if self.worker_process.is_alive():
+          os.kill(self.worker_process.pid, signal.SIGKILL)
+          logger.info("Child process killed by -9")
+        else:
+          # get log messages only if the process died gracefully, otherwise we could block here forever; in most cases
+          # this call does nothing, as all logs are processed in the ActionQueue loop
+          self._process_logs()
+          logger.info("Child process died gracefully")
       else:
-        # get log messages only if we died gracefully, otherwise we will have chance to block here forever, in most cases
-        # this call will do nothing, as all logs will be processed in ActionQueue loop
-        self._process_logs()
-        logger.info("Child process died gracefully")
-    else:
-      logger.info("Child process already dead")
-
-    # close queues and acquire usage lock
-    # closing both sides of pipes here, we need this hack in case of blocking on recv() call
-    self.mp_result_queue.close()
-    self.mp_result_queue._writer.close()
-    self.mp_result_logs.close()
-    self.mp_result_logs._writer.close()
-    self.mp_task_queue.close()
-    self.mp_task_queue._writer.close()
-
-    with self.usage_lock:
-      self.mp_result_queue.join_thread()
-      self.mp_result_queue = multiprocessing.Queue()
-      self.mp_task_queue.join_thread()
-      self.mp_task_queue = multiprocessing.Queue()
-      self.mp_result_logs.join_thread()
-      self.mp_result_logs = multiprocessing.Queue()
-      self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
-      self.mustDieEvent.clear()
-      self.timedOutEvent.clear()
+        logger.info("Child process already dead")
+
+      # close queues and acquire usage lock
+      # closing both sides of pipes here, we need this hack in case of blocking on recv() call
+      self.mp_result_queue.close()
+      self.mp_result_queue._writer.close()
+      self.mp_result_logs.close()
+      self.mp_result_logs._writer.close()
+      self.mp_task_queue.close()
+      self.mp_task_queue._writer.close()
+
+      with self.usage_lock:
+        self.mp_result_queue.join_thread()
+        self.mp_result_queue = multiprocessing.Queue()
+        self.mp_task_queue.join_thread()
+        self.mp_task_queue = multiprocessing.Queue()
+        self.mp_result_logs.join_thread()
+        self.mp_result_logs = multiprocessing.Queue()
+        self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
+        self.mustDieEvent.clear()
+        self.timedOutEvent.clear()
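
The ``_drain_queue`` method above replaces two near-identical reader loops with one bounded drain. A minimal standalone sketch of the pattern, stripped of the executor's usage_lock and IOError/UnicodeDecodeError handling (names here are illustrative, not the agent's API):

    import Queue   # Python 2 stdlib, as in the agent code
    import time

    def drain_queue(target_queue, max_time=5, max_empty_count=15, read_break=.001):
        results = []
        empty_in_a_row = 0
        start = time.time()
        while time.time() - start < max_time and empty_in_a_row < max_empty_count:
            try:
                results.append(target_queue.get(False))  # non-blocking read
                empty_in_a_row = 0
                time.sleep(read_break)  # let empty()/qsize() settle between reads
            except Queue.Empty:
                empty_in_a_row += 1
        return results

Both bounds matter because empty() and qsize() are only advisory on multiprocessing queues: the wall-clock limit caps total time spent, while the consecutive-empty limit lets the loop exit early once the queue is drained.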

+ 3 - 2
ambari-agent/src/main/python/ambari_agent/main.py

@@ -352,8 +352,9 @@ def run_threads(server_hostname, heartbeat_stop_callback):
   while controller.is_alive():
     time.sleep(0.1)
 
-    if controller.get_status_commands_executor().need_relaunch:
-      controller.get_status_commands_executor().relaunch("COMMAND_TIMEOUT_OR_KILLED")
+    need_relaunch, reason = controller.get_status_commands_executor().need_relaunch
+    if need_relaunch:
+      controller.get_status_commands_executor().relaunch(reason)
 
   controller.get_status_commands_executor().kill("AGENT_STOPPED", can_relaunch=False)
 

+ 39 - 0
ambari-agent/src/test/python/resource_management/TestUtils.py

@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import stat
+from unittest import TestCase
+from resource_management.core.utils import attr_to_bitmask
+
+
+class TestUtils(TestCase):
+
+  def test_attr_to_bitmask(self):
+    test_set = [
+      ["+r", stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH, 0],
+      ["u+w", stat.S_IWUSR, 0],
+      ["uo+x", stat.S_IXUSR | stat.S_IXOTH, 0],
+      ["-x", stat.S_IRUSR, stat.S_IXUSR | stat.S_IXOTH | stat.S_IRUSR],
+      ["=x", stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP, stat.S_IRUSR | stat.S_IRGRP]
+    ]
+
+    for test in test_set:
+      test_pattern, expected, initial_val = test
+      bitmask = attr_to_bitmask(test_pattern, initial_bitmask=initial_val)
+      self.assertEquals(expected, bitmask, "Test set \"{0}\" failed, expected: {1} but got {2}".format(
+        test_pattern, expected, bitmask))

+ 2 - 0
ambari-common/src/main/python/ambari_commons/network.py

@@ -53,12 +53,14 @@ def get_http_connection(host, port, https_enabled=False, ca_certs=None):
 
 def check_ssl_certificate_and_return_ssl_version(host, port, ca_certs):
   try:
+    # Try with TLSv1 first.
     ssl_version = ssl.PROTOCOL_TLSv1
     ssl.get_server_certificate((host, port), ssl_version=ssl_version, ca_certs=ca_certs)
   except ssl.SSLError as ssl_error:
     print_warning_msg("Failed to verify the SSL certificate for https://{0}:{1} with CA certificate in {2} using ssl.PROTOCOL_TLSv1."
                       " Trying to use less secure ssl.PROTOCOL_SSLv23. Error : {3}".format(host, port, ca_certs, str(ssl_error)))
     try:
+      # Try with SSLv23 only if TLSv1 failed.
       ssl_version = ssl.PROTOCOL_SSLv23
       ssl.get_server_certificate((host, port), ssl_version=ssl_version, ca_certs=ca_certs)
     except ssl.SSLError as ssl_error:
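
The comments added above document a two-step handshake probe. A rough standalone sketch of the same stdlib call sequence (host, port, and ca_certs are placeholders):

    import ssl

    def fetch_server_certificate(host, port, ca_certs=None):
        try:
            # Prefer TLSv1 first, as in check_ssl_certificate_and_return_ssl_version.
            return ssl.get_server_certificate((host, port),
                                              ssl_version=ssl.PROTOCOL_TLSv1,
                                              ca_certs=ca_certs)
        except ssl.SSLError:
            # Fall back to the less secure SSLv23 handshake only when TLSv1 fails.
            return ssl.get_server_certificate((host, port),
                                              ssl_version=ssl.PROTOCOL_SSLv23,
                                              ca_certs=ca_certs)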

+ 1 - 0
ambari-common/src/main/python/ambari_commons/resources/os_family.json

@@ -68,6 +68,7 @@
     "aliases": {
       "amazon2015": "amazon6",
       "amazon2016": "amazon6",
+      "amazon2017": "amazon6",
       "suse11sp3": "suse11"
     }
 }

+ 44 - 14
ambari-common/src/main/python/resource_management/core/sudo.py

@@ -30,6 +30,7 @@ from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 import subprocess
 
+from resource_management.core.utils import attr_to_bitmask
 
 if os.geteuid() == 0:
   def chown(path, owner, group):
@@ -54,16 +55,45 @@ if os.geteuid() == 0:
             
   
   def chmod(path, mode):
+    """
+    Wrapper around the built-in os.chmod.
+
+    :type path str
+    :type mode int
+    """
     return os.chmod(path, mode)
   
-  mode_to_stat = {"a+x": stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, "a+rx": stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH, "u+x": stat.S_IXUSR, "g+x": stat.S_IXGRP,  "o+x": stat.S_IXOTH}
+
   def chmod_extended(path, mode):
-    if mode in mode_to_stat:
-      st = os.stat(path)
-      os.chmod(path, st.st_mode | mode_to_stat[mode])
-    else:
-      shell.checked_call(["chmod", mode, path])
-      
+    """
+    :type path str
+    :type mode str
+    """
+    st = os.stat(path)
+    os.chmod(path, attr_to_bitmask(mode, initial_bitmask=st.st_mode))
+
+  def chmod_recursive(path, recursive_mode_flags, recursion_follow_links=False):
+    """
+    Recursively change permissions on directories and files.
+    
+    :type path str
+    :type recursive_mode_flags dict
+    :type recursion_follow_links bool
+    """
+    dir_attrib = recursive_mode_flags["d"] if "d" in recursive_mode_flags else None
+    files_attrib = recursive_mode_flags["f"] if "f" in recursive_mode_flags else None
+
+    for root, dirs, files in os.walk(path, followlinks=recursion_follow_links):
+      if dir_attrib is not None:
+        for dir_name in dirs:
+          full_dir_path = os.path.join(root, dir_name)
+          chmod(full_dir_path, attr_to_bitmask(dir_attrib, initial_bitmask=os.stat(full_dir_path).st_mode))
+
+      if files_attrib is not None:
+        for file_name in files:
+          full_file_path = os.path.join(root, file_name)
+          chmod(full_file_path, attr_to_bitmask(files_attrib, initial_bitmask=os.stat(full_file_path).st_mode))
+
   def copy(src, dst):
     shutil.copy(src, dst)
     
@@ -278,10 +308,10 @@ else:
     return files
 
 
-def chmod_recursive(path, recursive_mode_flags, recursion_follow_links):
-  find_flags = []
-  if recursion_follow_links:
-    find_flags.append('-L')
-    
-  for key, flags in recursive_mode_flags.iteritems():
-    shell.checked_call(["find"] + find_flags + [path, "-type", key, "-exec" , "chmod", flags ,"{}" ,";"])
+  def chmod_recursive(path, recursive_mode_flags, recursion_follow_links):
+    find_flags = []
+    if recursion_follow_links:
+      find_flags.append('-L')
+
+    for key, flags in recursive_mode_flags.iteritems():
+      shell.checked_call(["find"] + find_flags + [path, "-type", key, "-exec" , "chmod", flags ,"{}" ,";"])
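
Both chmod_recursive variants above (the pure-Python walk when running as root, the find-based call otherwise) take the same recursive_mode_flags dict, keyed "d" for directories and "f" for plain files. A hypothetical call site (the path is made up):

    from resource_management.core import sudo

    # Give directories the execute bit but leave plain files non-executable.
    sudo.chmod_recursive("/var/lib/ambari-agent/data",
                         recursive_mode_flags={"d": "a+rx", "f": "a+r"},
                         recursion_follow_links=False)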

+ 55 - 1
ambari-common/src/main/python/resource_management/core/utils.py

@@ -27,10 +27,16 @@ import sys
 import signal
 import cStringIO
 from functools import wraps
+
+import re
+
 from resource_management.core.exceptions import Fail
 from itertools import chain, repeat, islice
 
 PASSWORDS_HIDE_STRING = "[PROTECTED]"
+PERM_STRING_REGEXP = re.compile("(?P<scope>[ugoa]*)(?P<direction>[-+=])(?P<attr>[rwx]*)")
+PERM_REGISTER = {"u": 0o100, "g": 0o010, "o": 0o001}
+PERM_BITS = {"r": 0o004, "w": 0o002, "x": 0o001}
 
 class AttributeDictionary(object):
   def __init__(self, *args, **kwargs):
@@ -157,4 +163,52 @@ def pad_infinite(iterable, padding=None):
   return chain(iterable, repeat(padding))
 
 def pad(iterable, size, padding=None):
-  return islice(pad_infinite(iterable, padding), size)
+  return islice(pad_infinite(iterable, padding), size)
+
+
+def attr_to_bitmask(attr, initial_bitmask=0o0):
+  """
+  Generate permission bits from a chmod-style symbolic permission string.
+
+  Supports:
+   - scopes: u, g, o or a
+   - operations: +, -, =
+   - attributes: r, w, x
+   
+  Samples:
+    uo+rw, a+x, u-w, o=r
+  
+  :type attr str 
+  :type initial_bitmask int
+  """
+  attr_dict = {"scope": "", "direction": "", "attr": ""}
+  re_match_result = PERM_STRING_REGEXP.match(attr)
+
+  if re_match_result:
+    attr_dict = re_match_result.groupdict(default=attr_dict)
+
+  if attr_dict["scope"] == "":
+    attr_dict["scope"] = "a"
+
+  if "a" in attr_dict["scope"]:
+    attr_dict["scope"] = "ugo"
+
+  attr_dict["scope"] = list(attr_dict["scope"])
+  attr_dict["attr"] = list(attr_dict["attr"])
+
+  if attr_dict["direction"] == "=":
+    clear_mask = 0o0
+    for scope in attr_dict["scope"]:
+      clear_mask = clear_mask | 0o007 * PERM_REGISTER[scope]
+
+    initial_bitmask = initial_bitmask ^ (initial_bitmask & clear_mask)
+    attr_dict["direction"] = "+"
+
+  for scope in attr_dict["scope"]:
+    for attr in attr_dict["attr"]:
+      if attr_dict["direction"] == "-" and (initial_bitmask & (PERM_BITS[attr] * PERM_REGISTER[scope])) > 0:
+        initial_bitmask = initial_bitmask ^ (PERM_BITS[attr] * PERM_REGISTER[scope])
+      elif attr_dict["direction"] == "+":
+        initial_bitmask = initial_bitmask | (PERM_BITS[attr] * PERM_REGISTER[scope])
+
+  return initial_bitmask
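
A few worked examples of the arithmetic above, following PERM_REGISTER (u=0o100, g=0o010, o=0o001) and PERM_BITS (r=4, w=2, x=1):

    from resource_management.core.utils import attr_to_bitmask

    attr_to_bitmask("u+rw")                        # 0o600: add owner read/write
    attr_to_bitmask("a+x")                         # 0o111: execute bit for user, group, other
    attr_to_bitmask("u-w", initial_bitmask=0o644)  # 0o444: drop the owner's write bit
    attr_to_bitmask("o=r", initial_bitmask=0o647)  # 0o644: clear 'other' bits, then add read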

+ 23 - 1
ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py

@@ -64,6 +64,18 @@ TARBALL_MAP = {
              "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
 }
 
+SERVICE_MAP = {
+  "slider": "SLIDER",
+  "tez": "TEZ_CLIENT",
+  "pig": "PIG",
+  "sqoop": "SQOOP",
+  "hive": "HIVE_CLIENT",
+  "mapreduce": "HDFS_CLIENT",
+  "hadoop_streaming": "MAPREDUCE2_CLIENT",
+  "tez_hive2": "HIVE_CLIENT",
+  "spark": "SPARK_CLIENT",
+  "spark2": "SPARK2_CLIENT"
+}
 
 def get_sysprep_skip_copy_tarballs_hdfs():
   import params
@@ -199,7 +211,7 @@ def _get_single_version_from_stack_select():
 
 
 def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None, force_execute=False,
-                 use_upgrading_version_during_upgrade=True, replace_existing_files=False, skip=False):
+                 use_upgrading_version_during_upgrade=True, replace_existing_files=False, skip=False, skip_component_check=False):
   """
   :param name: Tarball name, e.g., tez, hive, pig, sqoop.
   :param user_group: Group to own the directory.
@@ -210,6 +222,8 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   :param force_execute: If true, will execute the HDFS commands immediately, otherwise, will defer to the calling function.
   :param use_upgrading_version_during_upgrade: If true, will use the version going to during upgrade. Otherwise, use the CURRENT (source) version.
   :param skip: If true, tarballs will not be copied as the cluster deployment uses prepped VMs.
+  :param skip_component_check: If true, skips checking whether the component that owns the file is installed on the node before copying.
+                               Use this when the file is mapped not to a component but to a specific location (JDK jar, Ambari jar, etc.).
   :return: Will return True if successful, otherwise, False.
   """
   import params
@@ -226,6 +240,14 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
     Logger.warning("Skipping copying {0} to {1} for {2} as it is a sys prepped host.".format(str(source_file), str(dest_file), str(name)))
     return True
 
+  if not skip_component_check:
+    # Use the components installed on the node to check whether the file can be copied into HDFS
+    local_components = default("/localComponents", [])
+    component = SERVICE_MAP.get(name)
+    if component not in local_components:
+      Logger.info("{0} is not installed on the host. Skip copying {1}".format(component, source_file))
+      return False
+
   Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, dest_file))
 
   if not os.path.exists(source_file):
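
A hedged usage sketch of the new flag (argument values are illustrative): the default path consults SERVICE_MAP and /localComponents, while skip_component_check=True bypasses that gate for files keyed by location rather than by an installed component.

    # Regular call: copies the tez tarball only if TEZ_CLIENT is in /localComponents.
    copy_to_hdfs("tez", user_group="hadoop", owner="hdfs")

    # Location-keyed file (e.g. a JDK or Ambari jar): skip the component check.
    copy_to_hdfs("tez", user_group="hadoop", owner="hdfs", skip_component_check=True)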

+ 14 - 2
ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py

@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-__all__ = ["setup_ranger_plugin", "get_audit_configs"]
+__all__ = ["setup_ranger_plugin", "get_audit_configs", "generate_ranger_service_config"]
 
 import os
 import ambari_simplejson as json
@@ -279,4 +279,16 @@ def get_audit_configs(config):
     audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
     jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
 
-  return jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver
+  return jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver
+
+def generate_ranger_service_config(ranger_plugin_properties):
+  custom_service_config_dict = {}
+  ranger_plugin_properties_copy = {}
+  ranger_plugin_properties_copy.update(ranger_plugin_properties)
+
+  for key, value in ranger_plugin_properties_copy.iteritems():
+    if key.startswith("ranger.service.config.param."):
+      modify_key_name = key.replace("ranger.service.config.param.","")
+      custom_service_config_dict[modify_key_name] = value
+
+  return custom_service_config_dict
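
For illustration, the prefix stripping above turns Ranger plugin properties into a plain service-config dict (property names are invented):

    props = {
      "ranger.service.config.param.policy.download.auth.users": "hive",
      "ranger.plugin.hive.policy.rest.url": "http://localhost:6080"
    }
    generate_ranger_service_config(props)
    # -> {"policy.download.auth.users": "hive"}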

+ 4 - 3
ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py

@@ -294,7 +294,6 @@ def add_solr_roles(config, roles = [], new_service_principals = [], tries = 30,
   if it is then update the user-roles mapping for Solr (this will upgrade the solr_znode/security.json file).
  If a custom security.json is used for infra-solr, this step is skipped.
   """
-  sudo = AMBARI_SUDO_BINARY
   solr_hosts = default_config(config, "/clusterHostInfo/infra_solr_hosts", [])
   security_enabled = config['configurations']['cluster-env']['security_enabled']
   solr_ssl_enabled = default_config(config, 'configurations/infra-solr-env/infra_solr_ssl_enabled', False)
@@ -316,10 +315,11 @@ def add_solr_roles(config, roles = [], new_service_principals = [], tries = 30,
     hostname = config['hostname'].lower()
     solr_host = __get_random_solr_host(hostname, solr_hosts)
     solr_url = format("{solr_protocol}://{solr_host}:{solr_port}/solr/admin/authorization")
+    solr_user = config['configurations']['infra-solr-env']['infra_solr_user']
     solr_user_keytab = config['configurations']['infra-solr-env']['infra_solr_kerberos_keytab']
     solr_user_principal = config['configurations']['infra-solr-env']['infra_solr_kerberos_principal'].replace('_HOST', hostname)
     solr_user_kinit_cmd = format("{kinit_path_local} -kt {solr_user_keytab} {solr_user_principal};")
-    solr_authorization_enabled_cmd=format("{sudo} {solr_user_kinit_cmd} {sudo} curl -k -s --negotiate -u : {solr_protocol}://{solr_host}:{solr_port}/solr/admin/authorization | grep authorization.enabled")
+    solr_authorization_enabled_cmd=format("{solr_user_kinit_cmd} curl -k -s --negotiate -u : {solr_protocol}://{solr_host}:{solr_port}/solr/admin/authorization | grep authorization.enabled")
 
     if len(new_service_principals) > 0:
       new_service_users = []
@@ -338,10 +338,11 @@ def add_solr_roles(config, roles = [], new_service_principals = [], tries = 30,
       set_user_role_map['set-user-role'] = user_role_map
       set_user_role_json = json.dumps(set_user_role_map)
 
-      add_solr_role_cmd = format("{sudo} {solr_user_kinit_cmd} {sudo} curl -H 'Content-type:application/json' -d '{set_user_role_json}' -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {solr_url} | grep 200")
+      add_solr_role_cmd = format("{solr_user_kinit_cmd} curl -H 'Content-type:application/json' -d '{set_user_role_json}' -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {solr_url} | grep 200")
 
       Logger.info(format("Check authorization enabled command: {solr_authorization_enabled_cmd} \nSet user-role settings command: {add_solr_role_cmd}"))
       Execute(solr_authorization_enabled_cmd + " && "+ add_solr_role_cmd,
               tries=tries,
               try_sleep=try_sleep,
+              user=solr_user,
               logoutput=True)

+ 15 - 1
ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py

@@ -290,11 +290,25 @@ def _get_upgrade_stack():
 
   return None
 
+def unsafe_get_stack_versions():
+  """
+  Gets list of stack versions installed on the host.
+  By default a call to <stack-selector-tool> versions is made to get the list of installed stack versions.
+  DO NOT use a fall-back since this function is called by alerts in order to find potential errors.
+  :return: Returns a tuple of (exit code, output, list of installed stack versions).
+  """
+  stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
+  code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))
+  versions = []
+  if 0 == code:
+    for line in out.splitlines():
+      versions.append(line.rstrip('\n'))
+  return (code, out, versions)
 
 def get_stack_versions(stack_root):
   """
   Gets list of stack versions installed on the host.
-  Be default a call to <stack-selector-tool> versions is made to get the list of installed stack versions.
+  By default a call to <stack-selector-tool> versions is made to get the list of installed stack versions.
   As a fallback list of installed versions is collected from stack version directories in stack install root.
   :param stack_root: Stack install root
   :return: Returns list of installed stack versions.

+ 5 - 3
ambari-common/src/main/python/resource_management/libraries/script/script.py

@@ -874,6 +874,7 @@ class Script(object):
     Directory(self.get_tmp_dir(), create_parents = True)
 
     conf_tmp_dir = tempfile.mkdtemp(dir=self.get_tmp_dir())
+    os.chmod(conf_tmp_dir, 0700)
     output_filename = os.path.join(self.get_tmp_dir(), config['commandParams']['output_file'])
 
     try:
@@ -881,22 +882,23 @@ class Script(object):
         for filename, dict in file_dict.iteritems():
           XmlConfig(filename,
                     conf_dir=conf_tmp_dir,
-                    mode=0644,
+                    mode=0600,
                     **self.generate_configs_get_xml_file_content(filename, dict)
           )
       for file_dict in env_configs_list:
         for filename,dicts in file_dict.iteritems():
           File(os.path.join(conf_tmp_dir, filename),
-               mode=0644,
+               mode=0600,
                content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
 
       for file_dict in properties_configs_list:
         for filename, dict in file_dict.iteritems():
           PropertiesFile(os.path.join(conf_tmp_dir, filename),
-                         mode=0644,
+                         mode=0600,
                          properties=self.generate_configs_get_xml_file_dict(filename, dict)
           )
       with closing(tarfile.open(output_filename, "w:gz")) as tar:
+        os.chmod(output_filename, 0600)
         try:
           tar.add(conf_tmp_dir, arcname=os.path.basename("."))
         finally:

+ 5 - 0
ambari-infra/ambari-infra-solr-client/pom.xml

@@ -35,6 +35,11 @@
       <artifactId>solr-solrj</artifactId>
       <version>${solr.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.9</version>
+    </dependency>
     <dependency>
       <groupId>commons-cli</groupId>
       <artifactId>commons-cli</artifactId>

+ 16 - 1
ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java

@@ -49,6 +49,7 @@ public class AmbariSolrCloudCLI {
   private static final String SETUP_KERBEROS_PLUGIN = "setup-kerberos-plugin";
   private static final String CHECK_ZNODE = "check-znode";
   private static final String SECURE_ZNODE_COMMAND = "secure-znode";
+  private static final String UNSECURE_ZNODE_COMMAND = "unsecure-znode";
   private static final String SECURE_SOLR_ZNODE_COMMAND = "secure-solr-znode";
   private static final String SECURITY_JSON_LOCATION = "security-json-location";
   private static final String CMD_LINE_SYNTAX =
@@ -61,6 +62,7 @@ public class AmbariSolrCloudCLI {
       + "\n./solrCloudCli.sh --check-znode -z host1:2181,host2:2181 -zn /ambari-solr"
       + "\n./solrCloudCli.sh --cluster-prop -z host1:2181,host2:2181/ambari-solr -cpn urlScheme -cpn http"
       + "\n./solrCloudCli.sh --secure-znode -z host1:2181,host2:2181 -zn /ambari-solr -su logsearch,atlas,ranger --jaas-file /etc/myconf/jaas_file"
+      + "\n./solrCloudCli.sh --unsecure-znode -z host1:2181,host2:2181 -zn /ambari-solr --jaas-file /etc/myconf/jaas_file"
       + "\n./solrCloudCli.sh --secure-solr-znode -z host1:2181,host2:2181 -zn /ambari-solr -su logsearch,atlas,ranger --jaas-file /etc/myconf/jaas_file"
       + "\n./solrCloudCli.sh --setup-kerberos-plugin -z host1:2181,host2:2181 -zn /ambari-solr --security-json-location /etc/infra-solr/conf/security.json\n";
 
@@ -130,6 +132,11 @@ public class AmbariSolrCloudCLI {
       .desc("Set acls for znode")
       .build();
 
+    final Option unsecureZnodeOption = Option.builder("uz")
+      .longOpt(UNSECURE_ZNODE_COMMAND)
+      .desc("Disable security for znode")
+      .build();
+
     final Option shardNameOption = Option.builder("sn")
       .longOpt("shard-name")
       .desc("Name of the shard for create-shard command")
@@ -327,6 +334,7 @@ public class AmbariSolrCloudCLI {
     options.addOption(configDirOption);
     options.addOption(collectionOption);
     options.addOption(secureZnodeOption);
+    options.addOption(unsecureZnodeOption);
     options.addOption(secureSolrZnodeOption);
     options.addOption(shardsOption);
     options.addOption(replicationOption);
@@ -403,9 +411,12 @@ public class AmbariSolrCloudCLI {
       } else if (cli.hasOption("ssz")) {
         command = SECURE_SOLR_ZNODE_COMMAND;
         validateRequiredOptions(cli, command, zkConnectStringOption, znodeOption, jaasFileOption, saslUsersOption);
+      } else if (cli.hasOption("uz")) {
+        command = UNSECURE_ZNODE_COMMAND;
+        validateRequiredOptions(cli, command, zkConnectStringOption, znodeOption, jaasFileOption);
       } else {
         List<String> commands = Arrays.asList(CREATE_COLLECTION_COMMAND, CREATE_SHARD_COMMAND, UPLOAD_CONFIG_COMMAND,
-          DOWNLOAD_CONFIG_COMMAND, CONFIG_CHECK_COMMAND, SET_CLUSTER_PROP, CREATE_ZNODE, SECURE_ZNODE_COMMAND,
+          DOWNLOAD_CONFIG_COMMAND, CONFIG_CHECK_COMMAND, SET_CLUSTER_PROP, CREATE_ZNODE, SECURE_ZNODE_COMMAND, UNSECURE_ZNODE_COMMAND,
           SECURE_SOLR_ZNODE_COMMAND, CHECK_ZNODE, SETUP_KERBEROS_PLUGIN);
         helpFormatter.printHelp(CMD_LINE_SYNTAX, options);
         exit(1, String.format("One of the supported commands is required (%s)", StringUtils.join(commands, "|")));
@@ -521,6 +532,10 @@ public class AmbariSolrCloudCLI {
           solrCloudClient = clientBuilder.build();
           solrCloudClient.secureZnode();
           break;
+        case UNSECURE_ZNODE_COMMAND:
+          solrCloudClient = clientBuilder.build();
+          solrCloudClient.unsecureZnode();
+          break;
         case SECURE_SOLR_ZNODE_COMMAND:
           solrCloudClient = clientBuilder.build();
           solrCloudClient.secureSolrZnode();

+ 9 - 0
ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java

@@ -30,6 +30,7 @@ import org.apache.ambari.infra.solr.commands.ListCollectionCommand;
 import org.apache.ambari.infra.solr.commands.SecureSolrZNodeZkCommand;
 import org.apache.ambari.infra.solr.commands.SecureZNodeZkCommand;
 import org.apache.ambari.infra.solr.commands.SetClusterPropertyZkCommand;
+import org.apache.ambari.infra.solr.commands.UnsecureZNodeZkCommand;
 import org.apache.ambari.infra.solr.commands.UploadConfigZkCommand;
 import org.apache.ambari.infra.solr.commands.CheckZnodeZkCommand;
 import org.apache.ambari.infra.solr.util.ShardUtils;
@@ -177,6 +178,14 @@ public class AmbariSolrCloudClient {
     new SecureZNodeZkCommand(getRetryTimes(), getInterval()).run(this);
   }
 
+  /**
+   * Unsecure znode
+   */
+  public void unsecureZnode() throws Exception {
+    LOG.info("Disable security for znode - ", this.getZnode());
+    new UnsecureZNodeZkCommand(getRetryTimes(), getInterval()).run(this);
+  }
+
   /**
    * Upload config set to zookeeper
    */

+ 44 - 0
ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/UnsecureZNodeZkCommand.java

@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.solr.commands;
+
+import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.ambari.infra.solr.util.AclUtils;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.SolrZooKeeper;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class UnsecureZNodeZkCommand extends AbstractZookeeperRetryCommand<Boolean> {
+
+  public UnsecureZNodeZkCommand(int maxRetries, int interval) {
+    super(maxRetries, interval);
+  }
+
+  @Override
+  protected Boolean executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
+    String zNode = client.getZnode();
+    AclUtils.setRecursivelyOn(client.getSolrZkClient().getSolrZooKeeper(), zNode, ZooDefs.Ids.OPEN_ACL_UNSAFE);
+    return true;
+  }
+}
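
The command above delegates to AclUtils.setRecursivelyOn with ZooDefs.Ids.OPEN_ACL_UNSAFE. For readers more at home in Python, a rough equivalent using the kazoo client (kazoo is not used by this code; this is purely illustrative):

    from kazoo.client import KazooClient
    from kazoo.security import OPEN_ACL_UNSAFE

    def unsecure_znode_recursively(zk, path):
        zk.set_acls(path, OPEN_ACL_UNSAFE)  # world:anyone, all permissions
        for child in zk.get_children(path):
            unsecure_znode_recursively(zk, path.rstrip("/") + "/" + child)

    zk = KazooClient(hosts="host1:2181,host2:2181")
    zk.start()
    unsecure_znode_recursively(zk, "/ambari-solr")
    zk.stop()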

+ 0 - 1
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/converter/FieldAuditLogRequestQueryConverter.java

@@ -33,7 +33,6 @@ public class FieldAuditLogRequestQueryConverter extends AbstractLogRequestFacetQ
   @Override
   public void appendFacetOptions(FacetOptions facetOptions, FieldAuditLogRequest request) {
     facetOptions.addFacetOnPivot(request.getField(), AUDIT_COMPONENT);
-    facetOptions.setFacetLimit(request.getTop());
   }
 
   @Override

+ 54 - 0
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/converter/TopFieldAuditLogRequestQueryConverter.java

@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.converter;
+
+import org.apache.ambari.logsearch.common.LogType;
+import org.apache.ambari.logsearch.model.request.impl.TopFieldAuditLogRequest;
+import org.springframework.data.solr.core.query.FacetOptions;
+
+import javax.inject.Named;
+
+import static org.apache.ambari.logsearch.solr.SolrConstants.AuditLogConstants.AUDIT_COMPONENT;
+import static org.apache.ambari.logsearch.solr.SolrConstants.AuditLogConstants.AUDIT_EVTTIME;
+
+@Named
+public class TopFieldAuditLogRequestQueryConverter extends AbstractLogRequestFacetQueryConverter<TopFieldAuditLogRequest> {
+
+  @Override
+  public void appendFacetOptions(FacetOptions facetOptions, TopFieldAuditLogRequest request) {
+    facetOptions.addFacetOnPivot(request.getField(), AUDIT_COMPONENT);
+    facetOptions.setFacetLimit(request.getTop());
+  }
+
+  @Override
+  public FacetOptions.FacetSort getFacetSort() {
+    return FacetOptions.FacetSort.COUNT;
+  }
+
+  @Override
+  public String getDateTimeField() {
+    return AUDIT_EVTTIME;
+  }
+
+  @Override
+  public LogType getLogType() {
+    return LogType.AUDIT;
+  }
+
+}

+ 2 - 1
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java

@@ -47,6 +47,7 @@ import org.apache.ambari.logsearch.model.request.impl.AuditComponentRequest;
 import org.apache.ambari.logsearch.model.request.impl.AuditLogRequest;
 import org.apache.ambari.logsearch.model.request.impl.AuditServiceLoadRequest;
 import org.apache.ambari.logsearch.model.request.impl.FieldAuditLogRequest;
+import org.apache.ambari.logsearch.model.request.impl.TopFieldAuditLogRequest;
 import org.apache.ambari.logsearch.model.request.impl.UserExportRequest;
 import org.apache.ambari.logsearch.model.response.AuditLogResponse;
 import org.apache.ambari.logsearch.model.response.BarGraphDataListResponse;
@@ -134,7 +135,7 @@ public class AuditLogsManager extends ManagerBase<SolrAuditLogData, AuditLogResp
     return responseDataGenerator.generateBarGraphDataResponseWithRanges(response, SolrConstants.AuditLogConstants.AUDIT_COMPONENT, true);
   }
 
-  public BarGraphDataListResponse topResources(FieldAuditLogRequest request) {
+  public BarGraphDataListResponse topResources(TopFieldAuditLogRequest request) {
     SimpleFacetQuery facetQuery = conversionService.convert(request, SimpleFacetQuery.class);
     QueryResponse queryResponse = auditSolrDao.process(facetQuery);
     return responseDataGenerator.generateSecondLevelBarGraphDataResponse(queryResponse, 0);

+ 1 - 16
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/request/impl/FieldAuditLogRequest.java

@@ -20,19 +20,14 @@ package org.apache.ambari.logsearch.model.request.impl;
 
 import org.apache.ambari.logsearch.common.LogSearchConstants;
 import org.apache.ambari.logsearch.model.request.FieldParamDefinition;
-import org.apache.ambari.logsearch.model.request.TopParamDefinition;
 
-import javax.ws.rs.PathParam;
 import javax.ws.rs.QueryParam;
 
-public class FieldAuditLogRequest extends BaseLogRequest implements FieldParamDefinition, TopParamDefinition {
+public class FieldAuditLogRequest extends BaseLogRequest implements FieldParamDefinition {
 
   @QueryParam(LogSearchConstants.REQUEST_PARAM_FIELD)
   private String field;
 
-  @PathParam(LogSearchConstants.REQUEST_PARAM_TOP)
-  private Integer top;
-
   @Override
   public String getField() {
     return field;
@@ -42,14 +37,4 @@ public class FieldAuditLogRequest extends BaseLogRequest implements FieldParamDe
   public void setField(String field) {
     this.field = field;
   }
-
-  @Override
-  public Integer getTop() {
-    return top;
-  }
-
-  @Override
-  public void setTop(Integer top) {
-    this.top = top;
-  }
 }

+ 40 - 0
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/request/impl/TopFieldAuditLogRequest.java

@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.model.request.impl;
+
+import org.apache.ambari.logsearch.common.LogSearchConstants;
+import org.apache.ambari.logsearch.model.request.TopParamDefinition;
+
+import javax.ws.rs.PathParam;
+
+public class TopFieldAuditLogRequest extends FieldAuditLogRequest implements TopParamDefinition {
+
+  @PathParam(LogSearchConstants.REQUEST_PARAM_TOP)
+  private Integer top;
+
+  @Override
+  public Integer getTop() {
+    return top;
+  }
+
+  @Override
+  public void setTop(Integer top) {
+    this.top = top;
+  }
+}

+ 2 - 2
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java

@@ -35,7 +35,7 @@ import org.apache.ambari.logsearch.common.StatusMessage;
 import org.apache.ambari.logsearch.model.request.impl.AuditBarGraphRequest;
 import org.apache.ambari.logsearch.model.request.impl.AuditComponentRequest;
 import org.apache.ambari.logsearch.model.request.impl.AuditServiceLoadRequest;
-import org.apache.ambari.logsearch.model.request.impl.FieldAuditLogRequest;
+import org.apache.ambari.logsearch.model.request.impl.TopFieldAuditLogRequest;
 import org.apache.ambari.logsearch.model.request.impl.UserExportRequest;
 import org.apache.ambari.logsearch.model.response.AuditLogResponse;
 import org.apache.ambari.logsearch.model.response.BarGraphDataListResponse;
@@ -97,7 +97,7 @@ public class AuditLogsResource {
   @Path("/resources/{top}")
   @Produces({"application/json"})
   @ApiOperation(GET_TOP_AUDIT_RESOURCES_OD)
-  public BarGraphDataListResponse getResources(@BeanParam FieldAuditLogRequest request) {
+  public BarGraphDataListResponse getResources(@BeanParam TopFieldAuditLogRequest request) {
     return auditLogsManager.topResources(request);
   }
 

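The resource method above now binds {top} through the dedicated
TopFieldAuditLogRequest bean. A hypothetical client call could look as follows
(the base URL and field value are assumptions, not shown in this diff):

    import javax.ws.rs.client.Client;
    import javax.ws.rs.client.ClientBuilder;

    // GET <audit-logs-base>/resources/10?field=myfield -> top 10 values of the field
    Client client = ClientBuilder.newClient();
    String json = client.target("http://localhost:61888/api/v1/audit/logs") // assumed base URL
        .path("resources/10")
        .queryParam("field", "myfield")
        .request("application/json")
        .get(String.class);
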
+ 1 - 1
ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java

@@ -82,7 +82,7 @@ public class UserConfigResource {
   }
 
   @PUT
-  @Path("/filters/{id}")
+  @Path("/filters")
   @Produces({"application/json"})
   @ApiOperation(UPDATE_USER_FILTER_OD)
   public LogFeederDataMap updateUserFilter(LogFeederDataMap request) {

+ 1 - 2
ambari-logsearch/ambari-logsearch-server/src/test/java/org/apache/ambari/logsearch/converter/FieldAuditLogRequestQueryConverterTest.java

@@ -40,14 +40,13 @@ public class FieldAuditLogRequestQueryConverterTest extends AbstractRequestConve
     // GIVEN
     FieldAuditLogRequest request = new FieldAuditLogRequest();
     fillBaseLogRequestWithTestData(request);
-    request.setTop(10);
     request.setField("myfield");
     // WHEN
     SolrQuery query = new DefaultQueryParser().doConstructSolrQuery(underTest.convert(request));
     // THEN
     assertEquals("?q=*%3A*&rows=0&fq=evtTime%3A%5B2016-09-13T22%3A00%3A01.000Z+TO+2016-09-14T22%3A00%3A01.000Z%5D&fq=log_message%3Amyincludemessage" +
       "&fq=-log_message%3Amyexcludemessage&fq=repo%3A%28logsearch_app+secure_log%29&fq=-repo%3A%28hst_agent+system_message%29&fq=cluster%3Acl1&facet=true" +
-      "&facet.mincount=1&facet.limit=10&facet.pivot=myfield%2Crepo",
+      "&facet.mincount=1&facet.limit=-1&facet.pivot=myfield%2Crepo",
       query.toQueryString());
   }
 

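Note on the adjusted expectation above: with the top parameter removed from
FieldAuditLogRequest, the converter falls back to facet.limit=-1, which Solr
treats as unlimited; plain field queries now return every pivot value, while
TopFieldAuditLogRequest (tested below) caps the result.
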
+ 61 - 0
ambari-logsearch/ambari-logsearch-server/src/test/java/org/apache/ambari/logsearch/converter/TopFieldAuditLogRequestQueryConverterTest.java

@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.converter;
+
+import org.apache.ambari.logsearch.model.request.impl.TopFieldAuditLogRequest;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.junit.Before;
+import org.junit.Test;
+import org.springframework.data.solr.core.DefaultQueryParser;
+
+import static org.junit.Assert.assertEquals;
+
+public class TopFieldAuditLogRequestQueryConverterTest extends AbstractRequestConverterTest {
+
+  private TopFieldAuditLogRequestQueryConverter underTest;
+
+  @Before
+  public void setUp() {
+    underTest = new TopFieldAuditLogRequestQueryConverter();
+  }
+
+  @Test
+  public void testConvert() {
+    // GIVEN
+    TopFieldAuditLogRequest request = new TopFieldAuditLogRequest();
+    fillBaseLogRequestWithTestData(request);
+    request.setTop(10);
+    request.setField("myfield");
+    // WHEN
+    SolrQuery query = new DefaultQueryParser().doConstructSolrQuery(underTest.convert(request));
+    // THEN
+    assertEquals("?q=*%3A*&rows=0&fq=evtTime%3A%5B2016-09-13T22%3A00%3A01.000Z+TO+2016-09-14T22%3A00%3A01.000Z%5D&fq=log_message%3Amyincludemessage" +
+        "&fq=-log_message%3Amyexcludemessage&fq=repo%3A%28logsearch_app+secure_log%29&fq=-repo%3A%28hst_agent+system_message%29&fq=cluster%3Acl1&facet=true" +
+        "&facet.mincount=1&facet.limit=10&facet.pivot=myfield%2Crepo",
+      query.toQueryString());
+  }
+
+  @Test(expected = IllegalArgumentException.class) // TODO: later use @Valid on the fields to validate object
+  public void testConvertWithoutData() {
+    // GIVEN
+    TopFieldAuditLogRequest request = new TopFieldAuditLogRequest();
+    // WHEN
+    new DefaultQueryParser().doConstructSolrQuery(underTest.convert(request));
+  }
+}

+ 9 - 8
ambari-logsearch/ambari-logsearch-web/package.json

@@ -1,13 +1,16 @@
 {
   "name": "ambari-logsearch-web",
   "version": "0.5.0",
-  "description": "<!-- {% comment %} Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements.  See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License.  You may obtain a copy of the License at",
+  "description": "Front-end package for Apache Ambari Log Search",
+  "homepage": "http://ambari.apache.org/",
+  "license" : "Apache-2.0",
+  "private": true,
+  "repository": {
+    "type": "git",
+    "url": "https://git-wip-us.apache.org/repos/asf/ambari/repo?p=ambari.git;a=summary"
+  },
   "dependencies": {
     "del": "^2.2.0",
-    "gulp": "^3.9.0",
-    "gulp-minify-css": "^1.2.3",
-    "gulp-minify-html": "^1.0.5",
-    "gulp-shell": "^0.5.2",
     "run-sequence": "^1.1.5",
     "yargs": "^3.32.0",
     "bower": "~1.7.2",
@@ -18,7 +21,5 @@
   },
   "scripts": {
     "test": "echo \"Error: no test specified\" && exit 1"
-  },
-  "author": "",
-  "license": "Apache 2.0"
+  }
 }

+ 3 - 3
ambari-logsearch/ambari-logsearch-web/src/main/webapp/scripts/views/common/Header.js

@@ -232,11 +232,11 @@ define(['require',
                     //this.hostArray = content.ui.hostSelect2.val().split(',');
                     // this.levelArray = content.ui.levelSelect2.val().split(',');
 
-                    //this.filterList = { /*components : this.componentArray,*/hosts : this.hostArray/*, levels : this.levelArray */};
-
+                    //this.filterList = { /*components : this.componentArray,*/hosts : this.hostArray/*, levels : this.levelArray */}
                     content.model.set(content.setValues());
 
-                    content.model.save(content.model.attributes,{
+                    content.model.save(content.model.attributes, {
+                        url: Globals.baseURL + 'userconfig/filters',
                         success : function(model,response){
                             Utils.notifySuccess({
                                 content: "Filter has been saved."

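Passing url in the options to Backbone's model.save overrides the model's
default endpoint, so the filter update is now sent to the new PUT
userconfig/filters route introduced in UserConfigResource above.
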
+ 2 - 1
ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector

@@ -38,6 +38,7 @@ METRIC_COLLECTOR=ambari-metrics-collector
 
 NORMALIZER_ENABLED_STUB_FILE=/var/run/ambari-metrics-collector/normalizer_enabled
 FIFO_ENABLED_STUB_FILE=/var/run/ambari-metrics-collector/fifo_enabled
+COLLECTOR_ADDITIONAL_CLASSPATH=
 
 STOP_TIMEOUT=5
 
@@ -256,7 +257,7 @@ function start()
     rm -f "${PIDFILE}" >/dev/null 2>&1
   fi
 
-  nohup "${JAVA}" "-Xms$AMS_COLLECTOR_HEAPSIZE" "-Xmx$AMS_COLLECTOR_HEAPSIZE" ${AMS_COLLECTOR_OPTS} "-cp" "/usr/lib/ambari-metrics-collector/*:${COLLECTOR_CONF_DIR}" "-Djava.net.preferIPv4Stack=true" "-Dams.log.dir=${AMS_COLLECTOR_LOG_DIR}" "-Dproc_${DAEMON_NAME}" "${CLASS}" "$@" > $OUTFILE 2>&1 &
+  nohup "${JAVA}" "-Xms$AMS_COLLECTOR_HEAPSIZE" "-Xmx$AMS_COLLECTOR_HEAPSIZE" ${AMS_COLLECTOR_OPTS} "-cp" "/usr/lib/ambari-metrics-collector/*:${COLLECTOR_CONF_DIR}:${COLLECTOR_ADDITIONAL_CLASSPATH}" "-Djava.net.preferIPv4Stack=true" "-Dams.log.dir=${AMS_COLLECTOR_LOG_DIR}" "-Dproc_${DAEMON_NAME}" "${CLASS}" "$@" > $OUTFILE 2>&1 &
   PID=$!
   write_pidfile "${PIDFILE}"
   sleep 2
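
With the hook above in place, extra jars can be put on the collector classpath
by setting COLLECTOR_ADDITIONAL_CLASSPATH before the daemon starts, e.g.
COLLECTOR_ADDITIONAL_CLASSPATH=/usr/share/ams-extra/* (the path is illustrative).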

+ 15 - 16
ambari-server/src/main/java/org/apache/ambari/server/KdcServerConnectionVerification.java

@@ -134,7 +134,7 @@ public class KdcServerConnectionVerification {
     config.setUseUdp(ConnectionProtocol.UDP == connectionProtocol);
     config.setTimeout(timeoutMillis);
 
-    FutureTask<Boolean> future = new FutureTask<Boolean>(new Callable<Boolean>() {
+    FutureTask<Boolean> future = new FutureTask<>(new Callable<Boolean>() {
       @Override
       public Boolean call() {
         Boolean success;
@@ -146,8 +146,8 @@ public class KdcServerConnectionVerification {
           connection.getTgt("noUser@noRealm", "noPassword");
 
           LOG.info(String.format("Encountered no Exceptions while testing connectivity to the KDC:\n" +
-                  "**** Host: %s:%d (%s)",
-              server, port, connectionProtocol.name()));
+              "**** Host: %s:%d (%s)",
+            server, port, connectionProtocol.name()));
           success = true;
         } catch (KerberosException e) {
           KrbError error = e.getError();
@@ -155,11 +155,10 @@ public class KdcServerConnectionVerification {
 
           String errorCodeMessage;
           int errorCodeCode;
-          if(errorCode != null) {
+          if (errorCode != null) {
             errorCodeMessage = errorCode.getMessage();
             errorCodeCode = errorCode.getValue();
-          }
-          else {
+          } else {
             errorCodeMessage = "<Not Specified>";
             errorCodeCode = -1;
           }
@@ -167,10 +166,10 @@ public class KdcServerConnectionVerification {
           // unfortunately, need to look at msg as error 60 is a generic error code
           //todo: evaluate other error codes to provide better information
           //todo: as there may be other error codes where we should return false
-          success  = !(errorCodeCode == ErrorType.KRB_ERR_GENERIC.getValue() &&
-              errorCodeMessage.contains("TimeOut"));
+          success = !(errorCodeCode == ErrorType.KRB_ERR_GENERIC.getValue() &&
+            errorCodeMessage.contains("TimeOut"));
 
-          if(!success || LOG.isDebugEnabled()) {
+          if (!success || LOG.isDebugEnabled()) {
             KerberosMessageType messageType = error.getMessageType();
 
             String messageTypeMessage;
@@ -184,12 +183,12 @@ public class KdcServerConnectionVerification {
             }
 
             String message = String.format("Received KerberosException while testing connectivity to the KDC: %s\n" +
-                    "**** Host:    %s:%d (%s)\n" +
-                    "**** Error:   %s\n" +
-                    "**** Code:    %d (%s)\n" +
-                    "**** Message: %d (%s)",
-                e.getLocalizedMessage(), server, port, connectionProtocol.name(), error.getEText(), errorCodeCode,
-                errorCodeMessage, messageTypeCode, messageTypeMessage);
+                "**** Host:    %s:%d (%s)\n" +
+                "**** Error:   %s\n" +
+                "**** Code:    %d (%s)\n" +
+                "**** Message: %d (%s)",
+              e.getLocalizedMessage(), server, port, connectionProtocol.name(), error.getEText(), errorCodeCode,
+              errorCodeMessage, messageTypeCode, messageTypeMessage);
 
             if (LOG.isDebugEnabled()) {
               LOG.info(message, e);
@@ -199,7 +198,7 @@ public class KdcServerConnectionVerification {
           }
         } catch (Throwable e) {
           LOG.info(String.format("Received Exception while testing connectivity to the KDC: %s\n**** Host: %s:%d (%s)",
-              e.getLocalizedMessage(), server, port, connectionProtocol.name()), e);
+            e.getLocalizedMessage(), server, port, connectionProtocol.name()), e);
 
           // some bad unexpected thing occurred
           throw new RuntimeException(e);

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/Role.java

@@ -29,7 +29,7 @@ import java.util.concurrent.ConcurrentHashMap;
  */
 public class Role {
 
-  private static final Map<String, Role> roles = new ConcurrentHashMap<String, Role>();
+  private static final Map<String, Role> roles = new ConcurrentHashMap<>();
 
   /**
    * @param name the role name

+ 9 - 7
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java

@@ -76,17 +76,19 @@ public interface ActionDBAccessor {
                        boolean skipSupported, boolean hostUnknownState);
 
   /**
-   * Returns all the pending stages, including queued and not-queued. A stage is
-   * considered in progress if it is in progress for any host.
+   * Returns the next stage which is in progress for every in-progress request
+   * in the system. Since stages within a request always run sequentially,
+   * there is no reason to return more than the next stage per request;
+   * returning every single stage in a request would be extremely inefficient
+   * and wasteful. However, since requests can run in parallel, this method
+   * must return the next stage for every such request. The results will be
+   * sorted by request ID.
    * <p/>
-   * The results will be sorted by request ID and then stage ID making this call
-   * expensive in some scenarios. Use {@link #getCommandsInProgressCount()} in
-   * order to determine if there are stages that are in progress before getting
-   * the stages from this method.
+   * Use {@link #getCommandsInProgressCount()} in order to determine if there
+   * are stages that are in progress before getting the stages from this method.
    *
    * @see HostRoleStatus#IN_PROGRESS_STATUSES
    */
-  public List<Stage> getStagesInProgress();
+  public List<Stage> getFirstStageInProgressPerRequest();
 
   /**
    * Returns all the pending stages in a request, including queued and not-queued. A stage is

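The new contract can be summarized with a small sketch: from all in-progress
stages, keep only the lowest stage per request, ordered by request ID. The real
implementation pushes this down into the DAO; the sketch below is illustrative
only, and assumes allInProgress holds every in-progress stage:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    Map<Long, Stage> firstPerRequest = new TreeMap<>(); // TreeMap keeps request-ID order
    for (Stage stage : allInProgress) {
      Stage current = firstPerRequest.get(stage.getRequestId());
      if (current == null || stage.getStageId() < current.getStageId()) {
        firstPerRequest.put(stage.getRequestId(), stage);
      }
    }
    List<Stage> firstStageInProgressPerRequest = new ArrayList<>(firstPerRequest.values());
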
+ 16 - 11
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java

@@ -285,11 +285,16 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
    * {@inheritDoc}
    */
   @Override
-  @Experimental(feature = ExperimentalFeature.PARALLEL_PROCESSING)
-  public List<Stage> getStagesInProgress() {
-    List<StageEntity> stageEntities = stageDAO.findByStatuses(
+  public List<Stage> getFirstStageInProgressPerRequest() {
+    List<StageEntity> stageEntities = stageDAO.findFirstStageByStatus(
       HostRoleStatus.IN_PROGRESS_STATUSES);
-    return getStagesForEntities(stageEntities);
+
+    List<Stage> stages = new ArrayList<>(stageEntities.size());
+    for (StageEntity stageEntity : stageEntities) {
+      stages.add(stageFactory.createExisting(stageEntity));
+    }
+
+    return stages;
   }
 
   @Experimental(feature = ExperimentalFeature.PARALLEL_PROCESSING)
@@ -357,7 +362,7 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
     requestDAO.create(requestEntity);
 
     //TODO wire request to cluster
-    List<StageEntity> stageEntities = new ArrayList<StageEntity>(request.getStages().size());
+    List<StageEntity> stageEntities = new ArrayList<>(request.getStages().size());
 
     addRequestToAuditlogCache(request);
 
@@ -503,17 +508,17 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
 
   @Override
   public void updateHostRoleStates(Collection<CommandReport> reports) {
-    Map<Long, CommandReport> taskReports = new HashMap<Long, CommandReport>();
+    Map<Long, CommandReport> taskReports = new HashMap<>();
     for (CommandReport report : reports) {
       taskReports.put(report.getTaskId(), report);
     }
 
     long now = System.currentTimeMillis();
 
-    List<Long> requestsToCheck = new ArrayList<Long>();
+    List<Long> requestsToCheck = new ArrayList<>();
 
     List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByPKs(taskReports.keySet());
-    List<HostRoleCommandEntity> commandEntitiesToMerge = new ArrayList<HostRoleCommandEntity>();
+    List<HostRoleCommandEntity> commandEntitiesToMerge = new ArrayList<>();
     for (HostRoleCommandEntity commandEntity : commandEntities) {
       CommandReport report = taskReports.get(commandEntity.getTaskId());
       HostRoleStatus existingTaskStatus = commandEntity.getStatus();
@@ -704,12 +709,12 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
       return Collections.emptyList();
     }
 
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
+    List<HostRoleCommand> commands = new ArrayList<>();
 
     Map<Long, HostRoleCommand> cached = hostRoleCommandCache.getAllPresent(taskIds);
     commands.addAll(cached.values());
 
-    List<Long> absent = new ArrayList<Long>();
+    List<Long> absent = new ArrayList<>();
     absent.addAll(taskIds);
     absent.removeAll(cached.keySet());
 
@@ -798,7 +803,7 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
   @Override
   public List<Request> getRequests(Collection<Long> requestIds) {
     List<RequestEntity> requestEntities = requestDAO.findByPks(requestIds);
-    List<Request> requests = new ArrayList<Request>(requestEntities.size());
+    List<Request> requests = new ArrayList<>(requestEntities.size());
     for (RequestEntity requestEntity : requestEntities) {
       requests.add(requestFactory.createExisting(requestEntity));
     }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java

@@ -142,7 +142,7 @@ public class ActionManager {
         return (int) (o1.getTaskId()-o2.getTaskId());
       }
     });
-    List<CommandReport> reportsToProcess = new ArrayList<CommandReport>();
+    List<CommandReport> reportsToProcess = new ArrayList<>();
     //persist the action response into the db.
     for (CommandReport report : reports) {
       HostRoleCommand command = commands.get(report.getTaskId());

+ 77 - 101
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java

@@ -42,7 +42,6 @@ import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.AgentCommand;
-import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.agent.CancelCommand;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
@@ -161,7 +160,7 @@ class ActionScheduler implements Runnable {
   private final Object wakeupSyncObject = new Object();
   private final ServerActionExecutor serverActionExecutor;
 
-  private final Set<Long> requestsInProgress = new HashSet<Long>();
+  private final Set<Long> requestsInProgress = new HashSet<>();
 
   /**
    * Contains request ids that have been scheduled to be cancelled,
@@ -176,7 +175,7 @@ class ActionScheduler implements Runnable {
    * requestsToBeCancelled object
    */
   private final Map<Long, String> requestCancelReasons =
-    new HashMap<Long, String>();
+    new HashMap<>();
 
   /**
    * true if scheduler should run ASAP.
@@ -354,16 +353,16 @@ class ActionScheduler implements Runnable {
         return;
       }
 
-      Set<Long> runningRequestIds = new HashSet<Long>();
-      List<Stage> stages = db.getStagesInProgress();
+      Set<Long> runningRequestIds = new HashSet<>();
+      List<Stage> firstStageInProgressPerRequest = db.getFirstStageInProgressPerRequest();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Scheduler wakes up");
-        LOG.debug("Processing {} in progress stages ", stages.size());
+        LOG.debug("Processing {} in progress stages", firstStageInProgressPerRequest.size());
       }
 
-      publishInProgressTasks(stages);
+      publishInProgressTasks(firstStageInProgressPerRequest);
 
-      if (stages.isEmpty()) {
+      if (firstStageInProgressPerRequest.isEmpty()) {
         // Nothing to do
         if (LOG.isDebugEnabled()) {
           LOG.debug("There are no stages currently in progress.");
@@ -375,11 +374,19 @@ class ActionScheduler implements Runnable {
 
       int i_stage = 0;
 
-      HashSet<String> hostsWithTasks = getListOfHostsWithPendingTask(stages);
-      actionQueue.updateListOfHostsWithPendingTask(hostsWithTasks);
+      // get the range of requests in progress
+      long iLowestRequestIdInProgress = firstStageInProgressPerRequest.get(0).getRequestId();
+      long iHighestRequestIdInProgress = firstStageInProgressPerRequest.get(
+          firstStageInProgressPerRequest.size() - 1).getRequestId();
 
-      stages = filterParallelPerHostStages(stages);
-      // At this point the stages is a filtered list
+      List<String> hostsWithPendingTasks = hostRoleCommandDAO.getHostsWithPendingTasks(
+          iLowestRequestIdInProgress, iHighestRequestIdInProgress);
+
+      actionQueue.updateListOfHostsWithPendingTask(new HashSet<>(hostsWithPendingTasks));
+
+      // filter the stages in progress down to those which can be scheduled in
+      // parallel
+      List<Stage> stages = filterParallelPerHostStages(firstStageInProgressPerRequest);
 
       boolean exclusiveRequestIsGoing = false;
       // This loop greatly depends on the fact that order of stages in
@@ -453,8 +460,8 @@ class ActionScheduler implements Runnable {
           return;
         }
 
-        List<ExecutionCommand> commandsToStart = new ArrayList<ExecutionCommand>();
-        List<ExecutionCommand> commandsToUpdate = new ArrayList<ExecutionCommand>();
+        List<ExecutionCommand> commandsToStart = new ArrayList<>();
+        List<ExecutionCommand> commandsToUpdate = new ArrayList<>();
 
         //Schedule what we have so far
 
@@ -468,7 +475,7 @@ class ActionScheduler implements Runnable {
 
         //Multimap is analogous to Map<Object, List<Object>> but avoids a nested loop
         ListMultimap<String, ServiceComponentHostEvent> eventMap = formEventMap(stage, commandsToStart);
-        Map<ExecutionCommand, String> commandsToAbort = new HashMap<ExecutionCommand, String>();
+        Map<ExecutionCommand, String> commandsToAbort = new HashMap<>();
         if (!eventMap.isEmpty()) {
           LOG.debug("==> processing {} serviceComponentHostEvents...", eventMap.size());
           Cluster cluster = clusters.getCluster(stage.getClusterName());
@@ -501,7 +508,7 @@ class ActionScheduler implements Runnable {
         if (commandsToAbort.size() > 0) { // Code branch may be a bit slow, but is extremely rarely used
           LOG.debug("==> Aborting {} tasks...", commandsToAbort.size());
           // Build a list of HostRoleCommands
-          List<Long> taskIds = new ArrayList<Long>();
+          List<Long> taskIds = new ArrayList<>();
           for (ExecutionCommand command : commandsToAbort.keySet()) {
             taskIds.add(command.getTaskId());
           }
@@ -565,123 +572,92 @@ class ActionScheduler implements Runnable {
   }
 
   /**
-   * Returns the list of hosts that have a task assigned
-   *
-   * @param stages
-   * @return
-   */
-  private HashSet<String> getListOfHostsWithPendingTask(List<Stage> stages) {
-    HashSet<String> hostsWithTasks = new HashSet<String>();
-    for (Stage s : stages) {
-      hostsWithTasks.addAll(s.getHosts());
-    }
-    return hostsWithTasks;
-  }
-
-  /**
-   * Returns filtered list of stages such that the returned list is an ordered list of stages that may
-   * be executed in parallel or in the order in which they are presented
+   * Returns filtered list of stages such that the returned list is an ordered
+   * list of stages that may be executed in parallel or in the order in which
+   * they are presented.
    * <p/>
-   * Assumption: the list of stages supplied as input are ordered by request id and then stage id.
+   * The specified stages must be ordered by request ID and may only contain the
+   * next stage in progress per request (as returned by
+   * {@link ActionDBAccessor#getFirstStageInProgressPerRequest()}). This is
+   * because there is a requirement that within a request, no two stages may
+   * ever run in parallel.
    * <p/>
-   * Rules:
+   * The following rules will be applied to the list:
    * <ul>
-   * <li>
-   * Stages are filtered such that the first stage in the list (assumed to be the first pending
-   * stage from the earliest active request) has priority
-   * </li>
-   * <li>
-   * No stage in any request may be executed before an earlier stage in the same request
-   * </li>
-   * <li>
-   * A stages in different requests may be performed in parallel if the relevant hosts for the
-   * stage in the later requests do not intersect with the union of hosts from (pending) stages
-   * in earlier requests
+   * <li>Stages are filtered such that the first stage in the list (assumed to
+   * be the first pending stage from the earliest active request) has priority.
    * </li>
+   * <li>No stage in any request may be executed before an earlier stage in the
+   * same request. This requirement is automatically covered by virtue of the
+   * supplied stages only being for the next stage in progress per request.</li>
+   * <li>A stage in a different request may be performed in parallel
+   * if and only if the relevant hosts for the stage in the later request do
+   * not intersect with the union of hosts from (pending) stages in earlier
+   * requests. In order to accomplish this, the hosts which are still
+   * blocked by earlier in-progress requests are queried for each stage.</li>
    * </ul>
    *
-   * @param stages the stages to process
+   * @param firstStageInProgressPerRequest
+   *          the stages to process, one stage per request
    * @return a list of stages that may be executed in parallel
    */
-  private List<Stage> filterParallelPerHostStages(List<Stage> stages) {
-    List<Stage> retVal = new ArrayList<Stage>();
-    Set<String> affectedHosts = new HashSet<String>();
-    Set<Long> affectedRequests = new HashSet<Long>();
+  private List<Stage> filterParallelPerHostStages(List<Stage> firstStageInProgressPerRequest) {
+    // if there's only 1 stage in progress in 1 request, simply return that stage
+    if (firstStageInProgressPerRequest.size() == 1) {
+      return firstStageInProgressPerRequest;
+    }
+
+    List<Stage> retVal = new ArrayList<>();
 
-    for (Stage s : stages) {
-      long requestId = s.getRequestId();
+    // set the lower range (inclusive) of requests to limit the query a bit
+    // since there can be a LOT of commands
+    long lowerRequestIdInclusive = firstStageInProgressPerRequest.get(0).getRequestId();
+
+    // determine if this stage can be scheduled in parallel with the other
+    // stages from other requests
+    for (Stage stage : firstStageInProgressPerRequest) {
+      long requestId = stage.getRequestId();
 
       if (LOG.isTraceEnabled()) {
-        LOG.trace("==> Processing stage: {}/{} ({}) for {}", requestId, s.getStageId(), s.getRequestContext());
+        LOG.trace("==> Processing stage: {}/{} ({}) for {}", requestId, stage.getStageId(), stage.getRequestContext());
       }
 
       boolean addStage = true;
 
+      // there are at least 2 requests in progress concurrently; determine which
+      // hosts are affected
+      HashSet<String> hostsInProgressForEarlierRequests = new HashSet<>(
+          hostRoleCommandDAO.getBlockingHostsForRequest(lowerRequestIdInclusive, requestId));
+
       // Iterate over the relevant hosts for this stage to see if any intersect with the set of
       // hosts needed for previous stages.  If any intersection occurs, this stage may not be
       // executed in parallel.
-      for (String host : s.getHosts()) {
+      for (String host : stage.getHosts()) {
         LOG.trace("===> Processing Host {}", host);
 
-        if (affectedHosts.contains(host)) {
+        if (hostsInProgressForEarlierRequests.contains(host)) {
           if (LOG.isTraceEnabled()) {
-            LOG.trace("===>  Skipping stage since it utilizes at least one host that a previous stage requires: {}/{} ({})", s.getRequestId(), s.getStageId(), s.getRequestContext());
+            LOG.trace("===>  Skipping stage since it utilizes at least one host that a previous stage requires: {}/{} ({})", stage.getRequestId(), stage.getStageId(), stage.getRequestContext());
           }
 
-          addStage &= false;
-        } else {
-          if (!Stage.INTERNAL_HOSTNAME.equalsIgnoreCase(host) && !isStageHasBackgroundCommandsOnly(s, host)) {
-            LOG.trace("====>  Adding host to affected hosts: {}", host);
-            affectedHosts.add(host);
-          }
-
-          addStage &= true;
-        }
-      }
-
-      // If this stage is for a request that we have already processed, the it cannot execute in
-      // parallel since only one stage per request my execute at a time. The first time we encounter
-      // a request id, will be for the first pending stage for that request, so it is a candidate
-      // for execution at this time - if the previous test for host intersection succeeds.
-      if (affectedRequests.contains(requestId)) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("===>  Skipping stage since the request it is in has been processed already: {}/{} ({})", s.getRequestId(), s.getStageId(), s.getRequestContext());
-        }
-
-        addStage = false;
-      } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("====>  Adding request to affected requests: {}", requestId);
+          addStage = false;
+          break;
         }
-
-        affectedRequests.add(requestId);
-        addStage &= true;
       }
 
-      // If both tests pass - the stage is the first pending stage in its request and the hosts
-      // required in the stage do not intersect with hosts from stages that should occur before this,
-      // than add it to the list of stages that may be executed in parallel.
+      // add the stage if no stages from prior requests intersect the
+      // hosts in this stage
       if (addStage) {
         if (LOG.isTraceEnabled()) {
-          LOG.trace("===>  Adding stage to return value: {}/{} ({})", s.getRequestId(), s.getStageId(), s.getRequestContext());
+          LOG.trace("===>  Adding stage to return value: {}/{} ({})", stage.getRequestId(), stage.getStageId(), stage.getRequestContext());
         }
 
-        retVal.add(s);
+        retVal.add(stage);
       }
     }
 
     return retVal;
   }
 
-  private boolean isStageHasBackgroundCommandsOnly(Stage s, String host) {
-    for (ExecutionCommandWrapper c : s.getExecutionCommands(host)) {
-      if (c.getCommandType() != AgentCommandType.BACKGROUND_EXECUTION_COMMAND) {
-        return false;
-      }
-    }
-    return true;
-  }
-
   private boolean hasPreviousStageFailed(Stage stage) {
     boolean failed = false;
 
@@ -697,8 +673,8 @@ class ActionScheduler implements Runnable {
         return false;
       }
 
-      Map<Role, Integer> hostCountsForRoles = new HashMap<Role, Integer>();
-      Map<Role, Integer> failedHostCountsForRoles = new HashMap<Role, Integer>();
+      Map<Role, Integer> hostCountsForRoles = new HashMap<>();
+      Map<Role, Integer> failedHostCountsForRoles = new HashMap<>();
 
       for (String host : prevStage.getHostRoleCommands().keySet()) {
         Map<String, HostRoleCommand> roleCommandMap = prevStage.getHostRoleCommands().get(host);
@@ -1008,9 +984,9 @@ class ActionScheduler implements Runnable {
    */
   private Map<String, RoleStats> initRoleStats(Stage s) {
     // Meaning: how many hosts are affected by commands for each role
-    Map<Role, Integer> hostCountsForRoles = new HashMap<Role, Integer>();
+    Map<Role, Integer> hostCountsForRoles = new HashMap<>();
     // < role_name, rolestats >
-    Map<String, RoleStats> roleStats = new TreeMap<String, RoleStats>();
+    Map<String, RoleStats> roleStats = new TreeMap<>();
 
     for (String host : s.getHostRoleCommands().keySet()) {
       Map<String, HostRoleCommand> roleCommandMap = s.getHostRoleCommands().get(host);

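A concrete reading of the filtering above: if request 1's next in-progress
stage targets hosts {h1, h2} and request 2's next stage targets {h3}, both
stages are returned and may run in parallel; if request 2's stage instead
targeted {h2}, it would be skipped until request 1 no longer blocks that host.
The removed isStageHasBackgroundCommandsOnly check appears to be superseded by
the new per-command isBackgroundCommand flag (added to HostRoleCommand below),
which lets the blocking-hosts query disregard background-only work.
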
+ 26 - 0
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java

@@ -68,6 +68,7 @@ public class HostRoleCommand {
   private String commandDetail;
   private String customCommandName;
   private ExecutionCommandWrapper executionCommandWrapper;
+  private boolean isBackgroundCommand = false;
 
   @Inject
   private ExecutionCommandDAO executionCommandDAO;
@@ -179,6 +180,7 @@ public class HostRoleCommand {
     event = new ServiceComponentHostEventWrapper(hostRoleCommandEntity.getEvent());
     commandDetail = hostRoleCommandEntity.getCommandDetail();
     customCommandName = hostRoleCommandEntity.getCustomCommandName();
+    isBackgroundCommand = hostRoleCommandEntity.isBackgroundCommand();
   }
 
   //todo: why is this not symmetrical with the constructor which takes an entity
@@ -201,6 +203,7 @@ public class HostRoleCommand {
     hostRoleCommandEntity.setRoleCommand(roleCommand);
     hostRoleCommandEntity.setCommandDetail(commandDetail);
     hostRoleCommandEntity.setCustomCommandName(customCommandName);
+    hostRoleCommandEntity.setBackgroundCommand(isBackgroundCommand);
 
     HostEntity hostEntity = hostDAO.findById(hostId);
     if (null != hostEntity) {
@@ -432,6 +435,29 @@ public class HostRoleCommand {
     return requestId;
   }
 
+  /**
+   * Gets whether this command runs in the background and does not block other
+   * commands.
+   *
+   * @return {@code true} if this command runs in the background, {@code false}
+   *         otherwise.
+   */
+  public boolean isBackgroundCommand() {
+    return isBackgroundCommand;
+  }
+
+  /**
+   * Sets whether this command runs in the background and does not block other
+   * commands.
+   *
+   * @param isBackgroundCommand
+   *          {@code true} if this command runs in the background, {@code false}
+   *          otherwise.
+   */
+  public void setBackgroundCommand(boolean isBackgroundCommand) {
+    this.isBackgroundCommand = isBackgroundCommand;
+  }
+
   /**
    * Gets whether commands which fail and are retryable are automatically
    * skipped and marked with {@link HostRoleStatus#SKIPPED_FAILED}.

+ 5 - 5
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java

@@ -78,7 +78,7 @@ public class Request {
   private RequestOperationLevel operationLevel;
   private RequestType requestType;
 
-  private Collection<Stage> stages = new ArrayList<Stage>();
+  private Collection<Stage> stages = new ArrayList<>();
 
   @Inject
   private static HostDAO hostDAO;
@@ -207,7 +207,7 @@ public class Request {
   }
 
   private static List<String> getHostsList(String hosts) {
-    List<String> hostList = new ArrayList<String>();
+    List<String> hostList = new ArrayList<>();
     if (hosts != null && !hosts.isEmpty()) {
       for (String host : hosts.split(",")) {
         if (!host.trim().isEmpty()) {
@@ -248,7 +248,7 @@ public class Request {
     //TODO set all fields
 
     if (resourceFilters != null) {
-      List<RequestResourceFilterEntity> filterEntities = new ArrayList<RequestResourceFilterEntity>();
+      List<RequestResourceFilterEntity> filterEntities = new ArrayList<>();
       for (RequestResourceFilter resourceFilter : resourceFilters) {
         RequestResourceFilterEntity filterEntity = new RequestResourceFilterEntity();
         filterEntity.setServiceName(resourceFilter.getServiceName());
@@ -367,7 +367,7 @@ public class Request {
   }
 
   public List<HostRoleCommand> getCommands() {
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
+    List<HostRoleCommand> commands = new ArrayList<>();
     for (Stage stage : stages) {
       commands.addAll(stage.getOrderedHostRoleCommands());
     }
@@ -420,7 +420,7 @@ public class Request {
 
     Collection<RequestResourceFilterEntity> resourceFilterEntities = entity.getResourceFilterEntities();
     if (resourceFilterEntities != null) {
-      resourceFilters = new ArrayList<RequestResourceFilter>();
+      resourceFilters = new ArrayList<>();
       for (RequestResourceFilterEntity resourceFilterEntity : resourceFilterEntities) {
         RequestResourceFilter resourceFilter =
           new RequestResourceFilter(

+ 22 - 22
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java

@@ -90,13 +90,13 @@ public class Stage {
   private volatile boolean wrappersLoaded = false;
 
   //Map of roles to successFactors for this stage. Default is 1 i.e. 100%
-  private Map<Role, Float> successFactors = new HashMap<Role, Float>();
+  private Map<Role, Float> successFactors = new HashMap<>();
 
   //Map of host to host-roles
   Map<String, Map<String, HostRoleCommand>> hostRoleCommands =
-      new TreeMap<String, Map<String, HostRoleCommand>>();
+    new TreeMap<>();
   private Map<String, List<ExecutionCommandWrapper>> commandsToSend =
-      new TreeMap<String, List<ExecutionCommandWrapper>>();
+    new TreeMap<>();
 
   @Inject
   private HostRoleCommandFactory hostRoleCommandFactory;
@@ -237,7 +237,7 @@ public class Stage {
   }
 
   public List<HostRoleCommand> getOrderedHostRoleCommands() {
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
+    List<HostRoleCommand> commands = new ArrayList<>();
     //Correct due to ordered maps
     for (Map.Entry<String, Map<String, HostRoleCommand>> hostRoleCommandEntry : hostRoleCommands.entrySet()) {
       for (Map.Entry<String, HostRoleCommand> roleCommandEntry : hostRoleCommandEntry.getValue().entrySet()) {
@@ -491,7 +491,7 @@ public class Stage {
 
     ExecutionCommand cmd = commandWrapper.getExecutionCommand();
 
-    Map<String, String> cmdParams = new HashMap<String, String>();
+    Map<String, String> cmdParams = new HashMap<>();
     if (commandParams != null) {
       cmdParams.putAll(commandParams);
     }
@@ -500,18 +500,18 @@ public class Stage {
     }
     cmd.setCommandParams(cmdParams);
 
-    Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> configurations = new TreeMap<>();
     cmd.setConfigurations(configurations);
 
-    Map<String, Map<String, Map<String, String>>> configurationAttributes = new TreeMap<String, Map<String, Map<String, String>>>();
+    Map<String, Map<String, Map<String, String>>> configurationAttributes = new TreeMap<>();
     cmd.setConfigurationAttributes(configurationAttributes);
 
     if (configTags == null) {
-      configTags = new TreeMap<String, Map<String, String>>();
+      configTags = new TreeMap<>();
     }
     cmd.setConfigurationTags(configTags);
 
-    Map<String, String> roleParams = new HashMap<String, String>();
+    Map<String, String> roleParams = new HashMap<>();
     roleParams.put(ServerAction.ACTION_NAME, actionName);
     if (userName != null) {
       roleParams.put(ServerAction.ACTION_USER_NAME, userName);
@@ -539,7 +539,7 @@ public class Stage {
 
     Assert.notEmpty(cancelTargets, "Provided targets task Id are empty.");
 
-    Map<String, String> roleParams = new HashMap<String, String>();
+    Map<String, String> roleParams = new HashMap<>();
 
     roleParams.put("cancelTaskIdTargets", StringUtils.join(cancelTargets, ','));
     cmd.setRoleParams(roleParams);
@@ -550,7 +550,7 @@ public class Stage {
    * @return list of hosts
    */
   public synchronized List<String> getHosts() { // TODO: Check whether method should be synchronized
-    List<String> hlist = new ArrayList<String>();
+    List<String> hlist = new ArrayList<>();
     for (String h : hostRoleCommands.keySet()) {
       hlist.add(h);
     }
@@ -930,19 +930,19 @@ public class Stage {
   public synchronized String toString() {
     StringBuilder builder = new StringBuilder();
     builder.append("STAGE DESCRIPTION BEGIN\n");
-    builder.append("requestId="+requestId+"\n");
-    builder.append("stageId="+stageId+"\n");
-    builder.append("clusterName="+clusterName+"\n");
-    builder.append("logDir=" + logDir+"\n");
-    builder.append("requestContext="+requestContext+"\n");
-    builder.append("clusterHostInfo="+clusterHostInfo+"\n");
-    builder.append("commandParamsStage="+commandParamsStage+"\n");
-    builder.append("hostParamsStage="+hostParamsStage+"\n");
-    builder.append("status="+status+"\n");
-    builder.append("displayStatus="+displayStatus+"\n");
+    builder.append("requestId=").append(requestId).append("\n");
+    builder.append("stageId=").append(stageId).append("\n");
+    builder.append("clusterName=").append(clusterName).append("\n");
+    builder.append("logDir=").append(logDir).append("\n");
+    builder.append("requestContext=").append(requestContext).append("\n");
+    builder.append("clusterHostInfo=").append(clusterHostInfo).append("\n");
+    builder.append("commandParamsStage=").append(commandParamsStage).append("\n");
+    builder.append("hostParamsStage=").append(hostParamsStage).append("\n");
+    builder.append("status=").append(status).append("\n");
+    builder.append("displayStatus=").append(displayStatus).append("\n");
     builder.append("Success Factors:\n");
     for (Role r : successFactors.keySet()) {
-      builder.append("  role: "+r+", factor: "+successFactors.get(r)+"\n");
+      builder.append("  role: ").append(r).append(", factor: ").append(successFactors.get(r)).append("\n");
     }
     for (HostRoleCommand hostRoleCommand : getOrderedHostRoleCommands()) {
       builder.append("HOST: ").append(hostRoleCommand.getHostName()).append(" :\n");

+ 6 - 6
ambari-server/src/main/java/org/apache/ambari/server/agent/ActionQueue.java

@@ -40,14 +40,14 @@ public class ActionQueue {
 
   private static Logger LOG = LoggerFactory.getLogger(ActionQueue.class);
 
-  private static HashSet<String> EMPTY_HOST_LIST = new HashSet<String>();
+  private static HashSet<String> EMPTY_HOST_LIST = new HashSet<>();
 
   final ConcurrentMap<String, Queue<AgentCommand>> hostQueues;
 
-  HashSet<String> hostsWithPendingTask = new HashSet<String>();
+  HashSet<String> hostsWithPendingTask = new HashSet<>();
 
   public ActionQueue() {
-    hostQueues = new ConcurrentHashMap<String, Queue<AgentCommand>>();
+    hostQueues = new ConcurrentHashMap<>();
   }
 
   private Queue<AgentCommand> getQueue(String hostname) {
@@ -138,8 +138,8 @@ public class ActionQueue {
       return null;
     }
 
-    List<AgentCommand> removedCommands = new ArrayList<AgentCommand>(
-        queue.size());
+    List<AgentCommand> removedCommands = new ArrayList<>(
+      queue.size());
 
     Iterator<AgentCommand> iterator = queue.iterator();
     while (iterator.hasNext()) {
@@ -196,7 +196,7 @@ public class ActionQueue {
       return null;
     }
 
-    List<AgentCommand> l = new ArrayList<AgentCommand>();
+    List<AgentCommand> l = new ArrayList<>();
 
     AgentCommand command;
     do {

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java

@@ -35,7 +35,7 @@ import com.google.inject.Singleton;
 @Singleton
 public class AgentRequests {
   private static Log LOG = LogFactory.getLog(HeartbeatMonitor.class);
-  private final Map<String, Map<String, Boolean>> requiresExecCmdDetails = new HashMap<String, Map<String, Boolean>>();
+  private final Map<String, Map<String, Boolean>> requiresExecCmdDetails = new HashMap<>();
   private final Object _lock = new Object();
 
   /**

+ 5 - 5
ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java

@@ -67,7 +67,7 @@ public class ExecutionCommand extends AgentCommand {
   private String role;
 
   @SerializedName("hostLevelParams")
-  private Map<String, String> hostLevelParams = new HashMap<String, String>();
+  private Map<String, String> hostLevelParams = new HashMap<>();
 
   @SerializedName("roleParams")
   private Map<String, String> roleParams = null;
@@ -77,7 +77,7 @@ public class ExecutionCommand extends AgentCommand {
 
   @SerializedName("clusterHostInfo")
   private Map<String, Set<String>> clusterHostInfo =
-      new HashMap<String, Set<String>>();
+    new HashMap<>();
 
   @SerializedName("configurations")
   private Map<String, Map<String, String>> configurations;
@@ -92,7 +92,7 @@ public class ExecutionCommand extends AgentCommand {
   private boolean forceRefreshConfigTagsBeforeExecution = false;
 
   @SerializedName("commandParams")
-  private Map<String, String> commandParams = new HashMap<String, String>();
+  private Map<String, String> commandParams = new HashMap<>();
 
   @SerializedName("serviceName")
   private String serviceName;
@@ -104,10 +104,10 @@ public class ExecutionCommand extends AgentCommand {
   private String componentName;
 
   @SerializedName("kerberosCommandParams")
-  private List<Map<String, String>> kerberosCommandParams = new ArrayList<Map<String, String>>();
+  private List<Map<String, String>> kerberosCommandParams = new ArrayList<>();
 
   @SerializedName("localComponents")
-  private Set<String> localComponents = new HashSet<String>();
+  private Set<String> localComponents = new HashSet<>();
 
   @SerializedName("availableServices")
   private Map<String, String> availableServices = new HashMap<>();

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeat.java

@@ -35,9 +35,9 @@ public class HeartBeat {
   private long responseId = -1;
   private long timestamp;
   private String hostname;
-  List<CommandReport> reports = new ArrayList<CommandReport>();
-  List<ComponentStatus> componentStatus = new ArrayList<ComponentStatus>();
-  private List<DiskInfo> mounts = new ArrayList<DiskInfo>();
+  List<CommandReport> reports = new ArrayList<>();
+  List<ComponentStatus> componentStatus = new ArrayList<>();
+  private List<DiskInfo> mounts = new ArrayList<>();
   HostStatus nodeStatus;
   private AgentEnv agentEnv = null;
   private List<Alert> alerts = null;

+ 7 - 7
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java

@@ -110,9 +110,9 @@ public class HeartBeatHandler {
   @Inject
   private KerberosIdentityDataFileReaderFactory kerberosIdentityDataFileReaderFactory;
 
-  private Map<String, Long> hostResponseIds = new ConcurrentHashMap<String, Long>();
+  private Map<String, Long> hostResponseIds = new ConcurrentHashMap<>();
 
-  private Map<String, HeartBeatResponse> hostResponses = new ConcurrentHashMap<String, HeartBeatResponse>();
+  private Map<String, HeartBeatResponse> hostResponses = new ConcurrentHashMap<>();
 
   @Inject
   public HeartBeatHandler(Clusters fsm, ActionQueue aq, ActionManager am,
@@ -523,10 +523,10 @@ public class HeartBeatHandler {
   }
 
   private Map<String, Map<String, String>> getComponentsMap(StackInfo stack) {
-    Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> result = new HashMap<>();
 
     for (ServiceInfo service : stack.getServices()) {
-      Map<String, String> components = new HashMap<String, String>();
+      Map<String, String> components = new HashMap<>();
 
       for (ComponentInfo component : service.getComponents()) {
         components.put(component.getName(), component.getCategory());
@@ -554,7 +554,7 @@ public class HeartBeatHandler {
       return null;
     }
 
-    List<AlertDefinitionCommand> commands = new ArrayList<AlertDefinitionCommand>();
+    List<AlertDefinitionCommand> commands = new ArrayList<>();
 
     // for every cluster this host is a member of, build the command
     for (Cluster cluster : hostClusters) {
@@ -608,7 +608,7 @@ public class HeartBeatHandler {
                 File keytabFile = new File(dataDir + File.separator + hostName + File.separator + sha1Keytab);
 
                 if (keytabFile.canRead()) {
-                  Map<String, String> keytabMap = new HashMap<String, String>();
+                  Map<String, String> keytabMap = new HashMap<>();
                   String principal = record.get(KerberosIdentityDataFileReader.PRINCIPAL);
                   String isService = record.get(KerberosIdentityDataFileReader.SERVICE);
 
@@ -636,7 +636,7 @@ public class HeartBeatHandler {
                 }
               }
             } else if ("REMOVE_KEYTAB".equalsIgnoreCase(command)) {
-              Map<String, String> keytabMap = new HashMap<String, String>();
+              Map<String, String> keytabMap = new HashMap<>();
 
               keytabMap.put(KerberosIdentityDataFileReader.HOSTNAME, hostName);
               keytabMap.put(KerberosIdentityDataFileReader.SERVICE, record.get(KerberosIdentityDataFileReader.SERVICE));

+ 5 - 5
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java

@@ -32,13 +32,13 @@ public class HeartBeatResponse {
   private long responseId;
 
   @SerializedName("executionCommands")
-  private List<ExecutionCommand> executionCommands = new ArrayList<ExecutionCommand>();
+  private List<ExecutionCommand> executionCommands = new ArrayList<>();
 
   @SerializedName("statusCommands")
-  private List<StatusCommand> statusCommands = new ArrayList<StatusCommand>();
+  private List<StatusCommand> statusCommands = new ArrayList<>();
 
   @SerializedName("cancelCommands")
-  private List<CancelCommand> cancelCommands = new ArrayList<CancelCommand>();
+  private List<CancelCommand> cancelCommands = new ArrayList<>();
 
   /**
   * {@link AlertDefinitionCommand}s are used to instruct the agent as to which
@@ -197,7 +197,7 @@ public class HeartBeatResponse {
     // commands are added here when they are taken off the queue; there should
     // be no thread contention and thus no worry about locks for the null check
     if (null == alertDefinitionCommands) {
-      alertDefinitionCommands = new ArrayList<AlertDefinitionCommand>();
+      alertDefinitionCommands = new ArrayList<>();
     }
 
     alertDefinitionCommands.add(command);
@@ -207,7 +207,7 @@ public class HeartBeatResponse {
     // commands are added here when they are taken off the queue; there should
     // be no thread contention and thus no worry about locks for the null check
     if (null == alertExecutionCommands) {
-      alertExecutionCommands = new ArrayList<AlertExecutionCommand>();
+      alertExecutionCommands = new ArrayList<>();
     }
 
     alertExecutionCommands.add(command);

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java

@@ -210,7 +210,7 @@ public class HeartbeatMonitor implements Runnable {
    * @return list of commands to get status of service components on a concrete host
    */
   public List<StatusCommand> generateStatusCommands(String hostname) throws AmbariException {
-    List<StatusCommand> cmds = new ArrayList<StatusCommand>();
+    List<StatusCommand> cmds = new ArrayList<>();
 
     for (Cluster cl : clusters.getClustersForHost(hostname)) {
       Map<String, DesiredConfig> desiredConfigs = cl.getDesiredConfigs();
@@ -249,8 +249,8 @@ public class HeartbeatMonitor implements Runnable {
     StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
         stackId.getStackVersion());
 
-    Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
-    Map<String, Map<String,  Map<String, String>>> configurationAttributes = new TreeMap<String, Map<String, Map<String, String>>>();
+    Map<String, Map<String, String>> configurations = new TreeMap<>();
+    Map<String, Map<String,  Map<String, String>>> configurationAttributes = new TreeMap<>();
 
     // get the cluster config for type '*-env'
     // apply config group overrides

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java

@@ -357,7 +357,7 @@ public class HeartbeatProcessor extends AbstractService{
     List<CommandReport> reports = heartbeat.getReports();
 
     // Cache HostRoleCommand entities because we will need them few times
-    List<Long> taskIds = new ArrayList<Long>();
+    List<Long> taskIds = new ArrayList<>();
     for (CommandReport report : reports) {
       taskIds.add(report.getTaskId());
     }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/HostInfo.java

@@ -45,7 +45,7 @@ public class HostInfo {
   private String macaddress;
   private long memoryfree;
   private long memorysize;
-  private List<DiskInfo> mounts = new ArrayList<DiskInfo>();
+  private List<DiskInfo> mounts = new ArrayList<>();
   private long memorytotal;
   private String netmask;
   private String operatingsystem;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/RecoveryReport.java

@@ -30,7 +30,7 @@ public class RecoveryReport {
    * One of DISABLED, RECOVERABLE, UNRECOVERABLE, PARTIALLY_RECOVERABLE
    */
   private String summary = "DISABLED";
-  private List<ComponentRecoveryReport> componentReports = new ArrayList<ComponentRecoveryReport>();
+  private List<ComponentRecoveryReport> componentReports = new ArrayList<>();
 
 
   @JsonProperty("summary")

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationResponse.java

@@ -38,7 +38,7 @@ public class RegistrationResponse {
    * alert definitions it needs to schedule.
    */
   @JsonProperty("alertDefinitionCommands")
-  private List<AlertDefinitionCommand> alertDefinitionCommands = new ArrayList<AlertDefinitionCommand>();
+  private List<AlertDefinitionCommand> alertDefinitionCommands = new ArrayList<>();
 
   /**
   * exitstatus is an error code raised on the server side.

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java

@@ -49,10 +49,10 @@ public class StatusCommand extends AgentCommand {
   private Map<String, Map<String, Map<String, String>>> configurationAttributes;
 
   @SerializedName("commandParams")
-  private Map<String, String> commandParams = new HashMap<String, String>();
+  private Map<String, String> commandParams = new HashMap<>();
 
   @SerializedName("hostLevelParams")
-  private Map<String, String> hostLevelParams = new HashMap<String, String>();
+  private Map<String, String> hostLevelParams = new HashMap<>();
 
   @SerializedName("hostname")
   private String hostname = null;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java

@@ -157,7 +157,7 @@ public class AmbariPerformanceRunnable extends AlertRunnable {
         SecurityContextHolder.getContext().setAuthentication(authenticationToken);
 
         // create the request
-        Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
+        Map<Resource.Type, String> mapIds = new HashMap<>();
         mapIds.put(Resource.Type.Cluster, cluster.getClusterName());
 
         ClusterController clusterController = ClusterControllerHelper.getClusterController();

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/alerts/StaleAlertRunnable.java

@@ -134,7 +134,7 @@ public class StaleAlertRunnable extends AlertRunnable {
     long uptime = rb.getUptime();
 
     int totalStaleAlerts = 0;
-    Set<String> staleAlertGroupings = new TreeSet<String>();
+    Set<String> staleAlertGroupings = new TreeSet<>();
     Map<String, Set<String>> staleAlertsByHost = new HashMap<>();
     Set<String> hostsWithStaleAlerts = new TreeSet<>();
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/AmbariCsrfProtectionFilter.java

@@ -38,7 +38,7 @@ public class AmbariCsrfProtectionFilter implements ContainerRequestFilter {
   private static final JsonSerializer JSON_SERIALIZER = new JsonSerializer();
 
   static {
-    HashSet<String> methods = new HashSet<String>();
+    HashSet<String> methods = new HashSet<>();
     methods.add("GET");
     methods.add("OPTIONS");
     methods.add("HEAD");
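
The static block above registers the HTTP methods exempt from CSRF protection one at a time. For reference, an equivalent single-expression idiom (a sketch only; the field name is illustrative and this is not the project's code) builds and freezes the set at class-initialization time:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class IgnoredMethodsSketch {
      // Same contents as the static block above, built in one expression and
      // wrapped so the set cannot be mutated after initialization.
      private static final Set<String> IGNORED_METHODS = Collections.unmodifiableSet(
          new HashSet<>(Arrays.asList("GET", "OPTIONS", "HEAD")));

      public static void main(String[] args) {
        System.out.println(IGNORED_METHODS.contains("GET"));   // true
        System.out.println(IGNORED_METHODS.contains("POST"));  // false
      }
    }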

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/AmbariErrorHandler.java

@@ -57,7 +57,7 @@ public class AmbariErrorHandler extends ErrorHandler {
 
     response.setContentType(MimeTypes.TEXT_PLAIN);
 
-    Map<String, Object> errorMap = new LinkedHashMap<String, Object>();
+    Map<String, Object> errorMap = new LinkedHashMap<>();
     int code = connection.getResponse().getStatus();
     errorMap.put("status", code);
     String message = connection.getResponse().getReason();

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/api/handlers/QueryCreateHandler.java

@@ -111,7 +111,7 @@ public class QueryCreateHandler extends BaseManagementHandler {
     Set<NamedPropertySet> setRequestProps = request.getBody().getNamedPropertySets();
 
     HashMap<Resource.Type, Set<Map<String, Object>>> mapProps =
-        new HashMap<Resource.Type, Set<Map<String, Object>>>();
+      new HashMap<>();
 
     ResourceInstance  resource            = request.getResource();
     Resource.Type     type                = resource.getResourceDefinition().getType();
@@ -128,13 +128,13 @@ public class QueryCreateHandler extends BaseManagementHandler {
         for (Map.Entry<String, Object> entry : namedProps.getProperties().entrySet()) {
           Set<Map<String, Object>> set = (Set<Map<String, Object>>) entry.getValue();
           for (Map<String, Object> map : set) {
-            Map<String, Object> mapResourceProps = new HashMap<String, Object>(map);
+            Map<String, Object> mapResourceProps = new HashMap<>(map);
             Resource.Type       createType       = getCreateType(resource, entry.getKey());
             mapResourceProps.put(controller.getSchema(createType).
                 getKeyPropertyId(resource.getResourceDefinition().getType()), keyVal);
             Set<Map<String, Object>> setCreateProps = mapProps.get(createType);
             if (setCreateProps == null) {
-              setCreateProps = new HashSet<Map<String, Object>>();
+              setCreateProps = new HashSet<>();
               mapProps.put(createType, setCreateProps);
             }
             setCreateProps.add(mapResourceProps);

+ 12 - 12
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java

@@ -64,12 +64,12 @@ public class QueryLexer {
    * Map of token type to list of valid handlers for next token.
    */
   private static final Map<Token.TYPE, List<TokenHandler>> TOKEN_HANDLERS =
-      new HashMap<Token.TYPE, List<TokenHandler>>();
+    new HashMap<>();
 
   /**
    * Static set of property names to ignore.
    */
-  private static final Set<String> SET_IGNORE = new HashSet<String>();
+  private static final Set<String> SET_IGNORE = new HashSet<>();
 
 
   /**
@@ -78,7 +78,7 @@ public class QueryLexer {
    */
   public QueryLexer() {
     //todo: refactor handler registration
-    List<TokenHandler> listHandlers = new ArrayList<TokenHandler>();
+    List<TokenHandler> listHandlers = new ArrayList<>();
     listHandlers.add(new LogicalUnaryOperatorTokenHandler());
     listHandlers.add(new OpenBracketTokenHandler());
     listHandlers.add(new PropertyOperandTokenHandler());
@@ -87,26 +87,26 @@ public class QueryLexer {
     TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_OPERATOR, listHandlers);
     TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_UNARY_OPERATOR, listHandlers);
 
-    listHandlers= new ArrayList<TokenHandler>();
+    listHandlers= new ArrayList<>();
     listHandlers.add(new RelationalOperatorTokenHandler());
     listHandlers.add(new RelationalOperatorFuncTokenHandler());
     TOKEN_HANDLERS.put(Token.TYPE.PROPERTY_OPERAND, listHandlers);
 
-    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers = new ArrayList<>();
     listHandlers.add(new ValueOperandTokenHandler());
     TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR, listHandlers);
 
-    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers = new ArrayList<>();
     listHandlers.add(new CloseBracketTokenHandler());
     listHandlers.add(new ComplexValueOperandTokenHandler());
     TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR_FUNC, listHandlers);
 
-    listHandlers = new ArrayList<TokenHandler>();
+    listHandlers = new ArrayList<>();
     listHandlers.add(new CloseBracketTokenHandler());
     listHandlers.add(new LogicalOperatorTokenHandler());
     TOKEN_HANDLERS.put(Token.TYPE.BRACKET_CLOSE, listHandlers);
 
-    listHandlers = new ArrayList<TokenHandler>(listHandlers);
+    listHandlers = new ArrayList<>(listHandlers);
     // complex value operands can span multiple tokens
     listHandlers.add(0, new ComplexValueOperandTokenHandler());
     TOKEN_HANDLERS.put(Token.TYPE.VALUE_OPERAND, listHandlers);
@@ -170,7 +170,7 @@ public class QueryLexer {
   private List<String> parseStringTokens(String exp) {
     Pattern      pattern       = generatePattern();
     Matcher      matcher       = pattern.matcher(exp);
-    List<String> listStrTokens = new ArrayList<String>();
+    List<String> listStrTokens = new ArrayList<>();
     int pos = 0;
 
     while (matcher.find()) { // while there's a delimiter in the string
@@ -238,7 +238,7 @@ public class QueryLexer {
     /**
      * List of tokens generated by the scan
      */
-    private List<Token> m_listTokens = new ArrayList<Token>();
+    private List<Token> m_listTokens = new ArrayList<>();
 
     /**
      * If non-null, ignore all tokens up to and including this token type.
@@ -248,7 +248,7 @@ public class QueryLexer {
     /**
      * Property names which are to be ignored.
      */
-    private Set<String> m_propertiesToIgnore = new HashSet<String>();
+    private Set<String> m_propertiesToIgnore = new HashSet<>();
 
     /**
      * Bracket score.  This score is the difference between the number of
@@ -264,7 +264,7 @@ public class QueryLexer {
      * tokens and then in subsequent invocations combine/alter/remove/etc
      * these tokens prior to adding them to the context tokens.
      */
-    private Deque<Token> m_intermediateTokens = new ArrayDeque<Token>();
+    private Deque<Token> m_intermediateTokens = new ArrayDeque<>();
 
 
     /**
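
One detail worth noting in the QueryLexer hunk above: `new ArrayList<>(listHandlers)` uses the diamond with the copy constructor, so the element type is still inferred from the assignment target while the new list starts as an independent copy of the old one. A small standalone sketch of that copy-then-prepend pattern (plain strings stand in for Ambari's TokenHandler types):

    import java.util.ArrayList;
    import java.util.List;

    public class CopyConstructorDemo {
      public static void main(String[] args) {
        List<String> handlers = new ArrayList<>();
        handlers.add("CloseBracket");
        handlers.add("LogicalOperator");

        // Diamond plus copy constructor: <String> is inferred from the
        // declared type of 'extended'; mutating it leaves 'handlers' intact.
        List<String> extended = new ArrayList<>(handlers);
        extended.add(0, "ComplexValueOperand");

        System.out.println(handlers);  // [CloseBracket, LogicalOperator]
        System.out.println(extended);  // [ComplexValueOperand, CloseBracket, LogicalOperator]
      }
    }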

+ 4 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java

@@ -52,7 +52,7 @@ public class QueryParser {
    * Map of token type to token handlers.
    */
   private static final Map<Token.TYPE, TokenHandler> TOKEN_HANDLERS =
-      new HashMap<Token.TYPE, TokenHandler>();
+    new HashMap<>();
 
   /**
    * Constructor.
@@ -152,7 +152,7 @@ public class QueryParser {
    */
   private List<Expression> mergeExpressions(List<Expression> listExpressions, int precedenceLevel) {
     if (listExpressions.size() > 1) {
-      Stack<Expression> stack = new Stack<Expression>();
+      Stack<Expression> stack = new Stack<>();
 
       stack.push(listExpressions.remove(0));
       while (! listExpressions.isEmpty()) {
@@ -161,7 +161,7 @@ public class QueryParser {
         Expression right = listExpressions.remove(0);
         stack.addAll(exp.merge(left, right, precedenceLevel));
       }
-      return mergeExpressions(new ArrayList<Expression>(stack), precedenceLevel - 1);
+      return mergeExpressions(new ArrayList<>(stack), precedenceLevel - 1);
     }
     return listExpressions;
   }
@@ -193,7 +193,7 @@ public class QueryParser {
     /**
      * The list of expressions which are generated from the tokens.
      */
-    private List<Expression> m_listExpressions = new ArrayList<Expression>();
+    private List<Expression> m_listExpressions = new ArrayList<>();
 
     /**
      * Highest precedence level in expression.

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java

@@ -92,7 +92,7 @@ public abstract class AbstractExpression<T> implements Expression<T> {
    * @return a list containing the un-merged left expression, this and right expression
    */
   protected List<Expression> defaultMerge(Expression left, Expression right) {
-    List<Expression> listExpressions = new ArrayList<Expression>();
+    List<Expression> listExpressions = new ArrayList<>();
     if (left != null) {
       listExpressions.add(left);
     }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java

@@ -43,7 +43,7 @@ public class NotLogicalExpression extends LogicalExpression {
   @Override
   public List<Expression> merge(Expression left, Expression right, int precedence) {
     if (getOperator().getPrecedence() == precedence && getRightOperand() == null) {
-      List<Expression> listExpressions = new ArrayList<Expression>();
+      List<Expression> listExpressions = new ArrayList<>();
       if (left != null) {
         listExpressions.add(left);
       }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java

@@ -40,7 +40,7 @@ public class EqualsOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new EqualsPredicate<String>(prop, val);
+    return new EqualsPredicate<>(prop, val);
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java

@@ -40,7 +40,7 @@ public class GreaterEqualsOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new GreaterEqualsPredicate<String>(prop, val);
+    return new GreaterEqualsPredicate<>(prop, val);
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java

@@ -40,7 +40,7 @@ public class GreaterOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new GreaterPredicate<String>(prop, val);
+    return new GreaterPredicate<>(prop, val);
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java

@@ -51,7 +51,7 @@ public class InOperator extends AbstractOperator implements RelationalOperator {
     }
 
     String[] tokens = val.split(",");
-    List<EqualsPredicate> listPredicates = new ArrayList<EqualsPredicate>();
+    List<EqualsPredicate> listPredicates = new ArrayList<>();
     for (String token : tokens) {
       listPredicates.add(new EqualsPredicate<>(prop, token.trim()));
     }
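
The InOperator hunk above splits a comma-separated value list and builds one EqualsPredicate per token. A minimal sketch of just the split-and-trim step, with plain strings standing in for Ambari's predicate types:

    import java.util.ArrayList;
    import java.util.List;

    public class InValueSplitter {
      // Mirrors the loop above: "a, b ,c" becomes ["a", "b", "c"].
      static List<String> splitValues(String val) {
        String[] tokens = val.split(",");
        List<String> values = new ArrayList<>();
        for (String token : tokens) {
          values.add(token.trim());
        }
        return values;
      }

      public static void main(String[] args) {
        System.out.println(splitValues("HDFS, YARN ,ZOOKEEPER"));
        // [HDFS, YARN, ZOOKEEPER]
      }
    }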

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java

@@ -40,7 +40,7 @@ public class LessEqualsOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new LessEqualsPredicate<String>(prop, val);
+    return new LessEqualsPredicate<>(prop, val);
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java

@@ -40,7 +40,7 @@ public class LessOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new LessPredicate<String>(prop, val);
+    return new LessPredicate<>(prop, val);
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java

@@ -41,7 +41,7 @@ public class NotEqualsOperator extends AbstractOperator implements RelationalOperator {
 
   @Override
   public Predicate toPredicate(String prop, String val) {
-    return new NotPredicate(new EqualsPredicate<String>(prop, val));
+    return new NotPredicate(new EqualsPredicate<>(prop, val));
   }
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/query/ExtendedResourcePredicateVisitor.java

@@ -72,7 +72,7 @@ public class ExtendedResourcePredicateVisitor implements PredicateVisitor {
 
   @Override
   public void acceptArrayPredicate(ArrayPredicate arrayPredicate) {
-    List<Predicate> predicateList = new LinkedList<Predicate>();
+    List<Predicate> predicateList = new LinkedList<>();
 
     Predicate[] predicates = arrayPredicate.getPredicates();
     if (predicates.length > 0) {

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java

@@ -79,7 +79,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
    * closed, the chain is completed and added to the prior chain's list.
    */
   private ArrayDeque<List<javax.persistence.criteria.Predicate>> m_queue =
-      new ArrayDeque<List<javax.persistence.criteria.Predicate>>();
+    new ArrayDeque<>();
 
   /**
    * Constructor.
@@ -225,7 +225,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
     }
 
     // create a new list for all of the predicates in this chain
-    List<javax.persistence.criteria.Predicate> predicateList = new ArrayList<javax.persistence.criteria.Predicate>();
+    List<javax.persistence.criteria.Predicate> predicateList = new ArrayList<>();
     m_queue.add(predicateList);
 
     // visit every child predicate so it can be added to the list

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java

@@ -74,7 +74,7 @@ public class JpaSortBuilder<T> {
 
     CriteriaBuilder builder = visitor.getCriteriaBuilder();
     List<SortRequestProperty> sortProperties = sortRequest.getProperties();
-    List<Order> sortOrders = new ArrayList<Order>(sortProperties.size());
+    List<Order> sortOrders = new ArrayList<>(sortProperties.size());
 
     for (SortRequestProperty sort : sortProperties) {
       String propertyId = sort.getPropertyId();

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/api/query/ProcessingPredicateVisitor.java

@@ -72,12 +72,12 @@ public class ProcessingPredicateVisitor implements PredicateVisitor {
   /**
    * The set of sub-resource categories.
    */
-  private final Set<String> subResourceCategories = new HashSet<String>();
+  private final Set<String> subResourceCategories = new HashSet<>();
 
   /**
    * The set of sub-resource properties.
    */
-  private final Set<String> subResourceProperties = new HashSet<String>();
+  private final Set<String> subResourceProperties = new HashSet<>();
 
 
   // ----- Constructors ----------------------------------------------------
@@ -115,7 +115,7 @@ public class ProcessingPredicateVisitor implements PredicateVisitor {
 
   @Override
   public void acceptArrayPredicate(ArrayPredicate arrayPredicate) {
-    List<Predicate> predicateList = new LinkedList<Predicate>();
+    List<Predicate> predicateList = new LinkedList<>();
 
     Predicate[] predicates = arrayPredicate.getPredicates();
     if (predicates.length > 0) {

+ 30 - 30
ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java

@@ -81,38 +81,38 @@ public class QueryImpl implements Query, ResourceInstance {
   /**
    * Properties of the query which make up the select portion of the query.
    */
-  private final Set<String> requestedProperties = new HashSet<String>();
+  private final Set<String> requestedProperties = new HashSet<>();
 
   /**
    * Map that associates categories with temporal data.
    */
-  private final Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
+  private final Map<String, TemporalInfo> temporalInfoMap = new HashMap<>();
 
   /**
    * Map of primary and foreign key values.
    */
-  private final Map<Resource.Type, String> keyValueMap = new HashMap<Resource.Type, String>();
+  private final Map<Resource.Type, String> keyValueMap = new HashMap<>();
 
   /**
    * Map of properties from the request.
    */
-  private final Map<String, String> requestInfoProperties = new HashMap<String, String>();
+  private final Map<String, String> requestInfoProperties = new HashMap<>();
 
   /**
    * Set of query results.
    */
-  Map<Resource, QueryResult> queryResults = new LinkedHashMap<Resource, QueryResult>();
+  Map<Resource, QueryResult> queryResults = new LinkedHashMap<>();
 
   /**
    * Set of populated query results
    */
-  Map<Resource, QueryResult> populatedQueryResults = new LinkedHashMap<Resource, QueryResult>();
+  Map<Resource, QueryResult> populatedQueryResults = new LinkedHashMap<>();
 
   /**
    * Sub-resources of the resource which is being operated on.
    * Should only be added via {@link #addSubResource(String, QueryImpl)}
    */
-  private final Map<String, QueryImpl> requestedSubResources = new HashMap<String, QueryImpl>();
+  private final Map<String, QueryImpl> requestedSubResources = new HashMap<>();
 
   /**
    * Sub-resource instances of this resource.
@@ -143,7 +143,7 @@ public class QueryImpl implements Query, ResourceInstance {
   /**
    * The sub resource properties referenced in the user predicate.
    */
-  private final Set<String> subResourcePredicateProperties = new HashSet<String>();
+  private final Set<String> subResourcePredicateProperties = new HashSet<>();
 
   /**
    * Associated renderer. The default renderer is used unless
@@ -276,7 +276,7 @@ public class QueryImpl implements Query, ResourceInstance {
 
   @Override
   public Map<Resource.Type, String> getKeyValueMap() {
-    return new HashMap<Resource.Type, String>((keyValueMap));
+    return new HashMap<>((keyValueMap));
   }
 
   @Override
@@ -341,7 +341,7 @@ public class QueryImpl implements Query, ResourceInstance {
    */
   protected Map<String, QueryImpl> ensureSubResources() {
     if (availableSubResources == null) {
-      availableSubResources = new HashMap<String, QueryImpl>();
+      availableSubResources = new HashMap<>();
       Set<SubResourceDefinition> setSubResourceDefs =
           getResourceDefinition().getSubResourceDefinitions();
 
@@ -392,8 +392,8 @@ public class QueryImpl implements Query, ResourceInstance {
     // use linked hash sets so that we maintain insertion and traversal order
     // in the event that the resource provider already gave us a sorted set
     // back
-    Set<Resource> resourceSet = new LinkedHashSet<Resource>();
-    Set<Resource> providerResourceSet = new LinkedHashSet<Resource>();
+    Set<Resource> resourceSet = new LinkedHashSet<>();
+    Set<Resource> providerResourceSet = new LinkedHashSet<>();
 
     QueryResponse queryResponse = doQuery(resourceType, request, queryPredicate, true);
 
@@ -444,7 +444,7 @@ public class QueryImpl implements Query, ResourceInstance {
           queryResponse.getTotalResourceCount());
       PageResponse pageResponse = clusterController.getPage(resourceType, newResponse, request, queryPredicate, pageRequest, sortRequest);
       // build a new set
-      Set<Resource> newResourceSet = new LinkedHashSet<Resource>();
+      Set<Resource> newResourceSet = new LinkedHashSet<>();
       for (Resource r : pageResponse.getIterable()) {
         newResourceSet.add(r);
       }
@@ -468,14 +468,14 @@ public class QueryImpl implements Query, ResourceInstance {
       QueryImpl     subResource         = entry.getValue();
       Resource.Type resourceType        = subResource.getResourceDefinition().getType();
       Request       request             = subResource.createRequest();
-      Set<Resource> providerResourceSet = new HashSet<Resource>();
+      Set<Resource> providerResourceSet = new HashSet<>();
 
       for (QueryResult queryResult : populatedQueryResults.values()) {
         for (Resource resource : queryResult.getQueryResponse().getResources()) {
           Map<Resource.Type, String> map = getKeyValueMap(resource, queryResult.getKeyValueMap());
 
           Predicate     queryPredicate = subResource.createPredicate(map, subResource.processedPredicate);
-          Set<Resource> resourceSet    = new LinkedHashSet<Resource>();
+          Set<Resource> resourceSet    = new LinkedHashSet<>();
 
           try {
             Set<Resource> queryResources =
@@ -611,7 +611,7 @@ public class QueryImpl implements Query, ResourceInstance {
       throws SystemException, UnsupportedPropertyException, NoSuchParentResourceException, NoSuchResourceException {
 
     Map<Resource, Set<Map<String, Object>>> resourcePropertyMaps =
-        new HashMap<Resource, Set<Map<String, Object>>>();
+      new HashMap<>();
 
     Map<String, String> categoryPropertyIdMap =
         getPropertyIdsForCategory(propertyIds, category);
@@ -629,7 +629,7 @@ public class QueryImpl implements Query, ResourceInstance {
 
         for (Resource resource : iterResource) {
           // get the resource properties
-          Map<String, Object> resourcePropertyMap = new HashMap<String, Object>();
+          Map<String, Object> resourcePropertyMap = new HashMap<>();
           for (Map.Entry<String, String> categoryPropertyIdEntry : categoryPropertyIdMap.entrySet()) {
             Object value = resource.getPropertyValue(categoryPropertyIdEntry.getValue());
             if (value != null) {
@@ -637,7 +637,7 @@ public class QueryImpl implements Query, ResourceInstance {
             }
           }
 
-          Set<Map<String, Object>> propertyMaps = new HashSet<Map<String, Object>>();
+          Set<Map<String, Object>> propertyMaps = new HashSet<>();
 
           // For each sub category get the property maps for the sub resources
           for (Map.Entry<String, QueryImpl> entry : requestedSubResources.entrySet()) {
@@ -648,7 +648,7 @@ public class QueryImpl implements Query, ResourceInstance {
             Map<Resource, Set<Map<String, Object>>> subResourcePropertyMaps =
                 subResource.getJoinedResourceProperties(propertyIds, resource, subResourceCategory);
 
-            Set<Map<String, Object>> combinedSubResourcePropertyMaps = new HashSet<Map<String, Object>>();
+            Set<Map<String, Object>> combinedSubResourcePropertyMaps = new HashSet<>();
             for (Set<Map<String, Object>> maps : subResourcePropertyMaps.values()) {
               combinedSubResourcePropertyMaps.addAll(maps);
             }
@@ -678,8 +678,8 @@ public class QueryImpl implements Query, ResourceInstance {
     ResourceDefinition rootDefinition = resourceDefinition;
 
     QueryInfo rootQueryInfo = new QueryInfo(rootDefinition, requestedProperties);
-    TreeNode<QueryInfo> rootNode = new TreeNodeImpl<QueryInfo>(
-        null, rootQueryInfo, rootDefinition.getType().name());
+    TreeNode<QueryInfo> rootNode = new TreeNodeImpl<>(
+      null, rootQueryInfo, rootDefinition.getType().name());
 
     TreeNode<QueryInfo> requestedPropertyTree = buildQueryPropertyTree(this, rootNode);
 
@@ -745,7 +745,7 @@ public class QueryImpl implements Query, ResourceInstance {
   // Map the given set of property ids to corresponding property ids in the
   // given sub-resource category.
   private Map<String, String> getPropertyIdsForCategory(Set<String> propertyIds, String category) {
-    Map<String, String> map = new HashMap<String, String>();
+    Map<String, String> map = new HashMap<>();
 
     for (String propertyId : propertyIds) {
       if (category == null || propertyId.startsWith(category)) {
@@ -758,7 +758,7 @@ public class QueryImpl implements Query, ResourceInstance {
   // Join two sets of property maps into one.
   private static Set<Map<String, Object>> joinPropertyMaps(Set<Map<String, Object>> propertyMaps1,
                                                            Set<Map<String, Object>> propertyMaps2) {
-    Set<Map<String, Object>> propertyMaps = new HashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertyMaps = new HashSet<>();
 
     if (propertyMaps1.isEmpty()) {
       return propertyMaps2;
@@ -769,7 +769,7 @@ public class QueryImpl implements Query, ResourceInstance {
 
     for (Map<String, Object> map1 : propertyMaps1) {
       for (Map<String, Object> map2 : propertyMaps2) {
-        Map<String, Object> joinedMap = new HashMap<String, Object>(map1);
+        Map<String, Object> joinedMap = new HashMap<>(map1);
         joinedMap.putAll(map2);
         propertyMaps.add(joinedMap);
       }
@@ -902,12 +902,12 @@ public class QueryImpl implements Query, ResourceInstance {
     Resource.Type resourceType = getResourceDefinition().getType();
     Schema schema = clusterController.getSchema(resourceType);
 
-    Set<Predicate> setPredicates = new HashSet<Predicate>();
+    Set<Predicate> setPredicates = new HashSet<>();
     for (Map.Entry<Resource.Type, String> entry : mapResourceIds.entrySet()) {
       if (entry.getValue() != null) {
         String keyPropertyId = schema.getKeyPropertyId(entry.getKey());
         if (keyPropertyId != null) {
-          setPredicates.add(new EqualsPredicate<String>(keyPropertyId, entry.getValue()));
+          setPredicates.add(new EqualsPredicate<>(keyPropertyId, entry.getValue()));
         }
       }
     }
@@ -985,7 +985,7 @@ public class QueryImpl implements Query, ResourceInstance {
 
   private Request createRequest() {
     // Initiate this request's requestInfoProperties with the ones set from the original request
-    Map<String, String> requestInfoProperties = new HashMap<String, String>(this.requestInfoProperties);
+    Map<String, String> requestInfoProperties = new HashMap<>(this.requestInfoProperties);
 
     if (pageRequest != null) {
       requestInfoProperties.put(BaseRequest.PAGE_SIZE_PROPERTY_KEY,
@@ -1002,10 +1002,10 @@ public class QueryImpl implements Query, ResourceInstance {
           requestInfoProperties, null, pageRequest, sortRequest);
     }
 
-    Map<String, TemporalInfo> mapTemporalInfo    = new HashMap<String, TemporalInfo>();
+    Map<String, TemporalInfo> mapTemporalInfo    = new HashMap<>();
     TemporalInfo              globalTemporalInfo = temporalInfoMap.get(null);
 
-    Set<String> setProperties = new HashSet<String>();
+    Set<String> setProperties = new HashSet<>();
     setProperties.addAll(requestedProperties);
     for (String propertyId : setProperties) {
       TemporalInfo temporalInfo = temporalInfoMap.get(propertyId);
@@ -1024,7 +1024,7 @@ public class QueryImpl implements Query, ResourceInstance {
   // Get a key value map based on the given resource and an existing key value map
   private Map<Resource.Type, String> getKeyValueMap(Resource resource,
                                                     Map<Resource.Type, String> keyValueMap) {
-    Map<Resource.Type, String> resourceKeyValueMap = new HashMap<Resource.Type, String>(keyValueMap.size());
+    Map<Resource.Type, String> resourceKeyValueMap = new HashMap<>(keyValueMap.size());
     for (Map.Entry<Resource.Type, String> resourceIdEntry : keyValueMap.entrySet()) {
       Resource.Type type = resourceIdEntry.getKey();
       String value = resourceIdEntry.getValue();
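
A caveat that applies to diamond refactors like the QueryImpl changes above: on Java 7 and 8 the diamond is rejected when the instantiation is an anonymous inner class; that combination only became legal in Java 9. A short illustrative sketch (not from this codebase):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class DiamondCaveat {
      public static void main(String[] args) {
        // Fine on Java 7+: concrete class, diamond infers <String>.
        List<String> names = new ArrayList<>();

        // Anonymous class: the explicit <String> stays required on Java 7/8;
        // 'new Comparator<>() { ... }' compiles only on Java 9 and later.
        Comparator<String> byLength = new Comparator<String>() {
          @Override
          public int compare(String a, String b) {
            return Integer.compare(a.length(), b.length());
          }
        };

        names.add("ambari");
        names.add("api");
        names.sort(byLength);
        System.out.println(names);  // [api, ambari]
      }
    }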

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryInfo.java

@@ -48,7 +48,7 @@ public class QueryInfo {
    */
   public QueryInfo(ResourceDefinition resource, Set<String> properties) {
     m_resource   = resource;
-    m_properties = new HashSet<String>(properties);
+    m_properties = new HashSet<>(properties);
   }
 
   // ----- QueryInfo ---------------------------------------------------------

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/query/SubResourcePredicateVisitor.java

@@ -87,7 +87,7 @@ public class SubResourcePredicateVisitor implements PredicateVisitor {
 
   @Override
   public void acceptArrayPredicate(ArrayPredicate arrayPredicate) {
-    List<Predicate> predicateList = new LinkedList<Predicate>();
+    List<Predicate> predicateList = new LinkedList<>();
 
     Predicate[] predicates = arrayPredicate.getPredicates();
     if (predicates.length > 0) {

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/AlertSummaryGroupedRenderer.java

@@ -125,7 +125,7 @@ public class AlertSummaryGroupedRenderer extends AlertSummaryRenderer {
   @Override
   public Result finalizeResult(Result queryResult) {
     TreeNode<Resource> resultTree = queryResult.getResultTree();
-    Map<String, AlertDefinitionSummary> summaries = new HashMap<String, AlertDefinitionSummary>();
+    Map<String, AlertDefinitionSummary> summaries = new HashMap<>();
 
     // iterate over all returned flattened alerts and build the summary info
     for (TreeNode<Resource> node : resultTree.getChildren()) {
@@ -204,8 +204,8 @@ public class AlertSummaryGroupedRenderer extends AlertSummaryRenderer {
     }
 
     Set<Entry<String, AlertDefinitionSummary>> entrySet = summaries.entrySet();
-    List<AlertDefinitionSummary> groupedResources = new ArrayList<AlertDefinitionSummary>(
-        entrySet.size());
+    List<AlertDefinitionSummary> groupedResources = new ArrayList<>(
+      entrySet.size());
 
     // iterate over all summary groups, adding them to the final list
     for (Entry<String, AlertDefinitionSummary> entry : entrySet) {

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/AlertSummaryRenderer.java

@@ -87,8 +87,8 @@ public class AlertSummaryRenderer extends BaseRenderer implements Renderer {
       TreeNode<QueryInfo> queryTree, boolean isCollection) {
 
     QueryInfo queryInfo = queryTree.getObject();
-    TreeNode<Set<String>> resultTree = new TreeNodeImpl<Set<String>>(
-        null, queryInfo.getProperties(), queryTree.getName());
+    TreeNode<Set<String>> resultTree = new TreeNodeImpl<>(
+      null, queryInfo.getProperties(), queryTree.getName());
 
     copyPropertiesToResult(queryTree, resultTree);
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/BaseRenderer.java

@@ -131,7 +131,7 @@ public abstract class BaseRenderer implements Renderer {
     ResourceDefinition resource = queryInfo.getResource();
     Set<SubResourceDefinition> subResources = resource.getSubResourceDefinitions();
     for (SubResourceDefinition subResource : subResources) {
-      Set<String> resourceProperties = new HashSet<String>();
+      Set<String> resourceProperties = new HashSet<>();
       populateSubResourceDefaults(subResource, resourceProperties);
       propertyTree.addChild(resourceProperties, subResource.getType().name());
     }

+ 18 - 18
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java

@@ -94,9 +94,9 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
   public TreeNode<Set<String>> finalizeProperties(
       TreeNode<QueryInfo> queryProperties, boolean isCollection) {
 
-    Set<String> properties = new HashSet<String>(queryProperties.getObject().getProperties());
-    TreeNode<Set<String>> resultTree = new TreeNodeImpl<Set<String>>(
-        null, properties, queryProperties.getName());
+    Set<String> properties = new HashSet<>(queryProperties.getObject().getProperties());
+    TreeNode<Set<String>> resultTree = new TreeNodeImpl<>(
+      null, properties, queryProperties.getName());
 
     copyPropertiesToResult(queryProperties, resultTree);
 
@@ -153,8 +153,8 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
 
     for (TreeNode<Resource> node : resultTree.getChildren()) {
       Resource blueprintResource = createBlueprintResource(node);
-      blueprintResultTree.addChild(new TreeNodeImpl<Resource>(
-          blueprintResultTree, blueprintResource, node.getName()));
+      blueprintResultTree.addChild(new TreeNodeImpl<>(
+        blueprintResultTree, blueprintResource, node.getName()));
     }
     return result;
   }
@@ -268,11 +268,11 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
     LOG.info("ClusterBlueprintRenderer: getSettings()");
 
     //Initialize collections to create appropriate json structure
-    Collection<Map<String, Object>> blueprintSetting = new ArrayList<Map<String, Object>>();
+    Collection<Map<String, Object>> blueprintSetting = new ArrayList<>();
 
-    Set<Map<String, String>> recoverySettingValue = new HashSet<Map<String, String>>();
-    Set<Map<String, String>> serviceSettingValue = new HashSet<Map<String, String>>();
-    Set<Map<String, String>> componentSettingValue = new HashSet<Map<String, String>>();
+    Set<Map<String, String>> recoverySettingValue = new HashSet<>();
+    Set<Map<String, String>> serviceSettingValue = new HashSet<>();
+    Set<Map<String, String>> componentSettingValue = new HashSet<>();
 
     HashMap<String, String> property = new HashMap<>();
     HashMap<String, String> componentProperty = new HashMap<>();
@@ -366,7 +366,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
       if (propertyMap != null) {
         Map<String, Object> artifactData = propertyMap.get(ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY);
         Map<String, Object> artifactDataProperties = propertyMap.get(ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY + "/properties");
-        HashMap<String, Object> data = new HashMap<String, Object>();
+        HashMap<String, Object> data = new HashMap<>();
 
         if (artifactData != null) {
           data.putAll(artifactData);
@@ -389,14 +389,14 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    */
   private List<Map<String, Map<String, Map<String, ?>>>>  processConfigurations(ClusterTopology topology) {
 
-    List<Map<String, Map<String, Map<String, ?>>>> configList = new ArrayList<Map<String, Map<String, Map<String, ?>>>>();
+    List<Map<String, Map<String, Map<String, ?>>>> configList = new ArrayList<>();
 
     Configuration configuration = topology.getConfiguration();
-    Collection<String> allTypes = new HashSet<String>();
+    Collection<String> allTypes = new HashSet<>();
     allTypes.addAll(configuration.getFullProperties().keySet());
     allTypes.addAll(configuration.getFullAttributes().keySet());
     for (String type : allTypes) {
-      Map<String, Map<String, ?>> typeMap = new HashMap<String, Map<String, ?>>();
+      Map<String, Map<String, ?>> typeMap = new HashMap<>();
       typeMap.put("properties", configuration.getFullProperties().get(type));
       if (! configuration.getFullAttributes().isEmpty()) {
         typeMap.put("properties_attributes", configuration.getFullAttributes().get(type));
@@ -415,9 +415,9 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    * @return list of host group property maps, one element for each host group
    */
   private List<Map<String, Object>> formatGroupsAsList(ClusterTopology topology) {
-    List<Map<String, Object>> listHostGroups = new ArrayList<Map<String, Object>>();
+    List<Map<String, Object>> listHostGroups = new ArrayList<>();
     for (HostGroupInfo group : topology.getHostGroupInfo().values()) {
-      Map<String, Object> mapGroupProperties = new HashMap<String, Object>();
+      Map<String, Object> mapGroupProperties = new HashMap<>();
       listHostGroups.add(mapGroupProperties);
 
       String name = group.getHostGroupName();
@@ -426,7 +426,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
       mapGroupProperties.put("components", processHostGroupComponents(topology.getBlueprint().getHostGroup(name)));
 
       Configuration configuration = topology.getHostGroupInfo().get(name).getConfiguration();
-      List<Map<String, Map<String, String>>> configList = new ArrayList<Map<String, Map<String, String>>>();
+      List<Map<String, Map<String, String>>> configList = new ArrayList<>();
       for (Map.Entry<String, Map<String, String>> typeEntry : configuration.getProperties().entrySet()) {
         Map<String, Map<String, String>> propertyMap = Collections.singletonMap(
             typeEntry.getKey(), typeEntry.getValue());
@@ -447,9 +447,9 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    * @return list of component names for the host
    */
   private List<Map<String, String>> processHostGroupComponents(HostGroup group) {
-    List<Map<String, String>> listHostGroupComponents = new ArrayList<Map<String, String>>();
+    List<Map<String, String>> listHostGroupComponents = new ArrayList<>();
     for (Component component : group.getComponents()) {
-      Map<String, String> mapComponentProperties = new HashMap<String, String>();
+      Map<String, String> mapComponentProperties = new HashMap<>();
       listHostGroupComponents.add(mapComponentProperties);
       mapComponentProperties.put("name", component.getName());
     }

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/DefaultRenderer.java

@@ -41,8 +41,8 @@ public class DefaultRenderer extends BaseRenderer implements Renderer {
       TreeNode<QueryInfo> queryTree, boolean isCollection) {
 
     QueryInfo queryInfo = queryTree.getObject();
-    TreeNode<Set<String>> resultTree = new TreeNodeImpl<Set<String>>(
-        null, queryInfo.getProperties(), queryTree.getName());
+    TreeNode<Set<String>> resultTree = new TreeNodeImpl<>(
+      null, queryInfo.getProperties(), queryTree.getName());
 
     copyPropertiesToResult(queryTree, resultTree);
 

+ 5 - 5
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/MinimalRenderer.java

@@ -69,7 +69,7 @@ public class MinimalRenderer extends BaseRenderer implements Renderer {
    * Map of requested properties.
    */
   private Map<Resource.Type, Set<String>> m_originalProperties =
-      new HashMap<Resource.Type, Set<String>>();
+    new HashMap<>();
 
   // ----- Renderer ----------------------------------------------------------
 
@@ -78,8 +78,8 @@ public class MinimalRenderer extends BaseRenderer implements Renderer {
       TreeNode<QueryInfo> queryTree, boolean isCollection) {
 
     QueryInfo queryInfo = queryTree.getObject();
-    TreeNode<Set<String>> resultTree = new TreeNodeImpl<Set<String>>(
-        null, queryInfo.getProperties(), queryTree.getName());
+    TreeNode<Set<String>> resultTree = new TreeNodeImpl<>(
+      null, queryInfo.getProperties(), queryTree.getName());
 
     copyPropertiesToResult(queryTree, resultTree);
 
@@ -123,7 +123,7 @@ public class MinimalRenderer extends BaseRenderer implements Renderer {
       Resource.Type type = queryInfo.getResource().getType();
       Set<String> properties = m_originalProperties.get(type);
       if (properties == null) {
-        properties = new HashSet<String>();
+        properties = new HashSet<>();
         m_originalProperties.put(type, properties);
       }
       properties.addAll(queryInfo.getProperties());
@@ -203,7 +203,7 @@ public class MinimalRenderer extends BaseRenderer implements Renderer {
    * @return set of pk's for a type
    */
   private Set<String> getPrimaryKeys(Resource.Type type) {
-    Set<String> primaryKeys = new HashSet<String>();
+    Set<String> primaryKeys = new HashSet<>();
 
     if (type == Resource.Type.Configuration) {
       primaryKeys.add("type");

+ 4 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java

@@ -54,12 +54,12 @@ public abstract class BaseResourceDefinition implements ResourceDefinition {
   /**
    * The sub-resource type definitions.
    */
-  private final Set<SubResourceDefinition> subResourceDefinitions = new HashSet<SubResourceDefinition>();
+  private final Set<SubResourceDefinition> subResourceDefinitions = new HashSet<>();
 
   /**
    * A map of directives for the different request types, each entry is expected to be modifiable by sub resources.
    */
-  private final Map<DirectiveType, Collection<String>> directives = new HashMap<DirectiveType, Collection<String>>();
+  private final Map<DirectiveType, Collection<String>> directives = new HashMap<>();
 
   /**
    * Constructor.
@@ -116,7 +116,7 @@ public abstract class BaseResourceDefinition implements ResourceDefinition {
 
   @Override
   public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = new ArrayList<PostProcessor>();
+    List<PostProcessor> listProcessors = new ArrayList<>();
     listProcessors.add(new BaseHrefPostProcessor());
     return listProcessors;
   }
@@ -242,7 +242,7 @@ public abstract class BaseResourceDefinition implements ResourceDefinition {
    * @param directives the map of directives from which to copy
    */
   private void initializeDirectives(DirectiveType type, Map<DirectiveType, ? extends Collection<String>> directives) {
-    HashSet<String> requestDirectives = new HashSet<String>();
+    HashSet<String> requestDirectives = new HashSet<>();
 
     if ((directives != null) && directives.get(type) != null) {
       requestDirectives.addAll(directives.get(type));

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java

@@ -60,7 +60,7 @@ public class ClusterResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.Service));
     setChildren.add(new SubResourceDefinition(Resource.Type.Host));
     setChildren.add(new SubResourceDefinition(Resource.Type.Configuration));

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ComponentStackVersionResourceDefinition.java

@@ -42,7 +42,7 @@ public class ComponentStackVersionResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    final Set<SubResourceDefinition> subResourceDefintions = new HashSet<SubResourceDefinition>();
+    final Set<SubResourceDefinition> subResourceDefintions = new HashSet<>();
     subResourceDefintions.add(new SubResourceDefinition(Resource.Type.RepositoryVersion));
     return subResourceDefintions;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionLinkResourceDefinition.java

@@ -51,7 +51,7 @@ public class ExtensionLinkResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     return setChildren;
   }
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionResourceDefinition.java

@@ -51,7 +51,7 @@ public class ExtensionResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.ExtensionVersion));
     return setChildren;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionVersionResourceDefinition.java

@@ -47,7 +47,7 @@ public class ExtensionVersionResourceDefinition extends BaseResourceDefinition {
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
 
-    Set<SubResourceDefinition> children = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> children = new HashSet<>();
 
     return children;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/FeedResourceDefinition.java

@@ -47,7 +47,7 @@ public class FeedResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.DRInstance));
 
     return setChildren;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/GroupResourceDefinition.java

@@ -42,7 +42,7 @@ public class GroupResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    final Set<SubResourceDefinition> subResourceDefinitions = new HashSet<SubResourceDefinition>();
+    final Set<SubResourceDefinition> subResourceDefinitions = new HashSet<>();
     subResourceDefinitions.add(new SubResourceDefinition(Resource.Type.Member));
     subResourceDefinitions.add(new SubResourceDefinition(Resource.Type.GroupPrivilege));
     return subResourceDefinitions;

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostComponentResourceDefinition.java

@@ -56,7 +56,7 @@ public class HostComponentResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setSubResources = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setSubResources = new HashSet<>();
 
     setSubResources.add(new SubResourceDefinition(Resource.Type.Component,
         Collections.singleton(Resource.Type.Service), false));
@@ -68,7 +68,7 @@ public class HostComponentResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = new ArrayList<PostProcessor>();
+    List<PostProcessor> listProcessors = new ArrayList<>();
     listProcessors.add(new HostComponentHrefProcessor());
     listProcessors.add(new HostComponentHostProcessor());
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/JobResourceDefinition.java

@@ -47,7 +47,7 @@ public class JobResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.TaskAttempt));
     return setChildren;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/PermissionResourceDefinition.java

@@ -53,7 +53,7 @@ public class PermissionResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> subResourceDefinitions = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> subResourceDefinitions = new HashSet<>();
     subResourceDefinitions.add(new SubResourceDefinition(Resource.Type.RoleAuthorization));
     return subResourceDefinitions;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java

@@ -49,7 +49,7 @@ public class ResourceInstanceFactoryImpl implements ResourceInstanceFactory {
    * Map of external resource definitions (added through views).
    */
   private final static Map<Resource.Type, ResourceDefinition> resourceDefinitions =
-      new HashMap<Resource.Type, ResourceDefinition>();
+    new HashMap<>();
 
 
   @Override

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/RootServiceHostComponentResourceDefinition.java

@@ -51,7 +51,7 @@ public class RootServiceHostComponentResourceDefinition extends
   
   @Override
   public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = new ArrayList<PostProcessor>();
+    List<PostProcessor> listProcessors = new ArrayList<>();
     listProcessors.add(new RootServiceHostComponentHrefProcessor());
 
     return listProcessors;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/RootServiceResourceDefinition.java

@@ -47,7 +47,7 @@ public class RootServiceResourceDefinition extends BaseResourceDefinition {
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
     
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.RootServiceComponent));
 
     return setChildren;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ServiceResourceDefinition.java

@@ -48,7 +48,7 @@ public class ServiceResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> subs = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> subs = new HashSet<>();
     subs.add(new SubResourceDefinition(Resource.Type.Component));
     subs.add(new SubResourceDefinition(Resource.Type.Alert));
     //todo: dynamic sub-resource definition

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackConfigurationResourceDefinition.java

@@ -47,7 +47,7 @@ public class StackConfigurationResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> subs = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> subs = new HashSet<>();
     subs.add(new SubResourceDefinition(Resource.Type.StackConfigurationDependency));
 
     return subs;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackLevelConfigurationResourceDefinition.java

@@ -46,7 +46,7 @@ public class StackLevelConfigurationResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> subs = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> subs = new HashSet<>();
     subs.add(new SubResourceDefinition(Resource.Type.StackConfigurationDependency));
 
     return subs;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackResourceDefinition.java

@@ -46,7 +46,7 @@ public class StackResourceDefinition extends BaseResourceDefinition {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.StackVersion));
     return setChildren;
   }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackServiceResourceDefinition.java

@@ -47,7 +47,7 @@ public class StackServiceResourceDefinition extends BaseResourceDefinition {
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
 
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.StackConfiguration));
     setChildren.add(new SubResourceDefinition(Resource.Type.StackServiceComponent));
     setChildren.add(new SubResourceDefinition(Type.StackArtifact));

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinition.java

@@ -42,7 +42,7 @@ public class StackVersionResourceDefinition extends BaseResourceDefinition {
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
 
-    Set<SubResourceDefinition> children = new HashSet<SubResourceDefinition>();
+    Set<SubResourceDefinition> children = new HashSet<>();
 
     children.add(new SubResourceDefinition(Resource.Type.OperatingSystem));
     children.add(new SubResourceDefinition(Resource.Type.StackService));

Some files were not shown because too many files changed in this diff