浏览代码

AMBARI-5934. Provide ability to rebalance HDFS.

Mahadev Konar 11 年之前
父节点
当前提交
cb662f494f
共有 49 个文件被更改,包括 2630 次插入和 276 次删除
  1. 1 0
      .gitignore
  2. 68 16
      ambari-agent/src/main/python/ambari_agent/ActionQueue.py
  3. 44 0
      ambari-agent/src/main/python/ambari_agent/BackgroundCommandExecutionHandle.py
  4. 21 2
      ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py
  5. 46 13
      ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
  6. 91 37
      ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
  7. 212 3
      ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
  8. 34 0
      ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
  9. 2 0
      ambari-server/pom.xml
  10. 45 21
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
  11. 1 0
      ambari-server/src/main/java/org/apache/ambari/server/agent/AgentCommand.java
  12. 1 0
      ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
  13. 5 18
      ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
  14. 1 13
      ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
  15. 13 1
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
  16. 8 0
      ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
  17. 1 10
      ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
  18. 6 1
      ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
  19. 13 1
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
  20. 10 10
      ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml
  21. 0 59
      ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
  22. 41 0
      ambari-server/src/main/resources/custom_actions/cancel_background_task.py
  23. 8 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
  24. 1032 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
  25. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
  26. 45 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
  27. 130 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
  28. 52 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
  29. 1 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
  30. 83 0
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
  31. 8 3
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  32. 2 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  33. 275 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
  34. 3 15
      ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java
  35. 7 0
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  36. 76 0
      ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
  37. 0 10
      ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml
  38. 0 32
      ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml
  39. 9 0
      ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml
  40. 6 0
      ambari-web/app/controllers/global/background_operations_controller.js
  41. 69 6
      ambari-web/app/controllers/main/service/item.js
  42. 8 0
      ambari-web/app/messages.js
  43. 18 1
      ambari-web/app/templates/common/host_progress_popup.hbs
  44. 4 1
      ambari-web/app/templates/common/prompt_popup.hbs
  45. 50 0
      ambari-web/app/utils/ajax/ajax.js
  46. 3 0
      ambari-web/app/utils/helper.js
  47. 36 1
      ambari-web/app/utils/host_progress_popup.js
  48. 2 0
      ambari-web/app/views/common/modal_popup.js
  49. 10 1
      ambari-web/app/views/main/service/item.js

+ 1 - 0
.gitignore

@@ -20,3 +20,4 @@ derby.log
 pass.txt
 pass.txt
 ambari-agent/src/test/python/ambari_agent/dummy_files/current-stack
 ambari-agent/src/test/python/ambari_agent/dummy_files/current-stack
 velocity.log*
 velocity.log*
+*.pydevproject

+ 68 - 16
ambari-agent/src/main/python/ambari_agent/ActionQueue.py

@@ -26,11 +26,13 @@ import pprint
 import os
 import os
 import json
 import json
 
 
+from AgentException import AgentException
 from LiveStatus import LiveStatus
 from LiveStatus import LiveStatus
 from shell import shellRunner
 from shell import shellRunner
 from ActualConfigHandler import ActualConfigHandler
 from ActualConfigHandler import ActualConfigHandler
 from CommandStatusDict import CommandStatusDict
 from CommandStatusDict import CommandStatusDict
 from CustomServiceOrchestrator import CustomServiceOrchestrator
 from CustomServiceOrchestrator import CustomServiceOrchestrator
+from ambari_agent.BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
 
 
 
 
 logger = logging.getLogger()
 logger = logging.getLogger()
@@ -52,9 +54,12 @@ class ActionQueue(threading.Thread):
 
 
   STATUS_COMMAND = 'STATUS_COMMAND'
   STATUS_COMMAND = 'STATUS_COMMAND'
   EXECUTION_COMMAND = 'EXECUTION_COMMAND'
   EXECUTION_COMMAND = 'EXECUTION_COMMAND'
+  BACKGROUND_EXECUTION_COMMAND = 'BACKGROUND_EXECUTION_COMMAND'
+  CANCEL_BACKGROUND_EXECUTION_COMMAND = 'CANCEL_BACKGROUND_EXECUTION_COMMAND'
   ROLE_COMMAND_INSTALL = 'INSTALL'
   ROLE_COMMAND_INSTALL = 'INSTALL'
   ROLE_COMMAND_START = 'START'
   ROLE_COMMAND_START = 'START'
   ROLE_COMMAND_STOP = 'STOP'
   ROLE_COMMAND_STOP = 'STOP'
+  ROLE_COMMAND_CANCEL = 'CANCEL'
   ROLE_COMMAND_CUSTOM_COMMAND = 'CUSTOM_COMMAND'
   ROLE_COMMAND_CUSTOM_COMMAND = 'CUSTOM_COMMAND'
   CUSTOM_COMMAND_RESTART = 'RESTART'
   CUSTOM_COMMAND_RESTART = 'RESTART'
 
 
@@ -66,6 +71,7 @@ class ActionQueue(threading.Thread):
     super(ActionQueue, self).__init__()
     super(ActionQueue, self).__init__()
     self.commandQueue = Queue.Queue()
     self.commandQueue = Queue.Queue()
     self.statusCommandQueue = Queue.Queue()
     self.statusCommandQueue = Queue.Queue()
+    self.backgroundCommandQueue = Queue.Queue()
     self.commandStatuses = CommandStatusDict(callback_action =
     self.commandStatuses = CommandStatusDict(callback_action =
       self.status_update_callback)
       self.status_update_callback)
     self.config = config
     self.config = config
@@ -74,8 +80,7 @@ class ActionQueue(threading.Thread):
     self.configTags = {}
     self.configTags = {}
     self._stop = threading.Event()
     self._stop = threading.Event()
     self.tmpdir = config.get('agent', 'prefix')
     self.tmpdir = config.get('agent', 'prefix')
-    self.customServiceOrchestrator = CustomServiceOrchestrator(config,
-                                                               controller)
+    self.customServiceOrchestrator = CustomServiceOrchestrator(config, controller, self.commandStatuses)
 
 
 
 
   def stop(self):
   def stop(self):
@@ -106,7 +111,10 @@ class ActionQueue(threading.Thread):
                   command['serviceName'] + " of cluster " + \
                   command['serviceName'] + " of cluster " + \
                   command['clusterName'] + " to the queue.")
                   command['clusterName'] + " to the queue.")
       logger.debug(pprint.pformat(command))
       logger.debug(pprint.pformat(command))
-      self.commandQueue.put(command)
+      if command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND :
+        self.backgroundCommandQueue.put(self.createCommandHandle(command))
+      else:
+        self.commandQueue.put(command)
 
 
   def cancel(self, commands):
   def cancel(self, commands):
     for command in commands:
     for command in commands:
@@ -136,25 +144,45 @@ class ActionQueue(threading.Thread):
 
 
   def run(self):
   def run(self):
     while not self.stopped():
     while not self.stopped():
-      while  not self.statusCommandQueue.empty():
-        try:
-          command = self.statusCommandQueue.get(False)
-          self.process_command(command)
-        except (Queue.Empty):
-          pass
+      self.processBackgroundQueueSafeEmpty();
+      self.processStatusCommandQueueSafeEmpty();
       try:
       try:
         command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
         command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
         self.process_command(command)
         self.process_command(command)
       except (Queue.Empty):
       except (Queue.Empty):
         pass
         pass
def processBackgroundQueueSafeEmpty(self):
  """Drain the background-command queue without blocking.

  Pops every queued background command and runs it via process_command(),
  but only when the command carries a '__handle' whose status is still
  unset (i.e. the command has not been started yet).  The Queue.Empty
  race between the empty() check and get() is swallowed.
  """
  while not self.backgroundCommandQueue.empty():
    try:
      command = self.backgroundCommandQueue.get(False)
      # 'in' instead of the deprecated dict.has_key(); 'is None' instead
      # of '== None'.  Only start commands whose handle was never run.
      if '__handle' in command and command['__handle'].status is None:
        self.process_command(command)
    except Queue.Empty:
      pass
+  
def processStatusCommandQueueSafeEmpty(self):
  """Execute every status command currently waiting on the queue.

  Non-blocking drain: loops until the queue reports empty, ignoring the
  Queue.Empty race between the emptiness check and the get().
  """
  while not self.statusCommandQueue.empty():
    try:
      self.process_command(self.statusCommandQueue.get(False))
    except Queue.Empty:
      pass
+
+
def createCommandHandle(self, command):
  """Attach a BackgroundCommandExecutionHandle to *command* and return it.

  The handle carries the callbacks used to report the background
  command's start (pid becomes available) and its completion back to
  this ActionQueue.

  Raises:
    AgentException: if the command already carries a '__handle'.
  """
  # 'in' instead of the deprecated dict.has_key()
  if '__handle' in command:
    raise AgentException("Command already has __handle")
  command['__handle'] = BackgroundCommandExecutionHandle(
      command, command['commandId'],
      self.on_background_command_started,
      self.on_background_command_complete_callback)
  return command
 
 
   def process_command(self, command):
   def process_command(self, command):
     logger.debug("Took an element of Queue: " + pprint.pformat(command))
     logger.debug("Took an element of Queue: " + pprint.pformat(command))
     # make sure we log failures
     # make sure we log failures
+    commandType = command['commandType']
     try:
     try:
-      if command['commandType'] == self.EXECUTION_COMMAND:
+      if commandType in [self.EXECUTION_COMMAND, self.BACKGROUND_EXECUTION_COMMAND]:
         self.execute_command(command)
         self.execute_command(command)
-      elif command['commandType'] == self.STATUS_COMMAND:
+      elif commandType == self.STATUS_COMMAND:
         self.execute_status_command(command)
         self.execute_status_command(command)
       else:
       else:
         logger.error("Unrecognized command " + pprint.pformat(command))
         logger.error("Unrecognized command " + pprint.pformat(command))
@@ -165,11 +193,11 @@ class ActionQueue(threading.Thread):
 
 
   def execute_command(self, command):
   def execute_command(self, command):
     '''
     '''
-    Executes commands of type  EXECUTION_COMMAND
+    Executes commands of type EXECUTION_COMMAND
     '''
     '''
     clusterName = command['clusterName']
     clusterName = command['clusterName']
     commandId = command['commandId']
     commandId = command['commandId']
-
+    isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
     message = "Executing command with id = {commandId} for role = {role} of " \
     message = "Executing command with id = {commandId} for role = {role} of " \
               "cluster {cluster}.".format(
               "cluster {cluster}.".format(
               commandId = str(commandId), role=command['role'],
               commandId = str(commandId), role=command['role'],
@@ -189,13 +217,17 @@ class ActionQueue(threading.Thread):
       'status': self.IN_PROGRESS_STATUS
       'status': self.IN_PROGRESS_STATUS
     })
     })
     self.commandStatuses.put_command_status(command, in_progress_status)
     self.commandStatuses.put_command_status(command, in_progress_status)
+    
     # running command
     # running command
     commandresult = self.customServiceOrchestrator.runCommand(command,
     commandresult = self.customServiceOrchestrator.runCommand(command,
       in_progress_status['tmpout'], in_progress_status['tmperr'])
       in_progress_status['tmpout'], in_progress_status['tmperr'])
+   
+    
     # dumping results
     # dumping results
-    status = self.COMPLETED_STATUS
-    if commandresult['exitcode'] != 0:
-      status = self.FAILED_STATUS
+    if isCommandBackground:
+      return
+    else:
+      status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS  
     roleResult = self.commandStatuses.generate_report_template(command)
     roleResult = self.commandStatuses.generate_report_template(command)
     roleResult.update({
     roleResult.update({
       'stdout': commandresult['stdout'],
       'stdout': commandresult['stdout'],
@@ -249,6 +281,26 @@ class ActionQueue(threading.Thread):
 
 
     self.commandStatuses.put_command_status(command, roleResult)
     self.commandStatuses.put_command_status(command, roleResult)
 
 
def on_background_command_started(self, handle):
  """Record the pid of a freshly started background process in its report."""
  delta = {'pid': handle.pid}
  self.commandStatuses.update_command_status(handle.command, delta)
+     
+     
def on_background_command_complete_callback(self, process_condenced_result, handle):
  """Publish the final status report for a finished background command.

  Invoked from the executor's background thread once the subprocess
  exits; converts the condensed process result into a command report
  and stores it for the next heartbeat.
  """
  logger.debug('Start callback: %s' % process_condenced_result)
  logger.debug('The handle is: %s' % handle)
  if handle.exitCode == 0:
    status = self.COMPLETED_STATUS
  else:
    status = self.FAILED_STATUS
  if 'structuredOut' in process_condenced_result:
    structured = str(json.dumps(process_condenced_result['structuredOut']))
  else:
    structured = ''
  roleResult = self.commandStatuses.generate_report_template(handle.command)
  roleResult.update({
    'stdout': process_condenced_result['stdout'],
    'stderr': process_condenced_result['stderr'],
    'exitCode': process_condenced_result['exitcode'],
    'structuredOut': structured,
    'status': status,
  })
  self.commandStatuses.put_command_status(handle.command, roleResult)
 
 
   def execute_status_command(self, command):
   def execute_status_command(self, command):
     '''
     '''

+ 44 - 0
ambari-agent/src/main/python/ambari_agent/BackgroundCommandExecutionHandle.py

@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import logging
+
+logger = logging.getLogger()
+installScriptHash = -1
+
class BackgroundCommandExecutionHandle:
  """Tracks the lifecycle of one long-running (background) agent command.

  Holds the original command payload, the pid of the spawned process,
  the current lifecycle status, and the callbacks the executor fires
  when the process starts and finishes.
  """

  SCHEDULED_STATUS = 'SCHEDULED'
  RUNNING_STATUS = 'RUNNING'
  STOP_REQUEST_STATUS = 'STOP_REQUEST'
  # Bug fix: this was 'SCHEDULED' (copy-paste error), which made a stopped
  # handle indistinguishable from a never-started one.
  STOPPED_STATUS = 'STOPPED'

  def __init__(self, command, commandId, on_background_command_started, on_background_command_complete_callback):
    self.command = command            # original command dict from the server
    self.pid = 0                      # subprocess pid once spawned, 0 until then
    self.status = None                # one of the *_STATUS constants; None until scheduled
    self.exitCode = None              # subprocess return code after completion
    self.commandId = commandId
    self.on_background_command_started = on_background_command_started
    self.on_background_command_complete_callback = on_background_command_complete_callback

  def __str__(self):
    return "[BackgroundHandle: pid='{0}', status='{1}', exitCode='{2}', commandId='{3}']".format(self.pid, self.status, self.exitCode, self.commandId)

+ 21 - 2
ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py

@@ -21,6 +21,7 @@ limitations under the License.
 import json
 import json
 import logging
 import logging
 import threading
 import threading
+import copy
 from Grep import Grep
 from Grep import Grep
 
 
 logger = logging.getLogger()
 logger = logging.getLogger()
@@ -58,7 +59,25 @@ class CommandStatusDict():
     if not status_command:
     if not status_command:
       self.callback_action()
       self.callback_action()
 
 
-
def update_command_status(self, command, delta):
  """
  Merge *delta* into the stored report for *command* in place
  (unlike put_command_status, which replaces the whole report).
  """
  is_status_command = 'taskId' not in command
  # Status command reports have no task id, so fall back to object identity
  key = id(command) if is_status_command else command['taskId']
  with self.lock: # Synchronized
    self.current_state[key][1].update(delta)
  if not is_status_command:
    self.callback_action()
+  
def get_command_status(self, taskId):
  """Return a shallow copy of the stored report for *taskId* (thread-safe)."""
  with self.lock:
    return copy.copy(self.current_state[taskId][1])
   def generate_report(self):
   def generate_report(self):
     """
     """
     Generates status reports about commands that are IN_PROGRESS, COMPLETE or
     Generates status reports about commands that are IN_PROGRESS, COMPLETE or
@@ -72,7 +91,7 @@ class CommandStatusDict():
       for key, item in self.current_state.items():
       for key, item in self.current_state.items():
         command = item[0]
         command = item[0]
         report = item[1]
         report = item[1]
-        if command ['commandType'] == ActionQueue.EXECUTION_COMMAND:
+        if command ['commandType'] in [ActionQueue.EXECUTION_COMMAND, ActionQueue.BACKGROUND_EXECUTION_COMMAND]:
           if (report['status']) != ActionQueue.IN_PROGRESS_STATUS:
           if (report['status']) != ActionQueue.IN_PROGRESS_STATUS:
             resultReports.append(report)
             resultReports.append(report)
             # Removing complete/failed command status from dict
             # Removing complete/failed command status from dict

+ 46 - 13
ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py

@@ -50,7 +50,7 @@ class CustomServiceOrchestrator():
   PING_PORTS_KEY = "all_ping_ports"
   PING_PORTS_KEY = "all_ping_ports"
   AMBARI_SERVER_HOST = "ambari_server_host"
   AMBARI_SERVER_HOST = "ambari_server_host"
 
 
-  def __init__(self, config, controller):
+  def __init__(self, config, controller, commandStatuses = None):
     self.config = config
     self.config = config
     self.tmp_dir = config.get('agent', 'prefix')
     self.tmp_dir = config.get('agent', 'prefix')
     self.exec_tmp_dir = config.get('agent', 'tmp_dir')
     self.exec_tmp_dir = config.get('agent', 'tmp_dir')
@@ -63,6 +63,8 @@ class CustomServiceOrchestrator():
     self.public_fqdn = hostname.public_hostname(config)
     self.public_fqdn = hostname.public_hostname(config)
     # cache reset will be called on every agent registration
     # cache reset will be called on every agent registration
     controller.registration_listeners.append(self.file_cache.reset)
     controller.registration_listeners.append(self.file_cache.reset)
+    
+    self.commandStatuses = commandStatuses
     # Clean up old status command files if any
     # Clean up old status command files if any
     try:
     try:
       os.unlink(self.status_commands_stdout)
       os.unlink(self.status_commands_stdout)
@@ -93,6 +95,8 @@ class CustomServiceOrchestrator():
       script_type = command['commandParams']['script_type']
       script_type = command['commandParams']['script_type']
       script = command['commandParams']['script']
       script = command['commandParams']['script']
       timeout = int(command['commandParams']['command_timeout'])
       timeout = int(command['commandParams']['command_timeout'])
+      before_interceptor_method = command['commandParams']['before_system_hook_function']  if command['commandParams'].has_key('before_system_hook_function') else None
+      
       if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
       if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
         server_url_prefix = command['hostLevelParams']['jdk_location']
         server_url_prefix = command['hostLevelParams']['jdk_location']
       else:
       else:
@@ -110,6 +114,12 @@ class CustomServiceOrchestrator():
       if command_name == self.CUSTOM_ACTION_COMMAND:
       if command_name == self.CUSTOM_ACTION_COMMAND:
         base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
         base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
         script_tuple = (os.path.join(base_dir, script) , base_dir)
         script_tuple = (os.path.join(base_dir, script) , base_dir)
+        
+        # Call systemHook functions in current virtual machine. This function can enrich custom action 
+        # command with some information from current machine. And can be considered as plugin
+        if before_interceptor_method != None: 
+          self.processSystemHookFunctions(script_tuple, before_interceptor_method, command)
+        
         hook_dir = None
         hook_dir = None
       else:
       else:
         if command_name == self.CUSTOM_COMMAND_COMMAND:
         if command_name == self.CUSTOM_COMMAND_COMMAND:
@@ -127,6 +137,11 @@ class CustomServiceOrchestrator():
         message = "Unknown script type {0}".format(script_type)
         message = "Unknown script type {0}".format(script_type)
         raise AgentException(message)
         raise AgentException(message)
       # Execute command using proper interpreter
       # Execute command using proper interpreter
+      handle = None
+      if(command.has_key('__handle')):
+        handle = command['__handle']
+        del command['__handle']
+      
       json_path = self.dump_command_to_json(command)
       json_path = self.dump_command_to_json(command)
       pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
       pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
           self.PRE_HOOK_PREFIX, command_name, script_type)
           self.PRE_HOOK_PREFIX, command_name, script_type)
@@ -141,12 +156,16 @@ class CustomServiceOrchestrator():
       # Executing hooks and script
       # Executing hooks and script
       ret = None
       ret = None
 
 
+      from ActionQueue import ActionQueue
+      if(command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1):
+        raise AgentException("Background commands are supported without hooks only")
+
       for py_file, current_base_dir in filtered_py_file_list:
       for py_file, current_base_dir in filtered_py_file_list:
         script_params = [command_name, json_path, current_base_dir]
         script_params = [command_name, json_path, current_base_dir]
         ret = self.python_executor.run_file(py_file, script_params,
         ret = self.python_executor.run_file(py_file, script_params,
                                self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                                self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                                tmpstrucoutfile, logger_level, self.map_task_to_process,
                                tmpstrucoutfile, logger_level, self.map_task_to_process,
-                               task_id, override_output_files)
+                               task_id, override_output_files, handle = handle)
         # Next run_file() invocations should always append to current output
         # Next run_file() invocations should always append to current output
         override_output_files = False
         override_output_files = False
         if ret['exitcode'] != 0:
         if ret['exitcode'] != 0:
@@ -156,16 +175,17 @@ class CustomServiceOrchestrator():
         raise AgentException("No script has been executed")
         raise AgentException("No script has been executed")
 
 
       # if canceled
       # if canceled
-      pid = self.commands_in_progress.pop(task_id)
-      if not isinstance(pid, int):
-        reason = '\nCommand aborted. ' + pid
-        ret['stdout'] += reason
-        ret['stderr'] += reason
-
-        with open(tmpoutfile, "a") as f:
-          f.write(reason)
-        with open(tmperrfile, "a") as f:
-          f.write(reason)
+      if self.commands_in_progress.has_key(task_id):#Background command do not push in this collection (TODO)
+        pid = self.commands_in_progress.pop(task_id)
+        if not isinstance(pid, int):
+          reason = '\nCommand aborted. ' + pid
+          ret['stdout'] += reason
+          ret['stderr'] += reason
+  
+          with open(tmpoutfile, "a") as f:
+            f.write(reason)
+          with open(tmperrfile, "a") as f:
+            f.write(reason)
 
 
     except Exception: # We do not want to let agent fail completely
     except Exception: # We do not want to let agent fail completely
       exc_type, exc_obj, exc_tb = sys.exc_info()
       exc_type, exc_obj, exc_tb = sys.exc_info()
@@ -180,7 +200,20 @@ class CustomServiceOrchestrator():
       }
       }
     return ret
     return ret
 
 
-
def fetch_bg_pid_by_taskid(self, command):
  """
  System hook for cancel commands: look up the pid of the background
  command identified by commandParams/cancel_task_id and store it in
  commandParams/cancel_command_pid so the cancel script can kill it.
  """
  # Bug fix: cancelTaskId was only bound inside the try, so a failure
  # before the assignment made the log line below raise NameError.
  cancel_task_id = None
  cancel_command_pid = None
  try:
    cancel_task_id = int(command['commandParams']['cancel_task_id'])
    status = self.commandStatuses.get_command_status(cancel_task_id)
    cancel_command_pid = status['pid']
  except (KeyError, TypeError, ValueError, AttributeError):
    # Missing/invalid task id, no recorded status, or no commandStatuses
    # dict configured: best-effort, leave the pid as None.
    pass
  logger.info("Found PID=%s for cancel taskId=%s" % (cancel_command_pid, cancel_task_id))
  command['commandParams']['cancel_command_pid'] = cancel_command_pid
+
def processSystemHookFunctions(self, script_tuple, before_interceptor_method, command):
  """Invoke the named system-hook method on this orchestrator with *command*."""
  hook = getattr(self, before_interceptor_method)
  hook(command)
   def requestComponentStatus(self, command):
   def requestComponentStatus(self, command):
     """
     """
      Component status is determined by exit code, returned by runCommand().
      Component status is determined by exit code, returned by runCommand().

+ 91 - 37
ambari-agent/src/main/python/ambari_agent/PythonExecutor.py

@@ -24,6 +24,9 @@ import subprocess
 import pprint
 import pprint
 import threading
 import threading
 from threading import Thread
 from threading import Thread
+import time
+from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle 
+
 from Grep import Grep
 from Grep import Grep
 import shell, sys
 import shell, sys
 
 
@@ -36,7 +39,6 @@ class PythonExecutor:
   Warning: class maintains internal state. As a result, instances should not be
   Warning: class maintains internal state. As a result, instances should not be
   used as a singleton for a concurrent execution of python scripts
   used as a singleton for a concurrent execution of python scripts
   """
   """
-
   NO_ERROR = "none"
   NO_ERROR = "none"
   grep = Grep()
   grep = Grep()
   event = threading.Event()
   event = threading.Event()
@@ -47,9 +49,19 @@ class PythonExecutor:
     self.config = config
     self.config = config
     pass
     pass
 
 
+
def open_subporcess_files(self, tmpoutfile, tmperrfile, override_output_files):
  """
  Open the stdout/stderr capture files for a subprocess run.

  When override_output_files is true the files are truncated ('w'),
  otherwise output is appended ('a').  Returns (tmpout, tmperr).
  """
  mode = 'w' if override_output_files else 'a'
  return open(tmpoutfile, mode), open(tmperrfile, mode)
+    
   def run_file(self, script, script_params, tmp_dir, tmpoutfile, tmperrfile,
   def run_file(self, script, script_params, tmp_dir, tmpoutfile, tmperrfile,
                timeout, tmpstructedoutfile, logger_level, callback, task_id,
                timeout, tmpstructedoutfile, logger_level, callback, task_id,
-               override_output_files = True):
+               override_output_files = True, handle = None):
     """
     """
     Executes the specified python file in a separate subprocess.
     Executes the specified python file in a separate subprocess.
     Method returns only when the subprocess is finished.
     Method returns only when the subprocess is finished.
@@ -59,13 +71,6 @@ class PythonExecutor:
     override_output_files option defines whether stdout/stderr files will be
     override_output_files option defines whether stdout/stderr files will be
     recreated or appended
     recreated or appended
     """
     """
-    if override_output_files: # Recreate files
-      tmpout =  open(tmpoutfile, 'w')
-      tmperr =  open(tmperrfile, 'w')
-    else: # Append to files
-      tmpout =  open(tmpoutfile, 'a')
-      tmperr =  open(tmperrfile, 'a')
-
     # need to remove this file for the following case:
     # need to remove this file for the following case:
     # status call 1 does not write to file; call 2 writes to file;
     # status call 1 does not write to file; call 2 writes to file;
     # call 3 does not write to file, so contents are still call 2's result
     # call 3 does not write to file, so contents are still call 2's result
@@ -77,45 +82,58 @@ class PythonExecutor:
     script_params += [tmpstructedoutfile, logger_level, tmp_dir]
     script_params += [tmpstructedoutfile, logger_level, tmp_dir]
     pythonCommand = self.python_command(script, script_params)
     pythonCommand = self.python_command(script, script_params)
     logger.info("Running command " + pprint.pformat(pythonCommand))
     logger.info("Running command " + pprint.pformat(pythonCommand))
-    process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
-    # map task_id to pid
-    callback(task_id, process.pid)
-    logger.debug("Launching watchdog thread")
-    self.event.clear()
-    self.python_process_has_been_killed = False
-    thread = Thread(target =  self.python_watchdog_func, args = (process, timeout))
-    thread.start()
-    # Waiting for the process to be either finished or killed
-    process.communicate()
-    self.event.set()
-    thread.join()
+    if(handle == None) :
+      tmpout, tmperr = self.open_subporcess_files(tmpoutfile, tmperrfile, override_output_files)
+      
+      process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
+      # map task_id to pid
+      callback(task_id, process.pid)
+      logger.debug("Launching watchdog thread")
+      self.event.clear()
+      self.python_process_has_been_killed = False
+      thread = Thread(target =  self.python_watchdog_func, args = (process, timeout))
+      thread.start()
+      # Waiting for the process to be either finished or killed
+      process.communicate()
+      self.event.set()
+      thread.join()
+      return self.prepare_process_result(process, tmpoutfile, tmperrfile, tmpstructedoutfile)
+    else:
+      holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
+      
+      background = BackgroundThread(holder, self)
+      background.start()
+      return {"exitcode": 777}
+
+  def prepare_process_result (self, process, tmpoutfile, tmperrfile, tmpstructedoutfile):
+    out, error, structured_out = self.read_result_from_files(tmpoutfile, tmperrfile, tmpstructedoutfile)
     # Building results
     # Building results
-    error = self.NO_ERROR
     returncode = process.returncode
     returncode = process.returncode
-    out = open(tmpoutfile, 'r').read()
-    error = open(tmperrfile, 'r').read()
 
 
+    if self.python_process_has_been_killed:
+      error = str(error) + "\n Python script has been killed due to timeout"
+      returncode = 999
+    result = self.condenseOutput(out, error, returncode, structured_out)
+    logger.info("Result: %s" % result)
+    return result
+  
+  def read_result_from_files(self, out_path, err_path, structured_out_path):
+    out = open(out_path, 'r').read()
+    error = open(err_path, 'r').read()
     try:
     try:
-      with open(tmpstructedoutfile, 'r') as fp:
+      with open(structured_out_path, 'r') as fp:
         structured_out = json.load(fp)
         structured_out = json.load(fp)
     except Exception:
     except Exception:
-      if os.path.exists(tmpstructedoutfile):
-        errMsg = 'Unable to read structured output from ' + tmpstructedoutfile
+      if os.path.exists(structured_out_path):
+        errMsg = 'Unable to read structured output from ' + structured_out_path
         structured_out = {
         structured_out = {
           'msg' : errMsg
           'msg' : errMsg
         }
         }
         logger.warn(structured_out)
         logger.warn(structured_out)
       else:
       else:
         structured_out = {}
         structured_out = {}
-
-    if self.python_process_has_been_killed:
-      error = str(error) + "\n Python script has been killed due to timeout"
-      returncode = 999
-    result = self.condenseOutput(out, error, returncode, structured_out)
-    logger.info("Result: %s" % result)
-    return result
-
-
+    return out, error, structured_out
+  
   def launch_python_subprocess(self, command, tmpout, tmperr):
   def launch_python_subprocess(self, command, tmpout, tmperr):
     """
     """
     Creates subprocess with given parameters. This functionality was moved to separate method
     Creates subprocess with given parameters. This functionality was moved to separate method
@@ -124,7 +142,7 @@ class PythonExecutor:
     return subprocess.Popen(command,
     return subprocess.Popen(command,
       stdout=tmpout,
       stdout=tmpout,
       stderr=tmperr, close_fds=True)
       stderr=tmperr, close_fds=True)
-
+    
   def isSuccessfull(self, returncode):
   def isSuccessfull(self, returncode):
     return not self.python_process_has_been_killed and returncode == 0
     return not self.python_process_has_been_killed and returncode == 0
 
 
@@ -153,3 +171,39 @@ class PythonExecutor:
       shell.kill_process_with_children(python.pid)
       shell.kill_process_with_children(python.pid)
       self.python_process_has_been_killed = True
       self.python_process_has_been_killed = True
     pass
     pass
+
+class Holder:
+  def __init__(self, command, out_file, err_file, structured_out_file, handle):
+    self.command = command
+    self.out_file = out_file
+    self.err_file = err_file
+    self.structured_out_file = structured_out_file
+    self.handle = handle
+    
+class BackgroundThread(threading.Thread):
+  def __init__(self, holder, pythonExecutor):
+    threading.Thread.__init__(self)
+    self.holder = holder
+    self.pythonExecutor = pythonExecutor
+  
+  def run(self):
+    process_out, process_err  = self.pythonExecutor.open_subporcess_files(self.holder.out_file, self.holder.err_file, True)
+    
+    logger.info("Starting process command %s" % self.holder.command)
+    process = self.pythonExecutor.launch_python_subprocess(self.holder.command, process_out, process_err)
+    
+    logger.info("Process has been started. Pid = %s" % process.pid)
+    
+    self.holder.handle.pid = process.pid
+    self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
+    self.holder.handle.on_background_command_started(self.holder.handle)
+    
+    process.communicate()
+    
+    self.holder.handle.exitCode = process.returncode
+    process_condenced_result = self.pythonExecutor.prepare_process_result(process, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
+    logger.info("Calling callback with args %s" % process_condenced_result)
+    self.holder.handle.on_background_command_complete_callback(process_condenced_result, self.holder.handle)
+    logger.info("Exiting from thread for holder pid %s" % self.holder.handle.pid)
+    
+  

+ 212 - 3
ambari-agent/src/test/python/ambari_agent/TestActionQueue.py

@@ -27,6 +27,7 @@ import os, errno, time, pprint, tempfile, threading, json
 import StringIO
 import StringIO
 import sys
 import sys
 from threading import Thread
 from threading import Thread
+import copy
 
 
 from mock.mock import patch, MagicMock, call
 from mock.mock import patch, MagicMock, call
 from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
 from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
@@ -34,13 +35,11 @@ from ambari_agent.CustomServiceOrchestrator import CustomServiceOrchestrator
 from ambari_agent.PythonExecutor import PythonExecutor
 from ambari_agent.PythonExecutor import PythonExecutor
 from ambari_agent.CommandStatusDict import CommandStatusDict
 from ambari_agent.CommandStatusDict import CommandStatusDict
 from ambari_agent.ActualConfigHandler import ActualConfigHandler
 from ambari_agent.ActualConfigHandler import ActualConfigHandler
+from FileCache import FileCache
 
 
 
 
 class TestActionQueue(TestCase):
 class TestActionQueue(TestCase):
-
   def setUp(self):
   def setUp(self):
-    out = StringIO.StringIO()
-    sys.stdout = out
     # save original open() method for later use
     # save original open() method for later use
     self.original_open = open
     self.original_open = open
 
 
@@ -155,6 +154,49 @@ class TestActionQueue(TestCase):
     'hostLevelParams': {}
     'hostLevelParams': {}
   }
   }
 
 
+  background_command = {
+    'commandType': 'BACKGROUND_EXECUTION_COMMAND',
+    'role': 'NAMENODE',
+    'roleCommand': 'CUSTOM_COMMAND',
+    'commandId': '1-1',
+    'taskId': 19,
+    'clusterName': 'c1',
+    'serviceName': 'HDFS',
+    'configurations':{'global' : {}},
+    'configurationTags':{'global' : { 'tag': 'v123' }},
+    'hostLevelParams':{'custom_command': 'REBALANCE_HDFS'},
+    'commandParams' :  {
+      'script_type' : 'PYTHON',
+      'script' : 'script.py',
+      'command_timeout' : '600',
+      'jdk_location' : '.',
+      'service_package_folder' : '.'
+      }
+  }
+  cancel_background_command = {
+    'commandType': 'EXECUTION_COMMAND',
+    'role': 'NAMENODE',
+    'roleCommand': 'ACTIONEXECUTE',
+    'commandId': '1-1',
+    'taskId': 20,
+    'clusterName': 'c1',
+    'serviceName': 'HDFS',
+    'configurations':{'global' : {}},
+    'configurationTags':{'global' : {}},
+    'hostLevelParams':{},
+    'commandParams' :  {
+      'script_type' : 'PYTHON',
+      'script' : 'cancel_background_task.py',
+      'before_system_hook_function' : 'fetch_bg_pid_by_taskid',
+      'jdk_location' : '.',
+      'command_timeout' : '600',
+      'service_package_folder' : '.',
+      'cancel_policy': 'SIGKILL',
+      'cancel_task_id': "19",
+      }
+  }
+
+
   @patch.object(ActionQueue, "process_command")
   @patch.object(ActionQueue, "process_command")
   @patch.object(Queue, "get")
   @patch.object(Queue, "get")
   @patch.object(CustomServiceOrchestrator, "__init__")
   @patch.object(CustomServiceOrchestrator, "__init__")
@@ -526,3 +568,170 @@ class TestActionQueue(TestCase):
     actionQueue.join()
     actionQueue.join()
     self.assertEqual(actionQueue.stopped(), True, 'Action queue is not stopped.')
     self.assertEqual(actionQueue.stopped(), True, 'Action queue is not stopped.')
 
 
+  @patch.object(StackVersionsFileHandler, "read_stack_version")
+  @patch.object(CustomServiceOrchestrator, "runCommand")
+  @patch.object(CustomServiceOrchestrator, "__init__")
+  def test_execute_background_command(self, CustomServiceOrchestrator_mock,
+                                  runCommand_mock, read_stack_version_mock
+                                  ):
+    CustomServiceOrchestrator_mock.return_value = None
+    CustomServiceOrchestrator.runCommand.return_value = {'exitcode' : 0,
+                                                         'stdout': 'out-11',
+                                                         'stderr' : 'err-13'}
+    
+    dummy_controller = MagicMock()
+    actionQueue = ActionQueue(AmbariConfig().getConfig(), dummy_controller)
+
+    execute_command = copy.deepcopy(self.background_command)
+    actionQueue.put([execute_command])
+    actionQueue.processBackgroundQueueSafeEmpty();
+    actionQueue.processStatusCommandQueueSafeEmpty();
+    
+    #assert that python execturor start
+    self.assertTrue(runCommand_mock.called)
+    runningCommand = actionQueue.commandStatuses.current_state.get(execute_command['taskId'])
+    self.assertTrue(runningCommand is not None)
+    self.assertEqual(runningCommand[1]['status'], ActionQueue.IN_PROGRESS_STATUS)
+    
+    report = actionQueue.result()
+    self.assertEqual(len(report['reports']),1)
+    
+
+      
+  @patch.object(StackVersionsFileHandler, "read_stack_version")
+  @patch.object(CustomServiceOrchestrator, "resolve_script_path")
+  @patch.object(FileCache, "__init__")
+  def test_execute_python_executor(self, read_stack_version_mock, FileCache_mock, resolve_script_path_mock):
+    FileCache_mock.return_value = None
+    
+    
+    dummy_controller = MagicMock()
+    cfg = AmbariConfig().getConfig()
+    cfg.set('agent', 'tolerate_download_failures', 'true')
+    cfg.set('agent', 'prefix', '.')
+    cfg.set('agent', 'cache_dir', 'background_tasks')
+    
+    actionQueue = ActionQueue(cfg, dummy_controller)
+    patch_output_file(actionQueue.customServiceOrchestrator.python_executor)
+    actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock()
+   
+    result = {}
+    lock = threading.RLock()
+    complete_done = threading.Condition(lock)
+    start_done = threading.Condition(lock)
+    
+    def command_started_w(handle):
+      with lock:
+        result['command_started'] = {'handle': copy.copy(handle), 'command_status' : actionQueue.commandStatuses.get_command_status(handle.command['taskId'])}
+        start_done.notifyAll()
+    
+    def command_complete_w(process_condenced_result, handle):
+      with lock:
+        result['command_complete'] = {'condenced_result' : copy.copy(process_condenced_result), 
+                                      'handle' : copy.copy(handle),
+                                      'command_status' : actionQueue.commandStatuses.get_command_status(handle.command['taskId'])
+                                      }
+        complete_done.notifyAll()
+    
+    actionQueue.on_background_command_complete_callback = wraped(actionQueue.on_background_command_complete_callback,None, command_complete_w)
+    actionQueue.on_background_command_started = wraped(actionQueue.on_background_command_started,None,command_started_w)
+    actionQueue.put([self.background_command])
+    actionQueue.processBackgroundQueueSafeEmpty();
+    actionQueue.processStatusCommandQueueSafeEmpty();
+    
+    with lock:
+      start_done.wait(5)
+      
+      self.assertTrue(result.has_key('command_started'), 'command started callback was not fired')
+      started_handle = result['command_started']['handle']
+      started_status = result['command_started']['command_status']
+      
+      self.assertEqual(started_handle.pid, started_status['pid'])
+      self.assertTrue(started_handle.pid > 0, "PID was not assigned to handle")
+      self.assertEqual(started_status['status'], ActionQueue.IN_PROGRESS_STATUS)
+      
+      complete_done.wait(2)
+      
+      finished_handle = result['command_complete']['handle']
+      self.assertEqual(started_handle.pid, finished_handle.pid)
+      finished_status = result['command_complete']['command_status']
+      self.assertEqual(finished_status['status'], ActionQueue.COMPLETED_STATUS)
+      self.assertEqual(finished_status['stdout'], 'process_out')
+      self.assertEqual(finished_status['stderr'], 'process_err')
+      self.assertEqual(finished_status['exitCode'], 0)
+      
+    
+    runningCommand = actionQueue.commandStatuses.current_state.get(self.background_command['taskId'])
+    self.assertTrue(runningCommand is not None)
+    
+    report = actionQueue.result()
+    self.assertEqual(len(report['reports']),1)
+    self.assertEqual(report['reports'][0]['stdout'],'process_out')
+#     self.assertEqual(report['reports'][0]['structuredOut'],'{"a": "b."}')
+    
+        
+  @patch.object(StackVersionsFileHandler, "read_stack_version")
+  @patch.object(FileCache, "__init__")
+  def test_cancel_backgound_command(self, read_stack_version_mock, FileCache_mock):
+    FileCache_mock.return_value = None
+    
+    dummy_controller = MagicMock()
+    cfg = AmbariConfig().getConfig()
+    cfg.set('agent', 'tolerate_download_failures', 'true')
+    cfg.set('agent', 'prefix', '.')
+    cfg.set('agent', 'cache_dir', 'background_tasks')
+    
+    actionQueue = ActionQueue(cfg, dummy_controller)
+    patch_output_file(actionQueue.customServiceOrchestrator.python_executor)
+    actionQueue.customServiceOrchestrator.python_executor.prepare_process_result = MagicMock()
+    actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock()
+
+    lock = threading.RLock()
+    complete_done = threading.Condition(lock)
+    
+    def command_complete_w(process_condenced_result, handle):
+      with lock:
+        complete_done.wait(4)
+    
+    actionQueue.on_background_command_complete_callback = wraped(actionQueue.on_background_command_complete_callback,None, command_complete_w)
+    execute_command = copy.deepcopy(self.background_command)
+    actionQueue.put([execute_command])
+    actionQueue.processBackgroundQueueSafeEmpty();
+    
+    time.sleep(1)
+    
+    actionQueue.process_command(self.cancel_background_command)
+    #TODO add assert
+    
+    with lock:
+      complete_done.notifyAll()
+      
+      
+def patch_output_file(pythonExecutor):
+  def windows_py(command, tmpout, tmperr):
+    proc = MagicMock()
+    proc.pid = 33
+    proc.returncode = 0
+    with tmpout:
+      tmpout.write('process_out')
+    with tmperr:
+      tmperr.write('process_err')
+    return proc
+  def open_subporcess_files_win(fout, ferr, f):
+    return MagicMock(), MagicMock()
+  def read_result_from_files(out_path, err_path, structured_out_path):
+    return 'process_out', 'process_err', '{"a": "b."}'
+  pythonExecutor.launch_python_subprocess = windows_py
+  pythonExecutor.open_subporcess_files = open_subporcess_files_win
+  pythonExecutor.read_result_from_files = read_result_from_files
+    
+def wraped(func, before = None, after = None):
+    def wrapper(*args, **kwargs):
+      if(before is not None):
+        before(*args, **kwargs)
+      ret =  func(*args, **kwargs)
+      if(after is not None):
+        after(*args, **kwargs)
+      return ret
+    return wrapper   
+  

+ 34 - 0
ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py

@@ -39,6 +39,7 @@ import sys
 from AgentException import AgentException
 from AgentException import AgentException
 from FileCache import FileCache
 from FileCache import FileCache
 from LiveStatus import LiveStatus
 from LiveStatus import LiveStatus
+from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
 
 
 
 
 class TestCustomServiceOrchestrator(TestCase):
 class TestCustomServiceOrchestrator(TestCase):
@@ -396,6 +397,39 @@ class TestCustomServiceOrchestrator(TestCase):
     self.assertEqual(runCommand_mock.return_value, status)
     self.assertEqual(runCommand_mock.return_value, status)
 
 
 
 
+  @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
+  @patch.object(FileCache, "__init__")
+  @patch.object(FileCache, "get_custom_actions_base_dir")
+  def test_runCommand_background_action(self, get_custom_actions_base_dir_mock,
+                                    FileCache_mock,
+                                    dump_command_to_json_mock):
+    FileCache_mock.return_value = None
+    get_custom_actions_base_dir_mock.return_value = "some path"
+    _, script = tempfile.mkstemp()
+    command = {
+      'role' : 'any',
+      'commandParams': {
+        'script_type': 'PYTHON',
+        'script': 'some_custom_action.py',
+        'command_timeout': '600',
+        'jdk_location' : 'some_location'
+      },
+      'taskId' : '13',
+      'roleCommand': 'ACTIONEXECUTE',
+      'commandType': 'BACKGROUND_EXECUTION_COMMAND',
+      '__handle' : BackgroundCommandExecutionHandle(None,13,MagicMock(), MagicMock())
+    }
+    dummy_controller = MagicMock()
+    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
+    
+    import TestActionQueue
+    TestActionQueue.patch_output_file(orchestrator.python_executor)
+    orchestrator.python_executor.condenseOutput = MagicMock()
+    orchestrator.dump_command_to_json = MagicMock()
+    
+    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
+    self.assertEqual(ret['exitcode'], 777)
+
   def tearDown(self):
   def tearDown(self):
     # enable stdout
     # enable stdout
     sys.stdout = sys.__stdout__
     sys.stdout = sys.__stdout__

+ 2 - 0
ambari-server/pom.xml

@@ -135,6 +135,8 @@
             <exclude>src/main/resources/db/serial</exclude>
             <exclude>src/main/resources/db/serial</exclude>
             <exclude>src/main/resources/db/index.txt</exclude>
             <exclude>src/main/resources/db/index.txt</exclude>
             <exclude>src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2</exclude>
             <exclude>src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2</exclude>
+            <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
+            <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>conf/unix/ca.config</exclude>
             <exclude>conf/unix/ca.config</exclude>
             <exclude>conf/unix/krb5JAASLogin.conf</exclude>
             <exclude>conf/unix/krb5JAASLogin.conf</exclude>
             <exclude>**/*.json</exclude>
             <exclude>**/*.json</exclude>

+ 45 - 21
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java

@@ -31,18 +31,13 @@ import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
 
 
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.reflect.TypeToken;
-import com.google.inject.persist.UnitOfWork;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.ActionQueue;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.agent.CancelCommand;
 import org.apache.ambari.server.agent.CancelCommand;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand;
@@ -65,6 +60,13 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.reflect.TypeToken;
+import com.google.inject.persist.UnitOfWork;
+
 
 
 
 
 /**
 /**
@@ -193,7 +195,6 @@ class ActionScheduler implements Runnable {
       processCancelledRequestsList();
       processCancelledRequestsList();
 
 
       Set<Long> runningRequestIds = new HashSet<Long>();
       Set<Long> runningRequestIds = new HashSet<Long>();
-      Set<String> affectedHosts = new HashSet<String>();
       List<Stage> stages = db.getStagesInProgress();
       List<Stage> stages = db.getStagesInProgress();
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Scheduler wakes up");
         LOG.debug("Scheduler wakes up");
@@ -207,6 +208,10 @@ class ActionScheduler implements Runnable {
         return;
         return;
       }
       }
       int i_stage = 0;
       int i_stage = 0;
+      
+      
+      stages = filterParallelPerHostStages(stages);
+      
       for (Stage s : stages) {
       for (Stage s : stages) {
         // Check if we can process this stage in parallel with another stages
         // Check if we can process this stage in parallel with another stages
         i_stage ++;
         i_stage ++;
@@ -225,20 +230,7 @@ class ActionScheduler implements Runnable {
           }
           }
         }
         }
 
 
-        List<String> stageHosts = s.getHosts();
-        boolean conflict = false;
-        for (String host : stageHosts) {
-          if (affectedHosts.contains(host)) {
-            conflict = true;
-            break;
-          }
-        }
-        if (conflict) {
-          // Also we don't want to perform stages in parallel at the same hosts
-          continue;
-        } else {
-          affectedHosts.addAll(stageHosts);
-        }
+        
 
 
         // Commands that will be scheduled in current scheduler wakeup
         // Commands that will be scheduled in current scheduler wakeup
         List<ExecutionCommand> commandsToSchedule = new ArrayList<ExecutionCommand>();
         List<ExecutionCommand> commandsToSchedule = new ArrayList<ExecutionCommand>();
@@ -354,6 +346,38 @@ class ActionScheduler implements Runnable {
     }
     }
   }
   }
 
 
+  /**
+   * Returns filtered list of stages following the rule:
+   * 1) remove stages that has the same host. Leave only first stage, the rest that have same host of any operation will be filtered
+   * 2) do not remove stages intersected by host if they have intersection by background command
+   * @param stages
+   * @return
+   */
+  private List<Stage> filterParallelPerHostStages(List<Stage> stages) {
+    List<Stage> retVal = new ArrayList<Stage>();
+    Set<String> affectedHosts = new HashSet<String>();
+    for(Stage s : stages){
+      for (String host : s.getHosts()) {
+        if (!affectedHosts.contains(host)) {
+          if(!isStageHasBackgroundCommandsOnly(s, host)){
+            affectedHosts.add(host);
+          }
+          retVal.add(s);
+        }
+      }
+    }
+    return retVal;
+  }
+
+  private boolean isStageHasBackgroundCommandsOnly(Stage s, String host) {
+    for (ExecutionCommandWrapper c : s.getExecutionCommands(host)) {
+      if(c.getExecutionCommand().getCommandType() != AgentCommandType.BACKGROUND_EXECUTION_COMMAND)
+      {
+        return false;
+      }
+    }
+    return true;
+  }
 
 
   /**
   /**
    * Executes internal ambari-server action
    * Executes internal ambari-server action

+ 1 - 0
ambari-server/src/main/java/org/apache/ambari/server/agent/AgentCommand.java

@@ -31,6 +31,7 @@ public abstract class AgentCommand {
 
 
   public enum AgentCommandType {
   public enum AgentCommandType {
     EXECUTION_COMMAND,
     EXECUTION_COMMAND,
+    BACKGROUND_EXECUTION_COMMAND,
     STATUS_COMMAND,
     STATUS_COMMAND,
     CANCEL_COMMAND,
     CANCEL_COMMAND,
     REGISTRATION_COMMAND
     REGISTRATION_COMMAND

+ 1 - 0
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java

@@ -563,6 +563,7 @@ public class HeartBeatHandler {
           throw new AmbariException("Could not get jaxb string for command", e);
           throw new AmbariException("Could not get jaxb string for command", e);
         }
         }
         switch (ac.getCommandType()) {
         switch (ac.getCommandType()) {
+          case BACKGROUND_EXECUTION_COMMAND: 
           case EXECUTION_COMMAND: {
           case EXECUTION_COMMAND: {
             response.addExecutionCommand((ExecutionCommand) ac);
             response.addExecutionCommand((ExecutionCommand) ac);
             break;
             break;

+ 5 - 18
ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java

@@ -402,15 +402,7 @@ public class AmbariMetaInfo {
   public boolean isValidServiceComponent(String stackName, String version,
   public boolean isValidServiceComponent(String stackName, String version,
                                          String serviceName, String componentName) throws AmbariException {
                                          String serviceName, String componentName) throws AmbariException {
     ServiceInfo service = getServiceInfo(stackName, version, serviceName);
     ServiceInfo service = getServiceInfo(stackName, version, serviceName);
-    if (service == null) {
-      return false;
-    }
-    for (ComponentInfo compInfo : service.getComponents()) {
-      if (compInfo.getName().equals(componentName)) {
-        return true;
-      }
-    }
-    return false;
+    return service != null && service.getComponentByName(componentName) != null;
   }
   }
 
 
   /**
   /**
@@ -436,17 +428,12 @@ public class AmbariMetaInfo {
         || services.isEmpty()) {
         || services.isEmpty()) {
       return retService;
       return retService;
     }
     }
-    boolean found = false;
     for (Map.Entry<String, ServiceInfo> entry : services.entrySet()) {
     for (Map.Entry<String, ServiceInfo> entry : services.entrySet()) {
-      for (ComponentInfo compInfo : entry.getValue().getComponents()) {
-        if (compInfo.getName().equals(componentName)) {
-          retService = entry.getKey();
-          found = true;
-          break;
-        }
-      }
-      if (found)
+      ComponentInfo vu = entry.getValue().getComponentByName(componentName);
+      if(vu != null){
+        retService = entry.getKey();
         break;
         break;
+      }
     }
     }
     return retService;
     return retService;
   }
   }

+ 1 - 13
ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java

@@ -254,8 +254,7 @@ public class StackExtensionHelper {
 
 
     for (ComponentInfo childComponent : childService.getComponents()) {
     for (ComponentInfo childComponent : childService.getComponents()) {
       if (!childComponent.isDeleted()) {
       if (!childComponent.isDeleted()) {
-        ComponentInfo parentComponent = getComponent(parentService,
-                childComponent.getName());
+        ComponentInfo parentComponent = parentService.getComponentByName(childComponent.getName());
         if (parentComponent != null) { // If parent has similar component
         if (parentComponent != null) { // If parent has similar component
           ComponentInfo mergedComponent = mergeComponents(parentComponent,
           ComponentInfo mergedComponent = mergeComponents(parentComponent,
                   childComponent);
                   childComponent);
@@ -278,17 +277,6 @@ public class StackExtensionHelper {
     }
     }
   }
   }
 
 
-
-  private ComponentInfo getComponent(ServiceInfo service, String componentName) {
-    for (ComponentInfo component : service.getComponents()) {
-      if (component.getName().equals(componentName)) {
-        return component;
-      }
-    }
-    return null;
-  }
-
-
   ComponentInfo mergeComponents(ComponentInfo parent, ComponentInfo child) {
   ComponentInfo mergeComponents(ComponentInfo parent, ComponentInfo child) {
     ComponentInfo result = new ComponentInfo(child); // cloning child
     ComponentInfo result = new ComponentInfo(child); // cloning child
     CommandScriptDefinition commandScript = child.getCommandScript();
     CommandScriptDefinition commandScript = child.getCommandScript();

+ 13 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java

@@ -42,6 +42,7 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -53,6 +54,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.CommandScriptDefinition;
 import org.apache.ambari.server.state.CommandScriptDefinition;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.CustomCommandDefinition;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.MaintenanceState;
@@ -236,7 +238,6 @@ public class AmbariCustomCommandExecutionHelper {
       throw new AmbariException(message);
       throw new AmbariException(message);
     }
     }
 
 
-
     StackId stackId = cluster.getDesiredStackVersion();
     StackId stackId = cluster.getDesiredStackVersion();
     AmbariMetaInfo ambariMetaInfo = managementController.getAmbariMetaInfo();
     AmbariMetaInfo ambariMetaInfo = managementController.getAmbariMetaInfo();
     ServiceInfo serviceInfo = ambariMetaInfo.getServiceInfo
     ServiceInfo serviceInfo = ambariMetaInfo.getServiceInfo
@@ -244,6 +245,12 @@ public class AmbariCustomCommandExecutionHelper {
     StackInfo stackInfo = ambariMetaInfo.getStackInfo
     StackInfo stackInfo = ambariMetaInfo.getStackInfo
       (stackId.getStackName(), stackId.getStackVersion());
       (stackId.getStackName(), stackId.getStackVersion());
 
 
+    CustomCommandDefinition customCommandDefinition = null;
+    ComponentInfo ci = serviceInfo.getComponentByName(componentName);
+    if(ci != null){
+      customCommandDefinition = ci.getCustomCommandByName(commandName);
+    }
+    
     long nowTimestamp = System.currentTimeMillis();
     long nowTimestamp = System.currentTimeMillis();
 
 
     for (String hostName : candidateHosts) {
     for (String hostName : candidateHosts) {
@@ -271,6 +278,11 @@ public class AmbariCustomCommandExecutionHelper {
       ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
       ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
           componentName).getExecutionCommand();
           componentName).getExecutionCommand();
 
 
+      //set type background
+      if(customCommandDefinition != null && customCommandDefinition.isBackground()){
+        execCmd.setCommandType(AgentCommandType.BACKGROUND_EXECUTION_COMMAND);
+      }
+      
       execCmd.setConfigurations(configurations);
       execCmd.setConfigurations(configurations);
       execCmd.setConfigurationAttributes(configurationAttributes);
       execCmd.setConfigurationAttributes(configurationAttributes);
       execCmd.setConfigurationTags(configTags);
       execCmd.setConfigurationTags(configTags);

+ 8 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java

@@ -156,6 +156,14 @@ public class ComponentInfo {
     }
     }
     return false;
     return false;
   }
   }
+  public CustomCommandDefinition getCustomCommandByName(String commandName){
+    for(CustomCommandDefinition ccd : getCustomCommands()){
+      if (ccd.getName().equals(commandName)){
+        return ccd;
+      }
+    }
+    return null;
+  }
 
 
   public List<DependencyInfo> getDependencies() {
   public List<DependencyInfo> getDependencies() {
     return dependencies;
     return dependencies;

+ 1 - 10
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java

@@ -563,7 +563,7 @@ public class ConfigHelper {
     
     
     ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
     ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
         stackId.getStackVersion(), sch.getServiceName());
         stackId.getStackVersion(), sch.getServiceName());
-    ComponentInfo componentInfo = getComponentInfo(serviceInfo,sch.getServiceComponentName());
+    ComponentInfo componentInfo = serviceInfo.getComponentByName(sch.getServiceComponentName());
     // Configs are considered stale when:
     // Configs are considered stale when:
     // - desired type DOES NOT exist in actual
     // - desired type DOES NOT exist in actual
     // --- desired type DOES NOT exist in stack: not_stale
     // --- desired type DOES NOT exist in stack: not_stale
@@ -621,15 +621,6 @@ public class ConfigHelper {
     return stale;
     return stale;
   }
   }
 
 
-  private ComponentInfo getComponentInfo(ServiceInfo serviceInfo, String componentName) {
-    for(ComponentInfo componentInfo : serviceInfo.getComponents()) {
-      if(componentInfo.getName().equals(componentName)){
-        return componentInfo;
-      }
-    }
-    return null;
-  }
-
   /**
   /**
    * @return <code>true</code> if any service on the stack defines a property
    * @return <code>true</code> if any service on the stack defines a property
    * for the type.
    * for the type.

+ 6 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java

@@ -30,15 +30,20 @@ public class CustomCommandDefinition {
 
 
   private String name;
   private String name;
   private CommandScriptDefinition commandScript;
   private CommandScriptDefinition commandScript;
+  private boolean background = false;
 
 
   public String getName() {
   public String getName() {
     return name;
     return name;
   }
   }
+  
+  public boolean isBackground() {
+    return background;
+  }
 
 
   public CommandScriptDefinition getCommandScript() {
   public CommandScriptDefinition getCommandScript() {
     return commandScript;
     return commandScript;
   }
   }
-
+  
   @Override
   @Override
   public boolean equals(Object obj) {
   public boolean equals(Object obj) {
     if (obj == null) {
     if (obj == null) {

+ 13 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java

@@ -180,7 +180,19 @@ public class ServiceInfo {
     if (components == null) components = new ArrayList<ComponentInfo>();
     if (components == null) components = new ArrayList<ComponentInfo>();
     return components;
     return components;
   }
   }
-
+  /**
+   * Finds ComponentInfo by component name
+   * @param componentName
+   * @return ComponentInfo componentName or null
+   */
+  public ComponentInfo getComponentByName(String componentName){
+    for(ComponentInfo componentInfo : getComponents()) {
+      if(componentInfo.getName().equals(componentName)){
+        return componentInfo;
+      }
+    }
+    return null;
+  }
   public boolean isClientOnlyService() {
   public boolean isClientOnlyService() {
     if (components == null || components.isEmpty()) {
     if (components == null || components.isEmpty()) {
       return false;
       return false;

+ 10 - 10
ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml

@@ -19,16 +19,6 @@
 -->
 -->
 
 
 <actionDefinitions>
 <actionDefinitions>
-  <actionDefinition>
-    <actionName>ambari_hdfs_rebalancer</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>threshold,[principal],[keytab]</inputs>
-    <targetService>HDFS</targetService>
-    <targetComponent>NAMENODE</targetComponent>
-    <defaultTimeout>600</defaultTimeout>
-    <description>HDFS Rebalance</description>
-    <targetType>ANY</targetType>
-  </actionDefinition>
   <actionDefinition>
   <actionDefinition>
     <actionName>nagios_update_ignore</actionName>
     <actionName>nagios_update_ignore</actionName>
     <actionType>SYSTEM</actionType>
     <actionType>SYSTEM</actionType>
@@ -59,4 +49,14 @@
     <description>Validate if provided service config can be applied to specified hosts</description>
     <description>Validate if provided service config can be applied to specified hosts</description>
     <targetType>ALL</targetType>
     <targetType>ALL</targetType>
   </actionDefinition>
   </actionDefinition>
+  <actionDefinition>
+    <actionName>cancel_background_task</actionName>
+    <actionType>SYSTEM</actionType>
+    <inputs></inputs>
+    <targetService></targetService>
+    <targetComponent></targetComponent>
+    <defaultTimeout>60</defaultTimeout>
+    <description>Cancel background task</description>
+    <targetType>ANY</targetType>
+  </actionDefinition>
 </actionDefinitions>
 </actionDefinitions>

+ 0 - 59
ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py

@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-class HdfsRebalance(Script):
-  def actionexecute(self, env):
-    config = Script.get_config()
-
-    hdfs_user = config['configurations']['global']['hdfs_user']
-    conf_dir = "/etc/hadoop/conf"
-
-    _authentication = config['configurations']['core-site']['hadoop.security.authentication']
-    security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-
-    threshold = config['commandParams']['threshold']
-
-    if security_enabled:
-      kinit_path_local = functions.get_kinit_path(
-        ["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-      principal = config['commandParams']['principal']
-      keytab = config['commandParams']['keytab']
-      Execute(format("{kinit_path_local}  -kt {keytab} {principal}"))
-
-    ExecuteHadoop(format('balancer -threshold {threshold}'),
-                  user=hdfs_user,
-                  conf_dir=conf_dir,
-                  logoutput=True
-    )
-
-    structured_output_example = {
-      'result': 'Rebalancer completed.'
-    }
-
-    self.put_structured_out(structured_output_example)
-
-
-if __name__ == "__main__":
-  HdfsRebalance().execute()

+ 41 - 0
ambari-server/src/main/resources/custom_actions/cancel_background_task.py

@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import Script
+from ambari_agent import shell
+
+class CancelBackgroundTaskCommand(Script):
+  def actionexecute(self, env):
+    config = Script.get_config()
+
+    cancel_command_pid = config['commandParams']['cancel_command_pid'] if config['commandParams'].has_key('cancel_command_pid') else None
+    cancel_task_id = config['commandParams']['cancel_task_id']
+    if cancel_command_pid == None:
+      print "Nothing to cancel: there is no any task running with given taskId = '%s'" % cancel_task_id
+    else:
+      cancel_policy = config['commandParams']['cancel_policy']
+      print "Send Kill to process pid = %s for task = %s with policy %s" % (cancel_command_pid, cancel_task_id, cancel_policy)
+  
+      shell.kill_process_with_children(cancel_command_pid)
+      print "Process pid = %s for task = %s has been killed successfully" % (cancel_command_pid, cancel_task_id)
+    
+if __name__ == "__main__":
+  CancelBackgroundTaskCommand().execute()

+ 8 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml

@@ -42,6 +42,14 @@
                 <timeout>600</timeout>
                 <timeout>600</timeout>
               </commandScript>
               </commandScript>
             </customCommand>
             </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
           </customCommands>
           </customCommands>
         </component>
         </component>
 
 

+ 1032 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log

@@ -0,0 +1,1032 @@
+14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
+14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
+14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
+14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
+14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
+14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
+14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
+14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
+14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
+14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
+14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
+14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
+14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
+14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
+14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
+14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
+14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
+14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
+14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
+14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
+14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
+14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
+14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
+14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
+14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
+14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
+14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
+14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
+14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
+14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
+14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
+14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
+14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
+14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
+14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
+14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
+14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
+14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905703 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073936313 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918732 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073927646 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073923306 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073927634 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905206 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918731 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073914433 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073949065 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918837 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905696 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073914425 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073936315 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918730 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918835 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931908 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918836 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073941480 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073918720 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:42 INFO balancer.Balancer: Moving block 1073905207 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:20:02 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:20:02 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.02941190187794]]
+14/07/28 17:20:02 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:20:02 INFO balancer.Balancer: Need to move 2.10 GB to make the cluster balanced.
+14/07/28 17:20:02 INFO balancer.Balancer: Decided to move 886.17 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:20:02 INFO balancer.Balancer: Decided to move 2.20 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:20:02 INFO balancer.Balancer: Decided to move 338.05 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:20:02 INFO balancer.Balancer: Decided to move 6.40 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:20:02 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073862707 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073937420 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073920222 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073929845 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073916304 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073929850 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073862702 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073929729 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073862751 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073916400 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073929733 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073929734 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073862696 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:20:02 INFO balancer.Balancer: Moving block 1073908637 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073862705 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073862717 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073925937 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073743479 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073929842 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073743477 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073743472 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:03 INFO balancer.Balancer: Moving block 1073743469 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:20:07 INFO balancer.Balancer: Moving block 1073743457 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:20:38 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:20:38 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.006662157613235]]
+14/07/28 17:20:38 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:20:38 INFO balancer.Balancer: Need to move 2.06 GB to make the cluster balanced.
+14/07/28 17:20:38 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:20:38 INFO balancer.Balancer: Decided to move 3.67 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:20:38 INFO balancer.Balancer: Decided to move 691.93 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:20:38 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073949706 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073949705 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073934960 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073934961 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073931129 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073919626 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073895836 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073919668 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073922334 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073895835 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073919616 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073915788 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073922324 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073895837 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073934964 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:20:38 INFO balancer.Balancer: Moving block 1073946479 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:21:41 INFO balancer.Balancer: Moving block 1073743886 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:21:41 INFO balancer.Balancer: Moving block 1073743887 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:21:42 INFO balancer.Balancer: Moving block 1073743884 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:22:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:22:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=32.813526740600096]]
+14/07/28 17:22:14 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:22:14 INFO balancer.Balancer: Need to move 1.68 GB to make the cluster balanced.
+14/07/28 17:22:14 INFO balancer.Balancer: Decided to move 881.14 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:22:14 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:22:14 INFO balancer.Balancer: Decided to move 3.48 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:22:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073950920 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073947104 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073950924 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073920884 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073907747 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073947030 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073939445 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073925345 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073863660 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073939444 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:22:14 INFO balancer.Balancer: Moving block 1073863557 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:22:15 INFO balancer.Balancer: Moving block 1073863581 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:22:36 INFO balancer.Balancer: Moving block 1073929681 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:22:40 INFO balancer.Balancer: Moving block 1073863555 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:22:42 INFO balancer.Balancer: Moving block 1073863565 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:22:46 INFO balancer.Balancer: Moving block 1073743310 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:22:59 INFO balancer.Balancer: Moving block 1073743307 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:04 INFO balancer.Balancer: Moving block 1073929662 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:23:08 INFO balancer.Balancer: Moving block 1073743304 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:23:20 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:23:20 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=32.4671023778226]]
+14/07/28 17:23:20 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:23:20 INFO balancer.Balancer: Need to move 1.00 GB to make the cluster balanced.
+14/07/28 17:23:20 INFO balancer.Balancer: Decided to move 554.71 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:23:20 INFO balancer.Balancer: Decided to move 9.05 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:23:20 INFO balancer.Balancer: Decided to move 211.11 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:23:20 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073934378 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073934336 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073926731 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073911382 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073911481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073907615 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073945914 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073934355 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073922911 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073950385 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073930527 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073922928 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073926729 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073934349 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073934382 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073950390 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:23:20 INFO balancer.Balancer: Moving block 1073919034 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:23:21 INFO balancer.Balancer: Moving block 1073911384 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:23:21 INFO balancer.Balancer: Moving block 1073911377 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:22 INFO balancer.Balancer: Moving block 1073930512 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:23:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:23:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=32.4630526940786]]
+14/07/28 17:23:56 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:23:56 INFO balancer.Balancer: Need to move 1016.16 MB to make the cluster balanced.
+14/07/28 17:23:56 INFO balancer.Balancer: Decided to move 554.67 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:23:56 INFO balancer.Balancer: Decided to move 5.22 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:23:56 INFO balancer.Balancer: Decided to move 3.67 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:23:56 INFO balancer.Balancer: Decided to move 374.03 MB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:23:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938017 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073899247 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938020 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073912005 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073946677 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073946666 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938034 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073946679 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938033 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938031 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:23:57 INFO balancer.Balancer: Moving block 1073938030 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:24:04 INFO balancer.Balancer: Moving block 1073742882 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:24:10 INFO balancer.Balancer: Moving block 1073863240 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:24:16 INFO balancer.Balancer: Moving block 1073863258 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:24:39 INFO balancer.Balancer: Moving block 1073742864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:24:43 INFO balancer.Balancer: Moving block 1073867063 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:24:47 INFO balancer.Balancer: Moving block 1073863253 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:12 INFO balancer.Balancer: Moving block 1073742858 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:25:20 INFO balancer.Balancer: Moving block 1073863255 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:25:22 INFO balancer.Balancer: Moving block 1073863257 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:25:22 INFO balancer.Balancer: Moving block 1073863260 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:25:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:25:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=31.97163242012186]]
+14/07/28 17:25:33 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:25:33 INFO balancer.Balancer: Need to move 30.55 MB to make the cluster balanced.
+14/07/28 17:25:33 INFO balancer.Balancer: Decided to move 248.57 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:25:33 INFO balancer.Balancer: Decided to move 337.85 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:25:33 INFO balancer.Balancer: Decided to move 4.71 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:25:33 INFO balancer.Balancer: Decided to move 1.81 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:25:33 INFO balancer.Balancer: Decided to move 2.70 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:25:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073948028 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073948002 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073948009 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073922033 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073928518 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073940208 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073917638 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073742157 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073742158 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073940203 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073924209 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073928371 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073944103 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073742148 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073864439 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073940224 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073921556 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073948006 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073742012 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073741997 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073922030 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073940222 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073921548 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:25:33 INFO balancer.Balancer: Moving block 1073741991 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:25:54 INFO balancer.Balancer: Moving block 1073898105 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:25:54 INFO balancer.Balancer: Moving block 1073898097 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:26:05 INFO balancer.Balancer: Moving block 1073898083 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:26:16 INFO balancer.Balancer: Moving block 1073898080 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:26:39 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:26:39 INFO balancer.Balancer: 0 over-utilized: []
+14/07/28 17:26:39 INFO balancer.Balancer: 0 underutilized: []

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log

@@ -0,0 +1,29 @@
+Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
+Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
+Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
+Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
+Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
+Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
+Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
+Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
+Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
+Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
+Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
+Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
+Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
+Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
+Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
+Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
+Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
+Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
+Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
+Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
+The cluster is balanced. Exiting...
+Balancing took 24.858033333333335 minutes

+ 45 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py

@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
import time
import sys
from threading import Thread


def write_function(path, handle, interval=1):
  """Replay the lines of *path* onto *handle*, flushing after each line and
  sleeping *interval* seconds between lines, to emulate the pacing of a
  long-running `hdfs balancer` process."""
  with open(path) as f:
    for line in f:
      handle.write(line)
      handle.flush()
      time.sleep(interval)


def rebalancer_out():
  # Emulated balancer stdout stream.
  # (Previously called write_function with only two arguments - TypeError.)
  write_function('balancer.log', sys.stdout, 1)


def rebalancer_err():
  # Emulated balancer stderr stream; must go to stderr, not stdout.
  write_function('balancer-err.log', sys.stderr, 0.3)


if __name__ == "__main__":
  # Guarding the thread start-up keeps the module importable (e.g. by tests)
  # without immediately opening the log fixtures.
  thread = Thread(target=write_function, args=('balancer.log', sys.stdout, 1))
  thread.start()

  threaderr = Thread(target=write_function, args=('balancer-err.log', sys.stderr, 0.3))
  threaderr.start()

  thread.join()
  # Previously never joined: the emulator could exit before the error stream
  # finished replaying.
  threaderr.join()

+ 130 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py

@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
class HdfsParser():
  """Stateful consumer of `hdfs balancer` output lines.

  Tracks the balancer lifecycle in ``state`` and remembers the first
  progress line seen (``initialLine``) so callers can compute an overall
  completion percentage against it.
  """

  def __init__(self):
    # First Progress line observed; baseline for percent-complete math.
    self.initialLine = None
    # One of None, 'PROCESS_STARTED', 'PROGRESS', 'PROCESS_FINISED'.
    # NOTE(review): 'PROCESS_FINISED' is misspelled, but consumers compare
    # against this exact literal, so the spelling is load-bearing.
    self.state = None

  def parseLine(self, line):
    """Classify one raw output line.

    Returns a populated HdfsLine for progress rows, or None for every
    other line type (header, end-of-run, unrecognized).
    """
    parsed = HdfsLine()
    line_type, matcher = parsed.recognizeType(line)
    if line_type == HdfsLine.LineType.Progress:
      self.state = 'PROGRESS'
      parsed.parseProgressLog(line, matcher)
      if self.initialLine is None:
        self.initialLine = parsed
      return parsed
    if line_type == HdfsLine.LineType.HeaderStart:
      self.state = 'PROCESS_STARTED'
    elif line_type == HdfsLine.LineType.ProgressEnd:
      self.state = 'PROCESS_FINISED'
    return None
+    
class HdfsLine():
  """One parsed line of `hdfs balancer` output.

  recognizeType() classifies a raw line; parseProgressLog() extracts the
  per-iteration byte counters from a Progress line.
  """

  class LineType:
    # Poor-man's enum: header row, per-iteration progress row,
    # final "cluster is balanced" row, anything else.
    HeaderStart, Progress, ProgressEnd, Unknown = range(4)

  # Units accepted in the balancer's human-readable sizes; the list index
  # doubles as the power of 1024 applied in parseMemory().
  MEMORY_SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
  # Template for one "<number> <unit>" column; the %d substitutions give the
  # three occurrences in PROGRESS_PATTERN distinct group names.
  # [.,]? accepts either decimal separator (the original '(.|,)?' matched
  # any character at all, which was far too permissive).
  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)[.,]?(\d+)?) (?P<mult_%d>' + "|".join(MEMORY_SUFFIX) + '))'

  # Column titles are separated by runs of spaces, so \s+ is required here:
  # the original used \w+, which cannot match whitespace, making the header
  # line unrecognizable.
  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\s+Iteration#\s+Bytes Already Moved\s+Bytes Left To Move\s+Bytes Being Moved')
  PROGRESS_PATTERN = re.compile(
                            "(?P<date>.*?)\s+" +
                            "(?P<iteration>\d+)\s+" +
                            MEMORY_PATTERN % (1, 1, 1) + "\s+" +
                            MEMORY_PATTERN % (2, 2, 2) + "\s+" +
                            MEMORY_PATTERN % (3, 3, 3)
                            )
  # The original listed the same alternative twice and left '.' unescaped.
  PROGRESS_END_PATTERN = re.compile('The cluster is balanced\. Exiting\.\.\.')

  def __init__(self):
    self.date = None                  # timestamp column, as printed
    self.iteration = None             # iteration number (int)
    self.bytesAlreadyMoved = None     # numeric values, in bytes (float)
    self.bytesLeftToMove = None
    self.bytesBeingMoved = None
    self.bytesAlreadyMovedStr = None  # original "<number> <unit>" strings
    self.bytesLeftToMoveStr = None
    self.bytesBeingMovedStr = None

  def recognizeType(self, line):
    """Return (LineType, match-object-or-None) for the given raw line."""
    for (line_type, pattern) in (
                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN),
                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
                            ):
      m = re.match(pattern, line)
      if m:
        return line_type, m
    return HdfsLine.LineType.Unknown, None

  def parseProgressLog(self, line, m):
    '''
    Parse one line of 'hdfs balancer' progress output, e.g.:

    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB

    *m* may be the match object already produced by recognizeType(); when it
    is None the line is re-matched here (the original ignored it and always
    re-matched).

    Raises AmbariException on lines that do not match.
    '''
    if m is None:
      m = re.match(self.PROGRESS_PATTERN, line)
    if m:
      self.date = m.group('date')
      self.iteration = int(m.group('iteration'))

      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1'))
      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2'))
      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))

      self.bytesAlreadyMovedStr = m.group('memmult_1')
      self.bytesLeftToMoveStr = m.group('memmult_2')
      self.bytesBeingMovedStr = m.group('memmult_3')
    else:
      # NOTE(review): AmbariException is not imported in this module, so this
      # raise would currently fail with NameError -- confirm the intended
      # exception type and import it. (Original also dropped the '% line'.)
      raise AmbariException("Failed to parse line [%s]" % line)

  def parseMemory(self, memorySize, multiplier_type):
    """Convert e.g. ('5.74', 'GB') to a byte count (float).

    Raises AmbariException for an unknown unit suffix.
    """
    try:
      factor = self.MEMORY_SUFFIX.index(multiplier_type)
    except ValueError:
      raise AmbariException("Failed to parse memory value [%s %s]" % (memorySize, multiplier_type))

    return float(memorySize) * (1024 ** factor)

  def toJson(self):
    """Return the parsed counters as a plain dict for structured output."""
    return {
            'timeStamp' : self.date,
            'iteration' : self.iteration,

            'dataMoved': self.bytesAlreadyMovedStr,
            'dataLeft' : self.bytesLeftToMoveStr,
            'dataBeingMoved': self.bytesBeingMovedStr,

            'bytesMoved': self.bytesAlreadyMoved,
            'bytesLeft' : self.bytesLeftToMove,
            'bytesBeingMoved': self.bytesBeingMoved,
          }

  def __str__(self):
    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]" % (self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)

+ 52 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py

@@ -20,6 +20,13 @@ limitations under the License.
 from resource_management import *
 from resource_management import *
 from hdfs_namenode import namenode
 from hdfs_namenode import namenode
 from hdfs import hdfs
 from hdfs import hdfs
+import time
+import json
+import subprocess
+import hdfs_rebalance
+import sys
+import os
+from datetime import datetime
 
 
 
 
 class NameNode(Script):
 class NameNode(Script):
@@ -66,6 +73,51 @@ class NameNode(Script):
     env.set_params(params)
     env.set_params(params)
     namenode(action="decommission")
     namenode(action="decommission")
     pass
     pass
+  
+  def rebalancehdfs(self, env):
+    import params
+    env.set_params(params)
 
 
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    print "Starting balancer with threshold = %s" % threshold
+      
+    def calculateCompletePercent(first, current):
+      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+    
+    
+    def startRebalancingProcess(threshold):
+      rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
+    
+    command = startRebalancingProcess(threshold)
+    
+    basedir = os.path.join(env.config.basedir, 'scripts')
+    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
+      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
+      command = ['python','hdfs-command.py']
+    
+    print "Executing command %s" % command
+    
+    parser = hdfs_rebalance.HdfsParser()
+    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                          shell=False,
+                          close_fds=True,
+                          cwd=basedir
+                          )
+    for line in iter(proc.stdout.readline, ''):
+      sys.stdout.write('[balancer] %s %s' % (str(datetime.now()), line ))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
+        
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISED' : 
+        sys.stdout.write('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
+        self.put_structured_out({'completePercent' : 1})
+        break
+      
+      
 if __name__ == "__main__":
 if __name__ == "__main__":
   NameNode().execute()
   NameNode().execute()

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -184,3 +184,4 @@ if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
 else:
 else:
   exclude_packages = []
   exclude_packages = []
+name_node_params = default("/commandParams/namenode", None)

+ 83 - 0
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java

@@ -46,7 +46,9 @@ import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.AgentCommand;
 import org.apache.ambari.server.agent.AgentCommand;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.serveraction.ServerActionManagerImpl;
 import org.apache.ambari.server.serveraction.ServerActionManagerImpl;
@@ -742,6 +744,74 @@ public class TestActionScheduler {
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(3).getHostRoleStatus(hostname3, "DATANODE"));
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(3).getHostRoleStatus(hostname3, "DATANODE"));
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(4).getHostRoleStatus(hostname4, "GANGLIA_MONITOR"));
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(4).getHostRoleStatus(hostname4, "GANGLIA_MONITOR"));
   }
   }
+  /**
+   * Verifies that ActionScheduler allows to execute background tasks in parallel
+   */
+  @Test
+  public void testBackgroundStagesExecutionEnable() throws Exception {
+    ActionQueue aq = new ActionQueue();
+    Clusters fsm = mock(Clusters.class);
+    Cluster oneClusterMock = mock(Cluster.class);
+    Service serviceObj = mock(Service.class);
+    ServiceComponent scomp = mock(ServiceComponent.class);
+    ServiceComponentHost sch = mock(ServiceComponentHost.class);
+    UnitOfWork unitOfWork = mock(UnitOfWork.class);
+    RequestFactory requestFactory = mock(RequestFactory.class);
+    when(fsm.getCluster(anyString())).thenReturn(oneClusterMock);
+    when(oneClusterMock.getService(anyString())).thenReturn(serviceObj);
+    when(serviceObj.getServiceComponent(anyString())).thenReturn(scomp);
+    when(scomp.getServiceComponentHost(anyString())).thenReturn(sch);
+    when(serviceObj.getCluster()).thenReturn(oneClusterMock);
+    
+    String hostname1 = "ahost.ambari.apache.org";
+    String hostname2 = "bhost.ambari.apache.org";
+    HashMap<String, ServiceComponentHost> hosts =
+        new HashMap<String, ServiceComponentHost>();
+    hosts.put(hostname1, sch);
+    hosts.put(hostname2, sch);
+    when(scomp.getServiceComponentHosts()).thenReturn(hosts);
+    
+    List<Stage> stages = new ArrayList<Stage>();
+    Stage backgroundStage = null;
+    stages.add(//stage with background command
+        backgroundStage = getStageWithSingleTask(
+            hostname1, "cluster1", Role.NAMENODE, RoleCommand.CUSTOM_COMMAND, "REBALANCEHDFS", Service.Type.HDFS, 1, 1, 1));
+    
+    Assert.assertEquals(AgentCommandType.BACKGROUND_EXECUTION_COMMAND ,backgroundStage.getExecutionCommands(hostname1).get(0).getExecutionCommand().getCommandType());
+    
+    stages.add( // Stage with the same hostname, should be scheduled
+        getStageWithSingleTask(
+            hostname1, "cluster1", Role.GANGLIA_MONITOR,
+            RoleCommand.START, Service.Type.GANGLIA, 2, 2, 2));
+    
+    stages.add(
+        getStageWithSingleTask(
+            hostname2, "cluster1", Role.DATANODE,
+            RoleCommand.START, Service.Type.HDFS, 3, 3, 3));
+    
+    
+    ActionDBAccessor db = mock(ActionDBAccessor.class);
+    when(db.getStagesInProgress()).thenReturn(stages);
+    
+    Properties properties = new Properties();
+    properties.put(Configuration.PARALLEL_STAGE_EXECUTION_KEY, "true");
+    Configuration conf = new Configuration(properties);
+    ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3,
+        new HostsMap((String) null), new ServerActionManagerImpl(fsm),
+        unitOfWork, conf);
+    
+    ActionManager am = new ActionManager(
+        2, 2, aq, fsm, db, new HostsMap((String) null),
+        new ServerActionManagerImpl(fsm), unitOfWork,
+        requestFactory, conf);
+    
+    scheduler.doWork();
+    
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(0).getHostRoleStatus(hostname1, "NAMENODE"));
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(2).getHostRoleStatus(hostname2, "DATANODE"));
+
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(1).getHostRoleStatus(hostname1, "GANGLIA_MONITOR"));
+  }
 
 
 
 
   @Test
   @Test
@@ -1234,6 +1304,19 @@ public class TestActionScheduler {
     return stage;
     return stage;
   }
   }
 
 
+  private Stage getStageWithSingleTask(String hostname, String clusterName, Role role, RoleCommand roleCommand,
+      String customCommandName, Service.Type service, int taskId, int stageId, int requestId) {
+    Stage stage = getStageWithSingleTask(hostname, clusterName, role, roleCommand, service, taskId, stageId, requestId);
+
+    HostRoleCommand cmd = stage.getHostRoleCommand(hostname, role.name());
+    if (cmd != null) {
+      cmd.setCustomCommandName(customCommandName);
+    }
+
+    stage.getExecutionCommandWrapper(hostname, role.toString()).getExecutionCommand().setCommandType(AgentCommandType.BACKGROUND_EXECUTION_COMMAND);
+    return stage;
+  }
+
   private void addInstallTaskToStage(Stage stage, String hostname,
   private void addInstallTaskToStage(Stage stage, String hostname,
                               String clusterName, Role role,
                               String clusterName, Role role,
                               RoleCommand roleCommand, Service.Type service,
                               RoleCommand roleCommand, Service.Type service,

+ 8 - 3
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -1232,13 +1232,18 @@ public class AmbariMetaInfoTest {
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     Assert.assertEquals("scripts/yet_another_parent_command.py",
     Assert.assertEquals("scripts/yet_another_parent_command.py",
             ccd.getCommandScript().getScript());
             ccd.getCommandScript().getScript());
-
-    Assert.assertEquals(2, component.getCustomCommands().size());
+    
+    ccd = findCustomCommand("REBALANCEHDFS", component);
+    Assert.assertEquals("scripts/namenode.py",
+        ccd.getCommandScript().getScript());
+    Assert.assertTrue(ccd.isBackground());
+    
+    Assert.assertEquals(3, component.getCustomCommands().size());
 
 
     // Test custom command script inheritance
     // Test custom command script inheritance
     component = metaInfo.getComponent(STACK_NAME_HDP, "2.0.8",
     component = metaInfo.getComponent(STACK_NAME_HDP, "2.0.8",
             "HDFS", "NAMENODE");
             "HDFS", "NAMENODE");
-    Assert.assertEquals(3, component.getCustomCommands().size());
+    Assert.assertEquals(4, component.getCustomCommands().size());
 
 
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     Assert.assertEquals("scripts/yet_another_parent_command.py",
     Assert.assertEquals("scripts/yet_another_parent_command.py",

+ 2 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -7244,8 +7244,9 @@ public class AmbariManagementControllerTest {
 
 
     StackServiceComponentResponse response = responses.iterator().next();
     StackServiceComponentResponse response = responses.iterator().next();
     assertNotNull(response.getCustomCommands());
     assertNotNull(response.getCustomCommands());
-    assertEquals(1, response.getCustomCommands().size());
+    assertEquals(2, response.getCustomCommands().size());
     assertEquals("DECOMMISSION", response.getCustomCommands().get(0));
     assertEquals("DECOMMISSION", response.getCustomCommands().get(0));
+    assertEquals("REBALANCEHDFS", response.getCustomCommands().get(1));
 
 
     StackServiceComponentRequest journalNodeRequest = new StackServiceComponentRequest(
     StackServiceComponentRequest journalNodeRequest = new StackServiceComponentRequest(
         STACK_NAME, NEW_STACK_VERSION, SERVICE_NAME, "JOURNALNODE");
         STACK_NAME, NEW_STACK_VERSION, SERVICE_NAME, "JOURNALNODE");

+ 275 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java

@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller;
+
+import static org.mockito.Matchers.any;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
+import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.state.State;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+
+@RunWith(MockitoJUnitRunner.class)
+public class BackgroundCustomCommandExecutionTest {
+  private Injector injector;
+  private AmbariManagementController controller;
+  private AmbariMetaInfo ambariMetaInfo;
+  private Configuration configuration;
+  private Clusters clusters;
+  
+  
+  private static final String REQUEST_CONTEXT_PROPERTY = "context";
+  
+  @Captor ArgumentCaptor<List<Stage>> stagesCaptor;
+  @Mock ActionManager am;
+  
+  @Before
+  public void setup() throws Exception {
+    InMemoryDefaultTestModule module = new InMemoryDefaultTestModule(){
+      
+      
+      @Override
+      protected void configure() {
+        getProperties().put(Configuration.CUSTOM_ACTION_DEFINITION_KEY, "src/main/resources/custom_action_definitions");
+        super.configure();
+        bind(ActionManager.class).toInstance(am);
+      }
+    };
+    injector = Guice.createInjector(module);
+    
+    
+    injector.getInstance(GuiceJpaInitializer.class);
+    controller = injector.getInstance(AmbariManagementController.class);
+    clusters = injector.getInstance(Clusters.class);
+    configuration = injector.getInstance(Configuration.class);
+    
+    Assert.assertEquals("src/main/resources/custom_action_definitions", configuration.getCustomActionDefinitionPath());
+    
+    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    ambariMetaInfo.init();
+  }
+  @After
+  public void teardown() {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testRebalanceHdfsCustomCommand() {
+    try {
+      createClusterFixture();
+      
+      Map<String, String> requestProperties = new HashMap<String, String>() {
+        {
+          put(REQUEST_CONTEXT_PROPERTY, "Refresh YARN Capacity Scheduler");
+          put("command", "REBALANCEHDFS");
+          put("namenode" , "{\"threshold\":13}");//case is important here
+        }
+      };
+
+      ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1",
+          "REBALANCEHDFS", new HashMap<String, String>());
+      actionRequest.getResourceFilters().add(new RequestResourceFilter("HDFS", "NAMENODE",Collections.singletonList("c6401")));
+      
+      controller.createAction(actionRequest, requestProperties);
+      
+      Mockito.verify(am, Mockito.times(1)).sendActions(stagesCaptor.capture(), any(ExecuteActionRequest.class));
+      
+      
+      List<Stage> stages = stagesCaptor.getValue();
+      Assert.assertEquals(1, stages.size());
+      Stage stage = stages.get(0);
+      
+      System.out.println(stage);
+      
+      Assert.assertEquals(1, stage.getHosts().size());
+      
+      List<ExecutionCommandWrapper> commands = stage.getExecutionCommands("c6401");
+      Assert.assertEquals(1, commands.size());
+      
+      ExecutionCommand command = commands.get(0).getExecutionCommand();
+      
+      Assert.assertEquals(AgentCommandType.BACKGROUND_EXECUTION_COMMAND, command.getCommandType());
+      Assert.assertEquals("{\"threshold\":13}", command.getCommandParams().get("namenode"));
+      
+    } catch (AmbariException e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+  @SuppressWarnings("serial")
+  @Test
+  public void testCancelCommand() {
+    try {
+      createClusterFixture();
+      
+      Map<String, String> requestProperties = new HashMap<String, String>() {
+        {
+          put(REQUEST_CONTEXT_PROPERTY, "Stop background command");
+//          put("cancel_policy","SIGKILL");
+//          put("cancel_task_id","19");
+        }
+      };
+
+      ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+          "c1", 
+          "actionexecute","cancel_background_task",
+          null,
+          null,
+          new HashMap<String, String>(){{
+            put("cancel_policy","SIGKILL"); // parameters/cancel_policy -- in request params
+            put("cancel_task_id","19");
+          }});
+      actionRequest.getResourceFilters().add(new RequestResourceFilter("HDFS", "NAMENODE", Collections.singletonList("c6401")));
+      
+      controller.createAction(actionRequest, requestProperties);
+      
+      Mockito.verify(am, Mockito.times(1)).sendActions(stagesCaptor.capture(), any(ExecuteActionRequest.class));
+      
+      List<Stage> stages = stagesCaptor.getValue();
+      Assert.assertEquals(1, stages.size());
+      Stage stage = stages.get(0);
+      
+      Assert.assertEquals(1, stage.getHosts().size());
+      
+      List<ExecutionCommandWrapper> commands = stage.getExecutionCommands("c6401");
+      Assert.assertEquals(1, commands.size());
+      
+      ExecutionCommand command = commands.get(0).getExecutionCommand();
+      
+      Assert.assertEquals(AgentCommandType.EXECUTION_COMMAND, command.getCommandType());
+      Assert.assertEquals("ACTIONEXECUTE", command.getRoleCommand().name());
+      Assert.assertEquals("cancel_background_task.py", command.getCommandParams().get("script"));
+      Assert.assertEquals("SIGKILL", command.getCommandParams().get("cancel_policy"));
+      Assert.assertEquals("19", command.getCommandParams().get("cancel_task_id"));
+      
+      
+    } catch (AmbariException e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+  
+  private void createClusterFixture() throws AmbariException {
+    createCluster("c1");
+    addHost("c6401","c1");
+    addHost("c6402","c1");
+    
+    clusters.getCluster("c1");
+    createService("c1", "HDFS", null);
+    
+    createServiceComponent("c1","HDFS","NAMENODE", State.INIT);
+    
+    createServiceComponentHost("c1","HDFS","NAMENODE","c6401", null);
+  }
+  private void addHost(String hostname, String clusterName) throws AmbariException {
+    clusters.addHost(hostname);
+    setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
+    clusters.getHost(hostname).setState(HostState.HEALTHY);
+    clusters.getHost(hostname).persist();
+    if (null != clusterName)
+      clusters.mapHostToCluster(hostname, clusterName);
+  }
+  private void setOsFamily(Host host, String osFamily, String osVersion) {
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", osFamily);
+    hostAttributes.put("os_release_version", osVersion);
+    
+    host.setHostAttributes(hostAttributes);
+  }
+
+  private void createCluster(String clusterName) throws AmbariException {
+    ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), "HDP-2.0.6", null);
+    controller.createCluster(r);
+  }
+  
+  private void createService(String clusterName,
+      String serviceName, State desiredState) throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
+    requests.add(r1);
+
+    ServiceResourceProviderTest.createServices(controller, requests);
+  }
+
+  private void createServiceComponent(String clusterName,
+      String serviceName, String componentName, State desiredState)
+          throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceComponentRequest r = new ServiceComponentRequest(clusterName,
+        serviceName, componentName, dStateStr);
+    Set<ServiceComponentRequest> requests =
+        new HashSet<ServiceComponentRequest>();
+    requests.add(r);
+    ComponentResourceProviderTest.createComponents(controller, requests);
+  }
+
+  private void createServiceComponentHost(String clusterName, String serviceName, String componentName, String hostname, State desiredState) throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName,
+        serviceName, componentName, hostname, dStateStr);
+    Set<ServiceComponentHostRequest> requests =
+        new HashSet<ServiceComponentHostRequest>();
+    requests.add(r);
+    controller.createHostComponents(requests);
+  }
+
+}

+ 3 - 15
ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java

@@ -27,27 +27,15 @@ import org.junit.Test;
 
 
 public class ActionDefinitionManagerTest {
 public class ActionDefinitionManagerTest {
 
 
-  private final String customActionDefinitionRoot = "./src/test/resources/custom_action_definitions/".
-          replaceAll("/", File.separator);
+  private final String customActionDefinitionRoot = "./src/test/resources/custom_action_definitions/";
 
 
   @Test
   @Test
   public void testReadCustomActionDefinitions() throws Exception {
   public void testReadCustomActionDefinitions() throws Exception {
     ActionDefinitionManager manager = new ActionDefinitionManager();
     ActionDefinitionManager manager = new ActionDefinitionManager();
     manager.readCustomActionDefinitions(new File(customActionDefinitionRoot));
     manager.readCustomActionDefinitions(new File(customActionDefinitionRoot));
 
 
-    Assert.assertEquals(3, manager.getAllActionDefinition().size());
-    ActionDefinition ad = manager.getActionDefinition("ambari_hdfs_rebalancer");
-    Assert.assertNotNull(ad);
-    Assert.assertEquals("ambari_hdfs_rebalancer", ad.getActionName());
-    Assert.assertEquals("HDFS Rebalance", ad.getDescription());
-    Assert.assertEquals("threshold,[principal],[keytab]", ad.getInputs());
-    Assert.assertEquals("NAMENODE", ad.getTargetComponent());
-    Assert.assertEquals("HDFS", ad.getTargetService());
-    Assert.assertEquals(600, (int)ad.getDefaultTimeout());
-    Assert.assertEquals(TargetHostType.ANY, ad.getTargetType());
-    Assert.assertEquals(ActionType.SYSTEM, ad.getActionType());
-
-    ad = manager.getActionDefinition("customAction1");
+    Assert.assertEquals(2, manager.getAllActionDefinition().size());
+    ActionDefinition ad = manager.getActionDefinition("customAction1");
     Assert.assertNotNull(ad);
     Assert.assertNotNull(ad);
     Assert.assertEquals("customAction1", ad.getActionName());
     Assert.assertEquals("customAction1", ad.getActionName());
     Assert.assertEquals("A random test", ad.getDescription());
     Assert.assertEquals("A random test", ad.getDescription());

+ 7 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -451,3 +451,10 @@ class TestNamenode(RMFTestCase):
                               recursive = True,
                               recursive = True,
                               mode = 0755,
                               mode = 0755,
                               )
                               )
+#   def test_rebalance_hdfs(self): ##Does not work because of exectuteScript Framework does not works with strcuturedoutput
+#     self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+#                        classname = "NameNode",
+#                        command = "rebalancehdfs",
+#                        config_file="rebalancehdfs_default.json"
+#     )
+

文件差异内容过多而无法显示
+ 76 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json


+ 0 - 10
ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml

@@ -42,14 +42,4 @@
     <targetComponent>TASKTRACKER</targetComponent>
     <targetComponent>TASKTRACKER</targetComponent>
     <description>A random test</description>
     <description>A random test</description>
   </actionDefinition>
   </actionDefinition>
-  <actionDefinition>
-    <actionName>ambari_hdfs_rebalancer</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>threshold,[principal],[keytab]</inputs>
-    <targetService>HDFS</targetService>
-    <targetComponent>NAMENODE</targetComponent>
-    <description>HDFS Rebalance</description>
-    <targetType>ANY</targetType>
-    <defaultTimeout>600</defaultTimeout>
-  </actionDefinition>
 </actionDefinitions>
 </actionDefinitions>

+ 0 - 32
ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml

@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="action_definition.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<actionDefinitions>
-  <actionDefinition>
-    <actionName>ambari_hdfs_rebalancer</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>threshold,[principal],[keytab]</inputs>
-    <targetService>HDFS</targetService>
-    <targetComponent>NAMENODE</targetComponent>
-    <defaultTimeout>600</defaultTimeout>
-    <description>HDFS Rebalance</description>
-    <targetType>ANY</targetType>
-  </actionDefinition>
-</actionDefinitions>

+ 9 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml

@@ -42,6 +42,15 @@
                 <timeout>600</timeout>
                 <timeout>600</timeout>
               </commandScript>
               </commandScript>
             </customCommand>
             </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+            
           </customCommands>
           </customCommands>
         </component>
         </component>
 
 

+ 6 - 0
ambari-web/app/controllers/global/background_operations_controller.js

@@ -172,6 +172,12 @@ App.BackgroundOperationsController = Em.Controller.extend({
     task.Tasks.status = data.Tasks.status;
     task.Tasks.status = data.Tasks.status;
     task.Tasks.stdout = data.Tasks.stdout;
     task.Tasks.stdout = data.Tasks.stdout;
     task.Tasks.stderr = data.Tasks.stderr;
     task.Tasks.stderr = data.Tasks.stderr;
+
+    // Put some command information to task object
+    task.Tasks.command = data.Tasks.command;
+    task.Tasks.custom_command_name = data.Tasks.custom_command_name;
+    task.Tasks.structured_out = data.Tasks.structured_out;
+
     task.Tasks.output_log = data.Tasks.output_log;
     task.Tasks.output_log = data.Tasks.output_log;
     task.Tasks.error_log = data.Tasks.error_log;
     task.Tasks.error_log = data.Tasks.error_log;
     this.set('serviceTimestamp', App.dateTime());
     this.set('serviceTimestamp', App.dateTime());

+ 69 - 6
ambari-web/app/controllers/main/service/item.js

@@ -230,15 +230,11 @@ App.MainServiceItemController = Em.Controller.extend({
    * @param event
    * @param event
    */
    */
   refreshYarnQueues : function (event) {
   refreshYarnQueues : function (event) {
-    var self = this;
+    var controller = this;
     return App.showConfirmationPopup(function() {
     return App.showConfirmationPopup(function() {
-      self.refreshYarnQueuesPrimary();
-    });
-  },
-  refreshYarnQueuesPrimary : function(){
     App.ajax.send({
     App.ajax.send({
       name : 'service.item.refreshQueueYarnRequest',
       name : 'service.item.refreshQueueYarnRequest',
-      sender : this,
+        sender: controller,
       data : {
       data : {
         command : "REFRESHQUEUES",
         command : "REFRESHQUEUES",
         context : Em.I18n.t('services.service.actions.run.yarnRefreshQueues.context') ,
         context : Em.I18n.t('services.service.actions.run.yarnRefreshQueues.context') ,
@@ -250,6 +246,7 @@ App.MainServiceItemController = Em.Controller.extend({
       success : 'refreshYarnQueuesSuccessCallback',
       success : 'refreshYarnQueuesSuccessCallback',
       error : 'refreshYarnQueuesErrorCallback'
       error : 'refreshYarnQueuesErrorCallback'
     });
     });
+    });
   },
   },
   refreshYarnQueuesSuccessCallback  : function(data, ajaxOptions, params) {
   refreshYarnQueuesSuccessCallback  : function(data, ajaxOptions, params) {
     if (data.Requests.id) {
     if (data.Requests.id) {
@@ -269,6 +266,72 @@ App.MainServiceItemController = Em.Controller.extend({
     App.showAlertPopup(Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'), error);
     App.showAlertPopup(Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'), error);
     console.warn('Error during refreshYarnQueues:'+error);
     console.warn('Error during refreshYarnQueues:'+error);
   },
   },
+ /**
+   * On click handler for rebalance Hdfs command from items menu
+   */
+  rebalanceHdfsNodes: function () {
+    var controller = this;
+    App.ModalPopup.show({
+      classNames: ['fourty-percent-width-modal'],
+      header: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.context'),
+      primary: Em.I18n.t('common.start'),
+      secondary: Em.I18n.t('common.cancel'),
+      inputValue: 0,
+      errorMessage: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.promptError'),
+      isInvalid: function () {
+        var intValue = Number(this.get('inputValue'));
+        if (this.get('inputValue')!=='DEBUG' && (isNaN(intValue) || intValue < 0 || intValue > 100)) {
+          return true;
+        }
+        return false;
+      }.property('inputValue'),
+      onPrimary: function () {
+        if (this.get('isInvalid')) {
+          return;
+        }
+    App.ajax.send({
+      name : 'service.item.rebalanceHdfsNodes',
+          sender: controller,
+      data : {
+        hosts : App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('hostName'),
+            threshold: this.get('inputValue')
+      },
+      success : 'rebalanceHdfsNodesSuccessCallback',
+      error : 'rebalanceHdfsNodesErrorCallback'
+    });
+        this.hide();
+  },
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/common/prompt_popup'),
+        text: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.prompt'),
+        didInsertElement: function () {
+          App.tooltip(this.$(".prompt-input"), {
+            placement: "bottom",
+            title: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.promptTooltip')
+          });
+        }
+      })
+    });
+  },
+  rebalanceHdfsNodesSuccessCallback: function (data) {
+    if (data.Requests.id) {
+      App.router.get('backgroundOperationsController').showPopup();
+    } else {
+      console.warn('Error during runRebalanceHdfsNodes');
+    }
+  },
+  rebalanceHdfsNodesErrorCallback : function(data) {
+    var error = Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.error');
+    if(data && data.responseText){
+      try {
+        var json = $.parseJSON(data.responseText);
+        error += json.message;
+      } catch (err) {
+      }
+    }
+    App.showAlertPopup(Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.error'), error);
+    console.warn('Error during runRebalanceHdfsNodes:'+error);
+  },
 
 
   /**
   /**
    * On click callback for <code>run compaction</code> button
    * On click callback for <code>run compaction</code> button

+ 8 - 0
ambari-web/app/messages.js

@@ -294,6 +294,7 @@ Em.I18n.translations = {
   'host.spacesValidation': 'Can\'t contain whitespaces',
   'host.spacesValidation': 'Can\'t contain whitespaces',
   'host.trimspacesValidation': 'Can\'t contain leading or trailing whitespaces',
   'host.trimspacesValidation': 'Can\'t contain leading or trailing whitespaces',
 
 
+  'services.hdfs.rebalance.title' : 'HDFS Rebalance',
   'services.nagios.description':'Nagios Monitoring and Alerting system',
   'services.nagios.description':'Nagios Monitoring and Alerting system',
   'services.ganglia.description':'Ganglia Metrics Collection system',
   'services.ganglia.description':'Ganglia Metrics Collection system',
   'services.hdfs.description':'Apache Hadoop Distributed File System',
   'services.hdfs.description':'Apache Hadoop Distributed File System',
@@ -1173,6 +1174,13 @@ Em.I18n.translations = {
   'services.service.summary.clientCount': '{0} Client Hosts',
   'services.service.summary.clientCount': '{0} Client Hosts',
   'services.service.summary.historyServer': 'History Server Web UI',
   'services.service.summary.historyServer': 'History Server Web UI',
   'services.service.actions.run.rebalancer':'Run Rebalancer',
   'services.service.actions.run.rebalancer':'Run Rebalancer',
+  'services.service.actions.run.rebalanceHdfsNodes':'Run Rebalance HDFS nodes',
+  'services.service.actions.run.rebalanceHdfsNodes.title':'HDFS Rebalance NameNode',
+  'services.service.actions.run.rebalanceHdfsNodes.prompt':'Please enter Balancer treshold:',
+  'services.service.actions.run.rebalanceHdfsNodes.promptTooltip':'Percentage of disk capacity. This overwrites the default threshold',
+  'services.service.actions.run.rebalanceHdfsNodes.promptError':'Value should be integer between 0 and 100',
+  'services.service.actions.run.rebalanceHdfsNodes.context':'Rebalance HDFS nodes',
+  'services.service.actions.run.rebalanceHdfsNodes.error':'Error during remote command: ',
   'services.service.actions.run.yarnRefreshQueues.menu':'Refresh YARN Capacity Scheduler',
   'services.service.actions.run.yarnRefreshQueues.menu':'Refresh YARN Capacity Scheduler',
   'services.service.actions.run.yarnRefreshQueues.context':'Refresh YARN Capacity Scheduler',
   'services.service.actions.run.yarnRefreshQueues.context':'Refresh YARN Capacity Scheduler',
   'services.service.actions.run.yarnRefreshQueues.error':'Error during remote command: ',
   'services.service.actions.run.yarnRefreshQueues.error':'Error during remote command: ',

+ 18 - 1
ambari-web/app/templates/common/host_progress_popup.hbs

@@ -193,7 +193,24 @@
         <div class="task-detail-log-info">
         <div class="task-detail-log-info">
             <div class="content-area">
             <div class="content-area">
                 <div class="task-detail-log-clipboard-wrap"></div>
                 <div class="task-detail-log-clipboard-wrap"></div>
-                <div class="task-detail-log-maintext">
+                <div class="task-detail-log-maintext">  
+                    {{#if view.openedTask.isRebalanceHDFSTask }}
+                      <h5>{{t services.hdfs.rebalance.title}}</h5>
+                      <div class="progresspopup-rebalancehdfs">
+                        <div class="progress progress-striped active">
+                          <div class="bar" {{bindAttr style="view.openedTask.completionProgressStyle"}}></div>
+                        </div>
+                      </div>
+                      <div class="clearfix">
+                        <div class="pull-left">
+                          {{view.openedTask.dataMoved}} moved /
+                          {{view.openedTask.dataLeft}} left /
+                          {{view.openedTask.dataBeingMoved}} being processed
+                        </div>
+                        <button class="btn btn-danger pull-right" {{action stopRebalanceHDFS}}>{{t common.cancel}}</button>
+                      </div>
+                      <hr>
+                    {{/if}}
                     <h5>stderr: &nbsp; <span class="muted">{{view.openedTask.errorLog}} </span></h5>
                     <h5>stderr: &nbsp; <span class="muted">{{view.openedTask.errorLog}} </span></h5>
                     <pre class="stderr">{{view.openedTask.stderr}}</pre>
                     <pre class="stderr">{{view.openedTask.stderr}}</pre>
                     <h5>stdout: &nbsp; <span class="muted"> {{view.openedTask.outputLog}} </span></h5>
                     <h5>stdout: &nbsp; <span class="muted"> {{view.openedTask.outputLog}} </span></h5>

+ 4 - 1
ambari-web/app/templates/common/prompt_popup.hbs

@@ -17,11 +17,14 @@
 }}
 }}
 <div class="prompt-popup">
 <div class="prompt-popup">
   <form>
   <form>
-    <div class="control-group">
+    <div {{bindAttr class=":control-group view.parentView.isInvalid:error"}}>
       <label class="control-label">{{view.text}}</label>
       <label class="control-label">{{view.text}}</label>
       <div class="controls">
       <div class="controls">
         {{view Em.TextField class="prompt-input" valueBinding="view.parentView.inputValue"}}
         {{view Em.TextField class="prompt-input" valueBinding="view.parentView.inputValue"}}
       </div>
       </div>
+      {{#if view.parentView.isInvalid}}
+        <span class="help-inline">{{view.parentView.errorMessage}}</span>
+      {{/if}}
     </div>
     </div>
   </form>
   </form>
 </div>
 </div>

+ 50 - 0
ambari-web/app/utils/ajax/ajax.js

@@ -302,6 +302,55 @@ var urls = {
       };
       };
     }
     }
   },
   },
+  'service.item.rebalanceHdfsNodes' : {
+    'real' : '/clusters/{clusterName}/requests',
+    'mock' : '',
+    'format' : function(data) {
+      return {
+        type : 'POST',
+        data : JSON.stringify({
+          RequestInfo : {
+            'context' : Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.context'),
+            'command' : 'REBALANCEHDFS',
+            'namenode' : JSON.stringify({threshold: data.threshold})
+          },
+          "Requests/resource_filters" : [ {
+            'service_name' : 'HDFS',
+            'component_name' : 'NAMENODE',
+            'hosts' : data.hosts
+          } ]
+        })
+      }
+    }
+  },
+  
+  'cancel.background.operation' : {
+    'real' : '/clusters/{clusterName}/requests',
+    'mock' : '',
+    'format' : function(data) {
+      return {
+        type : 'POST',
+        data : JSON.stringify({
+          RequestInfo : {
+            'context' : 'Cancel background operation',
+            'action'  : 'cancel_background_task',
+            "parameters" : {
+              "cancel_policy"   : "SIGKILL",
+              'before_system_hook_function' : 'fetch_bg_pid_by_taskid',
+              "cancel_task_id"  : data.cancelTaskId
+            }
+          },
+          "Requests/resource_filters" : [ {
+            "service_name" : data.serviceName,
+            "component_name" : data.componentName,
+            'hosts' : data.hosts
+          } ]
+        })
+      }
+    }
+  },
+
+
   'service.item.refreshQueueYarnRequest':{
   'service.item.refreshQueueYarnRequest':{
     'real': '/clusters/{clusterName}/requests',
     'real': '/clusters/{clusterName}/requests',
     'mock': '',
     'mock': '',
@@ -319,6 +368,7 @@ var urls = {
         }
         }
       }
       }
   },
   },
+
   'service.load_config_groups': {
   'service.load_config_groups': {
     'real': '/clusters/{clusterName}/config_groups?ConfigGroup/tag={serviceName}&fields=*',
     'real': '/clusters/{clusterName}/config_groups?ConfigGroup/tag={serviceName}&fields=*',
     'mock': '/data/configurations/config_group.json'
     'mock': '/data/configurations/config_group.json'

+ 3 - 0
ambari-web/app/utils/helper.js

@@ -434,6 +434,9 @@ App.format = {
     if (result === ' Nagios Update Ignore Actionexecute') {
     if (result === ' Nagios Update Ignore Actionexecute') {
        result = Em.I18n.t('common.maintenance.task');
        result = Em.I18n.t('common.maintenance.task');
     }
     }
+    if (result === 'Rebalancehdfs NameNode') {
+       result = Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.title');
+    }
     return result;
     return result;
   },
   },
 
 

+ 36 - 1
ambari-web/app/utils/host_progress_popup.js

@@ -380,6 +380,23 @@ App.HostPopup = Em.Object.create({
                   existTask.set('errorLog', _task.Tasks.error_log);
                   existTask.set('errorLog', _task.Tasks.error_log);
                   existTask.set('startTime', date.startTime(_task.Tasks.start_time));
                   existTask.set('startTime', date.startTime(_task.Tasks.start_time));
                   existTask.set('duration', date.durationSummary(_task.Tasks.start_time, _task.Tasks.end_time));
                   existTask.set('duration', date.durationSummary(_task.Tasks.start_time, _task.Tasks.end_time));
+                  // Puts some command information to render it 
+                  var isRebalanceHDFSTask = (_task.Tasks.command === 'CUSTOM_COMMAND' && _task.Tasks.custom_command_name === 'REBALANCEHDFS');
+                  existTask.set('isRebalanceHDFSTask', isRebalanceHDFSTask);
+                  if(isRebalanceHDFSTask){
+                    var structuredOut = _task.Tasks.structured_out;
+                    if (!structuredOut || structuredOut === 'null') {
+                      structuredOut = {};
+                    }
+                    
+                    existTask.set('dataMoved', structuredOut['dataMoved'] || '0');
+                    existTask.set('dataLeft', structuredOut['dataLeft'] || '0');
+                    existTask.set('dataBeingMoved', structuredOut['dataBeingMoved'] || '0');
+                    existTask.set('completionProgressStyle', 'width:' + (structuredOut['completePercent'] || 0) * 100 + '%;');
+
+                    existTask.set('command', _task.Tasks.command);
+                    existTask.set('custom_command_name', _task.Tasks.custom_command_name);
+                  }
                 } else {
                 } else {
                   existTasks.pushObject(this.createTask(_task));
                   existTasks.pushObject(this.createTask(_task));
                 }
                 }
@@ -917,7 +934,25 @@ App.HostPopup = Em.Object.create({
           $(".modal").scrollTop(0);
           $(".modal").scrollTop(0);
           $(".modal-body").scrollTop(0);
           $(".modal-body").scrollTop(0);
         },
         },
-
+        
+        stopRebalanceHDFS: function () {
+          var hostPopup = this;
+          return App.showConfirmationPopup(function () {
+          App.ajax.send({
+            name : 'cancel.background.operation',
+              sender : hostPopup,
+            data : {
+                cancelTaskId : hostPopup.get('openedTaskId'),
+              command : "REFRESHQUEUES",
+              context : Em.I18n.t('services.service.actions.run.yarnRefreshQueues.context') ,
+              hosts : App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('hostName'),
+              serviceName : "HDFS",
+              componentName : "NAMENODE"
+            }
+          });
+            hostPopup.backToServiceList();
+          });
+        },
         /**
         /**
          * Onclick handler for selected Task
          * Onclick handler for selected Task
          */
          */

+ 2 - 0
ambari-web/app/views/common/modal_popup.js

@@ -260,6 +260,8 @@ App.showPromptPopup = function (text, primary, defaultValue, secondary) {
       text: text
       text: text
     }),
     }),
     inputValue: defaultValue || '',
     inputValue: defaultValue || '',
+    isInvalid: false,
+    errorMessage: '',
     onPrimary: function () {
     onPrimary: function () {
       this.hide();
       this.hide();
       primary(this.get('inputValue'));
       primary(this.get('inputValue'));

+ 10 - 1
ambari-web/app/views/main/service/item.js

@@ -92,6 +92,13 @@ App.MainServiceItemView = Em.View.extend({
         label: Em.I18n.t('services.service.actions.reassign.master'),
         label: Em.I18n.t('services.service.actions.reassign.master'),
         cssClass: 'icon-share-alt',
         cssClass: 'icon-share-alt',
         disabled: false
         disabled: false
+      },
+      REBALANCE_HDFS: {
+        action: 'rebalanceHdfsNodes',
+        context: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.context'),
+        label: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes'),
+        cssClass: 'icon-refresh',
+        disabled: false
       }
       }
     }
     }
   },
   },
@@ -181,8 +188,10 @@ App.MainServiceItemView = Em.View.extend({
         options.push(actionMap.RUN_SMOKE_TEST);
         options.push(actionMap.RUN_SMOKE_TEST);
       }
       }
       options.push(actionMap.TOGGLE_PASSIVE);
       options.push(actionMap.TOGGLE_PASSIVE);
-
       var serviceName = service.get('serviceName');
       var serviceName = service.get('serviceName');
+      if (serviceName === 'HDFS') {
+        options.push(actionMap.REBALANCE_HDFS);
+      }
       self.addActionMap().filterProperty('service', serviceName).forEach(function(item) {
       self.addActionMap().filterProperty('service', serviceName).forEach(function(item) {
         item.action = 'add' + item.component;
         item.action = 'add' + item.component;
         item.disabled = self.get('controller.isAddDisabled-' + item.component);
         item.disabled = self.get('controller.isAddDisabled-' + item.component);

部分文件因为文件数量过多而无法显示