
AMBARI-5968. Decommission of DataNode does not work (Ivan Kozlov via ncole)

Nate Cole 11 years ago
parent commit b849349bc3

+ 7 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py

@@ -145,9 +145,13 @@ def decommission():
           user=hdfs_user
   )
 
-  # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-  # need to execute each command scoped to a particular namenode
-  ExecuteHadoop(format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes'),
+  if params.dfs_ha_enabled:
+    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+    # need to execute each command scoped to a particular namenode
+    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+  else:
+    nn_refresh_cmd = format('dfsadmin -refreshNodes')
+  ExecuteHadoop(nn_refresh_cmd,
                 user=hdfs_user,
                 conf_dir=conf_dir,
                 kinit_override=True)
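
A minimal, self-contained sketch of the branch this hunk introduces (not the Ambari source): dfs_ha_enabled and namenode_rpc are assumptions mirroring the stack's params module, and the assertions show the two command shapes the patch can hand to ExecuteHadoop.

    def build_refresh_cmd(dfs_ha_enabled, namenode_rpc):
        if dfs_ha_enabled:
            # HA: an HDFS bug keeps an unscoped refreshNodes from reaching
            # both NameNodes, so the command is scoped to one NameNode's
            # RPC address.
            return 'dfsadmin -fs hdfs://%s -refreshNodes' % namenode_rpc
        # Non-HA: the default fs from core-site.xml is the only NameNode.
        return 'dfsadmin -refreshNodes'

    assert build_refresh_cmd(False, None) == 'dfsadmin -refreshNodes'
    assert build_refresh_cmd(True, 'nn1.example.com:8020') == \
        'dfsadmin -fs hdfs://nn1.example.com:8020 -refreshNodes'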

+ 19 - 1
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -370,7 +370,25 @@ class TestNamenode(RMFTestCase):
                               only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
                               )
     self.assertNoMoreResources()
-    
+
+  def test_decommission_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "decommission",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Execute', '', user = 'hdfs')
+    self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -refreshNodes',
+                              user = 'hdfs',
+                              conf_dir = '/etc/hadoop/conf',
+                              kinit_override = True)
+    self.assertNoMoreResources()
+
   def test_decommission_ha(self):
     self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
                        classname = "NameNode",
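
For context, a hedged sketch of the shell command an ExecuteHadoop resource like the one asserted above is expected to produce; the exact --config flag placement is an assumption about resource_management's hadoop wrapper, not something shown in this commit.

    def hadoop_cmd(command, conf_dir):
        # ExecuteHadoop-style expansion: run the hadoop CLI against a
        # given configuration directory (assumed flag order).
        return 'hadoop --config %s %s' % (conf_dir, command)

    print(hadoop_cmd('dfsadmin -refreshNodes', '/etc/hadoop/conf'))
    # hadoop --config /etc/hadoop/conf dfsadmin -refreshNodes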