
AMBARI-11750. Cannot start RM and HiveServer2 after upgrade from 1.7.0 to 2.1.0 (it may not be only an upgrade issue) (aonishuk)

Andrew Onishuk, 10 years ago
parent commit f8a3294844

+ 26 - 3
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java

@@ -66,6 +66,8 @@ import com.google.gson.JsonParser;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.Transactional;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 
 /**
@@ -1044,9 +1046,6 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
   }
 
   protected void updateHdfsConfigs() throws AmbariException {
-    /***
-     * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS from hadoop-env.sh
-     */
     AmbariManagementController ambariManagementController = injector.getInstance(
         AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
@@ -1058,6 +1057,9 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
+          /***
+           * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS from hadoop-env.sh
+           */
           content = null;
           if (cluster.getDesiredConfigByType("hadoop-env") != null) {
             content = cluster.getDesiredConfigByType(
@@ -1071,6 +1073,27 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
             updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
                 prop, true, false);
           }
+          /***
+           * Update dfs.namenode.rpc-address set hostname instead of localhost
+           */
+          if (cluster.getDesiredConfigByType("hdfs-site") != null && !cluster.getHosts("HDFS","NAMENODE").isEmpty()) {
+
+            URI nameNodeRpc = null;
+            String hostName = cluster.getHosts("HDFS","NAMENODE").iterator().next();
+            // Try to generate dfs.namenode.rpc-address
+            if (cluster.getDesiredConfigByType("core-site").getProperties() != null &&
+                      cluster.getDesiredConfigByType("core-site").getProperties().get("fs.defaultFS") != null) {
+              try {
+                nameNodeRpc = new URI(cluster.getDesiredConfigByType("core-site").getProperties().get("fs.defaultFS"));
+                Map<String, String> hdfsProp = new HashMap<String, String>();
+                hdfsProp.put("dfs.namenode.rpc-address", hostName + ":" + nameNodeRpc.getPort());
+                updateConfigurationPropertiesForCluster(cluster, "hdfs-site",
+                        hdfsProp, true, false);
+              } catch (URISyntaxException e) {
+                e.printStackTrace();
+              }
+            }
+          }
         }
       }
     }
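
The core of this change is the URI parsing in the last hunk: fs.defaultFS in core-site may still point at localhost after the upgrade, so the catalog step reuses its port but swaps in the real NAMENODE host when writing dfs.namenode.rpc-address. A minimal standalone sketch of that derivation follows; the host and fs.defaultFS values are examples, not taken from any real cluster.

import java.net.URI;
import java.net.URISyntaxException;

public class RpcAddressSketch {
  public static void main(String[] args) throws URISyntaxException {
    // fs.defaultFS as it may appear in core-site after the upgrade; "localhost"
    // is exactly the stale value the catalog step works around.
    String fsDefaultFS = "hdfs://localhost:8020";
    // The actual NAMENODE host, as the catalog obtains it from
    // cluster.getHosts("HDFS", "NAMENODE") (example value here).
    String nameNodeHost = "nn1.example.com";

    // java.net.URI parses the authority; getPort() returns -1 when no port is
    // present, and new URI(...) throws URISyntaxException on malformed values,
    // which is what the catch block in the catalog code handles.
    URI nameNodeRpc = new URI(fsDefaultFS);
    String rpcAddress = nameNodeHost + ":" + nameNodeRpc.getPort();

    System.out.println(rpcAddress); // prints nn1.example.com:8020
  }
}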

+ 55 - 0
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java

@@ -36,11 +36,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 
 import javax.persistence.EntityManager;
 
+import com.google.inject.AbstractModule;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -63,10 +65,13 @@ import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityP
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
+import org.easymock.EasyMockSupport;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -306,6 +311,56 @@ public class UpgradeCatalog210Test {
   }
 
 
+  @Test
+  public void testUpdateClusterEnvConfiguration() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
+
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
+    final Config mockCoreSite = easyMockSupport.createStrictMock(Config.class);
+
+    final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
+    final Map<String, String> propertiesExpectedCoreSite = new HashMap<String, String>();
+    propertiesExpectedCoreSite.put("fs.defaultFS", "hdfs://EXAMPLE.COM:8020");
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(ConfigHelper.class).toInstance(mockConfigHelper);
+        bind(Clusters.class).toInstance(mockClusters);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    // Expected operation
+    expect(mockClusterExpected.getDesiredConfigByType("hadoop-env")).andReturn(null).once();
+
+    // Expected operation
+    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
+    expect(mockClusterExpected.getHosts("HDFS", "NAMENODE")).andReturn( new HashSet<String>() {{
+      add("host1");
+    }}).atLeastOnce();
+    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
+
+    expect(mockClusterExpected.getDesiredConfigByType("core-site")).andReturn(mockCoreSite).anyTimes();
+    expect(mockCoreSite.getProperties()).andReturn(propertiesExpectedCoreSite).anyTimes();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog210.class).updateHdfsConfigs();
+    easyMockSupport.verifyAll();
+  }
+
   /**
    * @param dbAccessor
    * @return
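
The new test wires its EasyMock instances through a Guice AbstractModule so that UpgradeCatalog210's injected dependencies resolve to mocks before updateHdfsConfigs() runs. A condensed sketch of that wiring pattern, with a hypothetical Service interface standing in for the Ambari types:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import org.easymock.EasyMockSupport;

public class MockWiringSketch {
  interface Service { String name(); }

  public static void main(String[] args) {
    EasyMockSupport support = new EasyMockSupport();
    final Service mockService = support.createNiceMock(Service.class);

    // Bind the mock so anything the injector builds sees it instead of a real Service.
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        bind(Service.class).toInstance(mockService);
      }
    });

    support.replayAll();
    // A nice mock returns type defaults (null here) for calls with no expectation set.
    System.out.println(injector.getInstance(Service.class).name()); // prints null
    support.verifyAll();
  }
}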