
AMBARI-4055. set core file size on hosts to get core dump when JVM crashes (Dmytro Shkvyra via dlysnichenko)

Lisnichenko Dmitro · 11 years ago · commit 52e67f92c0

+ 10 - 2
ambari-agent/src/main/python/resource_management/core/system.py

@@ -96,12 +96,20 @@ class System(object):
     if operatingsystem == "linux":
       lsb = self.lsb
       if not lsb:
-        if os.path.exists("/etc/redhat-release"):
-          return "redhat"
         if os.path.exists("/etc/fedora-release"):
           return "fedora"
         if os.path.exists("/etc/centos-release"):
           return "centos"
+        if os.path.exists("/etc/oracle-release"):
+          return "oracle"        
+        if os.path.exists("/etc/redhat-release"):
+          with file('/etc/redhat-release') as f:
+           release = f.read().lower() 
+           if 'centos' in release:
+             return 'centos'
+           elif 'fedora' in release:
+             return 'fedora'
+          return 'redhat'
         if os.path.exists("/etc/SuSE-release"):
           return "suse"
         if os.path.exists("/etc/system-release"):
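A minimal standalone sketch of the detection order this hunk introduces, assuming only the os module (the helper name is hypothetical; the real logic is a method on System that also consults self.lsb first):

import os

def _detect_linux_flavor():
  if os.path.exists("/etc/fedora-release"):
    return "fedora"
  if os.path.exists("/etc/centos-release"):
    return "centos"
  if os.path.exists("/etc/oracle-release"):
    return "oracle"
  # CentOS and Fedora also ship /etc/redhat-release, so inspect its
  # contents before falling back to plain "redhat"
  if os.path.exists("/etc/redhat-release"):
    with open("/etc/redhat-release") as f:
      release = f.read().lower()
    if "centos" in release:
      return "centos"
    elif "fedora" in release:
      return "fedora"
    return "redhat"
  if os.path.exists("/etc/SuSE-release"):
    return "suse"
  return None

The point of the reordering is that checking /etc/redhat-release first would misreport CentOS and Fedora hosts as "redhat", since both distributions carry that file as well.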

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py

@@ -23,6 +23,11 @@ import os
 
 config = Script.get_config()
 
+if System.get_instance().platform == "oracle":
+  ulimit_cmd = ''
+else:
+  ulimit_cmd = "ulimit -c unlimited && if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "
+
 #security params
 security_enabled = config['configurations']['global']['security_enabled']
 dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
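The new ulimit_cmd prefix raises the core-file size limit to unlimited before a daemon starts, re-reads the limit to verify the shell accepted it, and aborts with exit code 77 if it did not; per this diff, the guard is skipped entirely on Oracle Linux. A minimal sketch of that branch, assuming a plain platform string in place of the real System.get_instance().platform lookup (the function name is hypothetical):

def build_ulimit_cmd(platform):
  # the diff omits the guard on Oracle Linux hosts
  if platform == "oracle":
    return ''
  # exit 77 aborts daemon startup when the core-size limit was not raised
  return "ulimit -c unlimited && if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "

assert build_ulimit_cmd("oracle") == ''
assert build_ulimit_cmd("centos").startswith("ulimit -c unlimited")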

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py

@@ -29,7 +29,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
   pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
   log_dir = format("{hdfs_log_dir_prefix}/{user}")
   hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
     "{hadoop_bin}/hadoop-daemon.sh")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
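A sketch of how the prefix composes into the final daemon command. The format() used in utils.py is resource_management's scope-interpolating helper; plain str.format with hypothetical paths stands in for it here (all values in the dict are assumptions, not taken from the patch):

params = {
  "ulimit_cmd": ("ulimit -c unlimited && "
                 "if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "),
  "hadoop_libexec_dir": "/usr/lib/hadoop/libexec",  # assumed path
  "hadoop_bin": "/usr/lib/hadoop/bin",              # assumed path
}
hadoop_daemon = ("{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
                 "{hadoop_bin}/hadoop-daemon.sh").format(**params)
cmd = "{0} --config {1}".format(hadoop_daemon, "/etc/hadoop/conf")  # assumed conf dir
print(cmd)

On non-Oracle hosts this yields a shell line that first secures the core-dump limit and only then execs hadoop-daemon.sh; on Oracle hosts ulimit_cmd is empty and the command is unchanged from before the patch.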
 

+ 6 - 1
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py

@@ -23,6 +23,11 @@ import os
 
 config = Script.get_config()
 
+if System.get_instance().platform == "oracle":
+  ulimit_cmd = ''
+else:
+  ulimit_cmd = "ulimit -c unlimited && if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "
+
 #security params
 security_enabled = config['configurations']['global']['security_enabled']
 dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
@@ -185,4 +190,4 @@ journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-ad
 if journalnode_address:
   journalnode_port = journalnode_address.split(":")[1]
 
-falcon_store_uri = default('configurations/global/falcon_store_uri', None)
+falcon_store_uri = default('configurations/global/falcon_store_uri', None)

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py

@@ -29,7 +29,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
   pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
   log_dir = format("{hdfs_log_dir_prefix}/{user}")
   hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
     "{hadoop_bin}/hadoop-daemon.sh")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")