@@ -0,0 +1,521 @@
+{
+    "roleCommand": "START",
+    "clusterName": "cl1",
+    "hostname": "c6401.ambari.apache.org",
+    "passiveInfo": [],
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "ambari_db_rca_password": "mapred",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop\"},{\"type\":\"rpm\",\"name\":\"hadoop-libhdfs\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo-native\"},{\"type\":\"rpm\",\"name\":\"snappy\"},{\"type\":\"rpm\",\"name\":\"snappy-devel\"},{\"type\":\"rpm\",\"name\":\"ambari-log4j\"}]",
+        "stack_version": "2.0.6",
+        "stack_name": "HDP",
+        "db_name": "ambari",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz",
+        "ambari_db_rca_username": "mapred",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    },
+    "commandType": "EXECUTION_COMMAND",
+    "roleParams": {},
+    "serviceName": "HDFS",
+    "role": "ZKFC",
+    "commandParams": {
+        "service_package_folder": "HDP/2.0.6/services/HDFS/package",
+        "script": "scripts/zkfc_slave.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "schema_version": "2.0",
+        "command_timeout": "600",
+        "script_type": "PYTHON"
+    },
+    "taskId": 138,
+    "public_hostname": "c6401.ambari.apache.org",
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+            "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+            "mapreduce.reduce.input.buffer.percent": "0.0",
+            "mapreduce.output.fileoutputformat.compress": "false",
+            "mapreduce.framework.name": "yarn",
+            "mapreduce.map.speculative": "false",
+            "mapreduce.reduce.shuffle.merge.percent": "0.66",
+            "yarn.app.mapreduce.am.resource.mb": "683",
+            "mapreduce.map.java.opts": "-Xmx273m",
+            "mapreduce.cluster.administrators": " hadoop",
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+            "mapreduce.reduce.speculative": "false",
+            "mapreduce.reduce.java.opts": "-Xmx546m",
+            "mapreduce.am.max-attempts": "2",
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.reduce.log.level": "INFO",
+            "mapreduce.map.sort.spill.percent": "0.7",
+            "mapreduce.task.io.sort.mb": "136",
+            "mapreduce.task.timeout": "300000",
+            "mapreduce.map.memory.mb": "341",
+            "mapreduce.task.io.sort.factor": "100",
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+            "mapreduce.reduce.memory.mb": "683",
+            "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM",
+            "yarn.app.mapreduce.am.log.level": "INFO",
+            "mapreduce.map.log.level": "INFO",
+            "mapreduce.shuffle.port": "13562",
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+            "mapreduce.map.output.compress": "false",
+            "yarn.app.mapreduce.am.staging-dir": "/user",
+            "mapreduce.reduce.shuffle.parallelcopies": "30",
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+            "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab",
+            "mapreduce.jobhistory.done-dir": "/mr-history/done",
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        },
+        "global": {
+            "syncLimit": "5",
+            "resourcemanager_principal_name": "rm/_HOST",
+            "hadoop_http_principal_name": "HTTP/_HOST",
+            "kinit_path_local": "/usr/bin",
+            "resourcemanager_http_primary_name": "HTTP",
+            "datanode_primary_name": "dn",
+            "namenode_principal_name": "nn/_HOST",
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",
+            "dfs_datanode_http_address": "1022",
+            "falcon_user": "falcon",
+            "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab",
+            "namenode_opt_maxnewsize": "200m",
+            "journalnode_keytab": "/etc/security/keytabs/jn.service.keytab",
+            "snamenode_primary_name": "nn",
+            "nagios_primary_name": "nagios",
+            "jobhistory_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "clientPort": "2181",
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",
+            "jobhistory_keytab": "/etc/security/keytabs/jhs.service.keytab",
+            "datanode_principal_name": "dn/_HOST",
+            "namenode_opt_newsize": "200m",
+            "nagios_group": "nagios",
+            "hcat_user": "hcat",
+            "hadoop_heapsize": "1024",
+            "hbase_regionserver_primary_name": "hbase",
+            "zk_user": "zookeeper",
+            "keytab_path": "/etc/security/keytabs",
+            "nodemanager_primary_name": "nm",
+            "zk_data_dir": "/hadoop/zookeeper",
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab",
+            "namenode_heapsize": "1024m",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kerberos_domain": "EXAMPLE.COM",
+            "yarn_nodemanager_container-executor_class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp",
+            "lzo_enabled": "true",
+            "dtnode_heapsize": "1024m",
+            "dfs_datanode_address": "1019",
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+            "initLimit": "10",
+            "zk_pid_dir": "/var/run/zookeeper",
+            "namenode_primary_name": "nn",
+            "tickTime": "2000",
+            "storm_user": "storm",
+            "datanode_keytab": "/etc/security/keytabs/dn.service.keytab",
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+            "journalnode_primary_name": "jn",
+            "hbase_user": "hbase",
+            "gmetad_user": "nobody",
+            "nodemanager_http_primary_name": "HTTP",
+            "smokeuser": "ambari-qa",
+            "nodemanager_keytab": "/etc/security/keytabs/nm.service.keytab",
+            "nagios_user": "nagios",
+            "security_enabled": "true",
+            "proxyuser_group": "users",
+            "hbase_primary_name": "hbase",
+            "oozie_http_primary_name": "HTTP",
+            "yarn_heapsize": "1024",
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+            "nodemanager_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "oozie_user": "oozie",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "zookeeper_primary_name": "zookeeper",
+            "yarn_user": "yarn",
+            "gmond_user": "nobody",
+            "hive_metastore_primary_name": "hive",
+            "jobhistory_primary_name": "jhs",
+            "hdfs_user": "hdfs",
+            "webhcat_user": "hcat",
+            "nodemanager_heapsize": "1024",
+            "resourcemanager_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "zk_log_dir": "/var/log/zookeeper",
+            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab",
+            "smokeuser_principal_name": "ambari-qa",
+            "mapred_user": "mapred",
+            "jobhistory_http_primary_name": "HTTP",
+            "smokeuser_primary_name": "ambari-qa",
+            "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hbase_master_primary_name": "hbase",
+            "hdfs_primary_name": "hdfs",
+            "jobhistory_principal_name": "jhs/_HOST",
+            "webHCat_http_primary_name": "HTTP",
+            "rca_enabled": "false",
+            "hcat_conf_dir": "",
+            "resourcemanager_primary_name": "rm",
+            "hadoop_http_primary_name": "HTTP",
+            "jobhistory_http_principal_name": "HTTP/_HOST",
+            "resourcemanager_keytab": "/etc/security/keytabs/rm.service.keytab",
+            "snamenode_principal_name": "nn/_HOST",
+            "nodemanager_principal_name": "nm/_HOST",
+            "user_group": "hadoop",
+            "nodemanager_http_principal_name": "HTTP/_HOST",
+            "hive_user": "hive",
+            "resourcemanager_http_principal_name": "HTTP/_HOST",
+            "oozie_primary_name": "oozie",
+            "kerberos_install_type": "MANUALLY_SET_KERBEROS",
+            "journalnode_principal_name": "jn/_HOST",
+            "resourcemanager_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "hbase_principal_name": "hbase",
+            "hdfs_principal_name": "hdfs"
+        },
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40",
+            "yarn.scheduler.capacity.root.capacity": "100",
+            "yarn.scheduler.capacity.root.acl_administer_queues": "*",
+            "yarn.scheduler.capacity.root.queues": "default",
+            "yarn.scheduler.capacity.maximum-applications": "10000",
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+            "yarn.scheduler.capacity.root.unfunded.capacity": "50",
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+            "yarn.scheduler.capacity.root.default.state": "RUNNING",
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+            "yarn.scheduler.capacity.root.default.capacity": "100",
+            "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*"
+        },
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.shared.edits.dir": "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ns1",
+            "dfs.namenode.rpc-address.ns1.nn1": "c6401.ambari.apache.org:8020",
+            "dfs.namenode.http-address.ns1.nn2": "c6402.ambari.apache.org:50070",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.namenode.http-address.ns1.nn1": "c6401.ambari.apache.org:50070",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+            "dfs.block.access.token.enable": "true",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+            "dfs.cluster.administrators": " hdfs",
+            "ambari.dfs.datanode.http.port": "1022",
+            "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1.0f",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "dfs.ha.automatic-failover.enabled": "true",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal",
+            "dfs.blocksize": "134217728",
+            "dfs.datanode.max.transfer.threads": "1024",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "100",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+            "dfs.nameservices": "ns1",
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+            "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470",
+            "dfs.client.failover.proxy.provider.ns1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.namenode.accesstime.precision": "0",
+            "ambari.dfs.datanode.port": "1019",
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.ha.fencing.methods": "shell(/bin/true)",
+            "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
+            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}",
+            "dfs.datanode.du.reserved": "1073741824",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.namenode.rpc-address.ns1.nn2": "c6402.ambari.apache.org:8020",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.ha.namenodes.ns1": "nn1,nn2",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.replication.max": "50",
+            "dfs.namenode.checkpoint.period": "21600"
+        },
+        "yarn-log4j": {
+            "log4j.appender.JSA.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n",
+            "log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "${yarn.server.resourcemanager.appsummary.logger}",
+            "log4j.appender.RMSUMMARY.File": "/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}",
+            "log4j.appender.RMSUMMARY.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.RMSUMMARY.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n",
+            "hadoop.mapreduce.jobsummary.log.file": "hadoop-mapreduce.jobsummary.log",
+            "log4j.appender.RMSUMMARY.MaxBackupIndex": "20",
+            "log4j.appender.RMSUMMARY": "org.apache.log4j.RollingFileAppender",
+            "log4j.appender.JSA": "org.apache.log4j.DailyRollingFileAppender",
+            "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}",
+            "yarn.server.resourcemanager.appsummary.log.file": "hadoop-mapreduce.jobsummary.log",
+            "log4j.appender.JSA.DatePattern": ".yyyy-MM-dd",
+            "yarn.server.resourcemanager.appsummary.logger": "${hadoop.root.logger}",
+            "log4j.appender.JSA.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.RMSUMMARY.MaxFileSize": "256MB",
+            "log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "false"
+        },
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "gluster.daemon.user": "null",
+            "fs.trash.interval": "360",
+            "hadoop.security.authentication": "kerberos",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "fs.AbstractFileSystem.glusterfs.impl": "null",
+            "fs.defaultFS": "hdfs://ns1",
+            "ipc.client.connect.max.retries": "50",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.security.authorization": "true",
+            "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT"
+        },
+        "hdfs-log4j": {
+            "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n",
+            "log4j.appender.DRFAAUDIT.DatePattern": ".yyyy-MM-dd",
+            "log4j.appender.EventCounter": "org.apache.hadoop.log.metrics.EventCounter",
+            "log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "false",
+            "log4j.appender.DRFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}",
+            "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender",
+            "log4j.appender.MRAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n",
+            "log4j.additivity.org.apache.hadoop.mapred.AuditLogger": "false",
+            "log4j.appender.DRFAS": "org.apache.log4j.DailyRollingFileAppender",
+            "hadoop.tasklog.noKeepSplits": "4",
+            "log4j.appender.DRFAAUDIT": "org.apache.log4j.DailyRollingFileAppender",
+            "log4j.appender.DRFAAUDIT.File": "${hadoop.log.dir}/hdfs-audit.log",
+            "log4j.appender.DRFAS.DatePattern": ".yyyy-MM-dd",
+            "log4j.appender.MRAUDIT": "org.apache.log4j.DailyRollingFileAppender",
+            "hadoop.security.log.maxbackupindex": "20",
+            "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd",
+            "log4j.appender.console.layout": "org.apache.log4j.PatternLayout",
+            "log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service": "ERROR",
+            "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout",
+            "hadoop.tasklog.taskid": "null",
+            "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout",
+            "hadoop.root.logger": "INFO,console",
+            "hadoop.security.logger": "INFO,console",
+            "log4j.appender.DRFAAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n",
+            "log4j.appender.RFAS.MaxFileSize": "${hadoop.security.log.maxfilesize}",
+            "log4j.appender.MRAUDIT.DatePattern": ".yyyy-MM-dd",
+            "log4j.appender.RFA.File": "${hadoop.log.dir}/${hadoop.log.file}",
+            "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n",
+            "log4j.appender.TLA": "org.apache.hadoop.mapred.TaskLogAppender",
+            "log4j.logger.org.apache.hadoop.metrics2": "${hadoop.metrics.log.level}",
+            "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}",
+            "log4j.appender.TLA.layout": "org.apache.log4j.PatternLayout",
+            "hadoop.log.file": "hadoop.log",
+            "hadoop.security.log.file": "SecurityAuth.audit",
+            "log4j.appender.console.target": "System.err",
+            "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "${hdfs.audit.logger}",
+            "hdfs.audit.logger": "INFO,console",
+            "log4j.appender.RFAS.MaxBackupIndex": "${hadoop.security.log.maxbackupindex}",
+            "log4j.appender.TLA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n",
+            "hadoop.tasklog.iscleanup": "false",
+            "mapred.audit.logger": "INFO,console",
+            "log4j.appender.DRFAAUDIT.layout": "org.apache.log4j.PatternLayout",
+            "hadoop.tasklog.logsRetainHours": "12",
+            "log4j.appender.MRAUDIT.File": "${hadoop.log.dir}/mapred-audit.log",
+            "log4j.appender.TLA.totalLogFileSize": "${hadoop.tasklog.totalLogFileSize}",
+            "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender",
+            "log4j.category.SecurityLogger": "${hadoop.security.logger}",
+            "hadoop.tasklog.totalLogFileSize": "100",
+            "log4j.appender.RFA.MaxFileSize": "256MB",
+            "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender",
+            "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender",
+            "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+            "log4j.appender.DRFAS.layout": "org.apache.log4j.PatternLayout",
+            "log4j.threshhold": "ALL",
+            "log4j.appender.TLA.isCleanup": "${hadoop.tasklog.iscleanup}",
+            "log4j.appender.TLA.taskId": "${hadoop.tasklog.taskid}",
+            "log4j.appender.console.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n",
+            "log4j.appender.MRAUDIT.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.console": "org.apache.log4j.ConsoleAppender",
+            "hadoop.log.dir": ".",
+            "hadoop.security.log.maxfilesize": "256MB",
+            "hadoop.metrics.log.level": "INFO",
+            "log4j.appender.RFA.MaxBackupIndex": "10",
+            "log4j.rootLogger": "${hadoop.root.logger}, EventCounter",
+            "log4j.appender.RFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}",
+            "log4j.logger.org.apache.hadoop.mapred.AuditLogger": "${mapred.audit.logger}",
+            "hadoop.tasklog.purgeLogSplits": "true",
+            "log4j.appender.DRFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n"
+        },
+        "zookeeper-log4j": {
+            "log4j.appender.CONSOLE.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.TRACEFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n",
+            "log4j.appender.CONSOLE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n",
+            "log4j.appender.ROLLINGFILE": "org.apache.log4j.RollingFileAppender",
+            "log4j.appender.CONSOLE.Threshold": "INFO",
+            "log4j.appender.CONSOLE": "org.apache.log4j.ConsoleAppender",
+            "log4j.appender.ROLLINGFILE.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.TRACEFILE.layout": "org.apache.log4j.PatternLayout",
+            "log4j.appender.TRACEFILE.Threshold": "TRACE",
+            "log4j.appender.ROLLINGFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n",
+            "log4j.appender.TRACEFILE": "org.apache.log4j.FileAppender",
+            "log4j.appender.TRACEFILE.File": "zookeeper_trace.log",
+            "log4j.appender.ROLLINGFILE.File": "zookeeper.log",
+            "log4j.appender.ROLLINGFILE.MaxFileSize": "10MB",
+            "log4j.appender.ROLLINGFILE.Threshold": "DEBUG"
+        },
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
+            "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM",
+            "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+            "yarn.scheduler.minimum-allocation-mb": "683",
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+            "yarn.log-aggregation.retain-seconds": "2592000",
+            "yarn.scheduler.maximum-allocation-mb": "2048",
+            "yarn.log-aggregation-enable": "true",
+            "yarn.nodemanager.address": "0.0.0.0:45454",
+            "yarn.nodemanager.container-monitor.interval-ms": "3000",
+            "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+            "yarn.nodemanager.log-aggregation.compression-type": "gz",
+            "yarn.nodemanager.log.retain-second": "604800",
+            "yarn.nodemanager.delete.debug-delay-sec": "0",
+            "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab",
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
+            "yarn.nodemanager.health-checker.interval-ms": "135000",
+            "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+            "yarn.resourcemanager.am.max-attempts": "2",
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+            "yarn.nodemanager.vmem-check-enabled": "false",
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+            "yarn.admin.acl": "*",
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+            "yarn.nodemanager.resource.memory-mb": "2048",
+            "yarn.nodemanager.linux-container-executor.group": "hadoop",
+            "yarn.acl.enable": "true",
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+            "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
+            "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+            "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab",
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        }
+    },
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        },
+        "global": {
+            "tag": "version1392403922876"
+        },
+        "mapred-site": {
+            "tag": "version1392403922877"
+        },
+        "hdfs-site": {
+            "tag": "version1392403922876"
+        },
+        "yarn-log4j": {
+            "tag": "version1"
+        },
+        "core-site": {
+            "tag": "version1392403922876"
+        },
+        "hdfs-log4j": {
+            "tag": "version1"
+        },
+        "zookeeper-log4j": {
+            "tag": "version1"
+        },
+        "yarn-site": {
+            "tag": "version1392403922877"
+        }
+    },
+    "commandId": "25-4",
+    "clusterHostInfo": {
+        "nm_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "zkfc_hosts": [
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "all_ping_ports": [
+            "8670",
+            "8670",
+            "8670"
+        ],
+        "journalnode_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ],
+        "all_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "slave_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "namenode_host": [
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}