123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505 |
- <?xml version="1.0"?>
- <!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
- <upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="upgrade-config.xsd">
- <services>
- <service name="STORM">
- <component name="NIMBUS">
- <changes>
- <definition xsi:type="configure" id="hdp_2_5_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"
- summary="Removing empty storm.topology.submission.notifier.plugin.class property">
- <type>storm-site</type>
- <transfer operation="delete" delete-key="storm.topology.submission.notifier.plugin.class" if-key="storm.topology.submission.notifier.plugin.class"
- if-type="storm-site" if-value=" "/>
- </definition>
- <definition xsi:type="configure" id="increase_storm_zookeeper_timeouts"
- summary="Increase storm.zookeeper.session.timeout and storm.zookeeper.connection.timeout property">
- <type>storm-site</type>
- <set key="storm.zookeeper.session.timeout"
- value="30000"
- if-key="storm.zookeeper.session.timeout"
- if-type="storm-site"
- if-value="20000" />
- <set key="storm.zookeeper.connection.timeout"
- value="30000"
- if-key="storm.zookeeper.connection.timeout"
- if-type="storm-site"
- if-value="15000" />
- </definition>
- <definition xsi:type="configure" id="storm_worker_log4j_parameterize" summary="Parameterizing Storm Worker Log4J Properties">
- <type>storm-worker-log4j</type>
- <set key="storm_wrkr_a1_maxfilesize" value="100"/>
- <set key="storm_wrkr_a1_maxbackupindex" value="9"/>
- <set key="storm_wrkr_out_maxfilesize" value="100"/>
- <set key="storm_wrkr_out_maxbackupindex" value="4"/>
- <set key="storm_wrkr_err_maxfilesize" value="100"/>
- <set key="storm_wrkr_err_maxbackupindex" value="4"/>
- <regex-replace key="content" find="A1"
		fileName="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}"
		filePattern="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>\$\{pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
- replace-with="A1"
		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"
		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>${pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_wrkr_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_wrkr_a1_maxbackupindex}}"/>
- <regex-replace key="content" find="STDOUT"
		fileName="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out"
		filePattern="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out.%i.gz">
 <PatternLayout>
 <pattern>\$\{patternNoTime}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
- replace-with="STDOUT"
		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out"
		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz">
 <PatternLayout>
 <pattern>${patternNoTime}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_wrkr_out_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_wrkr_out_maxbackupindex}}"/>
- <regex-replace key="content" find="STDERR"
		fileName="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err"
		filePattern="\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err.%i.gz">
 <PatternLayout>
 <pattern>\$\{patternNoTime}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
- replace-with="STDERR"
		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err"
		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz">
 <PatternLayout>
 <pattern>${patternNoTime}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_wrkr_err_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_wrkr_err_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="storm_cluster_log4j_parameterize" summary="Parameterizing Storm Cluster Log4J Properties">
- <type>storm-cluster-log4j</type>
- <set key="storm_a1_maxfilesize" value="100"/>
- <set key="storm_a1_maxbackupindex" value="9"/>
- <regex-replace key="content" find="A1" immediateFlush="false"
 fileName="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
 filePattern="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>\$\{pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
- replace-with="A1" immediateFlush="false"
 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>${pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="SPARK">
- <component name="LIVY_SERVER">
- <changes>
- <definition xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs">
- <type>livy-conf</type>
- <transfer operation="move" from-key="livy.server.kerberos.keytab" to-key="livy.server.launch.kerberos.keytab" />
- <transfer operation="move" from-key="livy.server.kerberos.principal" to-key="livy.server.launch.kerberos.principal" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_5_0_0_add_spark_conf_dir_livy_configs">
- <type>livy-env</type>
- <insert key="content" value="export SPARK_CONF_DIR=/usr/hdp/current/spark-client/conf" insert-type="append" newline-before="true" newline-after="true" />
- </definition>
- </changes>
- </component>
- </service>
- <service name="ZOOKEEPER">
- <component name="ZOOKEEPER_SERVER">
- <changes>
- <!-- Zookeeper Rolling properties for log4j need to be parameterized. -->
- <definition xsi:type="configure" id="zookeeper_log4j_parameterize" summary="Parameterizing ZooKeeper Log4J Properties">
- <type>zookeeper-log4j</type>
- <set key="zookeeper_log_max_backup_size" value="10"/>
- <set key="zookeeper_log_number_of_backup_files" value="10"/>
- <regex-replace key="content" find="^log4j.appender.ROLLINGFILE.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB"/>
- <regex-replace key="content" find="^#log4j.appender.ROLLINGFILE.MaxBackupIndex=([0-9]+)" replace-with="#log4j.appender.ROLLINGFILE.MaxBackupIndex={{zookeeper_log_number_of_backup_files}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="ATLAS">
- <component name="ATLAS_SERVER">
- <changes>
- <definition xsi:type="configure" id="atlas_log4j_parameterize" summary="Parameterizing Atlas Log4J Properties">
- <type>atlas-log4j</type>
- <set key="atlas_log_max_backup_size" value="256"/>
- <set key="atlas_log_number_of_backup_files" value="20"/>
- <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_max_backup_size}}MB&quot; /&gt;"/>
- <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{atlas_log_number_of_backup_files}}&quot; /&gt;"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol" summary="Excluding TLS v1.2 Protocol">
- <type>application-properties</type>
- <set key="atlas.ssl.exclude.protocols" value="TLSv1.2" if-type="application-properties" if-key="atlas.ssl.exclude.protocols" if-key-state="absent"/>
- </definition>
- <definition xsi:type="configure" id="increase_atlas_zookeeper_timeouts" summary="Updating Atlas zookeeper timeout values">
- <type>application-properties</type>
- <set key="atlas.kafka.zookeeper.connection.timeout.ms" value="30000" if-type="application-properties" if-key="atlas.kafka.zookeeper.connection.timeout.ms" if-key-state="present"/>
- <set key="atlas.kafka.zookeeper.session.timeout.ms" value="60000" if-type="application-properties" if-key="atlas.kafka.zookeeper.session.timeout.ms" if-key-state="present"/>
- <set key="atlas.audit.zookeeper.session.timeout.ms" value="60000" if-type="application-properties" if-key="atlas.audit.zookeeper.session.timeout.ms" if-key-state="present"/>
- </definition>
- </changes>
- </component>
- <component name="SPARK_CLIENT">
- <changes>
- <definition xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
- <type>spark-defaults</type>
- <set key="spark.yarn.queue" value="default" if-type="spark-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="SPARK2">
- <component name="SPARK2_CLIENT">
- <changes>
- <definition xsi:type="configure" id="hdp_2_5_0_0_spark2_yarn_queue">
- <type>spark2-defaults</type>
- <set key="spark.yarn.queue" value="default" if-type="spark2-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="TEZ">
- <component name="TEZ_CLIENT">
- <changes>
- <definition xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
- <type>tez-site</type>
- <set key="tez.queue.name" value="default" if-type="tez-site" if-key="tez.queue.name" if-key-state="absent"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="OOZIE">
- <component name="OOZIE_SERVER">
- <changes>
- <!-- Oozie Rolling properties for log4j need to be parameterized. -->
- <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
- <type>oozie-log4j</type>
- <set key="oozie_log_maxhistory" value="720"/>
- <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="YARN">
- <component name="RESOURCEMANAGER">
- <changes>
- <!-- Yarn Rolling properties for log4j need to be parameterized. -->
- <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
- <type>yarn-log4j</type>
- <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
- <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
- <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
- <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
- </definition>
- <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
- <type>yarn-env</type>
- <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption">
- <type>yarn-site</type>
- <transfer operation="copy"
- from-key="yarn.resourcemanager.scheduler.monitor.enable"
- to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
- default-value="false"/>
- </definition>
- <definition xsi:type="configure" id="yarn_site_retained_log_count" summary="Updating Yarn retained file count for continuous Log Aggregation">
- <type>yarn-site</type>
- <set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
- value="336" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
- <type>yarn-env</type>
- <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_ats_scan_interval_default">
- <type>yarn-site</type>
- <set key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" value="15"
- if-type="yarn-site" if-key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" if-value="60"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="MAPREDUCE2">
- <component name="MAPREDUCE2_CLIENT">
- <changes>
- <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
- <type>mapred-site</type>
- <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="HDFS">
- <component name="NAMENODE">
- <changes>
- <!-- HDFS Rolling properties for log4j need to be parameterized. -->
- <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
- <type>hdfs-log4j</type>
- <set key="hadoop_log_max_backup_size" value="256"/>
- <set key="hadoop_log_number_of_backup_files" value="10"/>
- <set key="hadoop_security_log_max_backup_size" value="256"/>
- <set key="hadoop_security_log_number_of_backup_files" value="20"/>
- <regex-replace key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
- <regex-replace key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
- <regex-replace key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
- <regex-replace key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
- </definition>
- <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
- <type>hadoop-env</type>
- <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
- </definition>
- <definition xsi:type="configure" id="hdfs_securitylogger_additivity" summary="Set additivity of SecurityLogger to false">
- <type>hdfs-log4j</type>
- <regex-replace key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#xA;log4j.additivity.SecurityLogger=false"/>
- <regex-replace key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="HBASE">
- <component name="HBASE_MASTER">
- <changes>
- <!-- HBase Rolling properties for log4j need to be parameterized. -->
- <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
- <type>hbase-log4j</type>
- <set key="hbase_log_maxfilesize" value="256"/>
- <set key="hbase_log_maxbackupindex" value="20"/>
- <set key="hbase_security_log_maxfilesize" value="256"/>
- <set key="hbase_security_log_maxbackupindex" value="20"/>
- <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
- <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
- <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
- <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="FALCON">
- <component name="FALCON_SERVER">
- <changes>
- <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
- <type>falcon-log4j</type>
- <set key="falcon_log_maxfilesize" value="256"/>
- <set key="falcon_log_maxbackupindex" value="20"/>
- <set key="falcon_security_log_maxfilesize" value="256"/>
- <set key="falcon_security_log_maxbackupindex" value="20"/>
- <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_log_maxfilesize}}MB&quot; /&gt;"/>
- <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_log_maxbackupindex}}&quot; /&gt;"/>
- <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_security_log_maxfilesize}}MB&quot;/&gt;"/>
- <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_security_log_maxbackupindex}}&quot;/&gt;"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="RANGER">
- <component name="RANGER_ADMIN">
- <changes>
- <definition xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous">
- <type>ranger-env</type>
- <transfer operation="delete" delete-key="bind_anonymous" />
- </definition>
- <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
- <type>admin-log4j</type>
- <set key="ranger_xa_log_maxfilesize" value="256"/>
- <set key="ranger_xa_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
- </definition>
- </changes>
- </component>
- <component name="RANGER_USERSYNC">
- <changes>
- <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
- <type>usersync-log4j</type>
- <set key="ranger_usersync_log_maxfilesize" value="256"/>
- <set key="ranger_usersync_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade">
- <type>ranger-ugsync-site</type>
- <set key="ranger.usersync.ldap.deltasync" value="false"
- if-type="ranger-ugsync-site" if-key="ranger.usersync.source.impl.class" if-value="org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"/>
- </definition>
- </changes>
- </component>
- <component name="RANGER_TAGSYNC">
- <changes>
- <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
- <type>tagsync-log4j</type>
- <set key="ranger_tagsync_log_maxfilesize" value="256"/>
- <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
- <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="RANGER_KMS">
- <component name="RANGER_KMS_SERVER">
- <changes>
- <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
- <type>kms-log4j</type>
- <set key="ranger_kms_log_maxfilesize" value="256"/>
- <set key="ranger_kms_log_maxbackupindex" value="20"/>
- <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
- <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
- <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl">
- <type>ranger-kms-site</type>
- <transfer operation="delete" delete-key="ranger.https.attrib.keystore.file"
- if-type="ranger-kms-site" if-key="ranger.service.https.attrib.keystore.file" if-key-state="present"/>
- <transfer operation="delete" delete-key="ranger.service.https.attrib.clientAuth"
- if-type="ranger-kms-site" if-key="ranger.service.https.attrib.client.auth" if-key-state="present"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="KAFKA">
- <component name="KAFKA_BROKER">
- <changes>
- <definition xsi:type="configure" id="kafka_log4j_parameterize" summary="Parameterizing Kafka Log4J Properties">
- <type>kafka-log4j</type>
- <set key="kafka_log_maxfilesize" value="256"/>
- <set key="kafka_log_maxbackupindex" value="20"/>
- <set key="controller_log_maxfilesize" value="256"/>
- <set key="controller_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}"/>
- <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxFileSize = {{controller_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxBackupIndex = {{controller_log_maxbackupindex}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="KNOX">
- <component name="KNOX_GATEWAY">
- <changes>
- <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
- <type>gateway-log4j</type>
- <set key="knox_gateway_log_maxfilesize" value="256"/>
- <set key="knox_gateway_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
- <type>ldap-log4j</type>
- <set key="knox_ldap_log_maxfilesize" value="256"/>
- <set key="knox_ldap_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
- </definition>
- </changes>
- </component>
- </service>
- <service name="PIG">
- <component name="PIG">
- <changes>
- <definition xsi:type="configure" id="hdp_2_6_0_0_pig_use_tez">
- <type>pig-properties</type>
- <regex-replace key="content" find=" *#* *exectype=(\w+)" replace-with="exectype=tez" />
- </definition>
- </changes>
- </component>
- </service>
- <service name="HIVE">
- <component name="HIVE_SERVER">
- <changes>
- <definition xsi:type="configure" id="hdp_2_6_0_0_hive_append_heap_dump_options" summary="Appending optional Java heap dump parameters" >
- <type>hive-env</type>
- <insert key="content" value="export HADOOP_CLIENT_OPTS=&quot;$HADOOP_CLIENT_OPTS{{heap_dump_opts}}&quot;" insert-type="append" newline-before="true" newline-after="true" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options_for_tez_task">
- <type>tez-site</type>
- <insert key="tez.task.launch.cmd-opts" value="{{heap_dump_opts}}" insert-type="append" newline-before="false" newline-after="false" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options_for_tez_am">
- <type>tez-site</type>
- <insert key="tez.am.launch.cmd-opts" value="{{heap_dump_opts}}" insert-type="append" newline-before="false" newline-after="false" />
- </definition>
- <definition xsi:type="configure" id="hive_log4j_parameterize" summary="Parameterizing Hive Log4J Properties">
- <type>hive-log4j</type>
- <set key="hive_log_maxfilesize" value="256"/>
- <set key = "hive_log_maxbackupindex" value="30"/>
- <regex-replace key="content" find="#log4j.appender.DRFA.MaxBackupIndex=([0-9]+)" replace-with="#log4j.appender.DRFA.MaxBackupIndex={{hive_log_maxbackupindex}}"/>
- <replace key="content" find="log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.DRFA.MaxFileSize = {{hive_log_maxfilesize}}MB"/>
- </definition>
- <definition xsi:type="configure" id="hive_llap_log4j_parameterize" summary="Parameterizing Hive llap Log4J Properties">
- <type>llap-daemon-log4j</type>
- <set key="hive_llap_log_maxfilesize" value="256"/>
- <set key = "hive_llap_log_maxbackupindex" value="240"/>
- <regex-replace key="content" find="property.llap.daemon.log.maxfilesize = ([0-9]+)MB" replace-with="property.llap.daemon.log.maxfilesize = {{hive_llap_log_maxfilesize}}MB"/>
- <regex-replace key="content" find="property.llap.daemon.log.maxbackupindex = ([0-9]+)" replace-with="property.llap.daemon.log.maxbackupindex = {{hive_llap_log_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_hive_set_hive_enforce_bucketing_property">
- <type>hive-site</type>
- <set key="hive.enforce.bucketing" value="true"/>
- </definition>
- </changes>
- </component>
- <component name="HIVE_SERVER_INTERACTIVE">
- <changes>
- <definition xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_heap_dump_options" summary="Appending optional Java heap dump parameters" >
- <type>hive-interactive-env</type>
- <insert key="content" value="export HADOOP_CLIENT_OPTS=&quot;$HADOOP_CLIENT_OPTS{{heap_dump_opts}}&quot;" insert-type="append" newline-before="true" newline-after="true" />
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_java_heap_dump_options">
- <type>hive-interactive-env</type>
- <insert key="llap_java_opts" value="{{heap_dump_opts}}" insert-type="append" newline-before="false" newline-after="false" />
- </definition>
- <definition xsi:type="configure" id="hive_log4j2_parameterize" summary="Parameterizing Hive Log4J2 Properties">
- <type>hive-log4j2</type>
- <set key="hive2_log_maxfilesize" value="256"/>
- <set key = "hive2_log_maxbackupindex" value="30"/>
- <regex-replace key="content" find="appender.DRFA.strategy.max = ([0-9]+)" replace-with="appender.DRFA.strategy.max = {{hive2_log_maxbackupindex}}"/>
- <replace key="content" find="appender.DRFA.strategy.type = DefaultRolloverStrategy" replace-with="appender.DRFA.strategy.type = DefaultRolloverStrategy
appender.DRFA.policies.fsize.type = SizeBasedTriggeringPolicy
appender.DRFA.policies.fsize.size = {{hive2_log_maxfilesize}}MB"/>
- </definition>
- <definition xsi:type="configure" id="llap_cli_log4j2_parameterize" summary="Parameterizing LLAP Cli Log4J2 Properties">
- <type>llap-cli-log4j2</type>
- <set key="llap_cli_log_maxfilesize" value="256"/>
- <set key = "llap_cli_log_maxbackupindex" value="30"/>
- <regex-replace key="content" find="appender.DRFA.strategy.max = ([0-9]+)" replace-with="appender.DRFA.strategy.max = {{llap_cli_log_maxbackupindex}}"/>
- <replace key="content" find="appender.DRFA.strategy.type = DefaultRolloverStrategy" replace-with="appender.DRFA.strategy.type = DefaultRolloverStrategy
appender.DRFA.policies.fsize.type = SizeBasedTriggeringPolicy
appender.DRFA.policies.fsize.size = {{llap_cli_log_maxfilesize}}MB"/>
- </definition>
- <definition xsi:type="configure" id="llap_update_headroom" summary="Update headroom for LLAP">
- <type>hive-interactive-env</type>
- <set key="llap_headroom_space" value="12288"/>
- </definition>
- <definition xsi:type="configure" id="llap_update_hashaggregation" summary="Update Hash Aggregation settings for LLAP">
- <type>hive-interactive-site</type>
- <set key="hive.map.aggr.hash.min.reduction" value="0.99"/>
- <set key="hive.vectorized.groupby.maxentries" value="1000000"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive">
- <type>hive-interactive-site</type>
- <transfer operation="copy" from-type="hive-site" from-key="hive.tez.container.size" to-key="hive.tez.container.size" default-value="682" if-type="hive-interactive-site" if-key="hive.tez.container.size" if-key-state="absent"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_5_0_0_remove_atlas_cluster_name">
- <type>hive-site</type>
- <transfer operation="delete" delete-key="atlas.cluster.name"/>
- </definition>
- </changes>
- </component>
- <component name = "WEBHCAT_SERVER">
- <changes>
- <definition xsi:type="configure" id="webhcat_log4j_parameterize" summary="Parameterizing Webhcat Log4J Properties">
- <type>webhcat-log4j</type>
- <set key="webhcat_log_maxfilesize" value="256"/>
- <set key = "webhcat_log_maxbackupindex" value="20"/>
- <replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxFileSize = {{webhcat_log_maxfilesize}}MB"/>
- <replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxBackupIndex = {{webhcat_log_maxbackupindex}}"/>
- </definition>
- <definition xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
- <type>webhcat-site</type>
- <set key="templeton.hadoop.queue.name" value="default" if-type="webhcat-site" if-key="templeton.hadoop.queue.name" if-key-state="absent"/>
- </definition>
- </changes>
- </component>
- </service>
- </services>
- </upgrade-config-changes>
|