
Revert "AMBARI-8714. Refactor UpgradeHelper_HDP2.py script to be compliant with 2.0/2.1 stack upgrade (dlysnichenko)"
Reverting due to unit test (UT) failures.
This reverts commit 106590550000de1716a9216467325a0a940e50e2.

Yusaku Sako 10 years ago
parent
commit
814bf1d43b

+ 1245 - 0
ambari-server/src/main/python/UpgradeHelper_HDP2.py

@@ -0,0 +1,1245 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+from pprint import pprint
+import sys
+import datetime
+import os.path
+import logging
+import shutil
+import json
+import subprocess
+import time
+
+
+# action commands
+GET_MR_MAPPING_ACTION = "save-mr-mapping"
+DELETE_MR_ACTION = "delete-mr"
+ADD_YARN_MR2_ACTION = "add-yarn-mr2"
+MODIFY_CONFIG_ACTION = "update-configs"
+BACKUP_CONFIG_ACTION = "backup-configs"
+INSTALL_YARN_MR2_ACTION = "install-yarn-mr2"
+VALID_ACTIONS = ', '.join([GET_MR_MAPPING_ACTION, DELETE_MR_ACTION, ADD_YARN_MR2_ACTION, MODIFY_CONFIG_ACTION,
+                           INSTALL_YARN_MR2_ACTION, BACKUP_CONFIG_ACTION])
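+# Example invocation (hostname and cluster values below are hypothetical):
+#   python UpgradeHelper_HDP2.py --hostname ambari.example.com --user admin \
+#     --password admin --clustername MyCluster backup-configs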
+
+MR_MAPPING_FILE = "mr_mapping"
+UPGRADE_LOG_FILE = "upgrade_log"
+CAPACITY_SCHEDULER_TAG = "capacity-scheduler"
+MAPRED_SITE_TAG = "mapred-site"
+GLOBAL_TAG = "global"
+HDFS_SITE_TAG = "hdfs-site"
+CORE_SITE_TAG = "core-site"
+YARN_SITE_TAG = "yarn-site"
+HBASE_SITE_TAG = "hbase-site"
+HIVE_SITE_TAG = "hive-site"
+REPLACE_JH_HOST_NAME_TAG = "REPLACE_JH_HOST"
+REPLACE_RM_HOST_NAME_TAG = "REPLACE_RM_HOST"
+REPLACE_WITH_TAG = "REPLACE_WITH_"
+DELETE_OLD_TAG = "DELETE_OLD"
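+# Placeholder semantics used in the config templates below: values containing
+# REPLACE_JH_HOST or REPLACE_RM_HOST are substituted with the actual host in
+# modify_configs(); a value of DELETE_OLD removes the property; a value of
+# REPLACE_WITH_<name> copies the value of old property <name> from the existing config.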
+
+AUTH_FORMAT = '{0}:{1}'
+ROOT_FORMAT = 'http://{0}:8080/api/v1'
+URL_FORMAT = ROOT_FORMAT+'/clusters/{1}'
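+# e.g. URL_FORMAT.format('ambari.example.com', 'MyCluster') yields
+#   'http://ambari.example.com:8080/api/v1/clusters/MyCluster' (hostname hypothetical)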
+
+
+logger = logging.getLogger()
+
+# old : new
+PROPERTY_MAPPING = {
+  "create.empty.dir.if.nonexist": "mapreduce.jobcontrol.createdir.ifnotexist",
+  "dfs.access.time.precision": "dfs.namenode.accesstime.precision",
+  "dfs.backup.address": "dfs.namenode.backup.address",
+  "dfs.backup.http.address": "dfs.namenode.backup.http-address",
+  "dfs.balance.bandwidthPerSec": "dfs.datanode.balance.bandwidthPerSec",
+  "dfs.block.size": "dfs.blocksize",
+  "dfs.data.dir": "dfs.datanode.data.dir",
+  "dfs.datanode.max.xcievers": "dfs.datanode.max.transfer.threads",
+  "dfs.df.interval": "fs.df.interval",
+  "dfs.federation.nameservice.id": "dfs.nameservice.id",
+  "dfs.federation.nameservices": "dfs.nameservices",
+  "dfs.http.address": "dfs.namenode.http-address",
+  "dfs.https.address": "dfs.namenode.https-address",
+  "dfs.https.client.keystore.resource": "dfs.client.https.keystore.resource",
+  "dfs.https.need.client.auth": "dfs.client.https.need-auth",
+  "dfs.max.objects": "dfs.namenode.max.objects",
+  "dfs.max-repl-streams": "dfs.namenode.replication.max-streams",
+  "dfs.name.dir": "dfs.namenode.name.dir",
+  "dfs.name.dir.restore": "dfs.namenode.name.dir.restore",
+  "dfs.name.edits.dir": "dfs.namenode.edits.dir",
+  "dfs.permissions": "dfs.permissions.enabled",
+  "dfs.permissions.supergroup": "dfs.permissions.superusergroup",
+  "dfs.read.prefetch.size": "dfs.client.read.prefetch.size",
+  "dfs.replication.considerLoad": "dfs.namenode.replication.considerLoad",
+  "dfs.replication.interval": "dfs.namenode.replication.interval",
+  "dfs.replication.min": "dfs.namenode.replication.min",
+  "dfs.replication.pending.timeout.sec": "dfs.namenode.replication.pending.timeout-sec",
+  "dfs.safemode.extension": "dfs.namenode.safemode.extension",
+  "dfs.safemode.threshold.pct": "dfs.namenode.safemode.threshold-pct",
+  "dfs.secondary.http.address": "dfs.namenode.secondary.http-address",
+  "dfs.socket.timeout": "dfs.client.socket-timeout",
+  "dfs.umaskmode": "fs.permissions.umask-mode",
+  "dfs.write.packet.size": "dfs.client-write-packet-size",
+  "fs.checkpoint.dir": "dfs.namenode.checkpoint.dir",
+  "fs.checkpoint.edits.dir": "dfs.namenode.checkpoint.edits.dir",
+  "fs.checkpoint.period": "dfs.namenode.checkpoint.period",
+  "fs.default.name": "fs.defaultFS",
+  "hadoop.configured.node.mapping": "net.topology.configured.node.mapping",
+  "hadoop.job.history.location": "mapreduce.jobtracker.jobhistory.location",
+  "hadoop.native.lib": "io.native.lib.available",
+  "hadoop.net.static.resolutions": "mapreduce.tasktracker.net.static.resolutions",
+  "hadoop.pipes.command-file.keep": "mapreduce.pipes.commandfile.preserve",
+  "hadoop.pipes.executable.interpretor": "mapreduce.pipes.executable.interpretor",
+  "hadoop.pipes.executable": "mapreduce.pipes.executable",
+  "hadoop.pipes.java.mapper": "mapreduce.pipes.isjavamapper",
+  "hadoop.pipes.java.recordreader": "mapreduce.pipes.isjavarecordreader",
+  "hadoop.pipes.java.recordwriter": "mapreduce.pipes.isjavarecordwriter",
+  "hadoop.pipes.java.reducer": "mapreduce.pipes.isjavareducer",
+  "hadoop.pipes.partitioner": "mapreduce.pipes.partitioner",
+  "heartbeat.recheck.interval": "dfs.namenode.heartbeat.recheck-interval",
+  "io.bytes.per.checksum": "dfs.bytes-per-checksum",
+  "io.sort.factor": "mapreduce.task.io.sort.factor",
+  "io.sort.mb": "mapreduce.task.io.sort.mb",
+  "io.sort.spill.percent": "mapreduce.map.sort.spill.percent",
+  "jobclient.completion.poll.interval": "mapreduce.client.completion.pollinterval",
+  "jobclient.output.filter": "mapreduce.client.output.filter",
+  "jobclient.progress.monitor.poll.interval": "mapreduce.client.progressmonitor.pollinterval",
+  "job.end.notification.url": "mapreduce.job.end-notification.url",
+  "job.end.retry.attempts": "mapreduce.job.end-notification.retry.attempts",
+  "job.end.retry.interval": "mapreduce.job.end-notification.retry.interval",
+  "job.local.dir": "mapreduce.job.local.dir",
+  "keep.failed.task.files": "mapreduce.task.files.preserve.failedtasks",
+  "keep.task.files.pattern": "mapreduce.task.files.preserve.filepattern",
+  "key.value.separator.in.input.line": "mapreduce.input.keyvaluelinerecordreader.key.value.separator",
+  "local.cache.size": "mapreduce.tasktracker.cache.local.size",
+  "map.input.file": "mapreduce.map.input.file",
+  "map.input.length": "mapreduce.map.input.length",
+  "map.input.start": "mapreduce.map.input.start",
+  "map.output.key.field.separator": "mapreduce.map.output.key.field.separator",
+  "map.output.key.value.fields.spec": "mapreduce.fieldsel.map.output.key.value.fields.spec",
+  "mapred.acls.enabled": "mapreduce.cluster.acls.enabled",
+  "mapred.binary.partitioner.left.offset": "mapreduce.partition.binarypartitioner.left.offset",
+  "mapred.binary.partitioner.right.offset": "mapreduce.partition.binarypartitioner.right.offset",
+  "mapred.cache.archives": "mapreduce.job.cache.archives",
+  "mapred.cache.archives.timestamps": "mapreduce.job.cache.archives.timestamps",
+  "mapred.cache.files": "mapreduce.job.cache.files",
+  "mapred.cache.files.timestamps": "mapreduce.job.cache.files.timestamps",
+  "mapred.cache.localArchives": "mapreduce.job.cache.local.archives",
+  "mapred.cache.localFiles": "mapreduce.job.cache.local.files",
+  "mapred.child.tmp": "mapreduce.task.tmp.dir",
+  "mapred.cluster.average.blacklist.threshold": "mapreduce.jobtracker.blacklist.average.threshold",
+  "mapred.cluster.map.memory.mb": "mapreduce.cluster.mapmemory.mb",
+  "mapred.cluster.max.map.memory.mb": "mapreduce.jobtracker.maxmapmemory.mb",
+  "mapred.cluster.max.reduce.memory.mb": "mapreduce.jobtracker.maxreducememory.mb",
+  "mapred.cluster.reduce.memory.mb": "mapreduce.cluster.reducememory.mb",
+  "mapred.committer.job.setup.cleanup.needed": "mapreduce.job.committer.setup.cleanup.needed",
+  "mapred.compress.map.output": "mapreduce.map.output.compress",
+  "mapred.data.field.separator": "mapreduce.fieldsel.data.field.separator",
+  "mapred.debug.out.lines": "mapreduce.task.debugout.lines",
+  "mapred.healthChecker.interval": "mapreduce.tasktracker.healthchecker.interval",
+  "mapred.healthChecker.script.args": "mapreduce.tasktracker.healthchecker.script.args",
+  "mapred.healthChecker.script.path": "mapreduce.tasktracker.healthchecker.script.path",
+  "mapred.healthChecker.script.timeout": "mapreduce.tasktracker.healthchecker.script.timeout",
+  "mapred.heartbeats.in.second": "mapreduce.jobtracker.heartbeats.in.second",
+  "mapred.hosts.exclude": "mapreduce.jobtracker.hosts.exclude.filename",
+  "mapred.hosts": "mapreduce.jobtracker.hosts.filename",
+  "mapred.inmem.merge.threshold": "mapreduce.reduce.merge.inmem.threshold",
+  "mapred.input.dir.formats": "mapreduce.input.multipleinputs.dir.formats",
+  "mapred.input.dir.mappers": "mapreduce.input.multipleinputs.dir.mappers",
+  "mapred.input.dir": "mapreduce.input.fileinputformat.inputdir",
+  "mapred.input.pathFilter.class": "mapreduce.input.pathFilter.class",
+  "mapred.jar": "mapreduce.job.jar",
+  "mapred.job.classpath.archives": "mapreduce.job.classpath.archives",
+  "mapred.job.classpath.files": "mapreduce.job.classpath.files",
+  "mapred.job.id": "mapreduce.job.id",
+  "mapred.jobinit.threads": "mapreduce.jobtracker.jobinit.threads",
+  "mapred.job.map.memory.mb": "mapreduce.map.memory.mb",
+  "mapred.job.name": "mapreduce.job.name",
+  "mapred.job.priority": "mapreduce.job.priority",
+  "mapred.job.queue.name": "mapreduce.job.queuename",
+  "mapred.job.reduce.input.buffer.percent": "mapreduce.reduce.input.buffer.percent",
+  "mapred.job.reduce.markreset.buffer.percent": "mapreduce.reduce.markreset.buffer.percent",
+  "mapred.job.reduce.memory.mb": "mapreduce.reduce.memory.mb",
+  "mapred.job.reduce.total.mem.bytes": "mapreduce.reduce.memory.totalbytes",
+  "mapred.job.reuse.jvm.num.tasks": "mapreduce.job.jvm.numtasks",
+  "mapred.job.shuffle.input.buffer.percent": "mapreduce.reduce.shuffle.input.buffer.percent",
+  "mapred.job.shuffle.merge.percent": "mapreduce.reduce.shuffle.merge.percent",
+  "mapred.job.tracker.handler.count": "mapreduce.jobtracker.handler.count",
+  "mapred.job.tracker.history.completed.location": "mapreduce.jobtracker.jobhistory.completed.location",
+  "mapred.job.tracker.http.address": "mapreduce.jobtracker.http.address",
+  "mapred.jobtracker.instrumentation": "mapreduce.jobtracker.instrumentation",
+  "mapred.jobtracker.job.history.block.size": "mapreduce.jobtracker.jobhistory.block.size",
+  "mapred.job.tracker.jobhistory.lru.cache.size": "mapreduce.jobtracker.jobhistory.lru.cache.size",
+  "mapred.job.tracker": "mapreduce.jobtracker.address",
+  "mapred.jobtracker.maxtasks.per.job": "mapreduce.jobtracker.maxtasks.perjob",
+  "mapred.job.tracker.persist.jobstatus.active": "mapreduce.jobtracker.persist.jobstatus.active",
+  "mapred.job.tracker.persist.jobstatus.dir": "mapreduce.jobtracker.persist.jobstatus.dir",
+  "mapred.job.tracker.persist.jobstatus.hours": "mapreduce.jobtracker.persist.jobstatus.hours",
+  "mapred.jobtracker.restart.recover": "mapreduce.jobtracker.restart.recover",
+  "mapred.job.tracker.retiredjobs.cache.size": "mapreduce.jobtracker.retiredjobs.cache.size",
+  "mapred.job.tracker.retire.jobs": "mapreduce.jobtracker.retirejobs",
+  "mapred.jobtracker.taskalloc.capacitypad": "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad",
+  "mapred.jobtracker.taskScheduler": "mapreduce.jobtracker.taskscheduler",
+  "mapred.jobtracker.taskScheduler.maxRunningTasksPerJob": "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob",
+  "mapred.join.expr": "mapreduce.join.expr",
+  "mapred.join.keycomparator": "mapreduce.join.keycomparator",
+  "mapred.lazy.output.format": "mapreduce.output.lazyoutputformat.outputformat",
+  "mapred.line.input.format.linespermap": "mapreduce.input.lineinputformat.linespermap",
+  "mapred.linerecordreader.maxlength": "mapreduce.input.linerecordreader.line.maxlength",
+  "mapred.local.dir": "mapreduce.cluster.local.dir",
+  "mapred.local.dir.minspacekill": "mapreduce.tasktracker.local.dir.minspacekill",
+  "mapred.local.dir.minspacestart": "mapreduce.tasktracker.local.dir.minspacestart",
+  "mapred.map.child.env": "mapreduce.map.env",
+  "mapred.map.child.java.opts": "mapreduce.map.java.opts",
+  "mapred.map.child.log.level": "mapreduce.map.log.level",
+  "mapred.map.max.attempts": "mapreduce.map.maxattempts",
+  "mapred.map.output.compression.codec": "mapreduce.map.output.compress.codec",
+  "mapred.mapoutput.key.class": "mapreduce.map.output.key.class",
+  "mapred.mapoutput.value.class": "mapreduce.map.output.value.class",
+  "mapred.mapper.regex.group": "mapreduce.mapper.regexmapper..group",
+  "mapred.mapper.regex": "mapreduce.mapper.regex",
+  "mapred.map.task.debug.script": "mapreduce.map.debug.script",
+  "mapred.map.tasks": "mapreduce.job.maps",
+  "mapred.map.tasks.speculative.execution": "mapreduce.map.speculative",
+  "mapred.max.map.failures.percent": "mapreduce.map.failures.maxpercent",
+  "mapred.max.reduce.failures.percent": "mapreduce.reduce.failures.maxpercent",
+  "mapred.max.split.size": "mapreduce.input.fileinputformat.split.maxsize",
+  "mapred.max.tracker.blacklists": "mapreduce.jobtracker.tasktracker.maxblacklists",
+  "mapred.max.tracker.failures": "mapreduce.job.maxtaskfailures.per.tracker",
+  "mapred.merge.recordsBeforeProgress": "mapreduce.task.merge.progress.records",
+  "mapred.min.split.size": "mapreduce.input.fileinputformat.split.minsize",
+  "mapred.min.split.size.per.node": "mapreduce.input.fileinputformat.split.minsize.per.node",
+  "mapred.min.split.size.per.rack": "mapreduce.input.fileinputformat.split.minsize.per.rack",
+  "mapred.output.compression.codec": "mapreduce.output.fileoutputformat.compress.codec",
+  "mapred.output.compression.type": "mapreduce.output.fileoutputformat.compress.type",
+  "mapred.output.compress": "mapreduce.output.fileoutputformat.compress",
+  "mapred.output.dir": "mapreduce.output.fileoutputformat.outputdir",
+  "mapred.output.key.class": "mapreduce.job.output.key.class",
+  "mapred.output.key.comparator.class": "mapreduce.job.output.key.comparator.class",
+  "mapred.output.value.class": "mapreduce.job.output.value.class",
+  "mapred.output.value.groupfn.class": "mapreduce.job.output.group.comparator.class",
+  "mapred.permissions.supergroup": "mapreduce.cluster.permissions.supergroup",
+  "mapred.pipes.user.inputformat": "mapreduce.pipes.inputformat",
+  "mapred.reduce.child.env": "mapreduce.reduce.env",
+  "mapred.reduce.child.java.opts": "mapreduce.reduce.java.opts",
+  "mapred.reduce.child.log.level": "mapreduce.reduce.log.level",
+  "mapred.reduce.max.attempts": "mapreduce.reduce.maxattempts",
+  "mapred.reduce.parallel.copies": "mapreduce.reduce.shuffle.parallelcopies",
+  "mapred.reduce.slowstart.completed.maps": "mapreduce.job.reduce.slowstart.completedmaps",
+  "mapred.reduce.task.debug.script": "mapreduce.reduce.debug.script",
+  "mapred.reduce.tasks": "mapreduce.job.reduces",
+  "mapred.reduce.tasks.speculative.execution": "mapreduce.reduce.speculative",
+  "mapred.seqbinary.output.key.class": "mapreduce.output.seqbinaryoutputformat.key.class",
+  "mapred.seqbinary.output.value.class": "mapreduce.output.seqbinaryoutputformat.value.class",
+  "mapred.shuffle.connect.timeout": "mapreduce.reduce.shuffle.connect.timeout",
+  "mapred.shuffle.read.timeout": "mapreduce.reduce.shuffle.read.timeout",
+  "mapred.skip.attempts.to.start.skipping": "mapreduce.task.skip.start.attempts",
+  "mapred.skip.map.auto.incr.proc.count": "mapreduce.map.skip.proc-count.auto-incr",
+  "mapred.skip.map.max.skip.records": "mapreduce.map.skip.maxrecords",
+  "mapred.skip.on": "mapreduce.job.skiprecords",
+  "mapred.skip.out.dir": "mapreduce.job.skip.outdir",
+  "mapred.skip.reduce.auto.incr.proc.count": "mapreduce.reduce.skip.proc-count.auto-incr",
+  "mapred.skip.reduce.max.skip.groups": "mapreduce.reduce.skip.maxgroups",
+  "mapred.speculative.execution.slowNodeThreshold": "mapreduce.job.speculative.slownodethreshold",
+  "mapred.speculative.execution.slowTaskThreshold": "mapreduce.job.speculative.slowtaskthreshold",
+  "mapred.speculative.execution.speculativeCap": "mapreduce.job.speculative.speculativecap",
+  "mapred.submit.replication": "mapreduce.client.submit.file.replication",
+  "mapred.system.dir": "mapreduce.jobtracker.system.dir",
+  "mapred.task.cache.levels": "mapreduce.jobtracker.taskcache.levels",
+  "mapred.task.id": "mapreduce.task.attempt.id",
+  "mapred.task.is.map": "mapreduce.task.ismap",
+  "mapred.task.partition": "mapreduce.task.partition",
+  "mapred.task.profile": "mapreduce.task.profile",
+  "mapred.task.profile.maps": "mapreduce.task.profile.maps",
+  "mapred.task.profile.params": "mapreduce.task.profile.params",
+  "mapred.task.profile.reduces": "mapreduce.task.profile.reduces",
+  "mapred.task.timeout": "mapreduce.task.timeout",
+  "mapred.tasktracker.dns.interface": "mapreduce.tasktracker.dns.interface",
+  "mapred.tasktracker.dns.nameserver": "mapreduce.tasktracker.dns.nameserver",
+  "mapred.tasktracker.events.batchsize": "mapreduce.tasktracker.events.batchsize",
+  "mapred.tasktracker.expiry.interval": "mapreduce.jobtracker.expire.trackers.interval",
+  "mapred.task.tracker.http.address": "mapreduce.tasktracker.http.address",
+  "mapred.tasktracker.indexcache.mb": "mapreduce.tasktracker.indexcache.mb",
+  "mapred.tasktracker.instrumentation": "mapreduce.tasktracker.instrumentation",
+  "mapred.tasktracker.map.tasks.maximum": "mapreduce.tasktracker.map.tasks.maximum",
+  "mapred.tasktracker.memory_calculator_plugin": "mapreduce.tasktracker.resourcecalculatorplugin",
+  "mapred.tasktracker.memorycalculatorplugin": "mapreduce.tasktracker.resourcecalculatorplugin",
+  "mapred.tasktracker.reduce.tasks.maximum": "mapreduce.tasktracker.reduce.tasks.maximum",
+  "mapred.task.tracker.report.address": "mapreduce.tasktracker.report.address",
+  "mapred.task.tracker.task-controller": "mapreduce.tasktracker.taskcontroller",
+  "mapred.tasktracker.taskmemorymanager.monitoring-interval": "mapreduce.tasktracker.taskmemorymanager.monitoringinterval",
+  "mapred.tasktracker.tasks.sleeptime-before-sigkill": "mapreduce.tasktracker.tasks.sleeptimebeforesigkill",
+  "mapred.temp.dir": "mapreduce.cluster.temp.dir",
+  "mapred.text.key.comparator.options": "mapreduce.partition.keycomparator.options",
+  "mapred.text.key.partitioner.options": "mapreduce.partition.keypartitioner.options",
+  "mapred.textoutputformat.separator": "mapreduce.output.textoutputformat.separator",
+  "mapred.tip.id": "mapreduce.task.id",
+  "mapreduce.combine.class": "mapreduce.job.combine.class",
+  "mapreduce.inputformat.class": "mapreduce.job.inputformat.class",
+  "mapreduce.job.counters.limit": "mapreduce.job.counters.max",
+  "mapreduce.jobtracker.permissions.supergroup": "mapreduce.cluster.permissions.supergroup",
+  "mapreduce.map.class": "mapreduce.job.map.class",
+  "mapreduce.outputformat.class": "mapreduce.job.outputformat.class",
+  "mapreduce.partitioner.class": "mapreduce.job.partitioner.class",
+  "mapreduce.reduce.class": "mapreduce.job.reduce.class",
+  "mapred.used.genericoptionsparser": "mapreduce.client.genericoptionsparser.used",
+  "mapred.userlog.limit.kb": "mapreduce.task.userlog.limit.kb",
+  "mapred.userlog.retain.hours": "mapreduce.job.userlog.retain.hours",
+  "mapred.working.dir": "mapreduce.job.working.dir",
+  "mapred.work.output.dir": "mapreduce.task.output.dir",
+  "min.num.spills.for.combine": "mapreduce.map.combine.minspills",
+  "reduce.output.key.value.fields.spec": "mapreduce.fieldsel.reduce.output.key.value.fields.spec",
+  "security.job.submission.protocol.acl": "security.job.client.protocol.acl",
+  "security.task.umbilical.protocol.acl": "security.job.task.protocol.acl",
+  "sequencefile.filter.class": "mapreduce.input.sequencefileinputfilter.class",
+  "sequencefile.filter.frequency": "mapreduce.input.sequencefileinputfilter.frequency",
+  "sequencefile.filter.regex": "mapreduce.input.sequencefileinputfilter.regex",
+  "session.id": "dfs.metrics.session-id",
+  "slave.host.name": "dfs.datanode.hostname",
+  "slave.host.name": "mapreduce.tasktracker.host.name",
+  "tasktracker.contention.tracking": "mapreduce.tasktracker.contention.tracking",
+  "tasktracker.http.threads": "mapreduce.tasktracker.http.threads",
+  "topology.node.switch.mapping.impl": "net.topology.node.switch.mapping.impl",
+  "topology.script.file.name": "net.topology.script.file.name",
+  "topology.script.number.args": "net.topology.script.number.args",
+  "user.name": "mapreduce.job.user.name",
+  "webinterface.private.actions": "mapreduce.jobtracker.webinterface.trusted"
+}
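+# (PROPERTY_MAPPING mirrors Hadoop's deprecated-property table; rename_all_properties()
+# below applies it to the existing core-site, hdfs-site and mapred-site configs)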
+
+CAPACITY_SCHEDULER = {
+  "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+  "yarn.scheduler.capacity.maximum-applications": "10000",
+  "yarn.scheduler.capacity.root.acl_administer_queues": "*",
+  "yarn.scheduler.capacity.root.capacity": "100",
+  "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+  "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*",
+  "yarn.scheduler.capacity.root.default.capacity": "100",
+  "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+  "yarn.scheduler.capacity.root.default.state": "RUNNING",
+  "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+  "yarn.scheduler.capacity.root.queues": "default"
+}
+
+MAPRED_SITE = {
+  "hadoop.job.history.location": "DELETE_OLD",
+  "hadoop.job.history.user.location": "DELETE_OLD",
+  "io.sort.record.percent": "DELETE_OLD",
+  "jetty.connector": "DELETE_OLD",
+  "mapred.child.java.opts": "DELETE_OLD",
+  "mapred.child.root.logger": "DELETE_OLD",
+  "mapred.create.symlink": "DELETE_OLD",
+  "mapred.fairscheduler.allocation.file": "DELETE_OLD",
+  "mapred.fairscheduler.assignmultiple": "DELETE_OLD",
+  "mapreduce.job.priority": "DELETE_OLD",
+  "mapred.jobtracker.blacklist.fault-bucket-width": "DELETE_OLD",
+  "mapred.jobtracker.blacklist.fault-timeout-window": "DELETE_OLD",
+  "mapred.jobtracker.completeuserjobs.maximum": "DELETE_OLD",
+  "mapred.jobtracker.job.history.block.size": "DELETE_OLD",
+  "mapred.jobtracker.retirejob.check": "DELETE_OLD",
+  "mapred.jobtracker.retirejob.interval": "DELETE_OLD",
+  "mapred.jobtracker.taskScheduler": "DELETE_OLD",
+  "mapred.permissions.supergroup": "DELETE_OLD",
+  "mapred.queue.names": "DELETE_OLD",
+  "mapreduce.cluster.acls.enabled": "DELETE_OLD",
+  "mapreduce.cluster.local.dir": "DELETE_OLD",
+  "mapreduce.cluster.mapmemory.mb": "DELETE_OLD",
+  "mapreduce.cluster.permissions.supergroup": "DELETE_OLD",
+  "mapreduce.cluster.reducememory.mb": "DELETE_OLD",
+  "mapreduce.cluster.temp.dir": "DELETE_OLD",
+  "mapreduce.jobtracker.jobinit.threads": "DELETE_OLD",
+  "mapreduce.jobtracker.permissions.supergroup": "DELETE_OLD",
+  "mapreduce.job.cache.symlink.create": "DELETE_OLD",
+  "mapreduce.job.speculative.slownodethreshold": "DELETE_OLD",
+  "mapreduce.job.userlog.retain.hours": "DELETE_OLD",
+  "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+  "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+  "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+  "mapreduce.am.max-attempts": "2",
+  "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+  "mapreduce.fileoutputcommitter.marksuccessfuljobs": "DELETE_OLD",
+  "mapreduce.framework.name": "yarn",
+  "mapreduce.history.server.embedded": "DELETE_OLD",
+  "mapreduce.history.server.http.address": "DELETE_OLD",
+  "mapreduce.job.committer.setup.cleanup.needed": "DELETE_OLD",
+  "mapreduce.job.jvm.numtasks": "DELETE_OLD",
+  "mapreduce.jobhistory.address": "REPLACE_JH_HOST:10020",
+  "mapreduce.jobhistory.done-dir": "/mr-history/done",
+  "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+  "mapreduce.jobhistory.webapp.address": "REPLACE_JH_HOST:19888",
+  "mapreduce.jobtracker.address": "DELETE_OLD",
+  "mapreduce.jobtracker.blacklist.average.threshold": "DELETE_OLD",
+  "mapreduce.jobtracker.expire.trackers.interval": "DELETE_OLD",
+  "mapreduce.jobtracker.handler.count": "DELETE_OLD",
+  "mapreduce.jobtracker.heartbeats.in.second": "DELETE_OLD",
+  "mapreduce.jobtracker.hosts.exclude.filename": "DELETE_OLD",
+  "mapreduce.jobtracker.hosts.filename": "DELETE_OLD",
+  "mapreduce.jobtracker.http.address": "DELETE_OLD",
+  "mapreduce.jobtracker.instrumentation": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.block.size": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.completed.location": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.location": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.lru.cache.size": "DELETE_OLD",
+  "mapreduce.jobtracker.maxmapmemory.mb": "DELETE_OLD",
+  "mapreduce.jobtracker.maxreducememory.mb": "DELETE_OLD",
+  "mapreduce.jobtracker.maxtasks.perjob": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.active": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.dir": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.hours": "DELETE_OLD",
+  "mapreduce.jobtracker.restart.recover": "DELETE_OLD",
+  "mapreduce.jobtracker.retiredjobs.cache.size": "DELETE_OLD",
+  "mapreduce.jobtracker.retirejobs": "DELETE_OLD",
+  "mapreduce.jobtracker.split.metainfo.maxsize": "DELETE_OLD",
+  "mapreduce.jobtracker.staging.root.dir": "DELETE_OLD",
+  "mapreduce.jobtracker.system.dir": "DELETE_OLD",
+  "mapreduce.jobtracker.taskcache.levels": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad": "DELETE_OLD",
+  "mapreduce.jobtracker.tasktracker.maxblacklists": "DELETE_OLD",
+  "mapreduce.jobtracker.webinterface.trusted": "DELETE_OLD",
+  "mapreduce.map.java.opts": "-Xmx756m",
+  "mapreduce.map.log.level": "INFO",
+  "mapreduce.map.memory.mb": "1024",
+  "mapreduce.map.output.compress": "false",
+  "mapreduce.map.output.compress.codec": "DELETE_OLD",
+  "mapreduce.map.sort.spill.percent": "0.7",
+  "mapreduce.output.fileoutputformat.compress": "false",
+  "mapreduce.reduce.input.limit": "DELETE_OLD",
+  "mapreduce.reduce.java.opts": "-Xmx756m",
+  "mapreduce.reduce.log.level": "INFO",
+  "mapreduce.reduce.memory.mb": "1024",
+  "mapreduce.reduce.merge.inmem.threshold": "DELETE_OLD",
+  "mapreduce.shuffle.port": "13562",
+  "mapreduce.task.timeout": "300000",
+  "mapreduce.task.userlog.limit.kb": "DELETE_OLD",
+  "mapreduce.tasktracker.cache.local.size": "DELETE_OLD",
+  "mapreduce.tasktracker.contention.tracking": "DELETE_OLD",
+  "mapreduce.tasktracker.dns.interface": "DELETE_OLD",
+  "mapreduce.tasktracker.dns.nameserver": "DELETE_OLD",
+  "mapreduce.tasktracker.events.batchsize": "DELETE_OLD",
+  "mapreduce.tasktracker.group": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.interval": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.args": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.path": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.timeout": "DELETE_OLD",
+  "mapreduce.tasktracker.host.name": "DELETE_OLD",
+  "mapreduce.tasktracker.http.address": "DELETE_OLD",
+  "mapreduce.tasktracker.http.threads": "DELETE_OLD",
+  "mapreduce.tasktracker.indexcache.mb": "DELETE_OLD",
+  "mapreduce.tasktracker.instrumentation": "DELETE_OLD",
+  "mapreduce.tasktracker.local.dir.minspacekill": "DELETE_OLD",
+  "mapreduce.tasktracker.local.dir.minspacestart": "DELETE_OLD",
+  "mapreduce.tasktracker.map.tasks.maximum": "DELETE_OLD",
+  "mapreduce.tasktracker.net.static.resolutions": "DELETE_OLD",
+  "mapreduce.tasktracker.reduce.tasks.maximum": "DELETE_OLD",
+  "mapreduce.tasktracker.report.address": "DELETE_OLD",
+  "mapreduce.tasktracker.resourcecalculatorplugin": "DELETE_OLD",
+  "mapreduce.tasktracker.taskcontroller": "DELETE_OLD",
+  "mapreduce.tasktracker.taskmemorymanager.monitoringinterval": "DELETE_OLD",
+  "mapreduce.tasktracker.tasks.sleeptimebeforesigkill": "DELETE_OLD",
+  "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+  "yarn.app.mapreduce.am.command-opts": "-Xmx312m -Dhdp.version=${hdp.version}",
+  "yarn.app.mapreduce.am.log.level": "INFO",
+  "yarn.app.mapreduce.am.resource.mb": "512",
+  "yarn.app.mapreduce.am.staging-dir": "/user"
+}
+
+GLOBAL = {
+  "datanode_du_reserved": "DELETE_OLD",
+  "dfs_block_local_path_access_user": "DELETE_OLD",
+  "dfs_datanode_data_dir": "DELETE_OLD",
+  "dfs_exclude": "dfs.exclude",
+  "dfs_include": "DELETE_OLD",
+  "dfs_namenode_checkpoint_dir": "DELETE_OLD",
+  "dfs_namenode_checkpoint_period": "DELETE_OLD",
+  "dfs_namenode_name_dir": "DELETE_OLD",
+  "fs_checkpoint_size": "DELETE_OLD",
+  "io_sort_spill_percent": "DELETE_OLD",
+  "hadoop_conf_dir": "DELETE_OLD",
+  "hdfs_support_append": "DELETE_OLD",
+  "hfile_blockcache_size": "DELETE_OLD",
+  "hregion_majorcompaction": "DELETE_OLD",
+  "hstore_blockingstorefiles": "DELETE_OLD",
+  "jtnode_heapsize": "DELETE_OLD",
+  "jtnode_opt_maxnewsize": "DELETE_OLD",
+  "jtnode_opt_newsize": "DELETE_OLD",
+  "mapred_child_java_opts_sz": "DELETE_OLD",
+  "mapred_cluster_map_mem_mb": "DELETE_OLD",
+  "mapred_cluster_max_map_mem_mb": "DELETE_OLD",
+  "mapred_cluster_max_red_mem_mb": "DELETE_OLD",
+  "mapred_cluster_red_mem_mb": "DELETE_OLD",
+  "mapred_hosts_exclude": "DELETE_OLD",
+  "mapred_hosts_include": "DELETE_OLD",
+  "mapred_job_map_mem_mb": "DELETE_OLD",
+  "mapred_job_red_mem_mb": "DELETE_OLD",
+  "mapred_local_dir": "DELETE_OLD",
+  "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",
+  "mapred_map_tasks_max": "DELETE_OLD",
+  "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+  "mapred_red_tasks_max": "DELETE_OLD",
+  "mapreduce_jobtracker_system_dir": "DELETE_OLD",
+  "mapreduce_map_memory_mb": "DELETE_OLD",
+  "mapreduce_reduce_memory_mb": "DELETE_OLD",
+  "mapreduce_task_io_sort_mb": "DELETE_OLD",
+  "maxtasks_per_job": "DELETE_OLD",
+  "mapreduce_userlog_retainhours": "DELETE_OLD",
+  "namenode_opt_maxnewsize": "640m",
+  "nodemanager_heapsize": "1024",
+  "rca_enabled": "DELETE_OLD",
+  "resourcemanager_heapsize": "1024",
+  "scheduler_name": "DELETE_OLD",
+  "snappy_enabled": "DELETE_OLD",
+  "task_controller": "DELETE_OLD",
+  "yarn_heapsize": "1024",
+  "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+  "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+  "yarn_user": "yarn",
+  "zookeeper_sessiontimeout": "DELETE_OLD",
+  "apache_artifacts_download_url": "DELETE_OLD",
+  "client_scannercaching": "DELETE_OLD",
+  "dfs_data_dir": "DELETE_OLD",
+  "dfs_datanode_address": "DELETE_OLD",
+  "dfs_datanode_data_dir_perm": "DELETE_OLD",
+  "dfs_datanode_failed_volume_tolerated": "DELETE_OLD",
+  "dfs_datanode_http_address": "DELETE_OLD",
+  "dfs_exclude": "DELETE_OLD",
+  "dfs_name_dir": "DELETE_OLD",
+  "dfs_replication": "DELETE_OLD",
+  "dfs_webhdfs_enabled": "DELETE_OLD",
+  "fs_checkpoint_dir": "DELETE_OLD",
+  "fs_checkpoint_period": "DELETE_OLD",
+  "gpl_artifacts_download_url": "DELETE_OLD",
+  "hbase_conf_dir": "DELETE_OLD",
+  "hbase_hdfs_root_dir": "DELETE_OLD",
+  "hbase_tmp_dir": "DELETE_OLD",
+  "hdfs_enable_shortcircuit_read": "DELETE_OLD",
+  "hfile_max_keyvalue_size": "DELETE_OLD",
+  "hive_conf_dir": "DELETE_OLD",
+  "hive_database_name": "DELETE_OLD",
+  "hive_jdbc_connection_url": "DELETE_OLD",
+  "hive_jdbc_driver": "DELETE_OLD",
+  "hive_lib": "DELETE_OLD",
+  "hive_dbroot": "DELETE_OLD",
+  "hive_metastore_user_name": "DELETE_OLD",
+  "hive_metastore_user_passwd": "DELETE_OLD",
+  "hregion_blockmultiplier": "DELETE_OLD",
+  "hregion_memstoreflushsize": "DELETE_OLD",
+  "hstore_compactionthreshold": "DELETE_OLD",
+  "hstorefile_maxsize": "DELETE_OLD",
+  "io_sort_mb": "DELETE_OLD",
+  "java64_home": "DELETE_OLD",
+  "mapred_jobstatus_dir": "DELETE_OLD",
+  "mapred_system_dir": "DELETE_OLD",
+  "mysql_connector_url": "DELETE_OLD",
+  "namenode_formatted_mark_dir": "DELETE_OLD",
+  "oozie_database_name": "DELETE_OLD",
+  "oozie_jdbc_connection_url": "DELETE_OLD",
+  "oozie_jdbc_driver": "DELETE_OLD",
+  "oozie_metastore_user_name": "DELETE_OLD",
+  "oozie_metastore_user_passwd": "DELETE_OLD",
+  "regionserver_handlers": "DELETE_OLD",
+  "regionserver_memstore_lab": "DELETE_OLD",
+  "regionserver_memstore_lowerlimit": "DELETE_OLD",
+  "regionserver_memstore_upperlimit": "DELETE_OLD",
+  "run_dir": "DELETE_OLD",
+  "zk_pid_file": "DELETE_OLD"
+}
+
+HDFS_SITE = {
+  "dfs.block.local-path-access.user": "DELETE_OLD",
+  "dfs.client.read.shortcircuit": "true",
+  "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+  "dfs.datanode.du.pct": "DELETE_OLD",
+  "dfs.datanode.du.reserved": "1073741824",
+  "dfs.datanode.socket.write.timeout": "DELETE_OLD",
+  "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+  "dfs.hosts": "DELETE_OLD",
+  "dfs.journalnode.http-address": "0.0.0.0:8480",
+  "dfs.secondary.https.port": "DELETE_OLD",
+  "dfs.web.ugi": "DELETE_OLD",
+  "fs.permissions.umask-mode": "022",
+  "ipc.server.max.response.size": "DELETE_OLD",
+  "ipc.server.read.threadpool.size": "DELETE_OLD",
+  "dfs.support.append": "true",
+  "dfs.namenode.checkpoint.txns": "1000000",
+  "dfs.namenode.checkpoint.period": "21600"
+}
+
+CORE_SITE = {
+  "fs.checkpoint.size": "DELETE_OLD",
+  "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT\n    ",
+  "hadoop.security.authentication": "simple",
+  "hadoop.security.authorization": "false",
+  "io.compression.codec.lzo.class": "DELETE_OLD",
+  "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+}
+
+YARN_SITE = {
+  "yarn.acl.enable": "true",
+  "yarn.admin.acl": "*",
+  "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+  "yarn.log-aggregation-enable": "true",
+  "yarn.log-aggregation.retain-seconds": "2592000",
+  "yarn.log.server.url": "http://REPLACE_JH_HOST:19888/jobhistory/logs",
+  "yarn.nodemanager.address": "0.0.0.0:45454",
+  "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+  "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+  "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
+  "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
+  "yarn.nodemanager.container-monitor.interval-ms": "3000",
+  "yarn.nodemanager.delete.debug-delay-sec": "0",
+  "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+  "yarn.nodemanager.health-checker.interval-ms": "135000",
+  "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+  "yarn.nodemanager.linux-container-executor.group": "hadoop",
+  "yarn.nodemanager.local-dirs": "/var/log/hadoop/yarn/local",
+  "yarn.nodemanager.log-aggregation.compression-type": "gz",
+  "yarn.nodemanager.log-dirs": "/var/log/hadoop/yarn/log",
+  "yarn.nodemanager.log.retain-second": "604800",
+  "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+  "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+  "yarn.nodemanager.resource.memory-mb": "5120",
+  "yarn.nodemanager.vmem-check-enabled": "false",
+  "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+  "yarn.resourcemanager.address": "REPLACE_RM_HOST:8050",
+  "yarn.resourcemanager.admin.address": "REPLACE_RM_HOST:8141",
+  "yarn.resourcemanager.am.max-attempts": "2",
+  "yarn.resourcemanager.hostname": "REPLACE_RM_HOST",
+  "yarn.resourcemanager.resource-tracker.address": "REPLACE_RM_HOST:8025",
+  "yarn.resourcemanager.scheduler.address": "REPLACE_RM_HOST:8030",
+  "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+  "yarn.resourcemanager.webapp.address": "REPLACE_RM_HOST:8088",
+  "yarn.scheduler.maximum-allocation-mb": "2048",
+  "yarn.scheduler.minimum-allocation-mb": "512"
+}
+
+HBASE_SITE = {
+  "dfs.client.read.shortcircuit": "DELETE_OLD",
+  "dfs.support.append": "DELETE_OLD",
+  "hbase.defaults.for.version.skip": "true",
+  "hbase.hregion.majorcompaction": "604800000",
+  "hbase.hregion.max.filesize": "10737418240",
+  "hbase.hstore.blockingStoreFiles": "10",
+  "hbase.hstore.flush.retries.number": "120",
+  "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+  "hbase.regionserver.handler.count": "60",
+  "hbase.rpc.engine": "DELETE_OLD",
+  "hfile.block.cache.size": "0.40",
+  "zookeeper.session.timeout": "30000"
+}
+
+HIVE_SITE = {
+  "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+  "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+  "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator"
+}
+
+
+class FatalException(Exception):
+  def __init__(self, code, reason):
+    self.code = code
+    self.reason = reason
+
+  def __str__(self):
+    return repr("Fatal exception: %s, exit code %s" % (self.reason, self.code))
+
+  def _get_message(self):
+    return str(self)
+
+# Copy file and save with file.# (timestamp)
+def backup_file(filePath):
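+  # e.g. upgrade_log becomes upgrade_log.20141215093000 (timestamp value illustrative)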
+  if filePath is not None and os.path.exists(filePath):
+    timestamp = datetime.datetime.now()
+    timestamp_format = '%Y%m%d%H%M%S'
+    try:
+      shutil.copyfile(filePath, filePath + "." + timestamp.strftime(timestamp_format))
+      os.remove(filePath)
+    except Exception, e:
+      logger.warn('Could not backup file "%s": %s' % (filePath, str(e)))
+  return 0
+
+
+def write_mapping(hostmapping):
+  if os.path.isfile(MR_MAPPING_FILE):
+    os.remove(MR_MAPPING_FILE)
+  json.dump(hostmapping, open(MR_MAPPING_FILE, 'w'))
+  pass
+
+
+def write_config(config, type, tag):
+  file_name = type + "_" + tag
+  if os.path.isfile(file_name):
+    os.remove(file_name)
+  json.dump(config, open(file_name, 'w'))
+  pass
+
+
+def read_mapping():
+  if os.path.isfile(MR_MAPPING_FILE):
+    return json.load(open(MR_MAPPING_FILE))
+  else:
+    raise FatalException(-1, "MAPREDUCE host mapping file, mr_mapping, is not available or badly formatted. Execute "
+                             "action save-mr-mapping. Ensure the file is present in the directory where you are "
+                             "executing this command.")
+  pass
+
+
+def get_mr1_mapping(options):
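+  # Queries each MR1 component and records its host_name list keyed by component,
+  # then writes the result to mr_mapping as JSON, e.g.
+  #   {"JOBTRACKER": ["jt.example.com"], "TASKTRACKER": ["tt1.example.com"], ...}
+  # (hostnames are illustrative)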
+  components = ["MAPREDUCE_CLIENT", "JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
+  GET_URL_FORMAT = URL_FORMAT + '/services/MAPREDUCE/components/{2}'
+  hostmapping = {}
+  for component in components:
+    hostlist = []
+    response = curl(False, '-u',
+                    AUTH_FORMAT.format(options.user, options.password),
+                    GET_URL_FORMAT.format(options.hostname, options.clustername, component))
+    retcode, errdata = validate_response(response, True)
+    if not retcode == 0:
+      raise FatalException(retcode, errdata)
+
+    structured_resp = json.loads(response)
+    if 'host_components' in structured_resp:
+      for hostcomponent in structured_resp['host_components']:
+        if 'HostRoles' in hostcomponent:
+          if 'host_name' in hostcomponent['HostRoles']:
+            hostlist.append(hostcomponent['HostRoles']['host_name'])
+            pass
+          pass
+        pass
+      pass
+    pass
+    hostmapping[component] = hostlist
+  write_mapping(hostmapping)
+
+
+def get_YN_input(prompt, default):
+  yes = set(['yes', 'ye', 'y'])
+  no = set(['no', 'n'])
+  return get_choice_string_input(prompt, default, yes, no)
+
+
+def get_choice_string_input(prompt, default, firstChoice, secondChoice):
+  choice = raw_input(prompt).lower()
+  if choice in firstChoice:
+    return True
+  elif choice in secondChoice:
+    return False
+  elif choice == "": # Just enter pressed
+    return default
+  else:
+    print "input not recognized, please try again: "
+    return get_choice_string_input(prompt, default, firstChoice, secondChoice)
+
+
+def delete_mr(options):
+  saved_mr_mapping = get_YN_input("Have you saved MR host mapping using action save-mr-mapping [y/n] (n)? ", False)
+  if not saved_mr_mapping:
+    raise FatalException(1, "Ensure MAPREDUCE host component mapping is saved before deleting it. Use action "
+                            "save-mr-mapping.")
+
+  SERVICE_URL_FORMAT = URL_FORMAT + '/services/MAPREDUCE'
+  COMPONENT_URL_FORMAT = URL_FORMAT + '/hosts/{2}/host_components/{3}'
+  NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
+  PUT_IN_DISABLED = """{"HostRoles": {"state": "DISABLED"}}"""
+  hostmapping = read_mapping()
+
+  for key, value in hostmapping.items():
+    if (key in NON_CLIENTS) and (len(value) > 0):
+      for host in value:
+        response = curl(options.printonly, '-u',
+                        AUTH_FORMAT.format(options.user, options.password),
+                        '-H', 'X-Requested-By: ambari',
+                        '-X', 'PUT', '-d',
+                        PUT_IN_DISABLED,
+                        COMPONENT_URL_FORMAT.format(options.hostname, options.clustername, host, key))
+        retcode, errdata = validate_response(response, False)
+        if not retcode == 0:
+          raise FatalException(retcode, errdata)
+        pass
+      pass
+    pass
+  pass
+
+  response = curl(options.printonly, '-u',
+                  AUTH_FORMAT.format(options.user, options.password),
+                  '-H', 'X-Requested-By: ambari',
+                  '-X', 'DELETE',
+                  SERVICE_URL_FORMAT.format(options.hostname, options.clustername))
+  retcode, errdata = validate_response(response, False)
+  if not retcode == 0:
+    raise FatalException(retcode, errdata)
+  pass
+
+def get_cluster_stackname(options):
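+  # Reads Clusters/version and returns the stack string, e.g. "HDP-2.1";
+  # has_component_in_stack_def() splits it on '-' into stack name and version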
+  VERSION_URL_FORMAT = URL_FORMAT + '?fields=Clusters/version'
+  
+  response = curl(False, '-u',
+                AUTH_FORMAT.format(options.user, options.password),
+                VERSION_URL_FORMAT.format(options.hostname, options.clustername))
+  retcode, errdata = validate_response(response, True)
+  
+  if not retcode == 0:
+    raise FatalException(retcode, errdata)
+  
+  structured_resp = json.loads(response)
+  
+  if 'Clusters' in structured_resp:
+    if 'version' in structured_resp['Clusters']:
+      return structured_resp['Clusters']['version']
+        
+  raise FatalException(-1, "Unable to get the cluster version")
+
+def has_component_in_stack_def(options, stack_name, service_name, component_name):
+  STACK_COMPONENT_URL_FORMAT = ROOT_FORMAT+'/stacks2/{1}/versions/{2}/stackServices/{3}/serviceComponents/{4}'
+  stack, stack_version = stack_name.split('-')
+  
+  response = curl(False, '-u',
+                AUTH_FORMAT.format(options.user, options.password),
+                STACK_COMPONENT_URL_FORMAT.format(options.hostname, stack,
+                                                  stack_version, service_name, component_name))
+  retcode, errdata = validate_response(response, True)
+  return not bool(retcode)
+  
+def add_services(options):
+  SERVICE_URL_FORMAT = URL_FORMAT + '/services/{2}'
+  COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{3}'
+  HOST_COMPONENT_URL_FORMAT = URL_FORMAT + '/hosts/{2}/host_components/{3}'
+  service_comp = {
+    "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
+    "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
+  new_old_host_map = {
+    "NODEMANAGER": "TASKTRACKER",
+    "HISTORYSERVER": "HISTORYSERVER",
+    "RESOURCEMANAGER": "JOBTRACKER",
+    "YARN_CLIENT": "MAPREDUCE_CLIENT",
+    "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
+  
+  stack_name = get_cluster_stackname(options)
+  stack_has_ats = has_component_in_stack_def(options, stack_name, "YARN", "APP_TIMELINE_SERVER")
+  
+  # if upgrading to a stack whose definition includes ATS (2.1 and later)
+  if stack_has_ats:
+    service_comp["YARN"].append("APP_TIMELINE_SERVER")
+    new_old_host_map["APP_TIMELINE_SERVER"] = "JOBTRACKER"
+    
+  hostmapping = read_mapping()
+
+  for service in service_comp.keys():
+    response = curl(options.printonly, '-u',
+                    AUTH_FORMAT.format(options.user, options.password),
+                    '-H', 'X-Requested-By: ambari',
+                    '-X', 'POST',
+                    SERVICE_URL_FORMAT.format(options.hostname, options.clustername, service))
+    retcode, errdata = validate_response(response, False)
+    if not retcode == 0:
+      raise FatalException(retcode, errdata)
+    for component in service_comp[service]:
+      response = curl(options.printonly, '-u',
+                      AUTH_FORMAT.format(options.user, options.password),
+                      '-H', 'X-Requested-By: ambari',
+                      '-X', 'POST',
+                      COMPONENT_URL_FORMAT.format(options.hostname, options.clustername, service, component))
+      retcode, errdata = validate_response(response, False)
+      if not retcode == 0:
+        raise FatalException(retcode, errdata)
+      for host in hostmapping[new_old_host_map[component]]:
+        response = curl(options.printonly, '-u',
+                        AUTH_FORMAT.format(options.user, options.password),
+                        '-H', 'X-Requested-By: ambari',
+                        '-X', 'POST',
+                        HOST_COMPONENT_URL_FORMAT.format(options.hostname, options.clustername, host, component))
+        retcode, errdata = validate_response(response, False)
+        if not retcode == 0:
+          raise FatalException(retcode, errdata)
+        pass
+      pass
+    pass
+  pass
+
+
+def update_config(options, properties, type):
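+  # Pushes a new desired_config of the given type; the tag is a unique
+  # millisecond-timestamp version string, e.g. "version1418634000000" (value illustrative)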
+  tag = "version" + str(int(time.time() * 1000))
+  properties_payload = {"Clusters": {"desired_config": {"type": type, "tag": tag, "properties": properties}}}
+  response = curl(options.printonly, '-u',
+                  AUTH_FORMAT.format(options.user, options.password),
+                  '-H', 'X-Requested-By: ambari',
+                  '-X', 'PUT', '-d',
+                  json.dumps(properties_payload),
+                  URL_FORMAT.format(options.hostname, options.clustername))
+  retcode, errdata = validate_response(response, False)
+  if not retcode == 0:
+    raise FatalException(retcode, errdata)
+  pass
+
+
+def get_config(options, type):
+  tag, structured_resp = get_config_resp(options, type)
+  properties = None
+  if 'items' in structured_resp:
+    for item in structured_resp['items']:
+      if (tag == item['tag']) or (type == item['type']):
+        properties = item['properties']
+  if (properties is None):
+    raise FatalException(-1, "Unable to read configuration for type " + type + " and tag " + tag)
+  else:
+    logger.info("Read configuration for type " + type + " and tag " + tag)
+  return properties
+
+
+def get_config_resp(options, type, error_if_na=True):
+  CONFIG_URL_FORMAT = URL_FORMAT + '/configurations?type={2}&tag={3}'
+  response = curl(False, '-u',
+                  AUTH_FORMAT.format(options.user, options.password),
+                  URL_FORMAT.format(options.hostname, options.clustername))
+  retcode, errdata = validate_response(response, True)
+  if not retcode == 0:
+    raise FatalException(retcode, errdata)
+  # Read the config version
+  tag = None
+  structured_resp = json.loads(response)
+  if 'Clusters' in structured_resp:
+    if 'desired_configs' in structured_resp['Clusters']:
+      if type in structured_resp['Clusters']['desired_configs']:
+        tag = structured_resp['Clusters']['desired_configs'][type]['tag']
+
+  if tag is not None:
+    # Get the config with the tag and return properties
+    response = curl(False, '-u',
+                    AUTH_FORMAT.format(options.user, options.password),
+                    CONFIG_URL_FORMAT.format(options.hostname, options.clustername, type, tag))
+    retcode, errdata = validate_response(response, True)
+    if not retcode == 0:
+      raise FatalException(retcode, errdata)
+    structured_resp = json.loads(response)
+    return (tag, structured_resp)
+  else:
+    if error_if_na:
+      raise FatalException(-1, "Unable to get the current version for config type " + type)
+    else:
+      return (tag, None)
+  pass
+
+
+def modify_configs(options, config_type):
+  properties_to_move = [
+    "dfs.namenode.checkpoint.edits.dir",
+    "dfs.namenode.checkpoint.dir",
+    "dfs.namenode.checkpoint.period"]
+  hostmapping = read_mapping()
+
+  # Add capacity-scheduler, yarn-site  (added with default values)
+  if (config_type is None) or (config_type == CAPACITY_SCHEDULER_TAG):
+    update_config(options, CAPACITY_SCHEDULER, CAPACITY_SCHEDULER_TAG)
+    pass
+
+  jt_host = hostmapping["JOBTRACKER"][0]
+
+  if (config_type is None) or (config_type == YARN_SITE_TAG):
+    for key in YARN_SITE.keys():
+      if REPLACE_JH_HOST_NAME_TAG in YARN_SITE[key]:
+        YARN_SITE[key] = YARN_SITE[key].replace(REPLACE_JH_HOST_NAME_TAG, jt_host, 1)
+      if REPLACE_RM_HOST_NAME_TAG in YARN_SITE[key]:
+        YARN_SITE[key] = YARN_SITE[key].replace(REPLACE_RM_HOST_NAME_TAG, jt_host, 1)
+        pass
+      pass
+    pass
+    update_config(options, YARN_SITE, YARN_SITE_TAG)
+    pass
+
+  # Update global config
+  if (config_type is None) or (config_type == GLOBAL_TAG):
+    update_config_using_existing(options, GLOBAL_TAG, GLOBAL.copy())
+    pass
+
+  core_site_latest = rename_all_properties(get_config(options, CORE_SITE_TAG), PROPERTY_MAPPING)
+  hdfs_site_latest = rename_all_properties(get_config(options, HDFS_SITE_TAG), PROPERTY_MAPPING)
+  mapred_site_latest = rename_all_properties(get_config(options, MAPRED_SITE_TAG), PROPERTY_MAPPING)
+
+  for property in properties_to_move:
+    if property in core_site_latest.keys():
+      hdfs_site_latest[property] = core_site_latest[property]
+      del core_site_latest[property]
+    pass
+
+  # Update mapred-site config
+  mapred_updated = MAPRED_SITE.copy()
+  if (config_type is None) or (config_type == MAPRED_SITE_TAG):
+    for key in mapred_updated.keys():
+      if REPLACE_JH_HOST_NAME_TAG in mapred_updated[key]:
+        mapred_updated[key] = mapred_updated[key].replace(REPLACE_JH_HOST_NAME_TAG, jt_host, 1)
+        pass
+      pass
+    pass
+    update_config_using_existing_properties(options, MAPRED_SITE_TAG, mapred_updated, mapred_site_latest)
+    pass
+
+  # Update hdfs-site, core-site
+  if (config_type is None) or (config_type == HDFS_SITE_TAG):
+    update_config_using_existing_properties(options, HDFS_SITE_TAG, HDFS_SITE.copy(), hdfs_site_latest)
+    pass
+  if (config_type is None) or (config_type == CORE_SITE_TAG):
+    update_config_using_existing_properties(options, CORE_SITE_TAG, CORE_SITE.copy(), core_site_latest)
+    pass
+
+  # Update hbase-site if exists
+  if (config_type is None) or (config_type == HBASE_SITE_TAG):
+    tag, structured_resp = get_config_resp(options, HBASE_SITE_TAG, False)
+    if structured_resp is not None:
+      update_config_using_existing(options, HBASE_SITE_TAG, HBASE_SITE.copy())
+      pass
+    pass
+
+  # Update hive-site if exists
+  if (config_type is None) or (config_type == HIVE_SITE_TAG):
+    tag, structured_resp = get_config_resp(options, HIVE_SITE_TAG, False)
+    if structured_resp is not None:
+      update_config_using_existing(options, HIVE_SITE_TAG, HIVE_SITE.copy())
+      pass
+  pass
+
+
+def rename_all_properties(properties, name_mapping):
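+  # In-place rename: for each old->new pair whose old name is present (and whose
+  # new name is not already set), move the value under the new key, e.g.
+  #   {"fs.default.name": "hdfs://nn:8020"} becomes {"fs.defaultFS": "hdfs://nn:8020"}
+  # (the URI value is illustrative)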
+  for key, val in name_mapping.items():
+    if (key in properties.keys()) and (val not in properties.keys()):
+      properties[val] = properties[key]
+      del properties[key]
+    pass
+  return properties
+
+
+def update_config_using_existing(options, type, properties_template):
+  site_properties = get_config(options, type)
+  update_config_using_existing_properties(options, type, properties_template, site_properties)
+  pass
+
+
+def update_config_using_existing_properties(options, type, properties_template,
+                                            site_properties):
+  keys_processed = []
+  keys_to_delete = []
+  for key in properties_template.keys():
+    keys_processed.append(key)
+    if properties_template[key] == DELETE_OLD_TAG:
+      keys_to_delete.append(key)
+      pass
+    if properties_template[key].startswith(REPLACE_WITH_TAG):
+      name_to_lookup = key
+      if len(properties_template[key]) > len(REPLACE_WITH_TAG):
+        name_to_lookup = properties_template[key][len(REPLACE_WITH_TAG):]
+        keys_processed.append(name_to_lookup)
+      value = ""
+      if name_to_lookup in site_properties.keys():
+        value = site_properties[name_to_lookup]
+        pass
+      else:
+        logger.warn("Unable to find the equivalent for " + key + ". Looked for " + name_to_lookup)
+      properties_template[key] = value
+      pass
+    pass
+  pass
+
+  for key in site_properties.keys():
+    if key not in keys_processed:
+      properties_template[key] = site_properties[key]
+      pass
+    pass
+  pass
+
+  for key in keys_to_delete:
+    del properties_template[key]
+  pass
+  update_config(options, properties_template, type)
+
+
+def backup_configs(options, type=None):
+  types_to_save = {"global": True, "mapred-site": True, "hdfs-site": True, "core-site": True,
+                   "webhcat-site": False, "hive-site": False, "hbase-site": False, "oozie-site": False}
+  for type in types_to_save.keys():
+    backup_single_config_type(options, type, types_to_save[type])
+    pass
+  pass
+
+
+def backup_single_config_type(options, type, error_if_na=True):
+  tag, response = get_config_resp(options, type, error_if_na)
+  if response is not None:
+    logger.info("Saving config for type: " + type + " and tag: " + tag)
+    write_config(response, type, tag)
+  else:
+    logger.info("Unable to obtain config for type: " + type)
+    pass
+  pass
+
+
+def install_services(options):
+  SERVICE_URL_FORMAT = URL_FORMAT + '/services/{2}'
+  SERVICES = ["MAPREDUCE2", "YARN"]
+  PUT_IN_INSTALLED = ["""{"RequestInfo":{"context":"Install MapReduce2"}, "Body":{"ServiceInfo": {"state":"INSTALLED"}}}""",
+                      """{"RequestInfo":{"context":"Install YARN"}, "Body":{"ServiceInfo": {"state":"INSTALLED"}}}"""]
+  err_retcode = 0
+  err_message = ""
+  for index in [0, 1]:
+    response = curl(options.printonly, '-u',
+                    AUTH_FORMAT.format(options.user, options.password),
+                    '-H', 'X-Requested-By: ambari',
+                    '-X', 'PUT', '-d',
+                    PUT_IN_INSTALLED[index],
+                    SERVICE_URL_FORMAT.format(options.hostname, options.clustername, SERVICES[index]))
+    retcode, errdata = validate_response(response, not options.printonly)
+    if not retcode == 0:
+      err_retcode = retcode
+      err_message = err_message + " Error while installing " + SERVICES[index] + ". Details: " + errdata + "."
+  pass
+
+  if err_retcode != 0:
+    raise FatalException(err_retcode, err_message + " (Services may already be installed or agents are not yet started.)")
+
+  options.exit_message = "Requests has been submitted to install YARN and MAPREDUCE2. Use Ambari Web to monitor " \
+                         "the status of the install requests."
+  pass
+
+
+def validate_response(response, expect_body):
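+  # Heuristic: successful Ambari GET responses carry an "href" field, while
+  # successful write calls return an empty body; anything else is treated as an error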
+  if expect_body:
+    if "\"href\" : \"" not in response:
+      return (1, response)
+    else:
+      return (0, "")
+  elif len(response) > 0:
+    return (1, response)
+  else:
+    return (0, "")
+  pass
+
+
+def curl(print_only, *args):
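+  # Shells out to /usr/bin/curl; with --printonly the command is only logged,
+  # which lets write actions be reviewed before a real run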
+  curl_path = '/usr/bin/curl'
+  curl_list = [curl_path]
+  for arg in args:
+    curl_list.append(arg)
+  if print_only:
+    logger.info("Command to be executed: " + ' '.join(curl_list))
+    return ""
+  pass
+  logger.info(' '.join(curl_list))
+  osStat = subprocess.Popen(
+    curl_list,
+    stderr=subprocess.PIPE,
+    stdout=subprocess.PIPE)
+  out, err = osStat.communicate()
+  if 0 != osStat.returncode:
+    error = "curl call failed. out: " + out + " err: " + err
+    logger.error(error)
+    raise FatalException(osStat.returncode, error)
+  return out
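+
+# Example (host and credentials as exercised by TestUpgradeScript_HDP2 below): the call
+#   curl(False, '-u', 'admin:admin', 'http://localhost:8080/api/v1/clusters/c1')
+# builds and runs: /usr/bin/curl -u admin:admin http://localhost:8080/api/v1/clusters/c1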
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] action\n  Valid actions: " + VALID_ACTIONS
+                                       + "\n  update-configs accepts type, e.g. hdfs-site to update specific configs")
+
+  parser.add_option("-n", "--printonly",
+                    action="store_true", dest="printonly", default=False,
+                    help="Prints all the curl commands to be executed (only for write/update actions)")
+  parser.add_option("-o", "--log", dest="logfile", default=UPGRADE_LOG_FILE,
+                    help="Log file")
+
+  parser.add_option('--hostname', default=None, help="Hostname for Ambari server", dest="hostname")
+  parser.add_option('--user', default=None, help="Ambari admin user", dest="user")
+  parser.add_option('--password', default=None, help="Ambari admin password", dest="password")
+  parser.add_option('--clustername', default=None, help="Cluster name", dest="clustername")
+
+  (options, args) = parser.parse_args()
+
+  options.warnings = []
+  if options.user is None:
+    options.warnings.append("User name must be provided (e.g. admin)")
+  if options.hostname is None:
+    options.warnings.append("Ambari server host name must be provided")
+  if options.clustername is None:
+    options.warnings.append("Cluster name must be provided")
+  if options.password is None:
+    options.warnings.append("Ambari admin user's password name must be provided (e.g. admin)")
+
+  if len(options.warnings) != 0:
+    parser.print_help()
+    for warning in options.warnings:
+      print "  " + warning
+    parser.error("Invalid or missing options")
+
+  if len(args) == 0:
+    parser.print_help()
+    parser.error("No action entered")
+
+  action = args[0]
+
+  options.exit_message = "Upgrade action '%s' completed successfully." % action
+  if options.printonly:
+    options.exit_message = "Simulated execution of action '%s'. Verify the list of edit calls." % action
+
+  backup_file(options.logfile)
+  global logger
+  logger = logging.getLogger('UpgradeHelper')
+  handler = logging.FileHandler(options.logfile)
+  formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+  handler.setFormatter(formatter)
+  logger.addHandler(handler)
+  logging.basicConfig(level=logging.DEBUG)
+
+  try:
+    if action == GET_MR_MAPPING_ACTION:
+      get_mr1_mapping(options)
+      pprint("File mr_mapping contains the host mapping for mapreduce components. This file is critical for later "
+             "steps.")
+    elif action == DELETE_MR_ACTION:
+      delete_mr(options)
+    elif action == ADD_YARN_MR2_ACTION:
+      add_services(options)
+    elif action == MODIFY_CONFIG_ACTION:
+      config_type = None
+      if len(args) > 1:
+        config_type = args[1]
+      modify_configs(options, config_type)
+    elif action == INSTALL_YARN_MR2_ACTION:
+      install_services(options)
+    elif action == BACKUP_CONFIG_ACTION:
+      backup_configs(options)
+    else:
+      parser.error("Invalid action")
+
+  except FatalException as e:
+    if e.reason is not None:
+      error = "ERROR: Exiting with exit code {0}. Reason: {1}".format(e.code, e.reason)
+      pprint(error)
+      logger.error(error)
+    sys.exit(e.code)
+
+  if options.exit_message is not None:
+    print options.exit_message
+
+
+if __name__ == "__main__":
+  try:
+    main()
+  except (KeyboardInterrupt, EOFError):
+    print("\nAborting ... Keyboard Interrupt.")
+    sys.exit(1)
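
The REPLACE_WITH_/DELETE_OLD markers used by update-configs work as follows: a template value of
REPLACE_WITH_<old-name> pulls the value of <old-name> out of the current site config (an empty
suffix means "same key name"), DELETE_OLD drops the key, any other template value is taken
literally, and untouched site properties are carried over. A minimal standalone sketch of those
semantics (the helper name apply_template is hypothetical; the real logic lives in
update_config_using_existing_properties above):

    REPLACE_WITH_TAG = "REPLACE_WITH_"
    DELETE_OLD_TAG = "DELETE_OLD"

    def apply_template(template, site):
        # Sketch: resolve REPLACE_WITH_/DELETE_OLD markers against the current site config.
        result = {}
        consumed = set()
        for key, value in template.items():
            if value == DELETE_OLD_TAG:
                consumed.add(key)  # drop the old property entirely
            elif value.startswith(REPLACE_WITH_TAG):
                old = value[len(REPLACE_WITH_TAG):] or key  # empty suffix: look up the same name
                consumed.add(old)
                result[key] = site.get(old, "")
            else:
                result[key] = value  # literal template value wins
        for key, value in site.items():
            if key not in consumed and key not in result:
                result[key] = value  # carry over untouched site properties
        return result

    # Mirrors the expectations in TestUpgradeScript_HDP2.test_update_with_append:
    site = {"a1": "va1", "b1": "vb1", "d1": "vd1"}
    template = {"a1": "REPLACE_WITH_", "nb1": "REPLACE_WITH_b1", "d1": "DELETE_OLD"}
    print(apply_template(template, site))  # {'a1': 'va1', 'nb1': 'vb1'}

A typical invocation of the script itself (host and cluster values hypothetical):

    python UpgradeHelper_HDP2.py --hostname ambari.example.com --user admin \
        --password admin --clustername c1 update-configs hdfs-site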

+ 0 - 1183
ambari-server/src/main/python/upgradeHelper.py

@@ -1,1183 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-
-import getpass
-import optparse
-from pprint import pprint
-import re
-import sys
-import datetime
-import os.path
-import logging
-import shutil
-import json
-import subprocess
-import time
-
-
-# ==============================
-#    Error classes definition
-# ==============================
-class FatalException(Exception):
-  def __init__(self, code, reason):
-    self.code = code
-    self.reason = reason
-
-  def __str__(self):
-    return repr("Fatal exception: %s, exit code %s" % (self.reason, self.code))
-
-  def _get_message(self):
-    return str(self)
-
-
-class ReadOnlyPropertyException(Exception):
-  def __str__(self):
-    return "Property is read-only"
-
-  def _get_message(self):
-    return self.__str__()
-
-class NotSupportedCatalogVersion(Exception):
-  def __init__(self, catalog_version):
-    self._version = catalog_version
-
-  def __str__(self):
-    return "Version %s of loaded catalog not supported" % self._version
-
-  def _get_message(self):
-    return self.__str__()
-
-  message = property(__str__)
-
-
-# ==============================
-#    Constant class definition
-# ==============================
-class Const(object):
-  def __new__(cls, *args, **kwargs):
-    raise Exception("Class couldn't be created")
-
-
-class CatConst(Const):
-  VERSION_TAG = "version"
-  STACK_VERSION_OLD = "old-version"
-  STACK_VERSION_TARGET = "target-version"
-  STACK_STAGS_TAG = "stacks"
-  STACK_NAME = "name"
-  CONFIG_OPTIONS = "options"
-  CONFIG_TYPES = "config-types"
-  STACK_PROPERTIES = "properties"
-  PROPERTY_VALUE_TAG = "value"
-  PROPERTY_REMOVE_TAG = "remove"
-  MERGED_COPY_TAG = "merged-copy"
-  ITEMS_TAG = "items"
-  TYPE_TAG = "type"
-  TRUE_TAG = "yes"
-  STACK_PROPERTIES_MAPPING_LIST_TAG = "property-mapping"
-  VALUE_TEMPLATE_TAG = "template"
-  SEARCH_PATTERN = "(\{[^\{\}]+\})"  # {XXXXX}
-
-
-class Options(Const):
-  # action commands
-  API_PROTOCOL = "http"
-  API_PORT = "8080"
-
-  GET_MR_MAPPING_ACTION = "save-mr-mapping"
-  VERIFY_ACTION = "verify"
-  DELETE_MR_ACTION = "delete-mr"
-  ADD_YARN_MR2_ACTION = "add-yarn-mr2"
-  MODIFY_CONFIG_ACTION = "update-configs"
-  BACKUP_CONFIG_ACTION = "backup-configs"
-  INSTALL_YARN_MR2_ACTION = "install-yarn-mr2"
-
-  MR_MAPPING_FILE = "mr_mapping"
-  CAPACITY_SCHEDULER_TAG = "capacity-scheduler"
-  REPLACE_JH_HOST_NAME_TAG = "REPLACE_JH_HOST"
-  REPLACE_RM_HOST_NAME_TAG = "REPLACE_RM_HOST"
-  REPLACE_WITH_TAG = "REPLACE_WITH_"
-  DELETE_OLD_TAG = "DELETE_OLD"
-
-  ZOOKEEPER_SERVER = "ZOOKEEPER_SERVER"
-
-  MR_MAPPING = None
-  logger = None
-
-  # Api constants
-  ROOT_URL = None
-  CLUSTER_URL = None
-  COMPONENTS_FORMAT = None
-
-  # Curl options
-  POST_REQUESTS = ['PUT', 'POST']
-  GET_REQUESTS = ['GET', 'DELETE']
-  CURL_PRINT_ONLY = None
-
-  ARGS = None
-  OPTIONS = None
-  HOST = None
-  CLUSTER_NAME = None
-
-  # for verify action
-  REPORT_FILE = None
-
-  API_TOKENS = {
-    "user": None,
-    "pass": None
-  }
-
-  HEADERS = {
-    'X-Requested-By': 'upgradeHelper'
-  }
-
-  @classmethod
-  def initialize(cls):
-    cls.ROOT_URL = '%s://%s:%s/api/v1' % (cls.API_PROTOCOL, cls.HOST, cls.API_PORT)
-    cls.CLUSTER_URL = cls.ROOT_URL + "/clusters/%s" % cls.CLUSTER_NAME
-    cls.COMPONENTS_FORMAT = cls.CLUSTER_URL + "/components/{0}"
-
-  @classmethod
-  def initialize_logger(cls, filename=None):
-    cls.logger = logging.getLogger('UpgradeHelper')
-    cls.logger.setLevel(logging.DEBUG)
-
-    if filename is not None:
-      handler = logging.FileHandler(filename)
-      handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
-      cls.logger.addHandler(handler)
-      cls.logger.info("")
-      cls.logger.info("Start new logging section")
-
-    handler = logging.StreamHandler(sys.stdout)
-    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-    cls.logger.addHandler(handler)
-
-
-# ==============================
-#    Catalog classes definition
-# ==============================
-class UpgradeCatalogFarm(object):
-
-  # catalog versions that are currently supported
-  _supported_catalog_versions = ["1.0"]
-
-  # private variables
-  _json_catalog = None
-
-  def __init__(self, path):
-    self._load(path)
-
-  def _load(self, path):
-    f = None
-    try:
-      f = open(path, 'r')
-      json_string = f.read()
-      self._json_catalog = json.loads(json_string)
-      self._parse_upgrade_catalog()
-    except IOError as e:
-      raise FatalException(e.errno, "Couldn't open upgrade catalog file %s: %s" % (path, e.strerror))
-    except NotSupportedCatalogVersion as e:
-      raise FatalException(1, e.message)
-    except ValueError as e:
-      raise FatalException(1, "Malformed upgrade catalog: %s" % e.message)
-    finally:
-      try:
-        if f is not None:
-          f.close()
-      except IOError as e:
-        pass
-
-  def _parse_upgrade_catalog(self):
-    catalog_version = None
-    if CatConst.VERSION_TAG in self._json_catalog:
-      catalog_version = self._json_catalog[CatConst.VERSION_TAG]
-
-    if catalog_version is None or catalog_version not in self._supported_catalog_versions:
-      raise NotSupportedCatalogVersion(str(catalog_version))
-
-  def get_catalog(self, from_version=None, to_version=None):
-    search_version = {
-      CatConst.STACK_VERSION_OLD: from_version,
-      CatConst.STACK_VERSION_TARGET: to_version
-    }
-
-    for stack in self._json_catalog[CatConst.STACK_STAGS_TAG]:
-      version = {
-        CatConst.STACK_VERSION_OLD: stack[CatConst.STACK_VERSION_OLD],
-        CatConst.STACK_VERSION_TARGET: stack[CatConst.STACK_VERSION_TARGET]
-      }
-      if version == search_version:
-        return UpgradeCatalog(catalog=stack, version=version)
-
-    return None
-
-
-class UpgradeCatalog(object):
-
-  # private variables
-  _json_catalog = None
-  _properties_catalog = None
-  _properties_map_catalog = None
-  _version = None
-  _search_pattern = None
-
-  """
-   Substitute handler, should return replaced value, as param would be passed value and tokens to substitute
-   Please, be aware! Token should be unique in context of one catalog
-
-   Example:
-    def _substitute(tokens, value):
-      for token in tokens:
-        if token == "{REPLACE_ME}":
-          value = value.replace(token, "\"hello world\"")
-      return value
-
-    catalog.set_substitution_handler = _substitute
-
-    After that, all properties with CatConst.VALUE_TEMPLATE_TAG  set to "yes" would be processed
-  """
-  _substitution_handler = None
-
-  # public variable
-  config_groups = None
-
-  def __init__(self, catalog=None, version=None, substitution_handler=None):
-    self._json_catalog = catalog
-    self._version = version
-    self._search_pattern = re.compile(CatConst.SEARCH_PATTERN)
-
-    if CatConst.STACK_PROPERTIES in catalog:
-      self._properties_catalog = catalog[CatConst.STACK_PROPERTIES]
-
-    if CatConst.STACK_PROPERTIES_MAPPING_LIST_TAG in catalog:
-      self._properties_map_catalog = catalog[CatConst.STACK_PROPERTIES_MAPPING_LIST_TAG]
-
-    if catalog is not None and CatConst.CONFIG_OPTIONS in catalog \
-                           and CatConst.CONFIG_TYPES in catalog[CatConst.CONFIG_OPTIONS]:
-
-      self.config_groups = ConfigConst(catalog[CatConst.CONFIG_OPTIONS][CatConst.CONFIG_TYPES],
-                                       properties_catalog=self._properties_catalog)
-
-    if substitution_handler is not None:
-      self.set_substitution_handler(substitution_handler)
-
-  # deprecated, used for compatibility with old code
-  def get_properties_as_dict(self, properties):
-    target_dict = {}
-    for key in properties:
-      if CatConst.PROPERTY_VALUE_TAG in properties[key] and CatConst.PROPERTY_REMOVE_TAG not in properties[key]:
-        target_dict[key] = properties[key][CatConst.PROPERTY_VALUE_TAG]
-
-    return target_dict
-
-  def set_substitution_handler(self, handler):
-    self._substitution_handler = handler
-
-  def _get_version(self):
-    return "%s-%s" % (self._version[CatConst.STACK_VERSION_OLD], self._version[CatConst.STACK_VERSION_TARGET])
-
-  def get_parsed_version(self):
-    """
-     Get a numeric representation of the version for comparison purposes
-
-     Example:
-       1.3-2.1 will be represented as { from: 13, to: 21 }
-
-    :return: Numeric version
-    """
-    v_from = self._version[CatConst.STACK_VERSION_OLD].split(".")
-    v_to = self._version[CatConst.STACK_VERSION_TARGET].split(".")
-    try:
-      v_from = int(v_from[0]) * 10 + int(v_from[1])
-      v_to = int(v_to[0]) * 10 + int(v_to[1])
-    except ValueError:
-      v_from = 0
-      v_to = 0
-
-    version = {
-      "from": v_from,
-      "to": v_to
-    }
-
-    return version
-
-  def _get_name(self):
-    if CatConst.STACK_NAME in self._json_catalog:
-      return self._json_catalog[CatConst.STACK_NAME]
-    return ""
-
-  def _get_property_mapping(self):
-    return self._properties_map_catalog
-
-  def get_properties(self, config_group):
-    if config_group in self._properties_catalog:
-      return self._filter_properties(config_group)
-    return None
-
-  def _filter_properties(self, config_group):
-    def _property_filter_strings(value):
-      if not isinstance(value, dict):
-        return {CatConst.PROPERTY_VALUE_TAG: value}
-      else:
-        if self._substitution_handler is not None and CatConst.VALUE_TEMPLATE_TAG in value \
-          and value[CatConst.VALUE_TEMPLATE_TAG] == CatConst.TRUE_TAG:  # value contains a template
-
-          parsed_value = self._substitution_handler(
-            self._search_pattern.findall(value[CatConst.PROPERTY_VALUE_TAG]), value[CatConst.PROPERTY_VALUE_TAG]
-          )
-          if parsed_value is not None:  # Check if target function returns result
-            value[CatConst.PROPERTY_VALUE_TAG] = parsed_value
-
-      return value
-    properties = self._properties_catalog[config_group].copy()  # pass to process only copy of data
-    properties = dict(zip(properties, map(_property_filter_strings, properties.values())))
-    return properties
-
-  version = property(_get_version)
-  name = property(_get_name)
-  property_map_catalog = property(_get_property_mapping)
-
-
-class ConfigConst(object):
-  _config_types_const_definition = {}
-  _config_types_value_definition = {}
-
-  def __init__(self, config_types_definition, properties_catalog=None):
-    if properties_catalog is not None:  # add config groups that appear in the property definitions but are missing from options
-      for item in properties_catalog:
-        if item not in config_types_definition:
-          config_types_definition[item] = {}
-
-    self._config_types_value_definition = config_types_definition
-    for key in config_types_definition:
-      self._config_types_const_definition[key.replace("-", "_").lower()] = key
-
-  def list(self):
-    return self._config_types_value_definition.keys()
-
-  def get(self, name):
-    if name in self._config_types_value_definition:
-      return self._config_types_value_definition[name]
-    raise Exception("No config group with name %s found" % name)
-
-  def __getattr__(self, item):
-    """
-    Support for constant handling like "<name>_tag", which returns the real config name.
-    The base list is loaded from the options/config-types section of the JSON catalog.
-
-    Example:
-      self.hbase_env_tag will return hbase-env
-
-    :param item: accessed attribute
-    :return: attribute value if exists or None
-    """
-    item = item.lower()
-    if "_tag" in item and item[:-4] in self._config_types_const_definition:
-      return self._config_types_const_definition[item[:-4]]
-
-
-# Back up a file by copying it to file.<timestamp> and removing the original
-def backup_file(filePath):
-  if filePath is not None and os.path.exists(filePath):
-    timestamp = datetime.datetime.now()
-    format = '%Y%m%d%H%M%S'
-    try:
-      shutil.copyfile(filePath, filePath + "." + timestamp.strftime(format))
-      os.remove(filePath)
-    except Exception as e:
-      Options.logger.warn('Could not backup file "%s": %s' % (filePath, str(e)))
-  return 0
-
-
-def write_mapping(hostmapping):
-  if os.path.isfile(Options.MR_MAPPING_FILE):
-    os.remove(Options.MR_MAPPING_FILE)
-  json.dump(hostmapping, open(Options.MR_MAPPING_FILE, 'w'))
-
-
-def write_config(config, cfg_type, tag):
-  file_name = cfg_type + "_" + tag
-  if os.path.isfile(file_name):
-    os.remove(file_name)
-  json.dump(config, open(file_name, 'w'))
-
-
-def read_mapping():
-  if os.path.isfile(Options.MR_MAPPING_FILE):
-    if Options.MR_MAPPING is not None:
-      return Options.MR_MAPPING
-    else:
-      Options.MR_MAPPING = json.load(open(Options.MR_MAPPING_FILE))
-      return Options.MR_MAPPING
-  else:
-    raise FatalException(-1, "MAPREDUCE host mapping file, mr_mapping, is not available or badly formatted. Execute "
-                             "action save-mr-mapping. Ensure the file is present in the directory where you are "
-                             "executing this command.")
-
-
-def get_mr1_mapping():
-  components = ["MAPREDUCE_CLIENT", "JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-  GET_URL_FORMAT = Options.CLUSTER_URL + '/services/MAPREDUCE/components/%s'
-  hostmapping = {}
-  for component in components:
-    hostlist = []
-    structured_resp = curl(GET_URL_FORMAT % component, parse=True, validate=True, validate_expect_body=True)
-
-    if 'host_components' in structured_resp:
-      for hostcomponent in structured_resp['host_components']:
-        if 'HostRoles' in hostcomponent:
-          if 'host_name' in hostcomponent['HostRoles']:
-            hostlist.append(hostcomponent['HostRoles']['host_name'])
-
-    hostmapping[component] = hostlist
-  write_mapping(hostmapping)
-
-  pprint("File mr_mapping contains the host mapping for mapreduce components. This file is critical for later "
-         "steps.")
-
-
-def get_YN_input(prompt, default):
-  yes = set(['yes', 'ye', 'y'])
-  no = set(['no', 'n'])
-  return get_choice_string_input(prompt, default, yes, no)
-
-
-def get_choice_string_input(prompt, default, firstChoice, secondChoice):
-  choice = raw_input(prompt).lower()
-  if choice in firstChoice:
-    return True
-  elif choice in secondChoice:
-    return False
-  elif choice == "":  # Just enter pressed
-    return default
-  else:
-    print "input not recognized, please try again: "
-    return get_choice_string_input(prompt, default, firstChoice, secondChoice)
-
-
-def delete_mr():
-  saved_mr_mapping = get_YN_input("Have you saved MR host mapping using action save-mr-mapping [y/n] (n)? ", False)
-  if not saved_mr_mapping:
-    raise FatalException(1, "Ensure MAPREDUCE host component mapping is saved before deleting it. Use action "
-                            "save-mr-mapping.")
-
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/MAPREDUCE'
-  COMPONENT_URL_FORMAT = Options.CLUSTER_URL + '/hosts/%s/host_components/%s'
-  NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-  PUT_IN_DISABLED = {
-    "HostRoles": {
-      "state": "DISABLED"
-    }
-  }
-
-  hostmapping = read_mapping()
-
-  for key, value in hostmapping.items():
-    if (key in NON_CLIENTS) and (len(value) > 0):
-      for host in value:
-        curl(COMPONENT_URL_FORMAT % (host, key), request_type="PUT", data=PUT_IN_DISABLED,
-             validate=True, validate_expect_body=False)
-
-  curl(SERVICE_URL_FORMAT, request_type="DELETE", validate=True, validate_expect_body=False)
-
-
-def get_cluster_stackname():
-  VERSION_URL_FORMAT = Options.CLUSTER_URL + '?fields=Clusters/version'
-
-  structured_resp = curl(VERSION_URL_FORMAT, simulate=False, validate=True, validate_expect_body=True, parse=True)
-
-  if 'Clusters' in structured_resp:
-    if 'version' in structured_resp['Clusters']:
-      return structured_resp['Clusters']['version']
-
-  raise FatalException(-1, "Unable to get the cluster version")
-
-
-def has_component_in_stack_def(stack_name, service_name, component_name):
-  STACK_COMPONENT_URL_FORMAT = Options.ROOT_URL + '/stacks2/{0}/versions/{1}/stackServices/{2}/serviceComponents/{3}'
-  stack, stack_version = stack_name.split('-')
-
-  try:
-    curl(STACK_COMPONENT_URL_FORMAT.format(stack,stack_version, service_name, component_name),
-          validate=True, validate_expect_body=True, simulate=False)
-    return True
-  except FatalException:
-    return False
-
-
-def add_services():
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/{0}'
-  COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{1}'
-  HOST_COMPONENT_URL_FORMAT = Options.CLUSTER_URL + '/hosts/{0}/host_components/{1}'
-  service_comp = {
-    "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
-    "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
-  new_old_host_map = {
-    "NODEMANAGER": "TASKTRACKER",
-    "HISTORYSERVER": "HISTORYSERVER",
-    "RESOURCEMANAGER": "JOBTRACKER",
-    "YARN_CLIENT": "MAPREDUCE_CLIENT",
-    "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
-
-  stack_name = get_cluster_stackname()
-  stack_has_ats = has_component_in_stack_def(stack_name, "YARN", "APP_TIMELINE_SERVER")
-
-  # if upgrading to stack > 2.1 (which has ats)
-  if stack_has_ats:
-    service_comp["YARN"].append("APP_TIMELINE_SERVER")
-    new_old_host_map["APP_TIMELINE_SERVER"] = "JOBTRACKER"
-
-  hostmapping = read_mapping()
-
-  for service in service_comp.keys():
-    curl(SERVICE_URL_FORMAT.format(service), validate=True, validate_expect_body=False, request_type="POST")
-
-    for component in service_comp[service]:
-      curl(COMPONENT_URL_FORMAT.format(service, component),
-           validate=True, validate_expect_body=False, request_type="POST")
-
-      for host in hostmapping[new_old_host_map[component]]:
-        curl(HOST_COMPONENT_URL_FORMAT.format(host, component),
-             validate=True, validate_expect_body=False, request_type="POST")
-
-
-def update_config(properties, config_type):
-  tag = "version" + str(int(time.time() * 1000))
-  properties_payload = {"Clusters": {"desired_config": {"type": config_type, "tag": tag, "properties": properties}}}
-  expect_body = config_type != "cluster-env"  # ToDo: make exceptions more flexible
-
-  curl(Options.CLUSTER_URL, request_type="PUT", data=properties_payload, validate=True,
-       validate_expect_body=expect_body)
-
-
-def get_zookeeper_quorum():
-  zoo_cfg = curl(Options.COMPONENTS_FORMAT.format(Options.ZOOKEEPER_SERVER), validate=False, simulate=False, parse=True)
-  zoo_quorum = []
-  zoo_def_port = "2181"
-  if "host_components" in zoo_cfg:
-    for item in zoo_cfg["host_components"]:
-      zoo_quorum.append("%s:%s" % (item["HostRoles"]["host_name"], zoo_def_port))
-
-  return ",".join(zoo_quorum)
-
-
-def get_config(cfg_type):
-  tag, structured_resp = get_config_resp(cfg_type)
-  properties = None
-  if 'items' in structured_resp:
-    for item in structured_resp['items']:
-      if (tag == item['tag']) or (cfg_type == item['type']):
-        properties = item['properties']
-  if properties is None:
-    raise FatalException(-1, "Unable to read configuration for type " + cfg_type + " and tag " + tag)
-
-  return properties
-
-
-def parse_config_resp(resp):
-  parsed_configs = []
-  if CatConst.ITEMS_TAG in resp:
-    for config_item in resp[CatConst.ITEMS_TAG]:
-      parsed_configs.append({
-        "type": config_item[CatConst.TYPE_TAG],
-        "properties": config_item[CatConst.STACK_PROPERTIES]
-      })
-  return parsed_configs
-
-
-def get_config_resp(cfg_type, error_if_na=True, parsed=False, tag=None):
-  CONFIG_URL_FORMAT = Options.CLUSTER_URL + '/configurations?type={0}&tag={1}'
-
-  # Read the config version
-  if tag is None:
-    structured_resp = curl(Options.CLUSTER_URL, validate=True, validate_expect_body=True, parse=True, simulate=False)
-
-    if 'Clusters' in structured_resp:
-      if 'desired_configs' in structured_resp['Clusters']:
-        if cfg_type in structured_resp['Clusters']['desired_configs']:
-          tag = structured_resp['Clusters']['desired_configs'][cfg_type]['tag']
-
-  if tag is not None:
-    # Get the config with the tag and return properties
-    structured_resp = curl(CONFIG_URL_FORMAT.format(cfg_type, tag), parse=True, simulate=False,
-                           validate=True, validate_expect_body=True)
-    if parsed:
-      return tag, parse_config_resp(structured_resp)
-    else:
-      return tag, structured_resp
-  else:
-    if error_if_na:
-      raise FatalException(-1, "Unable to get the current version for config type " + cfg_type)
-    else:
-      return tag, None
-
-
-def get_config_resp_all():
-  desired_configs = {}
-  CONFIG_ALL_PROPERTIES_URL = Options.CLUSTER_URL  + "/configurations?fields=properties"
-  desired_configs_resp = curl(Options.CLUSTER_URL, validate=True, validate_expect_body=True, parse=True, simulate=False)
-  all_options = curl(CONFIG_ALL_PROPERTIES_URL, validate=True, validate_expect_body=True, parse=True, simulate=False)
-
-  if 'Clusters' in desired_configs_resp:
-    if 'desired_configs' in desired_configs_resp['Clusters']:
-      desired_configs_resp = desired_configs_resp['Clusters']['desired_configs']
-    else:
-      return None
-  else:
-    return None
-
-  if CatConst.ITEMS_TAG in all_options:
-    all_options = all_options["items"]
-  else:
-    return None
-
-  all_options = filter(
-    lambda x: x["type"] in desired_configs_resp and x["tag"] == desired_configs_resp[x["type"]]["tag"],
-    all_options)
-
-  for item in all_options:
-    desired_configs[item["type"]] = item["properties"]
-
-  return desired_configs
-
-
-def modify_config_item(config_type, catalog):
-  #  here should be declared tokens for pattern replace
-  if catalog.get_parsed_version()["from"] == 13:  # ToDo: introduce class for pre-defined tokens
-    hostmapping = read_mapping()
-    jt_host = hostmapping["JOBTRACKER"][0]
-    jh_host = hostmapping["HISTORYSERVER"][0]
-  else:
-    jt_host = ""
-    jh_host = ""
-
-  def _substitute(tokens, value):
-    for token in tokens:
-      if token == "{JOBHISTORY_HOST}":
-        value = value.replace(token, jh_host)
-      elif token == "{RESOURCEMANAGER_HOST}":
-        value = value.replace(token, jt_host)
-      elif token == "{ZOOKEEPER_QUORUM}":
-        value = value.replace(token, get_zookeeper_quorum())
-    return value
-  # Register the substitution handler used to expand template values
-  catalog.set_substitution_handler(_substitute)
-
-  try:
-    properties_latest = rename_all_properties(get_config(config_type), catalog.property_map_catalog)
-  except Exception as e:
-    properties_latest = {}
-
-  properties_copy = catalog.get_properties(config_type)
-  is_merged_copy = CatConst.MERGED_COPY_TAG in catalog.config_groups.get(config_type) \
-   and catalog.config_groups.get(config_type)[CatConst.MERGED_COPY_TAG] == CatConst.TRUE_TAG
-
-  # ToDo: implement property transfer from one catalog to other
-  #   properties_to_move = [
-  #     "dfs.namenode.checkpoint.edits.dir",
-  #     "dfs.namenode.checkpoint.dir",
-  #     "dfs.namenode.checkpoint.period"]
-
-  if is_merged_copy:  # Append configs to existing ones
-    tag, structured_resp = get_config_resp(config_type, False)
-    if structured_resp is not None:
-      update_config_using_existing_properties(config_type, properties_copy, properties_latest, catalog)
-  else:  # Rewrite/create config items
-    update_config(catalog.get_properties_as_dict(properties_copy), config_type)
-
-
-def modify_configs():
-  if len(Options.ARGS) > 1:
-    config_type = Options.ARGS[1]
-  else:
-    config_type = None
-
-  catalog_farm = UpgradeCatalogFarm(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
-  catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack, Options.OPTIONS.to_stack)  # get desired version of catalog
-
-  if catalog is None:
-    raise FatalException(1, "Upgrade catalog for version %s-%s not found, no configs were modified"
-                         % (Options.OPTIONS.from_stack, Options.OPTIONS.to_stack))
-
-  if config_type is not None and config_type not in catalog.config_groups.list():
-    raise FatalException("Config type %s not exists, no configs was modified" % config_type)
-
-  if config_type is not None:
-    modify_config_item(config_type, catalog)
-  else:
-    for collection_name in catalog.config_groups.list():
-      modify_config_item(collection_name, catalog)
-
-
-def rename_all_properties(properties, name_mapping):
-  for key, val in name_mapping.items():
-    if (key in properties.keys()) and (val not in properties.keys()):
-      properties[val] = properties[key]
-      del properties[key]
-  return properties
-
-
-def update_config_using_existing(conf_type, properties_template, catalog):
-  site_properties = get_config(conf_type)
-  update_config_using_existing_properties(conf_type, properties_template, site_properties, catalog)
-
-
-# properties template - passed as dict from UpgradeCatalog
-def update_config_using_existing_properties(conf_type, properties_template,
-                                            site_properties, catalog):
-  keys_processed = []
-  keys_to_delete = []
-  properties_parsed = catalog.get_properties_as_dict(properties_template)
-
-  for key in properties_template.keys():
-    keys_processed.append(key)
-    if CatConst.PROPERTY_REMOVE_TAG in properties_template[key] and properties_template[key][CatConst.PROPERTY_REMOVE_TAG] == CatConst.TRUE_TAG:
-      keys_to_delete.append(key)
-
-  for key in site_properties.keys():
-    if key not in keys_processed:
-      properties_parsed[key] = site_properties[key]
-
-  for key in keys_to_delete:
-    del properties_parsed[key]
-
-  update_config(properties_parsed, conf_type)
-
-
-def backup_configs(conf_type=None):
-  DESIRED_CONFIGS_URL = Options.CLUSTER_URL + "?fields=Clusters/desired_configs"
-
-  desired_configs = curl(DESIRED_CONFIGS_URL, validate=True, validate_expect_body=True, parse=True, simulate=False)
-
-  if "Clusters" in desired_configs and "desired_configs" in desired_configs["Clusters"]:
-    for conf_type in desired_configs["Clusters"]["desired_configs"].keys():
-      backup_single_config_type(conf_type, True)
-
-
-def backup_single_config_type(conf_type, error_if_na=True):
-  tag, response = get_config_resp(conf_type, error_if_na)
-  if response is not None:
-    Options.logger.info("Saving config for type: " + conf_type + " and tag: " + tag)
-    write_config(response, conf_type, tag)
-  else:
-    Options.logger.info("Unable to obtain config for type: " + conf_type)
-
-
-def install_services():
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/{0}'
-  SERVICES = ["MAPREDUCE2", "YARN"]
-  PUT_IN_INSTALLED = [
-    {
-      "RequestInfo": {
-        "context": "Install MapReduce2"
-      },
-      "Body": {
-        "ServiceInfo": {
-          "state": "INSTALLED"
-        }
-      }
-    },
-    {
-      "RequestInfo": {
-        "context": "Install YARN"
-      },
-      "Body": {
-        "ServiceInfo": {
-          "state": "INSTALLED"
-        }
-      }
-    }
-  ]
-
-  err_retcode = 0
-  err_message = ""
-  for index in [0, 1]:
-    try:
-      curl(SERVICE_URL_FORMAT.format(SERVICES[index]), validate=True,
-           validate_expect_body=not Options.OPTIONS.printonly, request_type="PUT", data=PUT_IN_INSTALLED[index])
-    except FatalException as e:
-      if not e.code == 0:
-        err_retcode = e.code
-        err_message = err_message + " Error while installing " + SERVICES[index] + ". Details: " + e.message + "."
-
-  if err_retcode != 0:
-    raise FatalException(err_retcode, err_message + "(Services may already be installed or agents are not yet started.)")
-
-  Options.OPTIONS.exit_message = "Requests have been submitted to install YARN and MAPREDUCE2. Use Ambari Web to monitor " \
-                         "the status of the install requests."
-
-
-def validate_response(response, expect_body):
-  if expect_body:
-    if "\"href\" : \"" not in response:
-      return 1, response
-    else:
-      return 0, ""
-  elif len(response) > 0:
-    return 1, response
-  else:
-    return 0, ""
-
-
-def curl(url, tokens=None, headers=None, request_type="GET", data=None, parse=False,
-         simulate=None, validate=False, validate_expect_body=False):
-
-  simulate_only = Options.CURL_PRINT_ONLY is not None or (simulate is not None and simulate is True)
-  print_url = Options.CURL_PRINT_ONLY is not None and simulate is not None
-
-  curl_path = '/usr/bin/curl'
-  curl_list = [curl_path]
-
-  curl_list.append('-X')
-  curl_list.append(request_type)
-
-  if tokens is not None:
-    curl_list.append('-u')
-    curl_list.append("%s:%s" % (tokens["user"], tokens["pass"]))
-  elif Options.API_TOKENS is not None:
-    curl_list.append('-u')
-    curl_list.append("%s:%s" % (Options.API_TOKENS["user"], Options.API_TOKENS["pass"]))
-
-  if request_type in Options.POST_REQUESTS:
-    curl_list.append(url)
-
-  if headers is None and Options.HEADERS is not None:
-    headers = Options.HEADERS
-
-  if headers is not None:
-    for header in headers:
-      curl_list.append('-H')
-      curl_list.append("%s: %s" % (header, headers[header]))
-
-  if data is not None and request_type in Options.POST_REQUESTS:
-    curl_list.append('--data')
-    curl_list.append(json.dumps(data))
-
-  if request_type in Options.GET_REQUESTS:
-    curl_list.append(url)
-
-  if print_url:
-    Options.logger.info(" ".join(curl_list))
-
-  if not simulate_only:
-    osStat = subprocess.Popen(
-      curl_list,
-      stderr=subprocess.PIPE,
-      stdout=subprocess.PIPE)
-    out, err = osStat.communicate()
-    if 0 != osStat.returncode:
-      error = "curl call failed. out: " + out + " err: " + err
-      Options.logger.error(error)
-      raise FatalException(osStat.returncode, error)
-  else:
-    if not print_url:
-      Options.logger.info(" ".join(curl_list))
-    out = "{}"
-
-  if validate and not simulate_only:
-    retcode, errdata = validate_response(out, validate_expect_body)
-    if not retcode == 0:
-      raise FatalException(retcode, errdata)
-
-  if parse:
-    return json.loads(out)
-  else:
-    return out
-
-
-def configuration_item_diff(collection_name, catalog, actual_properties_list):
-  """
-  Merge catalog item with actual config item on the server
-  Diff item response:
-   {
-     "property" : name,
-     "catalog_item": value,
-     "catalog_value": value,
-     "actual_value": value
-   }
-  :param collection_name:
-  :param catalog:
-  :param actual_properties_list:
-  :return:
-  """
-
-  verified_catalog = []
-  catalog_properties = catalog.get_properties(collection_name)
-  actual_properties = None
-
-  if collection_name in actual_properties_list:
-    actual_properties = actual_properties_list[collection_name]
-
-  if actual_properties is None:
-    verified_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x],
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG],
-      "actual_value": None
-    }, catalog_properties.keys())
-  else:
-    # build list of properties according to actual properties
-    verified_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x] if x in catalog_properties else None,
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG] if x in catalog_properties else None,
-      "actual_value": actual_properties[x]
-    }, actual_properties.keys())
-
-    # build list of properties according to catalog properties
-    verified_catalog_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x],
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG] if CatConst.PROPERTY_VALUE_TAG in catalog_properties[x] else None,
-      "actual_value": actual_properties[x] if x in actual_properties else None,
-    }, catalog_properties.keys())
-
-    # append properties which are listed in the catalog but not present in the actual configuration
-    verified_catalog += filter(lambda x: x["property"] not in actual_properties, verified_catalog_catalog)
-
-  return verified_catalog
-
-
-def configuration_diff_analyze(diff_list):
-  report = {}
-  for item_key in diff_list.keys():
-    property_diff_list = diff_list[item_key]
-    item_stat = {
-      "skipped": {"count": 0, "items": []},
-      "ok": {"count": 0, "items": []},
-      "fail": {"count": 0, "items": []},
-      "total": {"count": len(property_diff_list), "items": []}
-    }
-
-    def push_status(status, _property_item):
-      item_stat[status]["count"] += 1
-      item_stat[status]["items"].append(_property_item)
-
-    for property_item in property_diff_list:
-      # process properties which can be absent
-
-      # item was removed, from actual configs according to catalog instructions
-      if property_item["actual_value"] is None and property_item["catalog_value"] is None \
-        and CatConst.PROPERTY_REMOVE_TAG in property_item["catalog_item"] \
-        and property_item["catalog_item"][CatConst.PROPERTY_REMOVE_TAG] == CatConst.TRUE_TAG:
-
-        push_status("ok", property_item)
-
-      # currently skip values with the template tag, as there is no filter implemented
-      # ToDo: implement the possibility to filter values without a filter handler;
-      # ToDo: currently filtering is possible only at the update-configs stage
-      elif property_item["actual_value"] is not None and property_item["catalog_value"] is not None \
-        and CatConst.VALUE_TEMPLATE_TAG in property_item["catalog_item"] \
-        and property_item["catalog_item"][CatConst.VALUE_TEMPLATE_TAG] == CatConst.TRUE_TAG:
-
-        push_status("skipped", property_item)
-
-      # item not present in actual config, but present in catalog and no remove tag is present
-      elif property_item["actual_value"] is None and property_item["catalog_value"] is not None:
-        push_status("fail", property_item)
-
-      # property exists in actual configuration, but not described in catalog configuration
-      elif property_item["actual_value"] is not None and property_item["catalog_value"] is None:
-        push_status("skipped", property_item)
-
-      # actual and catalog properties are equal
-      elif property_item["catalog_value"] == property_item["actual_value"]:
-        push_status("ok", property_item)
-      elif property_item["catalog_value"] != property_item["actual_value"]:
-        push_status("fail", property_item)
-
-    report[item_key] = item_stat
-  return report
-
-
-def verify_configuration():
-  diff_list = {}
-
-  if len(Options.ARGS) > 1:
-    config_type = Options.ARGS[1]
-  else:
-    config_type = None
-
-  catalog_farm = UpgradeCatalogFarm(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
-  catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack, Options.OPTIONS.to_stack)  # get desired version of catalog
-
-  if catalog is None:
-    raise FatalException(1, "Upgrade catalog for version %s-%s not found"
-                         % (Options.OPTIONS.from_stack, Options.OPTIONS.to_stack))
-
-  if config_type is not None and config_type not in catalog.config_groups.list():
-    raise FatalException("Config type %s not exists" % config_type)
-
-  # fetch all options from the server at once and keep only the desired versions
-  actual_options = get_config_resp_all()
-
-  if config_type is not None:
-    diff_list[config_type] = configuration_item_diff(config_type, catalog, actual_options)
-  else:
-    for collection_name in catalog.config_groups.list():
-      diff_list[collection_name] = configuration_item_diff(collection_name, catalog, actual_options)
-
-  analyzed_list = configuration_diff_analyze(diff_list)
-
-  report_file = None
-  if Options.REPORT_FILE is not None:
-    try:
-      report_file = open(Options.REPORT_FILE, "w")
-    except IOError as e:
-      Options.logger.error("Report file open error: %s" % e.message)
-
-  for config_item in analyzed_list:
-    if analyzed_list[config_item]["fail"]["count"] != 0:
-      Options.logger.info(
-        "%s: %s missing configuration(s) - please look in the output file for the missing params" % (
-         config_item, analyzed_list[config_item]["fail"]["count"]
-        )
-      )
-      if report_file is not None:
-        report_formatter(report_file, config_item, analyzed_list[config_item])
-    else:
-      Options.logger.info("%s: verified" % config_item)
-
-  if report_file is not None:
-    try:
-      report_file.close()
-    except IOError as e:
-      Options.logger.error("Report file close error: %s" % e.message)
-
-
-def report_formatter(report_file, config_item, analyzed_list_item):
-  prefix = "Configuration item %s" % config_item
-  if analyzed_list_item["fail"]["count"] > 0:
-    for item in analyzed_list_item["fail"]["items"]:
-      report_file.write("%s: property \"%s\" is set to \"%s\", but should be set to \"%s\"\n" % (
-        prefix, item["property"], item["actual_value"], item["catalog_value"]
-      ))
-
-
-#
-# Main.
-#
-def main():
-
-  action_list = {  # list of supported actions
-                   Options.GET_MR_MAPPING_ACTION: get_mr1_mapping,
-                   Options.DELETE_MR_ACTION: delete_mr,
-                   Options.ADD_YARN_MR2_ACTION: add_services,
-                   Options.MODIFY_CONFIG_ACTION: modify_configs,
-                   Options.INSTALL_YARN_MR2_ACTION: install_services,
-                   Options.BACKUP_CONFIG_ACTION: backup_configs,
-                   Options.VERIFY_ACTION: verify_configuration
-  }
-
-  parser = optparse.OptionParser(usage="usage: %prog [options] action\n  Valid actions: "
-                                       + ", ".join(action_list.keys())
-                                       + "\n  update-configs accepts type, e.g. hdfs-site to update specific configs")
-
-  parser.add_option("-n", "--printonly",
-                    action="store_true", dest="printonly", default=False,
-                    help="Prints all the curl commands to be executed (only for write/update actions)")
-  parser.add_option("-o", "--log", dest="logfile", default=None,
-                    help="Log file")
-  parser.add_option("--report", dest="report", default=None,
-                    help="Report file output location")
-
-  parser.add_option('--upgradeCatalog', default=None, help="Upgrade Catalog file full path", dest="upgrade_json")
-  parser.add_option('--fromStack', default=None, help="stack version to upgrade from", dest="from_stack")
-  parser.add_option('--toStack', default=None, help="stack version to upgrade to", dest="to_stack")
-
-  parser.add_option('--hostname', default=None, help="Hostname for Ambari server", dest="hostname")
-  parser.add_option('--user', default=None, help="Ambari admin user", dest="user")
-  parser.add_option('--password', default=None, help="Ambari admin password", dest="password")
-  parser.add_option('--clustername', default=None, help="Cluster name", dest="clustername")
-
-  (options, args) = parser.parse_args()
-  Options.initialize_logger(options.logfile)
-  options.warnings = []
-
-  if len(args) == 0:
-    parser.error("No action entered")
-
-  if options.user is None:
-    options.warnings.append("User name must be provided (e.g. admin)")
-  if options.hostname is None:
-    options.warnings.append("Ambari server host name must be provided")
-  if options.clustername is None:
-    options.warnings.append("Cluster name must be provided")
-  if options.password is None:
-    options.password = getpass.getpass("Please enter Ambari admin password: ")
-    if options.password == "":
-      options.warnings.append("Ambari admin user's password name must be provided (e.g. admin)")
-  action = args[0]
-
-  # check params according to executed action
-  if action == Options.MODIFY_CONFIG_ACTION or action == Options.VERIFY_ACTION:
-    if options.upgrade_json is None:
-      options.warnings.append("Upgrade catalog option needs to be set")
-    if options.from_stack is None:
-      options.warnings.append("The fromStack option should be provided")
-    if options.to_stack is None:
-      options.warnings.append("The toStack option should be provided")
-
-  if action == Options.VERIFY_ACTION:
-    if options.report is None:
-      options.warnings.append("The report option should be provided")
-
-  if len(options.warnings) != 0:
-    parser.print_help()
-    for warning in options.warnings:
-      Options.logger.warn(warning)
-    raise FatalException(1, "Not all required options were set")
-
-  options.exit_message = "Upgrade action '%s' completed successfully." % action
-  if options.printonly:
-    Options.CURL_PRINT_ONLY = "yes"
-    options.exit_message = "Simulated execution of action '%s'. Verify the list of edit calls." % action
-
-  Options.ARGS = args
-  Options.OPTIONS = options
-  Options.HOST = options.hostname
-  Options.CLUSTER_NAME = options.clustername
-  Options.API_TOKENS = {
-    "user": options.user,
-    "pass": options.password
-  }
-  Options.REPORT_FILE = options.report
-
-  if action in action_list:
-    Options.initialize()
-    action_list[action]()
-  else:
-    parser.error("Invalid action")
-
-  if options.exit_message is not None:
-    Options.logger.info(options.exit_message)
-
-if __name__ == "__main__":
-  try:
-    main()
-  except (KeyboardInterrupt, EOFError):
-    print("\nAborting ... Keyboard Interrupt.")
-    sys.exit(1)
-  except FatalException as e:
-    if e.reason is not None:
-      error = "Exiting with exit code {0}. Reason: {1}".format(e.code, e.reason)
-      if Options.logger is not None:
-        Options.logger.error(error)
-    sys.exit(e.code)

This diff is too large to display.
+ 0 - 95
ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
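
The deleted catalog's contents are not rendered here. Based on the CatConst tags in
upgradeHelper.py above, a minimal catalog of that shape would look roughly like this
(property names are hypothetical placeholders):

    {
      "version": "1.0",
      "stacks": [
        {
          "name": "HDP",
          "old-version": "1.3",
          "target-version": "2.2",
          "options": {
            "config-types": {
              "hdfs-site": {"merged-copy": "yes"}
            }
          },
          "properties": {
            "hdfs-site": {
              "some.new.property": {"value": "42"},
              "some.obsolete.property": {"remove": "yes"}
            }
          },
          "property-mapping": {
            "old.property.name": "new.property.name"
          }
        }
      ]
    }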


+ 0 - 95
ambari-server/src/test/python/TestUpgradeHelper.py

@@ -1,95 +0,0 @@
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-
-from mock.mock import MagicMock, call
-from mock.mock import patch
-
-from unittest import TestCase
-import sys
-import unittest
-import upgradeHelper
-import StringIO
-import logging
-
-
-class TestUpgradeHelper(TestCase):
-  original_curl = None
-  out = None
-
-  def setUp(self):
-    # replace original curl call to mock
-    self.original_curl = upgradeHelper.curl
-    upgradeHelper.curl = self.magic_curl
-
-    # mock logging methods
-    upgradeHelper.logging.getLogger = MagicMock()
-    upgradeHelper.logging.FileHandler = MagicMock()
-
-    self.out = StringIO.StringIO()
-    sys.stdout = self.out
-
-  def magic_curl(self, *args, **kwargs):
-    def ret_object():
-      return ""
-
-    def communicate():
-      return "{}", ""
-
-    ret_object.returncode = 0
-    ret_object.communicate = communicate
-
-    with patch("upgradeHelper.subprocess") as subprocess:
-      subprocess.Popen.return_value = ret_object
-      return self.original_curl(*args, **kwargs)
-
-  def tearDown(self):
-    sys.stdout = sys.__stdout__
-
-  @patch("optparse.OptionParser")
-  @patch("upgradeHelper.modify_configs")
-  @patch("upgradeHelper.backup_file")
-  def test_ParseOptions(self, backup_file_mock, modify_action_mock, option_parser_mock):
-    class options(object):
-      user = "test_user"
-      hostname = "127.0.0.1"
-      clustername = "test1"
-      password = "test_password"
-      upgrade_json = "catalog_file"
-      from_stack = "0.0"
-      to_stack = "1.3"
-      logfile = "test.log"
-      warnings = []
-      printonly = False
-
-    args = ["update-configs"]
-    modify_action_mock.return_value = MagicMock()
-    backup_file_mock.return_value = MagicMock()
-    test_mock = MagicMock()
-    test_mock.parse_args = lambda: (options, args)
-    option_parser_mock.return_value = test_mock
-
-    upgradeHelper.main()
-    self.assertEqual(backup_file_mock.call_count, 1)
-    self.assertEqual(modify_action_mock.call_count, 1)
-    self.assertEqual({"user": options.user, "pass": options.password}, upgradeHelper.Options.TOKENS)
-    self.assertEqual(options.clustername, upgradeHelper.Options.CLUSTER_NAME)
-
-
-if __name__ == "__main__":
-  unittest.main()
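
The magic_curl helper above patches the module-level subprocess so that the real curl wrapper
runs against a canned Popen instead of the network. A self-contained restatement of that
mocking pattern (assumes upgradeHelper is importable and the mock package is installed, as in
the test above):

    from mock import MagicMock, patch
    import upgradeHelper

    def fake_popen(body="{}"):
        # A stand-in Popen whose communicate() returns the canned body and no stderr.
        popen = MagicMock()
        popen.returncode = 0
        popen.communicate.return_value = (body, "")
        return popen

    with patch("upgradeHelper.subprocess") as sp:
        sp.Popen.return_value = fake_popen()
        out = upgradeHelper.curl("http://localhost:8080/api/v1/clusters/c1", simulate=False)
        assert out == "{}"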

+ 646 - 0
ambari-server/src/test/python/TestUpgradeScript_HDP2.py

@@ -0,0 +1,646 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import subprocess
+from mock.mock import MagicMock, call, patch
+from unittest import TestCase
+from mock.mock import create_autospec
+import sys
+import unittest
+import UpgradeHelper_HDP2
+import StringIO
+import logging
+
+
+class TestUpgradeHDP2Script(TestCase):
+  def setUp(self):
+    UpgradeHelper_HDP2.logger = MagicMock()
+    out = StringIO.StringIO()
+    sys.stdout = out
+
+
+  def tearDown(self):
+    sys.stdout = sys.__stdout__
+
+
+  @patch.object(UpgradeHelper_HDP2, 'backup_single_config_type')
+  def test_backup_configs(self, backup_config_mock):
+    UpgradeHelper_HDP2.backup_configs(None)
+    self.assertTrue(backup_config_mock.called)
+
+  @patch.object(UpgradeHelper_HDP2, 'update_config')
+  @patch.object(UpgradeHelper_HDP2, 'get_config')
+  @patch('optparse.Values')
+  def test_update_with_append(self, optparse_mock, get_config_mock, update_config_mock):
+    opm = optparse_mock.return_value
+    update_config_mock.return_value = None
+    options = MagicMock()
+    args = ["save-configs"]
+    opm.parse_args.return_value = (options, args)
+    get_config_mock.return_value = {"a1": "va1", "a2": "va2", "b1": "vb1", "b2": "vb2", "c1": "vc1", "d1": "d1"}
+    site_template = {"y1": "vy1", "a1": "REPLACE_WITH_", "a2": "REPLACE_WITH_", "nb1": "REPLACE_WITH_b1",
+                     "nb2": "REPLACE_WITH_b2", "d1": "DELETE_OLD", "b1": "DELETE_OLD", "c1": "vc2"}
+    expected_site = {"y1": "vy1", "a1": "va1", "a2": "va2", "nb1": "vb1", "nb2": "vb2", "c1": "vc2"}
+    UpgradeHelper_HDP2.update_config_using_existing(opm, "global", site_template)
+    get_config_mock.assert_called_once_with(opm, "global")
+    update_config_mock.assert_called_once_with(opm, expected_site, "global")
+
+  @patch.object(UpgradeHelper_HDP2, 'update_config')
+  @patch.object(UpgradeHelper_HDP2, 'get_config')
+  @patch('optparse.Values')
+  def test_update_with_append_II(self, optparse_mock, get_config_mock, update_config_mock):
+    opm = optparse_mock.return_value
+    update_config_mock.return_value = None
+    options = MagicMock()
+    args = ["save-configs"]
+    opm.parse_args.return_value = (options, args)
+    get_config_mock.return_value = {"a1": "va1", "a2": "va2", "b1": "vb1", "b2": "vb2", "c1": "vc1", "x1": "x1",
+                                    "X1": "X1"}
+    site_template = {"y1": "vy1", "a1": "REPLACE_WITH_", "a2": "REPLACE_WITH_", "nb1": "REPLACE_WITH_b1",
+                     "nb2": "REPLACE_WITH_b2", "x1": "DELETE_OLD", "X1": "DELETE"}
+    expected_site = {"y1": "vy1", "a1": "va1", "a2": "va2", "nb1": "vb1", "nb2": "vb2", "c1": "vc1","X1": "DELETE"}
+    UpgradeHelper_HDP2.update_config_using_existing(opm, "global", site_template)
+    get_config_mock.assert_called_once_with(opm, "global")
+    update_config_mock.assert_called_once_with(opm, expected_site, "global")
+
+  @patch.object(logging.FileHandler, 'setFormatter')
+  @patch.object(logging, 'basicConfig')
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, 'write_config')
+  @patch.object(UpgradeHelper_HDP2, 'get_config_resp')
+  @patch("os.remove")
+  @patch("shutil.copyfile")
+  @patch("os.path.exists")
+  @patch('optparse.OptionParser')
+  def test_save_configs(self, option_parser_mock, path_exists_mock, shutil_copy_mock, os_remove_mock,
+                        get_config_resp_mock, write_config_mock, file_handler_mock, logging_mock, set_formatter_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    path_exists_mock.return_value = True
+    options = self.get_mock_options()
+    args = ["backup-configs"]
+    opm.parse_args.return_value = (options, args)
+
+    def get_config_resp_side_effect(ops, type, error_if_na):
+      if type == "global":
+        return ("version1", "")
+      elif type == "core-site":
+        return ("version1", "")
+      elif type == "hdfs-site":
+        return ("version1", "")
+      elif type == "mapred-site":
+        return ("version2", "")
+      elif type == "hbase-site":
+        return ("version2", "")
+      else:
+        return (None, None)
+
+    get_config_resp_mock.side_effect = get_config_resp_side_effect
+    UpgradeHelper_HDP2.main()
+    self.assertTrue(shutil_copy_mock.called)
+    os_remove_mock.assert_called_once_with("logfile")
+    write_expected = [call("", "global", "version1"), call("", "core-site", "version1"),
+                      call("", "hdfs-site", "version1"), call("", "mapred-site", "version2"),
+                      call("", "hbase-site", "version2")]
+    write_config_mock.assert_has_calls(write_expected, any_order=True)
+    get_config_expected = [call(options, "global", True), call(options, "core-site", True),
+                           call(options, "hdfs-site", True), call(options, "mapred-site", True),
+                           call(options, "hbase-site", False), call(options, "oozie-site", False),
+                           call(options, "webhcat-site", False), call(options, "hive-site", False)]
+    get_config_resp_mock.assert_has_calls(get_config_expected, any_order=True)
+
+
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'write_mapping')
+  @patch("json.loads")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_save_mr_mapping(self, option_parser_mock, curl_mock, json_loads_mock, write_mapping_mock,
+                           backup_file_mock, file_handler_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = self.get_mock_options()
+    args = ["save-mr-mapping"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.side_effect = ['"href" : "', '"href" : "', '"href" : "', '"href" : "']
+    json_loads_mock.return_value = {"host_components": [{"HostRoles": {"host_name": "host1"}}]}
+    UpgradeHelper_HDP2.main()
+    expected_curl_calls = [
+      call(False, "-u", "admin:admin",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE/components/MAPREDUCE_CLIENT"),
+      call(False, "-u", "admin:admin",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE/components/TASKTRACKER"),
+      call(False, "-u", "admin:admin",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE/components/JOBTRACKER")]
+    curl_mock.assert_has_calls(expected_curl_calls, any_order=True)
+    self.assertTrue(write_mapping_mock.called)
+    write_call = write_mapping_mock.call_args
+    args, kargs = write_call
+    self.assertTrue('MAPREDUCE_CLIENT' in args[0].keys())
+    self.assertTrue(["host1"] == args[0]['MAPREDUCE_CLIENT'])
+    self.assertTrue('TASKTRACKER' in args[0].keys())
+    self.assertTrue('JOBTRACKER' in args[0].keys())
+    pass
+
+
+  @patch.object(UpgradeHelper_HDP2, "get_YN_input")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_delete_mr(self, option_parser_mock, curl_mock,
+                     backup_file_mock, file_handler_mock, read_mapping_mock, get_yn_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = self.get_mock_options()
+    args = ["delete-mr"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.return_value = ''
+    get_yn_mock.return_value = True
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"],
+      "JOBTRACKER": ["c6401"],
+      "MAPREDUCE_CLIENT": ["c6401"]}
+    UpgradeHelper_HDP2.main()
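+    # delete-mr should first disable each MR host component (PUT state=DISABLED)
+    # and then remove the MAPREDUCE service itself (DELETE).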
+    expected_curl_calls = [
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "PUT", "-d", """{"HostRoles": {"state": "DISABLED"}}""",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/TASKTRACKER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "PUT", "-d", """{"HostRoles": {"state": "DISABLED"}}""",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6402/host_components/TASKTRACKER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "PUT", "-d", """{"HostRoles": {"state": "DISABLED"}}""",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/JOBTRACKER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "DELETE",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE")]
+    curl_mock.assert_has_calls(expected_curl_calls, any_order=True)
+    pass
+
+
+  @patch.object(UpgradeHelper_HDP2, "has_component_in_stack_def")
+  @patch.object(UpgradeHelper_HDP2, "get_cluster_stackname")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_add_yarn_mr_with_ATS(self, option_parser_mock, curl_mock, backup_file_mock,
+                                file_handler_mock, read_mapping_mock, get_stack_mock, has_comp_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = self.get_mock_options()
+    args = ["add-yarn-mr2"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.return_value = ''
+    has_comp_mock.return_value = True
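+    # The stack definition reports APP_TIMELINE_SERVER as available, so
+    # add-yarn-mr2 is expected to create ATS along with the other components.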
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"],
+      "JOBTRACKER": ["c6401"],
+      "HISTORYSERVER": ["c6401"],
+      "MAPREDUCE_CLIENT": ["c6403"]}
+    UpgradeHelper_HDP2.main()
+    expected_curl_calls = [
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2/components/HISTORYSERVER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2/components/MAPREDUCE2_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/NODEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/YARN_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/RESOURCEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/APP_TIMELINE_SERVER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/NODEMANAGER"),
+      call(False, '-u', 'admin:admin', '-H', 'X-Requested-By: ambari', '-X', 'POST',
+           'http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/HISTORYSERVER'),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6402/host_components/NODEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6403/host_components/YARN_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6403/host_components/MAPREDUCE2_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/RESOURCEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/APP_TIMELINE_SERVER")]
+    curl_mock.assert_has_calls(expected_curl_calls, any_order=True)
+    pass
+
+  @patch.object(UpgradeHelper_HDP2, "has_component_in_stack_def")
+  @patch.object(UpgradeHelper_HDP2, "get_cluster_stackname")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_add_yarn_mr_without_ATS(self, option_parser_mock, curl_mock, backup_file_mock,
+                                   file_handler_mock, read_mapping_mock, get_stack_mock, has_comp_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = self.get_mock_options()
+    args = ["add-yarn-mr2"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.return_value = ''
+    has_comp_mock.return_value = False
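+    # The stack definition lacks APP_TIMELINE_SERVER; the negative assertions
+    # below check that no ATS service or host component is created.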
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"],
+      "JOBTRACKER": ["c6401"],
+      "HISTORYSERVER": ["c6401"],
+      "MAPREDUCE_CLIENT": ["c6403"]}
+    UpgradeHelper_HDP2.main()
+    expected_curl_calls = [
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2/components/HISTORYSERVER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2/components/MAPREDUCE2_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/NODEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/YARN_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/RESOURCEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/NODEMANAGER"),
+      call(False, '-u', 'admin:admin', '-H', 'X-Requested-By: ambari', '-X', 'POST',
+           'http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/HISTORYSERVER'),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6402/host_components/NODEMANAGER"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6403/host_components/YARN_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6403/host_components/MAPREDUCE2_CLIENT"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+           "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/RESOURCEMANAGER"),
+      ]
+    curl_mock.assert_has_calls(expected_curl_calls, any_order=True)
+
+    # assert no ATS was added
+    self.assertFalse(call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+        "http://localhost:8080/api/v1/clusters/c1/hosts/c6401/host_components/APP_TIMELINE_SERVER") in curl_mock.call_args_list,
+        "ATS host component should not be added if it's not in the stack!")
+    self.assertFalse(call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "POST",
+        "http://localhost:8080/api/v1/clusters/c1/services/YARN/components/APP_TIMELINE_SERVER") in curl_mock.call_args_list,
+        "ATS component should not be added if it's not in the stack!")
+    pass
+
+
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_install_yarn_mr2(self, option_parser_mock, curl_mock,
+                            backup_file_mock, file_handler_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = self.get_mock_options()
+    args = ["install-yarn-mr2"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.return_value = '"href" : "'
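+    # install-yarn-mr2 must move both new services to INSTALLED, MAPREDUCE2
+    # before YARN, which is why the calls are checked with any_order=False.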
+    UpgradeHelper_HDP2.main()
+    expected_curl_calls = [
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "PUT", "-d",
+           """{"RequestInfo":{"context":"Install MapReduce2"}, "Body":{"ServiceInfo": {"state":"INSTALLED"}}}""",
+           "http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE2"),
+      call(False, "-u", "admin:admin", '-H', 'X-Requested-By: ambari', "-X", "PUT", "-d",
+           """{"RequestInfo":{"context":"Install YARN"}, "Body":{"ServiceInfo": {"state":"INSTALLED"}}}""",
+           "http://localhost:8080/api/v1/clusters/c1/services/YARN")]
+    curl_mock.assert_has_calls(expected_curl_calls, any_order=False)
+    pass
+
+
+  @patch.object(UpgradeHelper_HDP2, "get_config")
+  @patch.object(UpgradeHelper_HDP2, "rename_all_properties")
+  @patch.object(UpgradeHelper_HDP2, "update_config_using_existing_properties")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_update_single_configs(self, option_parser_mock, curl_mock,
+                                 backup_file_mock, file_handler_mock, read_mapping_mock,
+                                 update_config_mock, rename_all_prop_mock, get_config_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = MagicMock()
+    args = ["update-configs", "hdfs-site"]
+    opm.parse_args.return_value = (options, args)
+    curl_mock.side_effect = ['', '', '', '', '', '', '']
+    read_mapping_mock.return_value = {"JOBTRACKER": ["c6401"]}
+    update_config_mock.side_effect = [None]
+    get_config_mock.return_value = {}
+    prop_to_move = {"dfs.namenode.checkpoint.edits.dir": "a1",
+                    "dfs.namenode.checkpoint.dir": "a2",
+                    "dfs.namenode.checkpoint.period": "a3"}
+    rename_all_prop_mock.side_effect = [
+      prop_to_move,
+      {}, {}]
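+    # Only hdfs-site is updated; the renamed checkpoint properties must be
+    # handed through to update_config_using_existing_properties.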
+    UpgradeHelper_HDP2.main()
+    self.assertTrue(update_config_mock.call_count == 1)
+    args, kargs = update_config_mock.call_args_list[0]
+    self.assertEqual("hdfs-site", args[1])
+    for key in prop_to_move.keys():
+      self.assertEqual(prop_to_move[key], args[3][key])
+    pass
+
+
+  @patch.object(UpgradeHelper_HDP2, "get_config_resp")
+  @patch.object(UpgradeHelper_HDP2, "get_config")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_no_hbase(self, option_parser_mock, curl_mock,
+                          backup_file_mock, file_handler_mock, read_mapping_mock,
+                          get_config_mock, get_config_resp_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = MagicMock()
+    args = ["update-configs"]
+    opm.parse_args.return_value = (options, args)
+    options.logfile = "logfile"
+    options.user = "admin"
+    options.password = "admin"
+    options.hostname = "localhost"
+    options.clustername = "c1"
+    curl_mock.side_effect = ['', '', '', '', '', '', '']
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"],
+      "JOBTRACKER": ["c6401"],
+      "MAPREDUCE_CLIENT": ["c6403"]}
+    get_config_resp_mock.return_value = "hbase-site", None
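+    # A None properties payload simulates a cluster without HBase installed,
+    # so the hbase-site update is skipped and only 6 curl calls are expected.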
+    get_config_mock.return_value = {
+      "mapred.hosts": "an_old_value",
+      "mapred.hosts.exclude": "an_old_value",
+      "mapred.jobtracker.maxtasks.per.job": "an_old_value",
+      "mapred.jobtracker.taskScheduler": "an_old_value",
+      "dfs.df.interval": "an_old_value",
+      "mapred.userlog.retain.hours": "will_not_be_stored",
+      "global1": "global11"
+    }
+    UpgradeHelper_HDP2.main()
+    self.assertEqual(6, len(curl_mock.call_args_list))
+
+
+  @patch.object(UpgradeHelper_HDP2, "get_config_resp")
+  @patch.object(UpgradeHelper_HDP2, "get_config")
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_update_configs(self, option_parser_mock, curl_mock,
+                          backup_file_mock, file_handler_mock, read_mapping_mock,
+                          get_config_mock, get_config_resp_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    opm = option_parser_mock.return_value
+    options = MagicMock()
+    args = ["update-configs"]
+    opm.parse_args.return_value = (options, args)
+    options.logfile = "logfile"
+    options.user = "admin"
+    options.password = "admin"
+    options.hostname = "localhost"
+    options.clustername = "c1"
+    curl_mock.side_effect = ['', '', '', '', '', '', '', '']
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"],
+      "JOBTRACKER": ["c6401"],
+      "MAPREDUCE_CLIENT": ["c6403"]}
+    get_config_resp_mock.return_value = "hbase-site", {}
+    site_properties = {
+      "mapred.hosts": "an_old_value",
+      "mapred.hosts.exclude": "an_old_value",
+      "mapred.jobtracker.maxtasks.per.job": "an_old_value",
+      "hbase.rpc.engine": "an_old_value",
+      "dfs.df.interval": "an_old_value",
+      "mapred.userlog.retain.hours": "will_not_be_stored",
+      "global1": "global11"
+    }
+    get_config_mock.side_effect = [
+      site_properties.copy(), site_properties.copy(), site_properties.copy(),
+      site_properties.copy(), site_properties.copy(), site_properties.copy()]
+
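+    # Temporarily shrink the template dicts so the REPLACE_WITH_ substitution
+    # path is exercised deterministically; the originals are restored below.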
+    saved_global = UpgradeHelper_HDP2.GLOBAL
+    saved_hdfs = UpgradeHelper_HDP2.HDFS_SITE
+    saved_core = UpgradeHelper_HDP2.CORE_SITE
+    saved_hbase = UpgradeHelper_HDP2.HBASE_SITE
+    saved_hive = UpgradeHelper_HDP2.HIVE_SITE
+    saved_mapred = UpgradeHelper_HDP2.MAPRED_SITE
+    try:
+      UpgradeHelper_HDP2.GLOBAL = {"global2": "REPLACE_WITH_global1"}
+      UpgradeHelper_HDP2.HDFS_SITE = {"global2": "REPLACE_WITH_global1"}
+      UpgradeHelper_HDP2.CORE_SITE = {"global2": "REPLACE_WITH_global1"}
+      UpgradeHelper_HDP2.main()
+    finally:
+      UpgradeHelper_HDP2.GLOBAL = saved_global
+      UpgradeHelper_HDP2.HDFS_SITE = saved_hdfs
+      UpgradeHelper_HDP2.CORE_SITE = saved_core
+      UpgradeHelper_HDP2.MAPRED_SITE = saved_mapred
+      UpgradeHelper_HDP2.HBASE_SITE = saved_hbase
+      UpgradeHelper_HDP2.HIVE_SITE = saved_hive
+
+    self.assertEqual(8, len(curl_mock.call_args_list))
+    self.validate_update_config_call(curl_mock.call_args_list[0], "capacity-scheduler")
+    self.validate_update_config_call(curl_mock.call_args_list[1], "yarn-site")
+    self.validate_update_config_call(curl_mock.call_args_list[2], "global")
+    self.validate_update_config_call(curl_mock.call_args_list[3], "mapred-site")
+    self.validate_config_replacement(curl_mock.call_args_list[1], "yarn-site")
+    self.validate_config_replacement(curl_mock.call_args_list[2], "global")
+    self.validate_config_replacement(curl_mock.call_args_list[3], "mapred-site")
+    self.validate_config_replacement(curl_mock.call_args_list[4], "hdfs-site")
+    self.validate_config_replacement(curl_mock.call_args_list[5], "core-site")
+    self.validate_config_replacement(curl_mock.call_args_list[6], "hbase-site")
+    pass
+
+  @patch.object(UpgradeHelper_HDP2, "read_mapping")
+  @patch("subprocess.Popen")
+  @patch.object(UpgradeHelper_HDP2, "get_YN_input")
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  @patch('optparse.OptionParser')
+  def test_print_only(self, option_parser_mock, curl_mock,
+                      backup_file_mock, file_handler_mock, get_yn_mock, popen_mock, read_mapping_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    options = self.get_mock_options(True)
+    get_yn_mock.return_value = True
+    read_mapping_mock.return_value = {
+      "TASKTRACKER": ["c6401", "c6402"]}
+    UpgradeHelper_HDP2.delete_mr(options)
+    self.assertFalse(popen_mock.called)
+    pass
+
+  @patch.object(logging, 'FileHandler')
+  @patch.object(UpgradeHelper_HDP2, "backup_file")
+  @patch.object(UpgradeHelper_HDP2, 'curl')
+  def test_get_and_parse_properties(self, curl_mock,
+                                    backup_file_mock, file_handler_mock):
+    file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
+    options = self.get_mock_options()
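+    # Three canned REST responses: the cluster's desired_configs, the
+    # core-site configuration payload, and a desired_configs map without
+    # hdfs-site to drive the failure path checked at the end of the test.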
+    curl_mock.side_effect = [
+      """{ "href" : "http://localhost:8080/api/v1/clusters/c1",
+           "Clusters" : {
+              "cluster_id" : 2,
+              "cluster_name" : "c1",
+              "version" : "HDP-2.0.5",
+              "desired_configs" : {
+                "capacity-scheduler" : {
+                  "user" : "admin",
+                  "tag" : "version137"
+                },
+                "core-site" : {
+                   "user" : "admin",
+                   "tag" : "version138"
+                }}}}""",
+      """{
+           "href" : "http://localhost:8080/api/v1/clusters/c1/configurations?type=core-site&tag=version138",
+           "items" : [
+             {
+               "href" : "http://localhost:8080/api/v1/clusters/c1/configurations?type=core-site&tag=version138",
+               "tag" : "version1378850572477",
+               "type" : "core-site",
+               "Config" : {
+                 "cluster_name" : "c1"
+               },
+               "properties" : {
+                 "name1" : "value1",
+                 "name2" : "value2",
+                 "name3" : "value3"
+               }}]}""",
+      """{ "href" : "http://localhost:8080/api/v1/clusters/c1",
+           "Clusters" : {
+              "desired_configs" : {
+                "capacity-scheduler" : {
+                  "user" : "admin",
+                  "tag" : "version137"
+                }}}}"""]
+    properties = UpgradeHelper_HDP2.get_config(options, "core-site")
+    self.assertTrue(len(properties.keys()) == 3)
+    self.assertTrue(properties["name1"] == "value1")
+    self.assertTrue(properties["name2"] == "value2")
+    self.assertTrue(properties["name3"] == "value3")
+    raised = False
+    try:
+      UpgradeHelper_HDP2.get_config(options, "hdfs-site")
+    except Exception, e:
+      raised = True
+      self.assertTrue('Unable to get the current version for config type hdfs-site' in e.reason)
+    self.assertTrue(raised, "get_config should fail when hdfs-site has no current version")
+    pass
+
+  def test_rename_all_properties(self):
+    site_properties = {
+      "mapred.task.is.map": "mapreduce.task.ismap",
+      "mapred.task.partition": "mapreduce.task.partition",
+      "mapred.task.profile": "mapreduce.task.profile",
+      "abc": "abc"
+    }
+    site_properties = \
+      UpgradeHelper_HDP2.rename_all_properties(site_properties, UpgradeHelper_HDP2.PROPERTY_MAPPING)
+    for key in site_properties.keys():
+      self.assertEqual(key, site_properties[key])
+    self.assertEqual(4, len(site_properties))
+    pass
+
+  def test_tags_count(self):
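+    # Count how many entries of each config template are marked for deletion
+    # versus value replacement; the totals pin down the shipped templates.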
+    def count_tags(template):
+      deleted = 0
+      replaced = 0
+      for key in template.keys():
+        value = template[key]
+        if value == UpgradeHelper_HDP2.DELETE_OLD_TAG:
+          deleted += 1
+          continue
+        if value.find(UpgradeHelper_HDP2.REPLACE_WITH_TAG) == 0:
+          replaced += 1
+          continue
+        pass
+      return deleted, replaced
+
+    deleted, replaced = count_tags(UpgradeHelper_HDP2.GLOBAL)
+    self.assertEqual(0, replaced)
+    self.assertEqual(88, deleted)
+
+    deleted, replaced = count_tags(UpgradeHelper_HDP2.MAPRED_SITE)
+    self.assertEqual(0, replaced)
+    self.assertEqual(95, deleted)
+
+    deleted, replaced = count_tags(UpgradeHelper_HDP2.CORE_SITE)
+    self.assertEqual(0, replaced)
+    self.assertEqual(2, deleted)
+
+    deleted, replaced = count_tags(UpgradeHelper_HDP2.HDFS_SITE)
+    self.assertEqual(0, replaced)
+    self.assertEqual(8, deleted)
+    pass
+
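+  # The helpers below inspect positional curl arguments: in the config-update
+  # calls args[6] carries the HTTP verb and args[8] the JSON payload.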
+  def validate_update_config_call(self, call, type):
+    args, kargs = call
+    self.assertTrue(args[6] == 'PUT')
+    self.assertTrue(type in args[8])
+    pass
+
+  def validate_config_replacement(self, call, type):
+    args, kargs = call
+    self.assertFalse("REPLACE_WITH_" in args[8])
+    self.assertFalse("REPLACE_JH_HOST" in args[8])
+    self.assertFalse("REPLACE_RM_HOST" in args[8])
+    if type == "yarn-site":
+      self.assertTrue("c6401" in args[8])
+      self.assertFalse("an_old_value" in args[8])
+    elif type == "mapred-site":
+      self.assertFalse("will_not_be_stored" in args[8])
+      self.assertTrue("fs.df.interval" in args[8])
+      self.assertFalse("dfs.df.interval" in args[8])
+    elif type == "global":
+      self.assertTrue("global11" in args[8])
+      self.assertTrue("an_old_value" in args[8])
+      self.assertTrue("mapred.hosts.exclude" in args[8])
+    elif (type == "core-site") or (type == "hdfs-site"):
+      self.assertTrue("global11" in args[8])
+      self.assertTrue("global2" in args[8])
+      self.assertTrue("hbase.rpc.engine" in args[8])
+    elif type == "hbase-site":
+      self.assertTrue("global11" in args[8])
+      self.assertTrue("hbase.hstore.blockingStoreFiles" in args[8])
+      self.assertTrue("dfs.df.interval" in args[8])
+      self.assertFalse("hbase.rpc.engine" in args[8])
+    pass
+
+  def get_mock_options(self, printonly=False):
+    options = MagicMock()
+    options.logfile = "logfile"
+    options.user = "admin"
+    options.password = "admin"
+    options.hostname = "localhost"
+    options.clustername = "c1"
+    options.printonly = printonly
+    return options
+
+
+if __name__ == "__main__":
+  unittest.main()
