+ "comment" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+ "properties" : [ {
+ "name" : "hbase.cluster.distributed",
+ "value" : "true",
+ "description" : "The mode the cluster will be in. Possible values are\n false for standalone mode and true for distributed mode. If\n false, startup will run all HBase and ZooKeeper daemons together\n in the one JVM.\n ",
+ "description" : "The interval between checks for expired region server leases.\n This value has been reduced due to the other reduced values above so that\n the master will notice a dead region server sooner. The default is 15 seconds.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.superuser",
+ "value" : "hbase",
+ "description" : "List of users or groups (comma-separated), who are allowed\n full privileges, regardless of stored ACLs, across the cluster.\n Only used when HBase security is enabled.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.zookeeper.property.clientPort",
+ "value" : "2181",
+ "description" : "Property from ZooKeeper's config zoo.cfg.\n The port at which the clients will connect.\n ",
+ "description" : "\n Amount of time to wait since the last time a region was flushed before\n invoking an optional cache flush. Default 60,000.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.zookeeper.useMulti",
+ "value" : "true",
+ "description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).åá\n IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will\n not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n ",
+ "description" : "Determines datanode heartbeat interval in seconds.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.safemode.threshold.pct",
+ "value" : "1.0f",
+ "description" : "\n Specifies the percentage of blocks that should satisfy\n the minimal replication requirement defined by dfs.replication.min.\n Values less than or equal to 0 mean not to start in safe mode.\n Values greater than 1 will make safe mode permanent.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.balance.bandwidthPerSec",
+ "value" : "6250000",
+ "description" : "\n Specifies the maximum amount of bandwidth that each datanode\n can utilize for the balancing purpose in term of\n the number of bytes per second.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.block.size",
+ "value" : "134217728",
+ "description" : "The default block size for new files.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.ipc.address",
+ "value" : "0.0.0.0:8010",
+ "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.blockreport.initialDelay",
+ "value" : "120",
+ "description" : "Delay for first block report in seconds.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.du.pct",
+ "value" : "0.85f",
+ "description" : "When calculating remaining space, only use this percentage of the real available space\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.namenode.handler.count",
+ "value" : "40",
+ "description" : "The number of server threads for the namenode.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.max.xcievers",
+ "value" : "4096",
+ "description" : "PRIVATE CONFIG VARIABLE",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.umaskmode",
+ "value" : "077",
+ "description" : "\nThe octal umask used when creating files and directories.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.web.ugi",
+ "value" : "gopher,gopher",
+ "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.permissions",
+ "value" : "true",
+ "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.permissions.supergroup",
+ "value" : "hdfs",
+ "description" : "The name of the group of super-users.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.namenode.handler.count",
+ "value" : "100",
+ "description" : "Added to grow Queue size so that more client connections are allowed",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "ipc.server.max.response.size",
+ "value" : "5242880",
+ "description" : null,
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.block.access.token.enable",
+ "value" : "true",
+ "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.secondary.https.port",
+ "value" : "50490",
+ "description" : "The https port where secondary-namenode binds",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.https.port",
+ "value" : "50470",
+ "description" : "The https port where namenode binds",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.access.time.precision",
+ "value" : "0",
+ "description" : "The access time for HDFS file is precise upto this value.\n The default value is 1 hour. Setting a value of 0 disables\n access times for HDFS.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.cluster.administrators",
+ "value" : " hdfs",
+ "description" : "ACL for who all can view the default servlets in the HDFS",
+ "description" : "Number of failed disks datanode would tolerate",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "io.file.buffer.size",
+ "value" : "131072",
+ "description" : "The size of buffer for use in sequence files.\n The size of this buffer should probably be a multiple of hardware\n page size (4096 on Intel x86), and it determines how much data is\n buffered during read and write operations.",
+ "description" : "The implementation for lzo codec.",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.trash.interval",
+ "value" : "360",
+ "description" : "Number of minutes between trash checkpoints.\n If zero, the trash feature is disabled.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.edits.dir",
+ "value" : "${fs.checkpoint.dir}",
+ "description" : "Determines where on the local filesystem the DFS secondary\n name node should store the temporary edits to merge.\n If this is a comma-delimited list of directoires then teh edits is\n replicated in all of the directoires for redundancy.\n Default value is same as fs.checkpoint.dir\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.period",
+ "value" : "21600",
+ "description" : "The number of seconds between two periodic checkpoints.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.size",
+ "value" : "536870912",
+ "description" : "The size of the current edit log (in bytes) that triggers\n a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.idlethreshold",
+ "value" : "8000",
+ "description" : "Defines the threshold number of connections after which\n connections will be inspected for idleness.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.connection.maxidletime",
+ "value" : "30000",
+ "description" : "The maximum time after which a client will bring down the\n connection to the server.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.connect.max.retries",
+ "value" : "50",
+ "description" : "Defines the maximum number of retries for IPC connections.",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "webinterface.private.actions",
+ "value" : "false",
+ "description" : " If set to true, the web interfaces of JT and NN may contain\n actions, such as kill job, delete file, etc., that should\n not be exposed to public. Enable this option if the interfaces\n are only reachable by those who have the right authorization.\n ",
+ "description" : "MetaStore Client socket timeout in seconds",
+ "filename" : "hive-site.xml"
+ }, {
+ "name" : "hive.metastore.execute.setugi",
+ "value" : "true",
+ "description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+ "filename" : "hive-site.xml"
+ }, {
+ "name" : "hive.security.authorization.enabled",
+ "value" : "true",
+ "description" : "enable or disable the hive client authorization",
+ "description" : "the hive client authorization manager class name.\n The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. ",
+ "description" : "Normally, this is the amount of time before killing\n processes, and the recommended-default is 5.000 seconds - a value of\n 5000 here. In this case, we are using it solely to blast tasks before\n killing them, and killing them very quickly (1/4 second) to guarantee\n that we do not leave VMs around for later jobs.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.job.tracker.handler.count",
+ "value" : "50",
+ "description" : "\n The number of server threads for the JobTracker. This should be roughly\n 4% of the number of tasktracker nodes.\n ",
+ "description" : "The threshold, in terms of the number of files\n for the in-memory merge process. When we accumulate threshold number of files\n we initiate the in-memory merge and spill to disk. A value of 0 or less than\n 0 indicates we want to DON'T have any threshold and instead depend only on\n the ramfs's memory consumption to trigger the merge.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.job.shuffle.merge.percent",
+ "value" : "0.66",
+ "description" : "The usage threshold at which an in-memory merge will be\n initiated, expressed as a percentage of the total memory allocated to\n storing in-memory map outputs, as defined by\n mapred.job.shuffle.input.buffer.percent.\n ",
+ "description" : "The percentage of memory to be allocated from the maximum heap\n size to storing map outputs during the shuffle.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.output.compression.type",
+ "value" : "BLOCK",
+ "description" : "If the job outputs are to compressed as SequenceFiles, how should\n they be compressed? Should be one of NONE, RECORD or BLOCK.\n ",
+ "description" : "The percentage of memory- relative to the maximum heap size- to\n retain map outputs during the reduce. When the shuffle is concluded, any\n remaining map outputs in memory must consume less than this threshold before\n the reduce can begin.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapreduce.reduce.input.limit",
+ "value" : "10737418240",
+ "description" : "The limit on the input size of the reduce. (This value\n is 10 Gb.) If the estimated input size of the reduce is greater than\n this value, job is failed. A value of -1 means that there is no limit\n set. ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.task.timeout",
+ "value" : "600000",
+ "description" : "The number of milliseconds before a task will be\n terminated if it neither reads an input, writes an output, nor\n updates its status string.\n ",
+ "description" : "The number of hours job status information is persisted in DFS.\n The job status information will be available after it drops of the memory\n queue and between jobtracker restarts. With a zero value the job status\n information is not persisted at all in DFS.\n ",
+ "description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n name. It is a path in the default file system.",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapreduce.tasktracker.group",
+ "value" : "hadoop",
+ "description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+ "description" : "\n Specifies whether security (user name/admin role) is enabled or not.\n If disabled any user can manage Oozie system and manage any job.\n ",
+ "description" : "\n Maximum concurrency for a given callable type.\n Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n All commands that use action executors (action-start, action-end, action-kill and action-check) use\n the action type as the callable type.\n ",
+ "description" : "\n System library path to use for workflow applications.\n This path is added to workflow application if their job properties sets\n the property 'oozie.use.system.libpath' to true.\n ",
+ "description" : "\n If set to true, submissions of MapReduce and Pig jobs will include\n automatically the system library path, thus not requiring users to\n specify where the Pig JAR files are. Instead, the ones from the system\n library path are used.\n ",
+ "description" : "\n Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n the relevant Hadoop *-site.xml files. If the path is relative is looked within\n the Oozie configuration directory; though the path can be absolute (i.e. to point\n to Hadoop client conf/ directories in the local filesystem.\n ",
+ "description" : "\n Creates Oozie DB.\n\n If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n ",
+ "description" : "\n DB user password.\n\n IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n if empty Configuration assumes it is NULL.\n ",
+ "comment" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+ "properties" : [ {
+ "name" : "hbase.cluster.distributed",
+ "value" : "true",
+ "description" : "The mode the cluster will be in. Possible values are\n false for standalone mode and true for distributed mode. If\n false, startup will run all HBase and ZooKeeper daemons together\n in the one JVM.\n ",
+ "description" : "The interval between checks for expired region server leases.\n This value has been reduced due to the other reduced values above so that\n the master will notice a dead region server sooner. The default is 15 seconds.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.superuser",
+ "value" : "hbase",
+ "description" : "List of users or groups (comma-separated), who are allowed\n full privileges, regardless of stored ACLs, across the cluster.\n Only used when HBase security is enabled.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.zookeeper.property.clientPort",
+ "value" : "2181",
+ "description" : "Property from ZooKeeper's config zoo.cfg.\n The port at which the clients will connect.\n ",
+ "description" : "\n Amount of time to wait since the last time a region was flushed before\n invoking an optional cache flush. Default 60,000.\n ",
+ "filename" : "hbase-site.xml"
+ }, {
+ "name" : "hbase.zookeeper.useMulti",
+ "value" : "true",
+ "description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).В·\n IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will\n not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n ",
+ "description" : "Determines datanode heartbeat interval in seconds.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.safemode.threshold.pct",
+ "value" : "1.0f",
+ "description" : "\n Specifies the percentage of blocks that should satisfy\n the minimal replication requirement defined by dfs.replication.min.\n Values less than or equal to 0 mean not to start in safe mode.\n Values greater than 1 will make safe mode permanent.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.balance.bandwidthPerSec",
+ "value" : "6250000",
+ "description" : "\n Specifies the maximum amount of bandwidth that each datanode\n can utilize for the balancing purpose in term of\n the number of bytes per second.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.block.size",
+ "value" : "134217728",
+ "description" : "The default block size for new files.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.ipc.address",
+ "value" : "0.0.0.0:8010",
+ "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.blockreport.initialDelay",
+ "value" : "120",
+ "description" : "Delay for first block report in seconds.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.du.pct",
+ "value" : "0.85f",
+ "description" : "When calculating remaining space, only use this percentage of the real available space\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.namenode.handler.count",
+ "value" : "40",
+ "description" : "The number of server threads for the namenode.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.datanode.max.xcievers",
+ "value" : "4096",
+ "description" : "PRIVATE CONFIG VARIABLE",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.umaskmode",
+ "value" : "077",
+ "description" : "\nThe octal umask used when creating files and directories.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.web.ugi",
+ "value" : "gopher,gopher",
+ "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.permissions",
+ "value" : "true",
+ "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.permissions.supergroup",
+ "value" : "hdfs",
+ "description" : "The name of the group of super-users.",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.namenode.handler.count",
+ "value" : "100",
+ "description" : "Added to grow Queue size so that more client connections are allowed",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "ipc.server.max.response.size",
+ "value" : "5242880",
+ "description" : null,
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.block.access.token.enable",
+ "value" : "true",
+ "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.secondary.https.port",
+ "value" : "50490",
+ "description" : "The https port where secondary-namenode binds",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.https.port",
+ "value" : "50470",
+ "description" : "The https port where namenode binds",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.access.time.precision",
+ "value" : "0",
+ "description" : "The access time for HDFS file is precise upto this value.\n The default value is 1 hour. Setting a value of 0 disables\n access times for HDFS.\n ",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "dfs.cluster.administrators",
+ "value" : " hdfs",
+ "description" : "ACL for who all can view the default servlets in the HDFS",
+ "description" : "Number of failed disks datanode would tolerate",
+ "filename" : "hdfs-site.xml"
+ }, {
+ "name" : "io.file.buffer.size",
+ "value" : "131072",
+ "description" : "The size of buffer for use in sequence files.\n The size of this buffer should probably be a multiple of hardware\n page size (4096 on Intel x86), and it determines how much data is\n buffered during read and write operations.",
+ "description" : "The implementation for lzo codec.",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.trash.interval",
+ "value" : "360",
+ "description" : "Number of minutes between trash checkpoints.\n If zero, the trash feature is disabled.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.edits.dir",
+ "value" : "${fs.checkpoint.dir}",
+ "description" : "Determines where on the local filesystem the DFS secondary\n name node should store the temporary edits to merge.\n If this is a comma-delimited list of directoires then teh edits is\n replicated in all of the directoires for redundancy.\n Default value is same as fs.checkpoint.dir\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.period",
+ "value" : "21600",
+ "description" : "The number of seconds between two periodic checkpoints.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "fs.checkpoint.size",
+ "value" : "536870912",
+ "description" : "The size of the current edit log (in bytes) that triggers\n a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.idlethreshold",
+ "value" : "8000",
+ "description" : "Defines the threshold number of connections after which\n connections will be inspected for idleness.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.connection.maxidletime",
+ "value" : "30000",
+ "description" : "The maximum time after which a client will bring down the\n connection to the server.\n ",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "ipc.client.connect.max.retries",
+ "value" : "50",
+ "description" : "Defines the maximum number of retries for IPC connections.",
+ "filename" : "core-site.xml"
+ }, {
+ "name" : "webinterface.private.actions",
+ "value" : "false",
+ "description" : " If set to true, the web interfaces of JT and NN may contain\n actions, such as kill job, delete file, etc., that should\n not be exposed to public. Enable this option if the interfaces\n are only reachable by those who have the right authorization.\n ",
+ "description" : "MetaStore Client socket timeout in seconds",
+ "filename" : "hive-site.xml"
+ }, {
+ "name" : "hive.metastore.execute.setugi",
+ "value" : "true",
+ "description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+ "filename" : "hive-site.xml"
+ }, {
+ "name" : "hive.security.authorization.enabled",
+ "value" : "true",
+ "description" : "enable or disable the hive client authorization",
+ "description" : "the hive client authorization manager class name.\n The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. ",
+ "comment" : "Hue is a graphical user interface to operate and develop\n applications for Apache Hadoop.",
+ "properties" : [ {
+ "name" : "send_debug_messages",
+ "value" : "1",
+ "description" : null,
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "database_logging",
+ "value" : "0",
+ "description" : "To show database transactions, set database_logging to 1.\n default, database_logging=0",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "http_host",
+ "value" : "0.0.0.0",
+ "description" : "Webserver listens on this address and port",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "http_port",
+ "value" : "8000",
+ "description" : "Webserver listens on this address and port",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "time_zone",
+ "value" : "America/Los_Angeles",
+ "description" : "Time zone name",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "django_debug_mode",
+ "value" : "1",
+ "description" : "Turn off debug",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "use_cherrypy_server",
+ "value" : "false",
+ "description" : "Set to true to use CherryPy as the webserver, set to false\n to use Spawning as the webserver. Defaults to Spawning if\n key is not specified.",
+ "filename" : "hue-site.xml"
+ }, {
+ "name" : "http_500_debug_mode",
+ "value" : "1",
+ "description" : "Turn off backtrace for server error",
+ "description" : "Normally, this is the amount of time before killing\n processes, and the recommended-default is 5.000 seconds - a value of\n 5000 here. In this case, we are using it solely to blast tasks before\n killing them, and killing them very quickly (1/4 second) to guarantee\n that we do not leave VMs around for later jobs.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.job.tracker.handler.count",
+ "value" : "50",
+ "description" : "\n The number of server threads for the JobTracker. This should be roughly\n 4% of the number of tasktracker nodes.\n ",
+ "description" : "The threshold, in terms of the number of files\n for the in-memory merge process. When we accumulate threshold number of files\n we initiate the in-memory merge and spill to disk. A value of 0 or less than\n 0 indicates we want to DON'T have any threshold and instead depend only on\n the ramfs's memory consumption to trigger the merge.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.job.shuffle.merge.percent",
+ "value" : "0.66",
+ "description" : "The usage threshold at which an in-memory merge will be\n initiated, expressed as a percentage of the total memory allocated to\n storing in-memory map outputs, as defined by\n mapred.job.shuffle.input.buffer.percent.\n ",
+ "description" : "The percentage of memory to be allocated from the maximum heap\n size to storing map outputs during the shuffle.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.output.compression.type",
+ "value" : "BLOCK",
+ "description" : "If the job outputs are to compressed as SequenceFiles, how should\n they be compressed? Should be one of NONE, RECORD or BLOCK.\n ",
+ "description" : "The percentage of memory- relative to the maximum heap size- to\n retain map outputs during the reduce. When the shuffle is concluded, any\n remaining map outputs in memory must consume less than this threshold before\n the reduce can begin.\n ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapreduce.reduce.input.limit",
+ "value" : "10737418240",
+ "description" : "The limit on the input size of the reduce. (This value\n is 10 Gb.) If the estimated input size of the reduce is greater than\n this value, job is failed. A value of -1 means that there is no limit\n set. ",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapred.task.timeout",
+ "value" : "600000",
+ "description" : "The number of milliseconds before a task will be\n terminated if it neither reads an input, writes an output, nor\n updates its status string.\n ",
+ "description" : "The number of hours job status information is persisted in DFS.\n The job status information will be available after it drops of the memory\n queue and between jobtracker restarts. With a zero value the job status\n information is not persisted at all in DFS.\n ",
+ "description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n name. It is a path in the default file system.",
+ "filename" : "mapred-site.xml"
+ }, {
+ "name" : "mapreduce.tasktracker.group",
+ "value" : "hadoop",
+ "description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+ "description" : "\n Specifies whether security (user name/admin role) is enabled or not.\n If disabled any user can manage Oozie system and manage any job.\n ",
+ "description" : "\n Maximum concurrency for a given callable type.\n Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n All commands that use action executors (action-start, action-end, action-kill and action-check) use\n the action type as the callable type.\n ",
+ "description" : "\n System library path to use for workflow applications.\n This path is added to workflow application if their job properties sets\n the property 'oozie.use.system.libpath' to true.\n ",
+ "description" : "\n If set to true, submissions of MapReduce and Pig jobs will include\n automatically the system library path, thus not requiring users to\n specify where the Pig JAR files are. Instead, the ones from the system\n library path are used.\n ",
+ "description" : "\n Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n the relevant Hadoop *-site.xml files. If the path is relative is looked within\n the Oozie configuration directory; though the path can be absolute (i.e. to point\n to Hadoop client conf/ directories in the local filesystem.\n ",
+ "description" : "\n Creates Oozie DB.\n\n If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n ",
+ "description" : "\n DB user password.\n\n IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n if empty Configuration assumes it is NULL.\n ",
- * Generate serviceProperties save it to localdata
- * called form stepController step6WizardController
- */
- loadAdvancedConfig: function (serviceName) {
- var self = this;
- var url = (App.testMode) ? '/data/wizard/stack/hdp/version01/' + serviceName + '.json' : App.apiPrefix + App.get('stackVersionURL') + '/services/' + serviceName; // TODO: get this url from the stack selected by the user in Install Options page
- var method = 'GET';
- var serviceComponents;
- $.ajax({
- type: method,
- url: url,
- async: false,
- dataType: 'text',
- timeout: App.timeout,
- success: function (data) {
- var jsonData = jQuery.parseJSON(data);
- console.log("TRACE: Step6 submit -> In success function for the loadAdvancedConfig call");
- console.log("TRACE: Step6 submit -> value of the url is: " + url);
+ var configGroups = App.config.loadConfigsByTags(this.get('serviceConfigTags'));
+ var configSet = App.config.mergePreDefinedWithLoaded(configGroups, [], this.get('serviceConfigTags'), serviceName);
+
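+ // Pull out only the MISC "Users and Groups" entries that should be visible on this page.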
+ var misc_configs = configSet.globalConfigs.filterProperty('serviceName', 'MISC').filterProperty('category', 'Users and Groups').filterProperty('isVisible', true);
- //console.log("The value of template is: " + _express);
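// Extract the numeric index from an "<templateName[N]>" expression; it selects which global config supplies the substituted value.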
var index = parseInt(_express.match(/\[([\d]*)(?=\])/)[1]);
if (this.get('globals').someProperty('name', templateName[index])) {
- //console.log("The name of the variable is: " + this.get('content.serviceConfigProperties').findProperty('name', templateName[index]).name);
var globValue = this.get('globals').findProperty('name', templateName[index]).value;
// Hack for templeton.zookeeper.hosts
if (value !== null) { // if the property depends on more than one template name like <templateName[0]>/<templateName[1]> then don't proceed to the next if the prior is null or not found in the global configs
- "description": "Enter in key=value format to set capacity-scheduler.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
+ "description": "Percentage of the number of slots in the cluster that are made to be available for jobs in this queue. The sum of capacities for all queues should be less than or equal 100.",
- "description": "Enter in key=value format to set mapred-queue-acls.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
'installer.step2.hostPattern.tooltip.content':'You can use pattern expressions to specify a number of target hosts. For example, to specify host01.domain through host10.domain, enter host[01-10].domain in the target hosts textarea.',
'installer.step2.hostName.error.required':'You must specify at least one host name',
'installer.step5.attention':' hosts not running master services',
'installer.step5.body':'Assign master components to hosts you want to run them on.',
- 'installer.step5.body.hive':'<i class="icon-asterisks">✵</i> HiveServer2, Hive Metastore, and WebHCat Server will be co-hosted on the same server.',
+ 'installer.step5.body.hive':'<i class="icon-asterisks">✵</i> HiveServer2, Hive Metastore, and WebHCat Server will be hosted on the same server.',
'installer.step5.hostInfo':'%@ (%@, %@ cores)',
+ 'installer.step5.hiveGroup':'HiveServer2, WebHCat Server, MySQL Server',
'installer.step6.header':'Assign Slaves and Clients',
'installer.step6.body':'Assign slave and client components to hosts you want to run them on.<br/>Hosts that are assigned master components are shown with <i class=icon-asterisks>✵</i>. <br/>"Client" will install ',
"You can click on the Retry button to retry upgrading the failed components. Alternatively you can proceed and retry upgrade on individual components in the Host Detail page.",
'installer.stackUpgrade.step3.status.failed':"Failed to upgrade hosts. Click on each host to see what might have gone wrong.\n After fixing the problem, click the Retry button",
'installer.stackUpgrade.step3.host.nothingToUpgrade':'Waiting (Nothing to upgrade)',
'services.service.config.msgServiceStop':'Stop the service and wait until it stops completely. Thereafter, you can apply configuration changes',
'services.service.config.msgHDFSMapRServiceStop':'Stop HDFS and MapReduce. Wait until both of them stop completely. Thereafter, you can apply configuration changes',
'services.service.config.failCreateConfig' : 'Failure in creating service configuration',
'services.service.config.failSaveConfig':'Failure in applying service configuration',
+ 'services.service.config.failSaveConfigHostExceptions':'Failure in applying service configuration host exceptions',
+ 'services.service.config.addPropertyWindow.errorMessage':'This is required',
+ 'services.service.config.addPropertyWindow.error.derivedKey':'Cannot add a known derived property',
+ 'services.mapReduce.description.queue.name':'Name of the queue',
+ 'services.mapReduce.description.queue.submit.user':"Comma separated list of usernames that are allowed to submit jobs to the queue. " +
+ "If set to the special value '*', it means all users are allowed to submit jobs.",
+ 'services.mapReduce.description.queue.admin.user':"Comma separated list of usernames that are allowed to delete jobs or modify job's priority for " +
+ "jobs not owned by the current user in the queue. If set to the special value '*', it means all users are " +
+ "allowed to do this operation.",
+ 'services.mapReduce.description.queue.submit.group':'Comma separated list of group names that are allowed to submit jobs to the queue.',
+ 'services.mapReduce.description.queue.admin.group':"Comma separated list of group names that are allowed to delete jobs or modify job's priority " +
+ "for jobs not owned by the current user in the queue.",
+
+ 'services.hbase.master.error':'None of the HBase masters is active',