
AMBARI-8504 Configuration parameter 'io.compression.codecs' missing in HDP 2.3.GlusterFS stack in ambari-2.1

Scott Creeley 10 years ago
parent commit ec196cff1a

+3 -2  ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml

@@ -16,10 +16,11 @@
    limitations under the License.
 -->
 <reposinfo>
-  <latest>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <!-- <latest>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</latest> -->
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
   <os family="redhat6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0</baseurl>
+      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
       <repoid>HDP-2.3</repoid>
       <reponame>HDP</reponame>
     </repo>
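
For context: the redhat6 repo block only becomes usable once REPLACE_WITH_CENTOS6_URL is swapped for a concrete base URL. A minimal sketch of what the filled-in block could look like, with a purely illustrative mirror URL that is not part of this commit:

  <os family="redhat6">
    <repo>
      <!-- hypothetical mirror URL, shown for illustration only -->
      <baseurl>http://mirror.example.com/hdp/centos6/2.x/updates/2.3.0.0</baseurl>
      <repoid>HDP-2.3</repoid>
      <reponame>HDP</reponame>
    </repo>
  </os>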

+154 -0  ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml

@@ -39,5 +39,159 @@
     <value>glusterfs:///localhost:8020</value>
   </property>  
 
+<!-- HDFS core-site props and additional props (not sure whether all are needed) -->
+  <property>
+    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
+    <value>120</value>
+    <description>ZooKeeper Failover Controller retries setting for your environment</description>
+  </property>
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes after which the checkpoint gets deleted.
+        If zero, the trash feature is disabled.
+        This option may be configured both on the server and the client.
+        If trash is disabled server side then the client side configuration is checked.
+        If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
+    </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.tcpnodelay</name>
+    <value>true</value>
+    <description>Turn on/off Nagle's algorithm for the TCP socket connection on
+      the server. Setting to true disables the algorithm and may decrease latency
+      at the cost of more/smaller packets.
+    </description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Set the authentication for the cluster. Valid values are: simple or
+      kerberos.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+      Enable authorization for different protocols.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+    <description>The mapping from Kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley", if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name (excluding the realm) and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME\.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE\.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/etc/hadoop/conf/topology_script.py</value>
+    <description>
+      Location of topology script used by Hadoop to determine the rack location of nodes.
+    </description>
+  </property>
+
 
 </configuration>
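
The auth_to_local description above explains the RULE syntax at length, but the committed value is just DEFAULT. A minimal sketch of how rules of that form would sit inside the property value on a kerberized cluster (EXAMPLE.COM is a stand-in realm, not something this commit configures):

  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>
      RULE:[1:$1@$0](.*@EXAMPLE\.COM)s/@.*//
      RULE:[2:$1@$0](.*@EXAMPLE\.COM)s/@.*//
      DEFAULT
    </value>
  </property>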

+11 -0  ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml

@@ -367,4 +367,15 @@
     <description>Path to domain socket.</description>
   </property>
 
+  <property>
+    <name>hbase.coprocessor.regionserver.classes</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
 </configuration>
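
The new hbase-site property is an empty stub whose value is driven, via depends-on, by hbase.security.authorization. As a rough sketch only: when authorization is enabled, the value typically ends up pointing at the standard HBase AccessController coprocessor (shown here for illustration; this commit itself leaves the value empty):

  <property>
    <name>hbase.coprocessor.regionserver.classes</name>
    <value>org.apache.hadoop.hbase.security.access.AccessController</value>
  </property>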

+62 -0  ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/configuration/hive-site.xml

@@ -381,4 +381,66 @@ limitations under the License.
       </entries>
     </value-attributes>
   </property>
+  
+  <!-- Added as required; carried over from the 2.2 hive-site.xml -->
+  <property>
+    <name>hive.server2.authentication.ldap.url</name>
+    <value> </value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.ldap.baseDN</name>
+    <value>NONE</value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.kerberos.keytab</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.kerberos.principal</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication.pam.services</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.custom.authentication.class</name>
+    <value></value>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
 </configuration>
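
All of the new hive-site entries are empty stubs tied to hive.server2.authentication through depends-on. A minimal sketch of how the LDAP-related pair might be filled in if LDAP authentication were chosen (server and baseDN values are placeholders, not values from this commit):

  <property>
    <name>hive.server2.authentication</name>
    <value>LDAP</value>
  </property>
  <property>
    <name>hive.server2.authentication.ldap.url</name>
    <value>ldap://ldap.example.com:389</value>
  </property>
  <property>
    <name>hive.server2.authentication.ldap.baseDN</name>
    <value>ou=people,dc=example,dc=com</value>
  </property>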