
AMBARI-5707. Replace Ganglia with high performant and pluggable Metrics System. (swagle)

Siddharth Wagle, 11 years ago
parent commit a52f8a5572
100 files changed, with 25,568 additions and 0 deletions (each entry below lists lines added, lines removed, and the file path)
  1. 1 0
      .gitignore
  2. 188 0
      ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
  3. 21 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml
  4. 34 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml
  5. 49 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
  6. 58 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
  7. 101 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
  8. 172 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
  9. 102 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java
  10. 128 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java
  11. 211 0
      ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java
  12. 203 0
      ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
  13. 273 0
      ambari-metrics/ambari-metrics-host-monitoring/pom.xml
  14. 28 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/package/rpm/preremove.sh
  15. 21 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py
  16. 33 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py
  17. 130 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
  18. 127 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
  19. 103 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
  20. 88 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
  21. 85 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py
  22. 190 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
  23. 87 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
  24. 64 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
  25. 27 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE
  26. 14 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in
  27. 77 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile
  28. 270 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README
  29. 57 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py
  30. 177 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile
  31. 15 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README
  32. 57 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js
  33. 161 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js
  34. 12 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html
  35. 4 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html
  36. 16 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html
  37. 66 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html
  38. 187 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css
  39. 23 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf
  40. 253 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py
  41. 1247 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst
  42. 242 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat
  43. 63 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py
  44. 42 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py
  45. 178 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py
  46. 32 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py
  47. 69 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py
  48. 65 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py
  49. 165 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py
  50. 58 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py
  51. 162 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py
  52. 232 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py
  53. 34 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py
  54. 176 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat
  55. 1987 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py
  56. 258 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
  57. 433 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
  58. 389 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
  59. 1225 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
  60. 341 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py
  61. 157 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py
  62. 533 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py
  63. 2212 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c
  64. 51 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h
  65. 37 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c
  66. 10 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h
  67. 510 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c
  68. 20 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h
  69. 1881 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c
  70. 41 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h
  71. 128 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c
  72. 10 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h
  73. 1290 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c
  74. 27 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h
  75. 3241 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c
  76. 70 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h
  77. 485 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py
  78. 285 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c
  79. 15 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h
  80. 293 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c
  81. 16 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h
  82. 41 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h
  83. 287 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h
  84. 336 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c
  85. 10 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h
  86. 443 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c
  87. 17 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h
  88. 238 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c
  89. 17 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h
  90. 198 0
      ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py
  91. 67 0
      ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestApplicationMetricMap.py
  92. 78 0
      ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py
  93. 97 0
      ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
  94. 49 0
      ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestMetricCollector.py
  95. 133 0
      ambari-metrics/ambari-metrics-host-monitoring/src/test/python/unitTests.py
  96. 269 0
      ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
  97. 16 0
      ambari-metrics/ambari-metrics-timelineservice/conf/unix/ams-env.sh
  98. 25 0
      ambari-metrics/ambari-metrics-timelineservice/conf/unix/ams-site.xml
  99. 31 0
      ambari-metrics/ambari-metrics-timelineservice/conf/unix/log4j.properties
  100. 593 0
      ambari-metrics/ambari-metrics-timelineservice/pom.xml

+ 1 - 0
.gitignore

@@ -20,3 +20,4 @@ derby.log
 pass.txt
 ambari-agent/src/test/python/ambari_agent/dummy_files/current-stack
 velocity.log*
+ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/

+ 188 - 0
ambari-metrics/ambari-metrics-hadoop-sink/pom.xml

@@ -0,0 +1,188 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>0.1.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>ambari-metrics-hadoop-sink</artifactId>
+  <version>0.1.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>src/main/assemblies/sink.xml</descriptor>
+          </descriptors>
+          <tarLongFileMode>gnu</tarLongFileMode>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>ambariVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>com.github.goldin</groupId>
+        <artifactId>copy-maven-plugin</artifactId>
+        <version>0.2.5</version>
+        <executions>
+          <execution>
+            <id>create-archive</id>
+            <phase>none</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <name>ambari-metrics-hadoop-sink</name>
+          <copyright>2012, Apache Software Foundation</copyright>
+          <group>Development</group>
+          <description>Maven Recipe: RPM Package.</description>
+          <mappings>
+            <mapping>
+              <directory>/usr/lib/ambari-metrics-hadoop-sink</directory>
+              <filemode>644</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>target/${project.artifactId}-${project.version}.jar</location>
+                </source>
+                <softlinkSource>
+                  <destination>ambari-metrics-hadoop-sink.jar</destination>
+                  <location>/usr/lib/ambari-metrics-hadoop-sink/${project.artifactId}-${project.version}.jar</location>
+                </softlinkSource>
+              </sources>
+
+            </mapping>
+
+          </mappings>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>2.4.0</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <version>3.1</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <version>1.8</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <version>2.1</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <version>1.1.1</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>1.6</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <version>1.9.9</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <version>1.9.13</version>
+    </dependency>
+  </dependencies>
+
+</project>

+ 21 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml

@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+    <id>empty</id>
+    <formats/>
+</assembly>

+ 34 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml

@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly>
+  <!--This 'all' id is not appended to the produced bundle because we do this:
+    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+  -->
+  <id>dist</id>
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <files>
+    <file>
+      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
+      <outputDirectory>ambari-metrics-${project.version}/lib/ambari-metrics</outputDirectory>
+    </file>
+  </files>
+</assembly>

+ 49 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2

@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for timeline metrics service
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
+hbase.period=10
+hbase.collector={{timeline_server_hosts}}:8188
+
+# Configuration of the "jvm" context for timeline metrics service
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
+jvm.period=10
+jvm.collector={{timeline_server_hosts}}:8188
+
+# Configuration of the "rpc" context for timeline metrics service
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
+rpc.period=10
+rpc.collector={{timeline_server_hosts}}:8188
+
+# Following hadoop example
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{timeline_server_hosts}}:8188

+ 58 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2

@@ -0,0 +1,58 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
+*.sink.timeline.period=10
+
+
+# Hook up to the server
+datanode.sink.timeline.collector={{timeline_server_hosts}}:8188
+namenode.sink.timeline.collector={{timeline_server_hosts}}:8188
+resourcemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
+nodemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
+historyserver.sink.timeline.collector={{timeline_server_hosts}}:8188
+journalnode.sink.timeline.collector={{timeline_server_hosts}}:8188
+nimbus.sink.timeline.collector={{timeline_server_hosts}}:8188
+supervisor.sink.timeline.collector={{timeline_server_hosts}}:8188
+maptask.sink.timeline.collector={{timeline_server_hosts}}:8188
+reducetask.sink.timeline.collector={{timeline_server_hosts}}:8188
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}

+ 101 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.util.Servers;
+import org.apache.hadoop.net.DNS;
+import java.net.SocketAddress;
+import java.net.UnknownHostException;
+import java.util.List;
+
+public abstract class AbstractTimelineMetricsSink implements MetricsSink {
+
+  public final Log LOG = LogFactory.getLog(this.getClass());
+
+  private SubsetConfiguration conf;
+  private String hostName = "UNKNOWN.example.com";
+  private String serviceName = "";
+  private final String COLLECTOR_HOST_PROPERTY = "collector";
+  private final int DEFAULT_PORT = 8188;
+
+  private List<? extends SocketAddress> metricsServers;
+  private String collectorUri;
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    this.conf = conf;
+    LOG.info("Initializing Timeline metrics sink.");
+
+    // Take the hostname from the DNS class.
+    if (conf.getString("slave.host.name") != null) {
+      hostName = conf.getString("slave.host.name");
+    } else {
+      try {
+        hostName = DNS.getDefaultHost(
+          conf.getString("dfs.datanode.dns.interface", "default"),
+          conf.getString("dfs.datanode.dns.nameserver", "default"));
+      } catch (UnknownHostException uhe) {
+        LOG.error(uhe);
+        hostName = "UNKNOWN.example.com";
+      }
+    }
+
+    serviceName = getFirstConfigPrefix(conf);
+
+    // Load collector configs
+    metricsServers = Servers.parse(conf.getString(COLLECTOR_HOST_PROPERTY),
+      DEFAULT_PORT);
+
+    if (metricsServers == null || metricsServers.isEmpty()) {
+      LOG.error("No Metric collector configured.");
+    } else {
+      collectorUri = "http://" + conf.getString(COLLECTOR_HOST_PROPERTY).trim()
+        + "/ws/v1/timeline/metrics";
+    }
+  }
+
+  protected String getHostName() {
+    return hostName;
+  }
+
+  protected String getServiceName() {
+    return serviceName;
+  }
+
+  private String getFirstConfigPrefix(SubsetConfiguration conf) {
+    while (conf.getParent() instanceof SubsetConfiguration) {
+      conf = (SubsetConfiguration) conf.getParent();
+    }
+    return conf.getPrefix();
+  }
+
+  protected SocketAddress getServerSocketAddress() {
+    if (metricsServers != null && !metricsServers.isEmpty()) {
+      return metricsServers.get(0);
+    }
+    return null;
+  }
+
+  protected String getCollectorUri() {
+    return collectorUri;
+  }
+}
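
As an aside (not part of this commit), the sketch below exercises init() with a hand-built SubsetConfiguration to show how the "collector" property and the default port 8188 turn into the collector socket address and URI. The DemoSink class, the "sink.timeline" prefix, and the host names are hypothetical; in a live daemon the metrics2 framework hands the sink its configuration subset from hadoop-metrics2.properties.

package org.apache.hadoop.metrics2.sink.timeline;

import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.MetricsRecord;

// Hypothetical subclass used only to make the abstract base instantiable.
public class DemoSink extends AbstractTimelineMetricsSink {

  @Override
  public void putMetrics(MetricsRecord record) { /* no-op for the demo */ }

  @Override
  public void flush() { /* no-op for the demo */ }

  public static void main(String[] args) {
    PropertiesConfiguration props = new PropertiesConfiguration();
    // "slave.host.name" short-circuits the DNS lookup in init().
    props.setProperty("sink.timeline.slave.host.name", "host1.example.com");
    props.setProperty("sink.timeline.collector", "collector.example.com");

    DemoSink sink = new DemoSink();
    sink.init(new SubsetConfiguration(props, "sink.timeline", "."));

    // With no port in the "collector" value, DEFAULT_PORT (8188) is used for
    // the socket address, while the URI points at the timeline metrics endpoint.
    System.out.println(sink.getServerSocketAddress());
    System.out.println(sink.getCollectorUri());
    // http://collector.example.com/ws/v1/timeline/metrics
  }
}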

+ 172 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java

@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.Map;
+import java.util.TreeMap;
+
+@XmlRootElement(name = "metric")
+@XmlAccessorType(XmlAccessType.NONE)
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class TimelineMetric implements Comparable<TimelineMetric> {
+
+  private String metricName;
+  private String appId;
+  private String instanceId;
+  private String hostName;
+  private long timestamp;
+  private long startTime;
+  private String type;
+  private Map<Long, Double> metricValues = new TreeMap<Long, Double>();
+
+  @XmlElement(name = "metricname")
+  public String getMetricName() {
+    return metricName;
+  }
+
+  public void setMetricName(String metricName) {
+    this.metricName = metricName;
+  }
+
+  @XmlElement(name = "appid")
+  public String getAppId() {
+    return appId;
+  }
+
+  public void setAppId(String appId) {
+    this.appId = appId;
+  }
+
+  @XmlElement(name = "instanceid")
+  public String getInstanceId() {
+    return instanceId;
+  }
+
+  public void setInstanceId(String instanceId) {
+    this.instanceId = instanceId;
+  }
+
+  @XmlElement(name = "hostname")
+  public String getHostName() {
+    return hostName;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  @XmlElement(name = "timestamp")
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public void setTimestamp(long timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  @XmlElement(name = "starttime")
+  public long getStartTime() {
+    return startTime;
+  }
+
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @XmlElement(name = "type")
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  @XmlElement(name = "metrics")
+  public Map<Long, Double> getMetricValues() {
+    return metricValues;
+  }
+
+  public void setMetricValues(Map<Long, Double> metricValues) {
+    this.metricValues = metricValues;
+  }
+
+  public void addMetricValues(Map<Long, Double> metricValues) {
+    this.metricValues.putAll(metricValues);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    TimelineMetric metric = (TimelineMetric) o;
+
+    if (!metricName.equals(metric.metricName)) return false;
+    if (hostName != null ? !hostName.equals(metric.hostName) : metric.hostName != null)
+      return false;
+    if (appId != null ? !appId.equals(metric.appId) : metric.appId != null)
+      return false;
+    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
+      return false;
+    if (timestamp != metric.timestamp) return false;
+    if (startTime != metric.startTime) return false;
+
+    return true;
+  }
+
+  public boolean equalsExceptTime(TimelineMetric metric) {
+    if (!metricName.equals(metric.metricName)) return false;
+    if (hostName != null ? !hostName.equals(metric.hostName) : metric.hostName != null)
+      return false;
+    if (appId != null ? !appId.equals(metric.appId) : metric.appId != null)
+      return false;
+    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = metricName.hashCode();
+    result = 31 * result + (appId != null ? appId.hashCode() : 0);
+    result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
+    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
+    result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
+    return result;
+  }
+
+  @Override
+  public int compareTo(TimelineMetric other) {
+    if (timestamp > other.timestamp) {
+      return -1;
+    } else if (timestamp < other.timestamp) {
+      return 1;
+    } else {
+      return metricName.compareTo(other.metricName);
+    }
+  }
+}
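
As an aside (not part of this commit), the short sketch below shows how the POJO above is typically populated by a sink and what equalsExceptTime() and compareTo() do; the metric name, host name, and the TimelineMetricExample class are hypothetical.

package org.apache.hadoop.metrics2.sink.timeline;

// Illustrative sketch: two samples of the same metric taken 10 seconds apart.
public class TimelineMetricExample {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();

    TimelineMetric recent = new TimelineMetric();
    recent.setMetricName("datanode.dfs.datanode.BytesWritten"); // hypothetical name
    recent.setHostName("host1.example.com");                    // hypothetical host
    recent.setAppId("datanode");
    recent.setTimestamp(now);
    recent.setStartTime(now);
    recent.getMetricValues().put(now, 1024.0);

    TimelineMetric older = new TimelineMetric();
    older.setMetricName("datanode.dfs.datanode.BytesWritten");
    older.setHostName("host1.example.com");
    older.setAppId("datanode");
    older.setTimestamp(now - 10000);
    older.setStartTime(now - 10000);
    older.getMetricValues().put(now - 10000, 512.0);

    // Same name/app/host/instance, only the times differ, so the two samples
    // are candidates for merging by TimelineMetrics.addOrMergeTimelineMetric().
    System.out.println(recent.equalsExceptTime(older)); // true

    // compareTo() orders by timestamp descending, falling back to the metric
    // name, so the newer sample sorts first.
    System.out.println(recent.compareTo(older)); // -1
  }
}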

+ 102 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java

@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The class that hosts a list of timeline entities.
+ */
+@XmlRootElement(name = "metrics")
+@XmlAccessorType(XmlAccessType.NONE)
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class TimelineMetrics {
+
+  private List<TimelineMetric> allMetrics = new ArrayList<TimelineMetric>();
+
+  public TimelineMetrics() {}
+
+  @XmlElement(name = "metrics")
+  public List<TimelineMetric> getMetrics() {
+    return allMetrics;
+  }
+
+  public void setMetrics(List<TimelineMetric> allMetrics) {
+    this.allMetrics = allMetrics;
+  }
+
+  private boolean isEqualTimelineMetrics(TimelineMetric metric1,
+                                         TimelineMetric metric2) {
+
+    boolean isEqual = true;
+
+    if (!metric1.getMetricName().equals(metric2.getMetricName())) {
+      return false;
+    }
+
+    if (metric1.getHostName() != null) {
+      isEqual = metric1.getHostName().equals(metric2.getHostName());
+    }
+
+    if (metric1.getAppId() != null) {
+      isEqual = metric1.getAppId().equals(metric2.getAppId());
+    }
+
+    return isEqual;
+  }
+
+  /**
+   * Merge with existing TimelineMetric if everything except startTime is
+   * the same.
+   * @param metric {@link TimelineMetric}
+   */
+  public void addOrMergeTimelineMetric(TimelineMetric metric) {
+    TimelineMetric metricToMerge = null;
+
+    if (!allMetrics.isEmpty()) {
+      for (TimelineMetric timelineMetric : allMetrics) {
+        if (timelineMetric.equalsExceptTime(metric)) {
+          metricToMerge = timelineMetric;
+          break;
+        }
+      }
+    }
+
+    if (metricToMerge != null) {
+      metricToMerge.addMetricValues(metric.getMetricValues());
+      if (metricToMerge.getTimestamp() > metric.getTimestamp()) {
+        metricToMerge.setTimestamp(metric.getTimestamp());
+      }
+      if (metricToMerge.getStartTime() > metric.getStartTime()) {
+        metricToMerge.setStartTime(metric.getStartTime());
+      }
+    } else {
+      allMetrics.add(metric);
+    }
+  }
+}
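
As an aside (not part of this commit), the sketch below shows addOrMergeTimelineMetric() collapsing two samples of the same metric into a single entry whose value map holds both data points and whose start time is the earlier one; the metric name, host, and the TimelineMetricsMergeExample class are hypothetical.

package org.apache.hadoop.metrics2.sink.timeline;

public class TimelineMetricsMergeExample {

  // Builds one sample of a hypothetical metric at the given time.
  private static TimelineMetric sample(long time, double value) {
    TimelineMetric metric = new TimelineMetric();
    metric.setMetricName("jvm.JvmMetrics.MemHeapUsedM"); // hypothetical name
    metric.setAppId("datanode");
    metric.setHostName("host1.example.com");             // hypothetical host
    metric.setTimestamp(time);
    metric.setStartTime(time);
    metric.getMetricValues().put(time, value);
    return metric;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();

    TimelineMetrics metrics = new TimelineMetrics();
    metrics.addOrMergeTimelineMetric(sample(now, 128.0));
    metrics.addOrMergeTimelineMetric(sample(now - 10000, 120.0));

    // Both samples match equalsExceptTime(), so they merge into one metric.
    System.out.println(metrics.getMetrics().size());                          // 1
    System.out.println(metrics.getMetrics().get(0).getMetricValues().size()); // 2
    // The merged metric keeps the earlier start time.
    System.out.println(metrics.getMetrics().get(0).getStartTime() == now - 10000); // true
  }
}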

+ 128 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java

@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimelineMetricsCache {
+
+  private final TimelineMetricHolder timelineMetricCache = new TimelineMetricHolder();
+  private static final Log LOG = LogFactory.getLog(TimelineMetric.class);
+  static final int MAX_RECS_PER_NAME_DEFAULT = 10000;
+  static final int MAX_EVICTION_TIME_MILLIS = 59000; // ~ 1 min
+  private final int maxRecsPerName;
+  private final int maxEvictionTimeInMillis;
+
+  TimelineMetricsCache(int maxRecsPerName, int maxEvictionTimeInMillis) {
+    this.maxRecsPerName = maxRecsPerName;
+    this.maxEvictionTimeInMillis = maxEvictionTimeInMillis;
+  }
+
+  class TimelineMetricWrapper {
+    private long timeDiff = -1;
+    private long oldestTimestamp = -1;
+    private TimelineMetric timelineMetric;
+
+    TimelineMetricWrapper(TimelineMetric timelineMetric) {
+      this.timelineMetric = timelineMetric;
+      this.oldestTimestamp = timelineMetric.getStartTime();
+    }
+
+    private void updateTimeDiff(long timestamp) {
+      if (oldestTimestamp != -1 && timestamp > oldestTimestamp) {
+        timeDiff = timestamp - oldestTimestamp;
+      } else {
+        oldestTimestamp = timestamp;
+      }
+    }
+
+    public void putMetric(TimelineMetric metric) {
+      this.timelineMetric.addMetricValues(metric.getMetricValues());
+      updateTimeDiff(metric.getStartTime());
+    }
+
+    public long getTimeDiff() {
+      return timeDiff;
+    }
+
+    public TimelineMetric getTimelineMetric() {
+      return timelineMetric;
+    }
+  }
+
+  // TODO: Change to ConcurrentHashMap with weighted eviction
+  class TimelineMetricHolder extends LinkedHashMap<String, TimelineMetricWrapper> {
+    private static final long serialVersionUID = 1L;
+    private boolean gotOverflow = false;
+
+    @Override
+    protected boolean removeEldestEntry(Map.Entry<String, TimelineMetricWrapper> eldest) {
+      boolean overflow = size() > maxRecsPerName;
+      if (overflow && !gotOverflow) {
+        LOG.warn("Metrics cache overflow at "+ size() +" for "+ eldest);
+        gotOverflow = true;
+      }
+      return overflow;
+    }
+
+    public TimelineMetric evict(String metricName) {
+      TimelineMetricWrapper metricWrapper = this.get(metricName);
+
+      if (metricWrapper == null
+        || metricWrapper.getTimeDiff() < maxEvictionTimeInMillis) {
+        return null;
+      }
+
+      TimelineMetric timelineMetric = metricWrapper.getTimelineMetric();
+      this.remove(metricName);
+
+      return timelineMetric;
+    }
+
+    public void put(String metricName, TimelineMetric timelineMetric) {
+
+      TimelineMetricWrapper metric = this.get(metricName);
+      if (metric == null) {
+        this.put(metricName, new TimelineMetricWrapper(timelineMetric));
+      } else {
+        metric.putMetric(timelineMetric);
+      }
+    }
+  }
+
+  public TimelineMetric getTimelineMetric(String metricName) {
+    if (timelineMetricCache.containsKey(metricName)) {
+      return timelineMetricCache.evict(metricName);
+    }
+
+    return null;
+  }
+
+  public void putTimelineMetric(TimelineMetric timelineMetric) {
+    timelineMetricCache.put(timelineMetric.getMetricName(), timelineMetric);
+  }
+}
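
As an aside (not part of this commit), the sketch below illustrates the cache contract: getTimelineMetric() returns nothing until the spread between the oldest and newest cached sample exceeds the eviction interval, at which point the accumulated metric is handed back and dropped from the cache. The example lives in the same package because the constructor is package-private; the metric name and the 1-second window are hypothetical.

package org.apache.hadoop.metrics2.sink.timeline;

public class TimelineMetricsCacheExample {

  private static TimelineMetric sample(long time, double value) {
    TimelineMetric metric = new TimelineMetric();
    metric.setMetricName("rpc.rpc.RpcQueueTimeAvgTime"); // hypothetical name
    metric.setStartTime(time);
    metric.getMetricValues().put(time, value);
    return metric;
  }

  public static void main(String[] args) {
    // Tiny eviction window (1 second) instead of MAX_EVICTION_TIME_MILLIS.
    TimelineMetricsCache cache = new TimelineMetricsCache(100, 1000);
    long now = System.currentTimeMillis();

    cache.putTimelineMetric(sample(now, 1.0));
    // Not enough history accumulated yet, so nothing is evicted.
    System.out.println(cache.getTimelineMetric("rpc.rpc.RpcQueueTimeAvgTime")); // null

    cache.putTimelineMetric(sample(now + 2000, 2.0));
    // The 2s spread exceeds the 1s window: the accumulated metric is returned
    // (and removed from the cache) with both data points.
    TimelineMetric merged = cache.getTimelineMetric("rpc.rpc.RpcQueueTimeAvgTime");
    System.out.println(merged.getMetricValues().size()); // 2
  }
}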

+ 211 - 0
ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java

@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.methods.PostMethod;
+import org.apache.commons.httpclient.methods.StringRequestEntity;
+import org.apache.commons.lang.ClassUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MsInfo;
+import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsCache;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimelineMetricsSink extends AbstractTimelineMetricsSink {
+  private static ObjectMapper mapper;
+  private Map<String, Set<String>> useTagsMap = new HashMap<String, Set<String>>();
+  private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
+  private static final String MAX_METRIC_ROW_CACHE_SIZE = "maxRowCacheSize";
+  private static final String METRICS_SEND_INTERVAL = "sendInterval";
+  protected HttpClient httpClient = new HttpClient();
+  private TimelineMetricsCache metricsCache;
+
+  static {
+    mapper = new ObjectMapper();
+    AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+    mapper.setAnnotationIntrospector(introspector);
+    mapper.getSerializationConfig()
+      .setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+  }
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    super.init(conf);
+
+    int maxRowCacheSize = conf.getInt(MAX_METRIC_ROW_CACHE_SIZE,
+      TimelineMetricsCache.MAX_RECS_PER_NAME_DEFAULT);
+    int metricsSendInterval = conf.getInt(METRICS_SEND_INTERVAL,
+      TimelineMetricsCache.MAX_EVICTION_TIME_MILLIS); // ~ 1 min
+    metricsCache = new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval);
+
+    conf.setListDelimiter(',');
+    Iterator<String> it = (Iterator<String>) conf.getKeys();
+    while (it.hasNext()) {
+      String propertyName = it.next();
+      if (propertyName != null && propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) {
+        String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length());
+        String[] tags = conf.getStringArray(propertyName);
+        boolean useAllTags = false;
+        Set<String> set = null;
+        if (tags.length > 0) {
+          set = new HashSet<String>();
+          for (String tag : tags) {
+            tag = tag.trim();
+            useAllTags |= tag.equals("*");
+            if (tag.length() > 0) {
+              set.add(tag);
+            }
+          }
+          if (useAllTags) {
+            set = null;
+          }
+        }
+        useTagsMap.put(contextName, set);
+      }
+    }
+  }
+
+  @Override
+  public void putMetrics(MetricsRecord record) {
+    try {
+      String recordName = record.name();
+      String contextName = record.context();
+
+      StringBuilder sb = new StringBuilder();
+      sb.append(contextName);
+      sb.append('.');
+      sb.append(recordName);
+
+      appendPrefix(record, sb);
+      sb.append(".");
+      int sbBaseLen = sb.length();
+
+      Collection<AbstractMetric> metrics =
+        (Collection<AbstractMetric>) record.metrics();
+
+      List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
+
+      for (AbstractMetric metric : metrics) {
+        sb.append(metric.name());
+        String name = sb.toString();
+        TimelineMetric timelineMetric = new TimelineMetric();
+        timelineMetric.setMetricName(name);
+        timelineMetric.setHostName(getHostName());
+        timelineMetric.setAppId(getServiceName());
+        timelineMetric.setStartTime(record.timestamp());
+        timelineMetric.setType(ClassUtils.getShortCanonicalName(
+          metric.value(), "Number"));
+        timelineMetric.getMetricValues().put(record.timestamp(),
+          metric.value().doubleValue());
+        // Put intermediate values into the cache until it is time to send
+        metricsCache.putTimelineMetric(timelineMetric);
+
+        // Retrieve all values from cache if it is time to send
+        TimelineMetric cachedMetric = metricsCache.getTimelineMetric(name);
+
+        if (cachedMetric != null) {
+          metricList.add(cachedMetric);
+        }
+
+        sb.setLength(sbBaseLen);
+      }
+
+      TimelineMetrics timelineMetrics = new TimelineMetrics();
+      timelineMetrics.setMetrics(metricList);
+
+      if (!metricList.isEmpty()) {
+        emitMetrics(timelineMetrics);
+      }
+
+
+    } catch (IOException io) {
+      throw new MetricsException("Failed to putMetrics", io);
+    }
+  }
+
+  private void emitMetrics(TimelineMetrics metrics) throws IOException {
+    String jsonData = mapper.writeValueAsString(metrics);
+
+    SocketAddress socketAddress = getServerSocketAddress();
+
+    if (socketAddress != null) {
+      StringRequestEntity requestEntity = new StringRequestEntity(
+        jsonData, "application/json", "UTF-8");
+
+      PostMethod postMethod = new PostMethod(getCollectorUri());
+      postMethod.setRequestEntity(requestEntity);
+      int statusCode = httpClient.executeMethod(postMethod);
+      if (statusCode != 200) {
+        LOG.info("Unable to POST metrics to collector, " + getCollectorUri());
+      }
+    }
+  }
+
+  // Taken as is from Ganglia30 implementation
+  @InterfaceAudience.Private
+  public void appendPrefix(MetricsRecord record, StringBuilder sb) {
+    String contextName = record.context();
+    Collection<MetricsTag> tags = record.tags();
+    if (useTagsMap.containsKey(contextName)) {
+      Set<String> useTags = useTagsMap.get(contextName);
+      for (MetricsTag t : tags) {
+        if (useTags == null || useTags.contains(t.name())) {
+
+          // the context is always skipped here because it is always added
+
+          // the hostname is always skipped to avoid case-mismatches
+          // from different DNSes.
+
+          if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
+            sb.append('.').append(t.name()).append('=').append(t.value());
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public void flush() {
+    // TODO: Buffering implementation
+  }
+
+}
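
As an aside (not part of this commit), the sketch below reproduces the ObjectMapper setup from the static block above to show the JSON body that emitMetrics() POSTs to the collector's /ws/v1/timeline/metrics endpoint; the metric values, host name, and the TimelineMetricsJsonExample class are hypothetical.

package org.apache.hadoop.metrics2.sink.timeline;

import java.io.IOException;

import org.codehaus.jackson.map.AnnotationIntrospector;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.annotate.JsonSerialize;
import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;

public class TimelineMetricsJsonExample {

  public static void main(String[] args) throws IOException {
    // Same mapper configuration as the sink's static initializer.
    ObjectMapper mapper = new ObjectMapper();
    AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
    mapper.setAnnotationIntrospector(introspector);
    mapper.getSerializationConfig()
      .setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);

    long now = System.currentTimeMillis();
    TimelineMetric metric = new TimelineMetric();
    metric.setMetricName("datanode.dfs.datanode.BytesWritten"); // hypothetical name
    metric.setHostName("host1.example.com");                    // hypothetical host
    metric.setAppId("datanode");
    metric.setStartTime(now);
    metric.getMetricValues().put(now, 1024.0);

    TimelineMetrics payload = new TimelineMetrics();
    payload.getMetrics().add(metric);

    // Prints the JSON document the sink would send in the POST body, e.g.
    // {"metrics":[{"metricname":"datanode.dfs.datanode.BytesWritten", ...}]}
    System.out.println(mapper.writeValueAsString(payload));
  }
}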

+ 203 - 0
ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor

@@ -0,0 +1,203 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+MONITOR_CONF_DIR=/etc/ambari-metrics-monitor/conf/
+METRIC_MONITOR=ambari-metrics-monitor
+
+RESOURCE_MONITORING_DIR=/usr/lib/python2.6/site-packages/resource_monitoring
+METRIC_MONITOR_PY_SCRIPT=${RESOURCE_MONITORING_DIR}/main.py
+
+PIDFILE=/var/run/ambari-metrics-monitor/ambari-metrics-monitor.pid
+OUTFILE=/var/log/ambari-metrics-monitor/ambari-metrics-monitor.out
+
+STOP_TIMEOUT=5
+
+OK=0
+NOTOK=1
+
+if [ -a /usr/bin/python2.7 ] && [ -z "${PYTHON}" ]; then
+  PYTHON=/usr/bin/python2.7
+fi
+
+if [ -a /usr/bin/python2.6 ] && [ -z "${PYTHON}" ]; then
+  PYTHON=/usr/bin/python2.6
+fi
+
+if [ "x$PYTHON" == "x" ]; then
+  PYTHON=/usr/bin/python
+fi
+
+export PYTHON=${PYTHON}
+
+check_python_version ()
+{
+  echo "Verifying Python version compatibility..."
+  majversion=`${PYTHON} -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
+  minversion=`${PYTHON} -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
+  numversion=$(( 10 * $majversion + $minversion))
+  if (( $numversion < 26 )); then
+    echo "ERROR: Found Python version $majversion.$minversion. Ambari Metric Monitor requires Python version > 2.6"
+    return ${NOTOK}
+  fi
+  echo "Using python " ${PYTHON}
+  return ${OK}
+}
+
+function write_pidfile
+{
+    local pidfile="$1"
+    echo $! > "${pidfile}" 2>/dev/null
+    if [[ $? -gt 0 ]]; then
+      echo "ERROR:  Cannot write pid ${pidfile}."
+      exit 1;
+    fi
+}
+
+#build psutil
+if [ ! "$(ls -A ${RESOURCE_MONITORING_DIR}/psutil/build)" ]; then
+  echo "Building psutil..."
+  dir=$(pwd)
+  cd "${RESOURCE_MONITORING_DIR}/psutil"
+  ${PYTHON} "build.py"
+  cd "${dir}"
+else
+  echo "psutil build directory is not empty, continuing..."
+fi
+
+#locate config dir
+while [[ -z "${_ams_configs_done}" ]]; do
+  case $1 in
+    --config)
+      shift
+      confdir=$1
+      shift
+      if [[ -d "${confdir}" ]]; then
+        MONITOR_CONF_DIR="${confdir}"
+      elif [[ -z "${confdir}" ]]; then
+        echo "ERROR: No parameter provided for --config "
+        exit 1
+      else
+        echo "ERROR: Cannot find configuration directory \"${confdir}\""
+        exit 1
+      fi
+    ;;
+    *)
+      _ams_configs_done=true
+    ;;
+  esac
+done
+
+case "$1" in
+
+  start)
+    check_python_version
+    if [ "$?" -eq "${NOTOK}" ]; then
+          exit -1
+    fi
+
+    echo "Checking for previously running Metric Monitor..."
+    if [ -f ${PIDFILE} ]; then
+      PID=`cat ${PIDFILE}`
+      if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+        echo "${PIDFILE} found with no process. Removing ${PID}..."
+        rm -f ${PIDFILE}
+      else
+        tput bold
+        echo "ERROR: ${METRIC_MONITOR} already running"
+        tput sgr0
+        echo "Check ${PIDFILE} for PID."
+        exit -1
+      fi
+    fi
+
+    echo "Starting ${METRIC_MONITOR}"
+
+    nohup ${PYTHON} ${METRIC_MONITOR_PY_SCRIPT} "$@" > ${OUTFILE} 2>&1 &
+    PID=$!
+    write_pidfile ${PIDFILE}
+
+    sleep 2
+
+    echo "Verifying ${METRIC_MONITOR} process status..."
+    if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+      if [ -s ${OUTFILE} ]; then
+        echo "ERROR: ${METRIC_MONITOR} start failed. For more details, see ${OUTFILE}:"
+        echo "===================="
+        tail -n 10 ${OUTFILE}
+        echo "===================="
+      else
+        echo "ERROR: ${METRIC_MONITOR} start failed"
+        rm -f ${PIDFILE}
+      fi
+      echo "Monitor out at: ${OUTFILE}"
+      exit -1
+    fi
+
+    echo "Metric Monitor successfully started"
+    echo "Server log at: ${OUTFILE}"
+  ;;
+  status)
+    if [ -f ${PIDFILE} ]; then
+      PID=`cat ${PIDFILE}`
+      echo "Found ${METRIC_MONITOR} PID: $PID"
+      if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+        echo "${METRIC_MONITOR} not running. Stale PID File at: $PIDFILE"
+        retcode=2
+      else
+        tput bold
+        echo "${METRIC_MONITOR} running."
+        tput sgr0
+        echo "Monitor PID at: ${PIDFILE}"
+        echo "Monitor out at: ${OUTFILE}"
+      fi
+    else
+      tput bold
+      echo "${METRIC_MONITOR} currently not running"
+      tput sgr0
+      echo "Usage: /usr/sbin/${METRIC_MONITOR} {start|stop|restart|status}"
+      retcode=3
+    fi
+  ;;
+  stop)
+    pidfile=${PIDFILE}
+
+    if [[ -f "${pidfile}" ]]; then
+        pid=$(cat "$pidfile")
+
+        kill "${pid}" >/dev/null 2>&1
+        sleep "${STOP_TIMEOUT}"
+
+        if kill -0 "${pid}" > /dev/null 2>&1; then
+          echo "WARNING: ${METRIC_MONITOR} did not stop gracefully after ${STOP_TIMEOUT} seconds: Trying to kill with kill -9"
+          kill -9 "${pid}" >/dev/null 2>&1
+        fi
+
+        if ps -p "${pid}" > /dev/null 2>&1; then
+          echo "ERROR: Unable to kill ${pid}"
+        else
+          rm -f "${pidfile}" >/dev/null 2>&1
+        fi
+    fi
+
+  ;;
+  restart)
+    echo -e "Restarting ${METRIC_MONITOR}"
+    $0 stop
+    $0 start "$@"
+    retcode=$?
+  ;;
+esac

+ 273 - 0
ambari-metrics/ambari-metrics-host-monitoring/pom.xml

@@ -0,0 +1,273 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>0.1.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <packaging>pom</packaging>
+  <version>0.1.0-SNAPSHOT</version>
+  <artifactId>ambari-metrics-host-monitoring</artifactId>
+  <properties>
+    <resmonitor.install.dir>
+      /usr/lib/python2.6/site-packages/resource_monitoring
+    </resmonitor.install.dir>
+  </properties>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-clean-plugin</artifactId>
+        <version>2.6</version>
+        <configuration>
+          <filesets>
+            <fileset>
+              <directory>${project.basedir}/src/main/python/psutil/build/</directory>
+              <includes>
+                <include>**/*</include>
+              </includes>
+              <followSymlinks>false</followSymlinks>
+            </fileset>
+          </filesets>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>ambariVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>${project.basedir}/../../ambari-project/src/main/assemblies/empty.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>none</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <!-- unbinds rpm creation from maven lifecycle -->
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+
+        <configuration>
+          <name>ambari-metrics-monitor</name>
+          <group>Development</group>
+          <needarch>x86_64</needarch>
+          <autoRequires>false</autoRequires>
+          <requires>
+            <require>ambari-metrics-hadoop-sink</require>
+            <require>${python.ver}</require>
+            <require>gcc</require>
+            <require>python-devel</require>
+          </requires>
+          <preremoveScriptlet>
+            <scriptFile>src/main/package/rpm/preremove.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </preremoveScriptlet>
+          <mappings>
+            <mapping>
+              <directory>${resmonitor.install.dir}</directory>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <!--<location>-->
+                  <!--${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/external/build/*-->
+                  <!--</location>-->
+                  <!--<location>-->
+                  <!--${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/core-->
+                  <!--</location>-->
+                  <!--<location>-->
+                  <!--${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/__init__.py-->
+                  <!--</location>-->
+                  <location>
+                    ${project.basedir}/src/main/python/__init__.py
+                  </location>
+                </source>
+                <source>
+                  <location>
+                    ${project.basedir}/src/main/python/main.py
+                  </location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>${resmonitor.install.dir}/core</directory>
+              <sources>
+                <source>
+                  <location>
+                    ${project.basedir}/src/main/python/core
+                  </location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>${resmonitor.install.dir}/psutil</directory>
+              <sources>
+                <source>
+                  <location>
+                    ${project.basedir}/src/main/python/psutil
+                  </location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/etc/ambari-metrics-monitor/conf</directory>
+              <configuration>true</configuration>
+            </mapping>
+            <mapping>
+              <directory>/var/run/ambari-metrics-monitor</directory>
+            </mapping>
+            <mapping>
+              <directory>/var/log/ambari-metrics-monitor</directory>
+            </mapping>
+            <mapping>
+              <directory>/usr/sbin</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <directoryIncluded>false</directoryIncluded>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-metrics-monitor</location>
+                  <filter>true</filter>
+                </source>
+              </sources>
+            </mapping>
+          </mappings>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>1.7</version>
+        <executions>
+          <execution>
+            <id>psutils-compile</id>
+            <phase>process-classes</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target name="psutils-compile">
+                <exec dir="${basedir}/src/main/python/psutil" executable="python" failonerror="true">
+                  <arg value="setup.py" />
+                  <arg value="build" />
+                  <arg value="--build-platlib" />
+                  <arg value="${basedir}/target/psutil_build" />
+                </exec>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <configuration>
+              <executable>python</executable>
+              <workingDirectory>src/test/python</workingDirectory>
+              <arguments>
+                <argument>unitTests.py</argument>
+              </arguments>
+              <environmentVariables>
+                <PYTHONPATH>../../main/python:$PYTHONPATH</PYTHONPATH>
+              </environmentVariables>
+              <skip>${skipTests}</skip>
+            </configuration>
+            <id>python-test</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/main/python/psutil/**</exclude>
+            <exclude>.pydevproject</exclude>
+          </excludes>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>test</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
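
The regex-property execution above trims ${project.version} down to a three-part ambariVersion. The same transformation, sketched in Python for readability (the pattern and replacement are copied from the plugin configuration; the sample version string is this module's own 0.1.0-SNAPSHOT):

    import re

    pattern = r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*'
    print re.sub(pattern, r'\1.\2.\3', '0.1.0-SNAPSHOT')   # -> 0.1.0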

+ 28 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/package/rpm/preremove.sh

@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# WARNING: This script is performed not only on uninstall, but also
+# during package update. See http://www.ibm.com/developerworks/library/l-rpm2/
+# for details
+
+RESOURCE_MONITORING_DIR=/usr/lib/python2.6/site-packages/resource_monitoring
+PSUTIL_DIR="${RESOURCE_MONITORING_DIR}/psutil"
+
+
+if [ -d "${PSUTIL_DIR}" ]; then
+  rm -rf "${PSUTIL_DIR}"/*
+fi
+
+exit 0

+ 21 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py

@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from core import *

+ 33 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, sys
+path = os.path.abspath(__file__)
+path = os.path.join(os.path.dirname(os.path.dirname(path)), "psutil/build/")
+
+for dir in os.walk(path).next()[1]:
+  if 'lib' in dir:
+    sys.path.append(os.path.join(path, dir))
+
+try:
+  import psutil
+except ImportError:
+  print 'psutil binaries need to be built by running psutil/build.py ' \
+        'manually, or by running "mvn clean package".'

+ 130 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py

@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import json
+from threading import RLock
+
+logger = logging.getLogger()
+
+class ApplicationMetricMap:
+  """
+  A data structure to buffer metrics in memory.
+  The in-memory dict stores metrics as shown below:
+  { application_id : { metric_id : { timestamp :  metric_value } } }
+  application_id => uniquely identify the metrics for an application / host.
+  metric_id      => identify the metric
+  timestamp      => collection time
+  metric_value   => numeric value
+  """
+
+
+  def __init__(self, hostname, ip_address):
+    self.hostname = hostname
+    self.ip_address = ip_address
+    self.lock = RLock()
+    self.app_metric_map = {}
+  pass
+
+  def put_metric(self, application_id, metric_id_to_value_map, timestamp):
+    with self.lock:
+      for metric_name, value in metric_id_to_value_map.iteritems():
+      
+        metric_map = self.app_metric_map.get(application_id)
+        if not metric_map:
+          metric_map = { metric_name : { timestamp : value } }
+          self.app_metric_map[ application_id ] = metric_map
+        else:
+          metric_id_map = metric_map.get(metric_name)
+          if not metric_id_map:
+            metric_id_map = { timestamp : value }
+            metric_map[ metric_name ] = metric_id_map
+          else:
+            metric_map[ metric_name ].update( { timestamp : value } )
+          pass
+        pass
+  pass
+
+  def delete_application_metrics(self, app_id):
+    del self.app_metric_map[ app_id ]
+  pass
+
+  def flatten(self, application_id = None):
+    """
+    Return flatten dict to caller in json format.
+    Json format:
+    {"metrics":[{"hostname":"a","metricname":"b","appid":"c",
+    "instanceid":"d","starttime":"e","metrics":{"t":"v"}}]}
+    """
+    with self.lock:
+      timeline_metrics = { "metrics" : [] }
+      local_metric_map = {}
+  
+      if application_id:
+        if self.app_metric_map.has_key(application_id):
+          local_metric_map = { application_id : self.app_metric_map[application_id] }
+        else:
+          logger.info("application_id: {0}, not present in the map.".format(application_id))
+      else:
+        local_metric_map = self.app_metric_map.copy()
+      pass
+  
+      for appId, metrics in local_metric_map.iteritems():
+        for metricId, metricData in dict(metrics).iteritems():
+          # Create a timeline metric object
+          timeline_metric = {
+            "hostname" : self.hostname,
+            "metricname" : metricId,
+            "appid" : "HOST",
+            "instanceid" : "",
+            "starttime" : self.get_start_time(appId, metricId),
+            "metrics" : metricData
+          }
+          timeline_metrics[ "metrics" ].append( timeline_metric )
+        pass
+      pass
+      return json.dumps(timeline_metrics) if len(timeline_metrics[ "metrics" ]) > 0 else None
+  pass
+
+  def get_start_time(self, app_id, metric_id):
+    with self.lock:
+      if self.app_metric_map.has_key(app_id):
+        if self.app_metric_map.get(app_id).has_key(metric_id):
+          metrics = self.app_metric_map.get(app_id).get(metric_id)
+          return min(metrics.iterkeys())
+  pass
+
+  def format_app_id(self, app_id, instance_id = None):
+    return app_id + "_" + instance_id if instance_id else app_id
+  pass
+
+  def get_app_id(self, app_id):
+    return app_id.split("_")[0]
+  pass
+
+  def get_instance_id(self, app_id):
+    parts = app_id.split("_")
+    return parts[1] if len(parts) > 1 else ''
+  pass
+
+  def clear(self):
+    with self.lock:
+      self.app_metric_map.clear()
+  pass
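
A quick usage sketch for the buffer above (hostname, IP and the sample numbers are placeholders; assumes the package imports cleanly, i.e. the bundled psutil has been built, since core/__init__.py runs on import):

    from core.application_metric_map import ApplicationMetricMap

    m = ApplicationMetricMap('host1.example.com', '10.0.0.1')
    m.put_metric('HOST', {'cpu_user': 3.5, 'cpu_idle': 92.0}, 1415000000000)
    m.put_metric('HOST', {'cpu_user': 4.1}, 1415000015000)
    print m.flatten()
    # Roughly: {"metrics": [{"hostname": "host1.example.com", "metricname": "cpu_user",
    #   "appid": "HOST", "instanceid": "", "starttime": 1415000000000,
    #   "metrics": {"1415000000000": 3.5, "1415000015000": 4.1}}, ...]}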

+ 127 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py

@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import ConfigParser
+import StringIO
+import json
+import os
+
+config = ConfigParser.RawConfigParser()
+CONFIG_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_monitor.ini"
+METRIC_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_groups.conf"
+
+config_content = """
+[default]
+debug_level = INFO
+metrics_server = host:port
+enable_time_threshold = false
+enable_value_threshold = false
+
+[emitter]
+send_interval = 60
+
+[collector]
+collector_sleep_interval = 5
+max_queue_size = 5000
+"""
+
+metric_group_info = """
+{
+   "host_metric_groups": {
+      "cpu_info": {
+         "collect_every": "15",
+         "metrics": [
+            {
+               "name": "cpu_user",
+               "value_threshold": "1.0"
+            }
+         ]
+      },
+      "disk_info": {
+         "collect_every": "30",
+         "metrics": [
+            {
+               "name": "disk_free",
+               "value_threshold": "5.0"
+            }
+         ]
+      },
+      "network_info": {
+         "collect_every": "20",
+         "metrics": [
+            {
+               "name": "bytes_out",
+               "value_threshold": "128"
+            }
+         ]
+      }
+   },
+   "process_metric_groups": {
+      "": {
+         "collect_every": "15",
+         "metrics": []
+      }
+   }
+}
+"""
+
+class Configuration:
+
+  def __init__(self):
+    global config_content
+    self.config = ConfigParser.RawConfigParser()
+    if os.path.exists(CONFIG_FILE_PATH):
+      self.config.read(CONFIG_FILE_PATH)
+    else:
+      self.config.readfp(StringIO.StringIO(config_content))
+    pass
+    if os.path.exists(METRIC_FILE_PATH):
+      self.metric_groups = json.load(open(METRIC_FILE_PATH))
+    else:
+      print 'No metric configs found at {0}, using built-in defaults'.format(METRIC_FILE_PATH)
+      self.metric_groups = json.loads(metric_group_info)
+    pass
+
+  def getConfig(self):
+    return self.config
+
+  def getMetricGroupConfig(self):
+    return self.metric_groups
+
+  def get(self, section, key, default=None):
+    try:
+      value = self.config.get(section, key)
+    except:
+      return default
+    return value
+
+  def get_send_interval(self):
+    return int(self.get("emitter", "send_interval", 60))
+
+  def get_collector_sleep_interval(self):
+    return int(self.get("collector", "collector_sleep_interval", 5))
+
+  def get_server_address(self):
+    return self.get("default", "metrics_server")
+
+  def get_log_level(self):
+    return self.get("default", "debug_level", "INFO")
+
+  def get_max_queue_size(self):
+    return int(self.get("collector", "max_queue_size", 5000))
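
With neither file present under /etc/ambari-metrics-monitor/conf, the Configuration object falls back to the embedded defaults shown above. A minimal sketch of reading those values (assumes the package imports cleanly, i.e. the bundled psutil has been built):

    from core.config_reader import Configuration

    config = Configuration()
    print config.get_server_address()            # -> 'host:port' (placeholder default)
    print config.get_send_interval()             # -> 60
    print config.get_collector_sleep_interval()  # -> 5
    print config.getMetricGroupConfig()['host_metric_groups'].keys()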

+ 103 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py

@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import threading
+import time
+from Queue import Queue
+from threading import Timer
+from application_metric_map import ApplicationMetricMap
+from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
+from metric_collector import MetricsCollector
+from emitter import Emitter
+from host_info import HostInfo
+
+logger = logging.getLogger()
+
+class Controller(threading.Thread):
+
+  def __init__(self, config):
+    # Process initialization code
+    threading.Thread.__init__(self)
+    logger.debug('Initializing Controller thread.')
+    self.lock = threading.Lock()
+    self.config = config
+    self.metrics_config = config.getMetricGroupConfig()
+    self.events_cache = []
+    hostinfo = HostInfo()
+    self.application_metric_map = ApplicationMetricMap(hostinfo.get_hostname(),
+                                                       hostinfo.get_ip_address())
+    self.event_queue = Queue(config.get_max_queue_size())
+    self.metric_collector = MetricsCollector(self.event_queue, self.application_metric_map)
+    self.server_url = config.get_server_address()
+    self.sleep_interval = config.get_collector_sleep_interval()
+    self.initialize_events_cache()
+    self.emitter = Emitter(self.config, self.application_metric_map)
+
+  def run(self):
+    logger.info('Running Controller thread: %s' % threading.currentThread().getName())
+    # Wake every 5 seconds to push events to the queue
+    while True:
+      if (self.event_queue.full()):
+        logger.warn('Event Queue full!! Suspending further collections.')
+      else:
+        self.enqueue_events()
+      pass
+      time.sleep(self.sleep_interval)
+    pass
+
+  # TODO: Optimize to not use Timer class and use the Queue instead
+  def enqueue_events(self):
+    # Queue events for up to a minute
+    for event in self.events_cache:
+      t = Timer(event.get_collect_interval(), self.metric_collector.process_event, args=(event,))
+      t.start()
+    pass
+
+  def initialize_events_cache(self):
+    self.events_cache = []
+    try:
+      host_metrics_groups = self.metrics_config['host_metric_groups']
+      process_metrics_groups = self.metrics_config['process_metric_groups']
+    except KeyError, ke:
+      logger.warn('Error loading metric groups.')
+      raise ke
+    pass
+
+    if host_metrics_groups:
+      for name, properties in host_metrics_groups.iteritems():
+        event = HostMetricCollectEvent(properties, name)
+        logger.info('Adding event to cache, {0} : {1}'.format(name, properties))
+        self.events_cache.append(event)
+      pass
+    pass
+
+    if process_metrics_groups:
+      for name, properties in process_metrics_groups.iteritems():
+        event = ProcessMetricCollectEvent(properties, name)
+        logger.info('Adding event to cache, {0} : {1}'.format(name, properties))
+        #self.events_cache.append(event)
+      pass
+    pass
+
+  pass
+
+  def start_emitter(self):
+    self.emitter.start()
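
The controller's job is mostly scheduling: run() wakes every collector_sleep_interval seconds, and enqueue_events() arms one Timer per metric group, firing after that group's collect_every interval; the Emitter then drains the shared map on its own send_interval. The scheduling pattern, stripped down to the standard library (group names and intervals are the defaults from config_reader.py; collect() is a stand-in for MetricsCollector.process_event):

    from threading import Timer
    import time

    def collect(group_name):
      print '%s collected at %s' % (group_name, time.strftime('%H:%M:%S'))

    for name, interval in [('cpu_info', 15), ('disk_info', 30), ('network_info', 20)]:
      Timer(interval, collect, args=(name,)).start()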

+ 88 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py

@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import threading
+import time
+import urllib2
+
+logger = logging.getLogger()
+
+class Emitter(threading.Thread):
+  COLLECTOR_URL = "http://{0}/ws/v1/timeline/metrics"
+  RETRY_SLEEP_INTERVAL = 5
+  MAX_RETRY_COUNT = 3
+  """
+  Wake up every send interval seconds and empty the application metric map.
+  """
+  def __init__(self, config, application_metric_map):
+    threading.Thread.__init__(self)
+    logger.debug('Initializing Emitter thread.')
+    self.lock = threading.Lock()
+    self.collector_address = config.get_server_address()
+    self.send_interval = config.get_send_interval()
+    self.application_metric_map = application_metric_map
+
+  def run(self):
+    logger.info('Running Emitter thread: %s' % threading.currentThread().getName())
+    while True:
+      try:
+        self.submit_metrics()
+        time.sleep(self.send_interval)
+      except Exception, e:
+        logger.warn('Unable to emit events. %s' % str(e))
+        time.sleep(self.RETRY_SLEEP_INTERVAL)
+        logger.info('Retrying emit after %s seconds.' % self.RETRY_SLEEP_INTERVAL)
+    pass
+  
+  def submit_metrics(self):
+    retry_count = 0
+    while retry_count < self.MAX_RETRY_COUNT:
+      json_data = self.application_metric_map.flatten()
+      if json_data is None:
+        logger.info("Nothing to emit, resume waiting.")
+        break
+      pass
+      response = self.push_metrics(json_data)
+  
+      if response and response.getcode() == 200:
+        retry_count = self.MAX_RETRY_COUNT
+        self.application_metric_map.clear()
+      else:
+        logger.warn("Error sending metrics to server. Retrying after {0} "
+                    "seconds.".format(self.RETRY_SLEEP_INTERVAL))
+        retry_count += 1
+        time.sleep(self.RETRY_SLEEP_INTERVAL)
+      pass
+    pass
+  
+  def push_metrics(self, data):
+    headers = {"Content-Type" : "application/json", "Accept" : "*/*"}
+    server = self.COLLECTOR_URL.format(self.collector_address.strip())
+    logger.info("server: %s" % server)
+    logger.debug("message to send: %s" % data)
+    req = urllib2.Request(server, data, headers)
+    response = urllib2.urlopen(req, timeout=int(self.send_interval - 10))
+    if response:
+      logger.debug("POST response from server: retcode = {0}".format(response.getcode()))
+      logger.debug(str(response.read()))
+    pass
+    return response
+
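
What Emitter.push_metrics() actually puts on the wire is the flattened map from ApplicationMetricMap, POSTed to the collector's timeline endpoint. A sketch of an equivalent request (host:port and the sample numbers are made up; the URL template, headers and field names are the ones used above):

    import json
    import urllib2

    payload = {"metrics": [{"hostname": "host1.example.com",
                            "metricname": "cpu_user",
                            "appid": "HOST",
                            "instanceid": "",
                            "starttime": 1415000000000,
                            "metrics": {"1415000000000": 3.5}}]}
    req = urllib2.Request("http://host:port/ws/v1/timeline/metrics",
                          json.dumps(payload),
                          {"Content-Type": "application/json", "Accept": "*/*"})
    # urllib2.urlopen(req, timeout=50)   # left commented out: needs a running collector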

+ 85 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py

@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+
+DEFAULT_COLLECT_INTERVAL = 10
+
+logger = logging.getLogger()
+
+class Event:
+  def __init__(self):
+    self._classname = self.__class__.__name__
+
+  def get_classname(self):
+    return self._classname
+
+  def get_collect_interval(self):
+    return DEFAULT_COLLECT_INTERVAL
+
+
+class EmmitEvent(Event):
+
+  def __init__(self, application_metric_map, config):
+    Event.__init__(self)
+    self.collector_address = config.get_server_address()
+    self.application_metric_map = application_metric_map
+
+  def get_emmit_payload(self):
+    return self.application_metric_map.flatten()
+
+
+class HostMetricCollectEvent(Event):
+
+  def __init__(self, group_config, group_name):
+    Event.__init__(self)
+    self.group_config = group_config
+    self.group_name = group_name
+    try:
+      self.group_interval = group_config['collect_every']
+      self.metrics = group_config['metrics']
+    except KeyError, ex:
+      logger.warn('Unable to create event from metric group. {0}'.format(
+        group_config))
+      raise ex
+
+  def get_metric_value_thresholds(self):
+    metric_value_thresholds = {}
+
+    for metric in self.metrics:
+      try:
+        metric_value_thresholds[metric['name']] = metric['value_threshold']
+      except:
+        logger.warn('Error parsing metric configuration. {0}'.format(metric))
+    pass
+
+    return metric_value_thresholds
+
+  def get_group_name(self):
+    return self.group_name
+
+  def get_collect_interval(self):
+    return int(self.group_interval if self.group_interval else DEFAULT_COLLECT_INTERVAL)
+
+class ProcessMetricCollectEvent:
+
+  def __init__(self, group_config, group_name):
+    # Initialize the Process metric event
+    pass
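
Events are thin wrappers around the metric group entries from metric_groups.conf. Constructing one from the default cpu_info group (a sketch; assumes the package imports cleanly, i.e. the bundled psutil has been built):

    from core.event_definition import HostMetricCollectEvent

    group = {"collect_every": "15",
             "metrics": [{"name": "cpu_user", "value_threshold": "1.0"}]}
    event = HostMetricCollectEvent(group, "cpu_info")
    print event.get_classname()                # -> 'HostMetricCollectEvent'
    print event.get_collect_interval()         # -> 15
    print event.get_metric_value_thresholds()  # -> {'cpu_user': '1.0'}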

+ 190 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py

@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import psutil
+import os
+from collections import namedtuple
+import platform
+import socket
+
+logger = logging.getLogger()
+
+def bytes2human(n):
+  bytes = float(n)
+  gigabytes = bytes / 1073741824
+  return '%.2f' % gigabytes
+pass
+
+
+class HostInfo():
+
+
+  def get_cpu_times(self):
+    """
+    Return cpu stats at current time
+    """
+    cpu_times = psutil.cpu_times()
+    load_avg = os.getloadavg()
+
+    return {
+      'cpu_user' : cpu_times.user if hasattr(cpu_times, 'user') else '',
+      'cpu_system' : cpu_times.system if hasattr(cpu_times, 'system') else '',
+      'cpu_idle' : cpu_times.idle if hasattr(cpu_times, 'idle') else '',
+      'cpu_nice' : cpu_times.nice if hasattr(cpu_times, 'nice') else '',
+      'cpu_wio' : cpu_times.iowait if hasattr(cpu_times, 'iowait') else '',
+      'cpu_intr' : cpu_times.irq if hasattr(cpu_times, 'irq') else '',
+      'cpu_sintr' : cpu_times.softirq if hasattr(cpu_times, 'softirq') else '',
+      'load_one' : load_avg[0] if len(load_avg) > 0 else '',
+      'load_five' : load_avg[1] if len(load_avg) > 1 else '',
+      'load_fifteen' : load_avg[2] if len(load_avg) > 2 else ''
+    }
+  pass
+
+  def get_mem_info(self):
+    """
+    Return memory statistics at current time
+    """
+
+    mem_stats = psutil.virtual_memory()
+    swap_stats = psutil.swap_memory()
+    disk_usage = self.get_combined_disk_usage()
+
+    return {
+      'mem_free' : mem_stats.free if hasattr(mem_stats, 'free') else '',
+      'mem_shared' : mem_stats.shared if hasattr(mem_stats, 'shared') else '',
+      'mem_buffered' : mem_stats.buffers if hasattr(mem_stats, 'buffers') else '',
+      'mem_cached' : mem_stats.cached if hasattr(mem_stats, 'cached') else '',
+      'swap_free' : swap_stats.free if hasattr(swap_stats, 'free') else '',
+      'disk_free' : disk_usage.get("disk_free"),
+      # todo: cannot send string
+      #'part_max_used' : disk_usage.get("max_part_used")[0],
+      'disk_total' : disk_usage.get("disk_total")
+    }
+  pass
+
+  def get_network_info(self):
+    """
+    Return network counters
+    """
+    net_stats = psutil.net_io_counters()
+
+    return {
+      'bytes_out' : net_stats.bytes_sent,
+      'bytes_in' : net_stats.bytes_recv,
+      'pkts_out' : net_stats.packets_sent,
+      'pkts_in' : net_stats.packets_recv
+    }
+  pass
+
+  # Combined disk usage across all partitions (single pass)
+  def get_combined_disk_usage(self):
+    disk_usage = namedtuple('disk_usage', [ 'total', 'used', 'free',
+                                            'percent', 'part_max_used' ])
+    combined_disk_total = 0
+    combined_disk_used = 0
+    combined_disk_free = 0
+    combined_disk_percent = 0
+    max_percent_usage = ('', 0)
+
+    for part in psutil.disk_partitions(all=False):
+      if os.name == 'nt':
+        if 'cdrom' in part.opts or part.fstype == '':
+          # skip cd-rom drives with no disk in it; they may raise
+          # ENOENT, pop-up a Windows GUI error for a non-ready
+          # partition or just hang.
+          continue
+        pass
+      pass
+      usage = psutil.disk_usage(part.mountpoint)
+
+      combined_disk_total += usage.total if hasattr(usage, 'total') else 0
+      combined_disk_used += usage.used if hasattr(usage, 'used') else 0
+      combined_disk_free += usage.free if hasattr(usage, 'free') else 0
+      combined_disk_percent += usage.percent if hasattr(usage, 'percent') else 0
+
+      if hasattr(usage, 'percent') and max_percent_usage[1] < int(usage.percent):
+        max_percent_usage = (part.mountpoint, usage.percent)
+      pass
+    pass
+
+    return { "disk_total" : bytes2human(combined_disk_total),
+             "disk_used"  : bytes2human(combined_disk_used),
+             "disk_free"  : bytes2human(combined_disk_free),
+             "disk_percent" : bytes2human(combined_disk_percent)
+            # todo: cannot send string
+             #"max_part_used" : max_percent_usage }
+           }
+  pass
+
+  def get_host_static_info(self):
+
+    boot_time = psutil.boot_time()
+    cpu_count_logical = psutil.cpu_count()
+    swap_stats = psutil.swap_memory()
+    mem_info = psutil.virtual_memory()
+
+    return {
+      'cpu_num' : cpu_count_logical,
+      'cpu_speed' : '',
+      'swap_total' : swap_stats.total,
+      'boottime' : boot_time,
+      'machine_type' : platform.processor(),
+      'os_name' : platform.system(),
+      'os_release' : platform.release(),
+      'location' : '',
+      'mem_total' : mem_info.total
+    }
+
+
+
+  def get_disk_usage(self):
+    disk_usage = {}
+
+    for part in psutil.disk_partitions(all=False):
+      if os.name == 'nt':
+        if 'cdrom' in part.opts or part.fstype == '':
+          # skip cd-rom drives with no disk in it; they may raise
+          # ENOENT, pop-up a Windows GUI error for a non-ready
+          # partition or just hang.
+          continue
+        pass
+      pass
+      usage = psutil.disk_usage(part.mountpoint)
+      disk_usage.update(
+        { part.device :
+          {
+              "total" : bytes2human(usage.total),
+              "used" : bytes2human(usage.used),
+              "free" : bytes2human(usage.free),
+              "percent" : int(usage.percent),
+              "fstype" : part.fstype,
+              "mount" : part.mountpoint
+          }
+        }
+      )
+    pass
+    return disk_usage
+  pass
+
+  def get_hostname(self):
+    return socket.getfqdn()
+
+  def get_ip_address(self):
+    return socket.gethostbyname(socket.getfqdn())
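
HostInfo is a thin layer over psutil plus os.getloadavg()/socket, returning plain dicts keyed by the metric names used elsewhere in this commit. A quick interactive sketch (requires the bundled psutil to be built so core/__init__.py can import it):

    from core.host_info import HostInfo

    info = HostInfo()
    print info.get_hostname()
    print info.get_cpu_times()['cpu_user']
    print info.get_mem_info()['mem_free']
    print info.get_combined_disk_usage()['disk_free']   # gigabytes, via bytes2human()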

+ 87 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py

@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+from time import time
+from host_info import HostInfo
+from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
+
+logger = logging.getLogger()
+
+DEFAULT_HOST_APP_ID = '_HOST'
+
+class MetricsCollector():
+  """
+  The main Reader thread that dequeues events from the event queue and
+  submits a metric record to the emit buffer. Implementation of dequeue is
+  not required if Timer class is used for metric groups.
+  """
+
+  def __init__(self, emit_queue, application_metric_map):
+    self.emit_queue = emit_queue
+    self.application_metric_map = application_metric_map
+    self.host_info = HostInfo()
+  pass
+
+  def process_event(self, event):
+    if event.get_classname() == HostMetricCollectEvent.__name__:
+      self.process_host_collection_event(event)
+    elif event.get_classname() == ProcessMetricCollectEvent.__name__:
+      self.process_process_collection_event(event)
+    else:
+      logger.warn('Unknown event in queue')
+    pass
+
+  def process_host_collection_event(self, event):
+    startTime = int(round(time() * 1000))
+    metrics = None
+
+    if 'cpu' in event.get_group_name():
+      metrics = self.host_info.get_cpu_times()
+
+    elif 'disk' in event.get_group_name():
+      metrics = self.host_info.get_combined_disk_usage()
+
+    elif 'network' in event.get_group_name():
+      metrics = self.host_info.get_network_info()
+
+    elif 'mem' in event.get_group_name():
+      metrics = self.host_info.get_mem_info()
+
+    elif 'all' in event.get_group_name():
+      metrics = {}
+      metrics.update(self.host_info.get_cpu_times())
+      metrics.update(self.host_info.get_combined_disk_usage())
+      metrics.update(self.host_info.get_network_info())
+      metrics.update(self.host_info.get_mem_info())
+
+    else:
+      logger.warn('Unknown metric group.')
+    pass
+
+    if metrics:
+      self.application_metric_map.put_metric(DEFAULT_HOST_APP_ID, metrics, startTime)
+    pass
+
+  def process_process_collection_event(self, event):
+    """
+    Collect Process level metrics and update the application metric map
+    """
+    pass
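
End to end, a single "all" group event exercises every HostInfo collector and lands in the shared metric map. A sketch of driving the collector directly, without the Timer machinery (hostname/IP are placeholders; assumes the bundled psutil is built):

    from Queue import Queue
    from core.application_metric_map import ApplicationMetricMap
    from core.event_definition import HostMetricCollectEvent
    from core.metric_collector import MetricsCollector

    metric_map = ApplicationMetricMap('host1.example.com', '10.0.0.1')
    collector = MetricsCollector(Queue(), metric_map)
    event = HostMetricCollectEvent({"collect_every": "15", "metrics": []}, "all")
    collector.process_event(event)
    print metric_map.flatten()   # JSON with cpu, disk, network and memory metrics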

+ 64 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py

@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import core
+from core.controller import Controller
+from core.config_reader import Configuration
+import logging
+import signal
+import sys
+
+logger = logging.getLogger()
+
+def main(argv=None):
+  # Allow Ctrl-C
+  signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+  config = Configuration()
+  controller = Controller(config)
+  
+  _init_logging(config)
+  
+  logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
+  controller.start()
+  controller.start_emitter()
+
+def _init_logging(config):
+  _levels = {
+          'DEBUG': logging.DEBUG,
+          'INFO': logging.INFO,
+          'WARNING': logging.WARNING,
+          'ERROR': logging.ERROR,
+          'CRITICAL': logging.CRITICAL,
+          'NOTSET' : logging.NOTSET
+          }
+  level = logging.INFO
+  if config.get_log_level() in _levels:
+    level = _levels.get(config.get_log_level())
+  logger.setLevel(level)
+  formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d - %(message)s")
+  stream_handler = logging.StreamHandler()
+  stream_handler.setFormatter(formatter)
+  logger.addHandler(stream_handler)
+  
+
+if __name__ == '__main__':
+  main()
+

+ 27 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE

@@ -0,0 +1,27 @@
+psutil is distributed under BSD license reproduced below.
+
+Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola'
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of the psutil authors nor the names of its contributors
+   may be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 14 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in

@@ -0,0 +1,14 @@
+include CREDITS
+include HISTORY
+include LICENSE
+include make.bat
+include Makefile
+include MANIFEST.in
+include README
+include setup.py
+include TODO
+recursive-include docs *
+recursive-exclude docs/_build *
+recursive-include examples *.py
+recursive-include psutil *.py *.c *.h
+recursive-include test *.py README

+ 77 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile

@@ -0,0 +1,77 @@
+# Shortcuts for various tasks (UNIX only).
+# To use a specific Python version run:
+# $ make install PYTHON=python3.3
+
+# You can set these variables from the command line.
+PYTHON    = python
+TSCRIPT   = test/test_psutil.py
+
+all: test
+
+clean:
+	rm -f `find . -type f -name \*.py[co]`
+	rm -f `find . -type f -name \*.so`
+	rm -f `find . -type f -name .\*~`
+	rm -f `find . -type f -name \*.orig`
+	rm -f `find . -type f -name \*.bak`
+	rm -f `find . -type f -name \*.rej`
+	rm -rf `find . -type d -name __pycache__`
+	rm -rf *.egg-info
+	rm -rf *\$testfile*
+	rm -rf build
+	rm -rf dist
+	rm -rf docs/_build
+
+build: clean
+	$(PYTHON) setup.py build
+
+install: build
+	if test $(PYTHON) = python2.4; then \
+		$(PYTHON) setup.py install; \
+	elif test $(PYTHON) = python2.5; then \
+		$(PYTHON) setup.py install; \
+	else \
+		$(PYTHON) setup.py install --user; \
+	fi
+
+uninstall:
+	if test $(PYTHON) = python2.4; then \
+		pip-2.4 uninstall -y -v psutil; \
+	else \
+		cd ..; $(PYTHON) -m pip uninstall -y -v psutil; \
+	fi
+
+test: install
+	$(PYTHON) $(TSCRIPT)
+
+test-process: install
+	$(PYTHON) -m unittest -v test.test_psutil.TestProcess
+
+test-system: install
+	$(PYTHON) -m unittest -v test.test_psutil.TestSystemAPIs
+
+# Run a specific test by name; e.g. "make test-by-name disk_" will run
+# all test methods containing "disk_" in their name.
+# Requires "pip install nose".
+test-by-name:
+	@$(PYTHON) -m nose test/test_psutil.py --nocapture -v -m $(filter-out $@,$(MAKECMDGOALS))
+
+memtest: install
+	$(PYTHON) test/test_memory_leaks.py
+
+pep8:
+	@hg locate '*py' | xargs pep8
+
+pyflakes:
+	@export PYFLAKES_NODOCTEST=1 && \
+		hg locate '*py' | xargs pyflakes
+
+# Upload source tarball on https://pypi.python.org/pypi/psutil.
+upload-src: clean
+	$(PYTHON) setup.py sdist upload
+
+# Build and upload doc on https://pythonhosted.org/psutil/.
+# Requires "pip install sphinx-pypi-upload".
+upload-doc:
+	cd docs; make html
+	$(PYTHON) setup.py upload_sphinx --upload-dir=docs/_build/html

+ 270 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README

@@ -0,0 +1,270 @@
+.. image:: https://pypip.in/d/psutil/badge.png
+    :target: https://crate.io/packages/psutil/
+    :alt: Download this month
+
+.. image:: https://pypip.in/v/psutil/badge.png
+    :target: https://pypi.python.org/pypi/psutil/
+    :alt: Latest version
+
+.. image:: https://pypip.in/license/psutil/badge.png
+    :target: https://pypi.python.org/pypi/psutil/
+    :alt: License
+
+===========
+Quick links
+===========
+
+* `Home page <http://code.google.com/p/psutil>`_
+* `Download <https://pypi.python.org/pypi?:action=display&name=psutil#downloads>`_
+* `Blog <http://grodola.blogspot.com/search/label/psutil>`_
+* `Documentation <http://pythonhosted.org/psutil/>`_
+* `Forum <http://groups.google.com/group/psutil/topics>`_
+* `What's new <https://psutil.googlecode.com/hg/HISTORY>`_
+
+=======
+Summary
+=======
+
+psutil (python system and process utilities) is a cross-platform library for
+retrieving information on **running processes** and **system utilization**
+(CPU, memory, disks, network) in Python. It is useful mainly for **system
+monitoring**, **profiling and limiting process resources** and **management of
+running processes**. It implements many functionalities offered by command line
+tools such as: ps, top, lsof, netstat, ifconfig, who, df, kill, free, nice,
+ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap. It currently supports
+**Linux, Windows, OSX, FreeBSD** and **Sun Solaris**, both **32-bit** and
+**64-bit** architectures, with Python versions from **2.4 to 3.4**. PyPy is
+also known to work.
+
+==============
+Example usages
+==============
+
+CPU
+===
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.cpu_times()
+    scputimes(user=3961.46, nice=169.729, system=2150.659, idle=16900.540, iowait=629.59, irq=0.0, softirq=19.42, steal=0.0, guest=0, guest_nice=0.0)
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1)
+    ...
+    4.0
+    5.9
+    3.8
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1, percpu=True)
+    ...
+    [4.0, 6.9, 3.7, 9.2]
+    [7.0, 8.5, 2.4, 2.1]
+    [1.2, 9.0, 9.9, 7.2]
+    >>>
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_times_percent(interval=1, percpu=False)
+    ...
+    scputimes(user=1.5, nice=0.0, system=0.5, idle=96.5, iowait=1.5, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=1.0, nice=0.0, system=0.0, idle=99.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=2.0, nice=0.0, system=0.0, idle=98.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    >>>
+    >>> psutil.cpu_count()
+    4
+    >>> psutil.cpu_count(logical=False)
+    2
+    >>>
+
+Memory
+======
+
+.. code-block:: python
+
+    >>> psutil.virtual_memory()
+    svmem(total=8374149120L, available=2081050624L, percent=75.1, used=8074080256L, free=300068864L, active=3294920704, inactive=1361616896, buffers=529895424L, cached=1251086336)
+    >>> psutil.swap_memory()
+    sswap(total=2097147904L, used=296128512L, free=1801019392L, percent=14.1, sin=304193536, sout=677842944)
+    >>>
+
+Disks
+=====
+
+.. code-block:: python
+
+    >>> psutil.disk_partitions()
+    [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4', opts='rw,nosuid'),
+     sdiskpart(device='/dev/sda2', mountpoint='/home', fstype='ext4', opts='rw')]
+    >>>
+    >>> psutil.disk_usage('/')
+    sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)
+    >>>
+    >>> psutil.disk_io_counters(perdisk=False)
+    sdiskio(read_count=719566, write_count=1082197, read_bytes=18626220032, write_bytes=24081764352, read_time=5023392, write_time=63199568)
+    >>>
+
+Network
+=======
+
+.. code-block:: python
+
+    >>> psutil.net_io_counters(pernic=True)
+    {'eth0': netio(bytes_sent=485291293, bytes_recv=6004858642, packets_sent=3251564, packets_recv=4787798, errin=0, errout=0, dropin=0, dropout=0),
+     'lo': netio(bytes_sent=2838627, bytes_recv=2838627, packets_sent=30567, packets_recv=30567, errin=0, errout=0, dropin=0, dropout=0)}
+    >>>
+    >>> psutil.net_connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED', pid=1254),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING', pid=2987),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED', pid=None),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT', pid=None)
+     ...]
+
+Other system info
+=================
+
+.. code-block:: python
+
+    >>> psutil.users()
+    [user(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0),
+     user(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0)]
+    >>>
+    >>> psutil.boot_time()
+    1365519115.0
+    >>>
+
+Process management
+==================
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.pids()
+    [1, 2, 3, 4, 5, 6, 7, 46, 48, 50, 51, 178, 182, 222, 223, 224,
+     268, 1215, 1216, 1220, 1221, 1243, 1244, 1301, 1601, 2237, 2355,
+     2637, 2774, 3932, 4176, 4177, 4185, 4187, 4189, 4225, 4243, 4245,
+     4263, 4282, 4306, 4311, 4312, 4313, 4314, 4337, 4339, 4357, 4358,
+     4363, 4383, 4395, 4408, 4433, 4443, 4445, 4446, 5167, 5234, 5235,
+     5252, 5318, 5424, 5644, 6987, 7054, 7055, 7071]
+    >>>
+    >>> p = psutil.Process(7055)
+    >>> p.name()
+    'python'
+    >>> p.exe()
+    '/usr/bin/python'
+    >>> p.cwd()
+    '/home/giampaolo'
+    >>> p.cmdline()
+    ['/usr/bin/python', 'main.py']
+    >>>
+    >>> p.status()
+    'running'
+    >>> p.username()
+    'giampaolo'
+    >>> p.create_time()
+    1267551141.5019531
+    >>> p.terminal()
+    '/dev/pts/0'
+    >>>
+    >>> p.uids()
+    puids(real=1000, effective=1000, saved=1000)
+    >>> p.gids()
+    pgids(real=1000, effective=1000, saved=1000)
+    >>>
+    >>> p.cpu_times()
+    pcputimes(user=1.02, system=0.31)
+    >>> p.cpu_percent(interval=1.0)
+    12.1
+    >>> p.cpu_affinity()
+    [0, 1, 2, 3]
+    >>> p.set_cpu_affinity([0])
+    >>>
+    >>> p.memory_percent()
+    0.63423
+    >>>
+    >>> p.memory_info()
+    pmem(rss=7471104, vms=68513792)
+    >>> p.ext_memory_info()
+    extmem(rss=9662464, vms=49192960, shared=3612672, text=2564096, lib=0, data=5754880, dirty=0)
+    >>> p.memory_maps()
+    [pmmap_grouped(path='/lib/x86_64-linux-gnu/libutil-2.15.so', rss=16384, anonymous=8192, swap=0),
+     pmmap_grouped(path='/lib/x86_64-linux-gnu/libc-2.15.so', rss=6384, anonymous=15, swap=0),
+     pmmap_grouped(path='/lib/x86_64-linux-gnu/libcrypto.so.1.0.0', rss=34124, anonymous=1245, swap=0),
+     pmmap_grouped(path='[heap]', rss=54653, anonymous=8192, swap=0),
+     pmmap_grouped(path='[stack]', rss=1542, anonymous=166, swap=0),
+     ...]
+    >>>
+    >>> p.io_counters()
+    pio(read_count=478001, write_count=59371, read_bytes=700416, write_bytes=69632)
+    >>>
+    >>> p.open_files()
+    [popenfile(path='/home/giampaolo/svn/psutil/somefile', fd=3)]
+    >>>
+    >>> p.connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED'),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING'),
+     pconn(fd=119, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED'),
+     pconn(fd=123, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT')]
+    >>>
+    >>> p.num_threads()
+    4
+    >>> p.num_fds()
+    8
+    >>> p.threads()
+    [pthread(id=5234, user_time=22.5, system_time=9.2891),
+     pthread(id=5235, user_time=0.0, system_time=0.0),
+     pthread(id=5236, user_time=0.0, system_time=0.0),
+     pthread(id=5237, user_time=0.0707, system_time=1.1)]
+    >>>
+    >>> p.num_ctx_switches()
+    pctxsw(voluntary=78, involuntary=19)
+    >>>
+    >>> p.nice()
+    0
+    >>> p.set_nice(10)
+    >>>
+    >>> p.set_ionice(psutil.IOPRIO_CLASS_IDLE)  # IO priority (Win and Linux only)
+    >>> p.ionice()
+    pionice(ioclass=3, value=0)
+    >>>
+    >>> p.set_rlimit(psutil.RLIMIT_NOFILE, (5, 5))  # resource limits (Linux only)
+    >>> p.rlimit(psutil.RLIMIT_NOFILE)
+    (5, 5)
+    >>>
+    >>> p.suspend()
+    >>> p.resume()
+    >>>
+    >>> p.terminate()
+    >>> p.wait(timeout=3)
+    0
+    >>>
+    >>> psutil.test()
+    USER         PID %CPU %MEM     VSZ     RSS TTY        START    TIME  COMMAND
+    root           1  0.0  0.0   24584    2240            Jun17   00:00  init
+    root           2  0.0  0.0       0       0            Jun17   00:00  kthreadd
+    root           3  0.0  0.0       0       0            Jun17   00:05  ksoftirqd/0
+    ...
+    giampaolo  31475  0.0  0.0   20760    3024 /dev/pts/0 Jun19   00:00  python2.4
+    giampaolo  31721  0.0  2.2  773060  181896            00:04   10:30  chrome
+    root       31763  0.0  0.0       0       0            00:05   00:00  kworker/0:1
+    >>>
+
+Further process APIs
+====================
+
+.. code-block:: python
+
+    >>> for p in psutil.process_iter():
+    ...     print(p)
+    ...
+    psutil.Process(pid=1, name='init')
+    psutil.Process(pid=2, name='kthreadd')
+    psutil.Process(pid=3, name='ksoftirqd/0')
+    ...
+    >>>
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> # waits for multiple processes to terminate
+    >>> gone, alive = psutil.wait_procs(procs_list, 3, callback=on_terminate)
+    >>>

+ 57 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from subprocess import call
+import sys
+import os
+import shutil
+
+def build():
+  path = os.path.dirname(os.path.abspath(__file__))
+  build_path = path + os.sep + 'build'
+  build_out_path = path + os.sep + 'build.out'
+  build_out = open(build_out_path, 'wb')
+
+  # Delete old build dir if exists
+  if (os.path.exists(build_path)):
+    shutil.rmtree(build_path)
+  pass
+
+  cwd = os.getcwd()
+  os.chdir(path)
+
+  print 'Executing make at location: %s ' % path
+
+  if sys.platform.startswith("win"):
+    # Windows
+    returncode = call(['make.bat', 'build'], stdout=build_out, stderr=build_out)
+  else:
+    # Unix based
+    returncode = call(['make', 'build'], stdout=build_out, stderr=build_out)
+  pass
+
+  os.chdir(cwd)
+
+  if returncode != 0:
+    print 'psutil build failed. Please find build output at: %s' % build_out_path
+  pass
+
+if __name__ == '__main__':
+  build()

+ 177 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile

@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/psutil.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/psutil.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/psutil"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/psutil"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

+ 15 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README

@@ -0,0 +1,15 @@
+About
+=====
+
+This directory contains the reStructuredText (reST) sources to the psutil
+documentation.  You don't need to build them yourself; prebuilt versions are
+available at http://psutil.readthedocs.org/en/latest/.
+In case you want to build them anyway, you need to install Sphinx first:
+
+    $ pip install sphinx
+
+Then run:
+
+    $ make html
+
+You'll then have an HTML version of the doc at _build/html/index.html.

+ 57 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js

@@ -0,0 +1,57 @@
+$(document).ready(function() {
+    /* Add a [>>>] button on the top-right corner of code samples to hide
+     * the >>> and ... prompts and the output and thus make the code
+     * copyable. */
+    var div = $('.highlight-python .highlight,' +
+                '.highlight-python3 .highlight')
+    var pre = div.find('pre');
+
+    // get the styles from the current theme
+    pre.parent().parent().css('position', 'relative');
+    var hide_text = 'Hide the prompts and output';
+    var show_text = 'Show the prompts and output';
+    var border_width = pre.css('border-top-width');
+    var border_style = pre.css('border-top-style');
+    var border_color = pre.css('border-top-color');
+    var button_styles = {
+        'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
+        'border-color': border_color, 'border-style': border_style,
+        'border-width': border_width, 'color': border_color, 'text-size': '75%',
+        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
+        'border-radius': '0 3px 0 0'
+    }
+
+    // create and add the button to all the code blocks that contain >>>
+    div.each(function(index) {
+        var jthis = $(this);
+        if (jthis.find('.gp').length > 0) {
+            var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
+            button.css(button_styles)
+            button.attr('title', hide_text);
+            jthis.prepend(button);
+        }
+        // tracebacks (.gt) contain bare text elements that need to be
+        // wrapped in a span to work with .nextUntil() (see later)
+        jthis.find('pre:has(.gt)').contents().filter(function() {
+            return ((this.nodeType == 3) && (this.data.trim().length > 0));
+        }).wrap('<span>');
+    });
+
+    // define the behavior of the button when it's clicked
+    $('.copybutton').toggle(
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').hide();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
+            button.css('text-decoration', 'line-through');
+            button.attr('title', show_text);
+        },
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').show();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
+            button.css('text-decoration', 'none');
+            button.attr('title', hide_text);
+        });
+});
+

+ 161 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js

@@ -0,0 +1,161 @@
+/*
+ * sidebar.js
+ * ~~~~~~~~~~
+ *
+ * This script makes the Sphinx sidebar collapsible.
+ *
+ * .sphinxsidebar contains .sphinxsidebarwrapper.  This script adds in
+ * .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton used to
+ * collapse and expand the sidebar.
+ *
+ * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden and the
+ * width of the sidebar and the margin-left of the document are decreased.
+ * When the sidebar is expanded the opposite happens.  This script saves a
+ * per-browser/per-session cookie used to remember the position of the sidebar
+ * among the pages.  Once the browser is closed the cookie is deleted and the
+ * position reset to the default (expanded).
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+$(function() {
+  // global elements used by the functions.
+  // the 'sidebarbutton' element is defined as global after its
+  // creation, in the add_sidebar_button function
+  var bodywrapper = $('.bodywrapper');
+  var sidebar = $('.sphinxsidebar');
+  var sidebarwrapper = $('.sphinxsidebarwrapper');
+
+  // original margin-left of the bodywrapper and width of the sidebar
+  // with the sidebar expanded
+  var bw_margin_expanded = bodywrapper.css('margin-left');
+  var ssb_width_expanded = sidebar.width();
+
+  // margin-left of the bodywrapper and width of the sidebar
+  // with the sidebar collapsed
+  var bw_margin_collapsed = '.8em';
+  var ssb_width_collapsed = '.8em';
+
+  // colors used by the current theme
+  var dark_color = '#AAAAAA';
+  var light_color = '#CCCCCC';
+
+  function sidebar_is_collapsed() {
+    return sidebarwrapper.is(':not(:visible)');
+  }
+
+  function toggle_sidebar() {
+    if (sidebar_is_collapsed())
+      expand_sidebar();
+    else
+      collapse_sidebar();
+  }
+
+  function collapse_sidebar() {
+    sidebarwrapper.hide();
+    sidebar.css('width', ssb_width_collapsed);
+    bodywrapper.css('margin-left', bw_margin_collapsed);
+    sidebarbutton.css({
+        'margin-left': '0',
+        //'height': bodywrapper.height(),
+        'height': sidebar.height(),
+        'border-radius': '5px'
+    });
+    sidebarbutton.find('span').text('»');
+    sidebarbutton.attr('title', _('Expand sidebar'));
+    document.cookie = 'sidebar=collapsed';
+  }
+
+  function expand_sidebar() {
+    bodywrapper.css('margin-left', bw_margin_expanded);
+    sidebar.css('width', ssb_width_expanded);
+    sidebarwrapper.show();
+    sidebarbutton.css({
+        'margin-left': ssb_width_expanded-12,
+        //'height': bodywrapper.height(),
+        'height': sidebar.height(),
+        'border-radius': '0 5px 5px 0'
+    });
+    sidebarbutton.find('span').text('«');
+    sidebarbutton.attr('title', _('Collapse sidebar'));
+    //sidebarwrapper.css({'padding-top':
+    //  Math.max(window.pageYOffset - sidebarwrapper.offset().top, 10)});
+    document.cookie = 'sidebar=expanded';
+  }
+
+  function add_sidebar_button() {
+    sidebarwrapper.css({
+        'float': 'left',
+        'margin-right': '0',
+        'width': ssb_width_expanded - 28
+    });
+    // create the button
+    sidebar.append(
+      '<div id="sidebarbutton"><span>&laquo;</span></div>'
+    );
+    var sidebarbutton = $('#sidebarbutton');
+    // find the height of the viewport to center the '<<' in the page
+    var viewport_height;
+    if (window.innerHeight)
+ 	  viewport_height = window.innerHeight;
+    else
+	  viewport_height = $(window).height();
+    var sidebar_offset = sidebar.offset().top;
+
+    var sidebar_height = sidebar.height();
+    //var sidebar_height = Math.max(bodywrapper.height(), sidebar.height());
+    sidebarbutton.find('span').css({
+        'display': 'block',
+        'margin-top': sidebar_height/2 - 10
+        //'margin-top': (viewport_height - sidebar.position().top - 20) / 2
+        //'position': 'fixed',
+        //'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10
+    });
+
+    sidebarbutton.click(toggle_sidebar);
+    sidebarbutton.attr('title', _('Collapse sidebar'));
+    sidebarbutton.css({
+        'border-radius': '0 5px 5px 0',
+        'color': '#444444',
+        'background-color': '#CCCCCC',
+        'font-size': '1.2em',
+        'cursor': 'pointer',
+        'height': sidebar_height,
+        'padding-top': '1px',
+        'padding-left': '1px',
+        'margin-left': ssb_width_expanded - 12
+    });
+
+    sidebarbutton.hover(
+      function () {
+          $(this).css('background-color', dark_color);
+      },
+      function () {
+          $(this).css('background-color', light_color);
+      }
+    );
+  }
+
+  function set_position_from_cookie() {
+    if (!document.cookie)
+      return;
+    var items = document.cookie.split(';');
+    for(var k=0; k<items.length; k++) {
+      var key_val = items[k].split('=');
+      var key = key_val[0];
+      if (key == 'sidebar') {
+        var value = key_val[1];
+        if ((value == 'collapsed') && (!sidebar_is_collapsed()))
+          collapse_sidebar();
+        else if ((value == 'expanded') && (sidebar_is_collapsed()))
+          expand_sidebar();
+      }
+    }
+  }
+
+  add_sidebar_button();
+  var sidebarbutton = $('#sidebarbutton');
+  set_position_from_cookie();
+});

+ 12 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html

@@ -0,0 +1,12 @@
+{#
+    basic/globaltoc.html
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Sphinx sidebar template: global table of contents.
+
+    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+#}
+<h3>{{ _('Manual') }}</h3>
+{{ toctree() }}
+<a href="{{ pathto(master_doc) }}">Back to Welcome</a>

+ 4 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html

@@ -0,0 +1,4 @@
+{% extends "defindex.html" %}
+{% block tables %}
+
+{% endblock %}

+ 16 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html

@@ -0,0 +1,16 @@
+<!--
+<h3>Other versions</h3>
+<ul>
+  <li><a href="http://psutil.readthedocs.org/en/latest/">Latest</a></li>
+  <li><a href="http://psutil.readthedocs.org/en/0.6/">0.6</a></li>
+  <li><a href="http://psutil.readthedocs.org/en/0.5/">0.5</a></li>
+</ul>
+-->
+<h3>Useful links</h3>
+<ul>
+  <li><a href="http://code.google.com/p/psutil/">Google Code Project</a></li>
+  <li><a href="http://grodola.blogspot.com/search/label/psutil">Blog</a></li>
+  <li><a href="https://pypi.python.org/pypi?:action=display&name=psutil#downloads">Download</a></li>
+  <li><a href="https://code.google.com/p/psutil/issues/list">Issues</a></li>
+  <li><a href="http://groups.google.com/group/psutil/topics">Forum</a></li>
+</ul>

+ 66 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html

@@ -0,0 +1,66 @@
+{% extends "!page.html" %}
+{% block extrahead %}
+{{ super() }}
+{% if not embedded %}<script type="text/javascript" src="{{ pathto('_static/copybutton.js', 1) }}"></script>{% endif %}
+<script type="text/javascript">
+
+  // Store editor pop-up help state in localStorage
+  // so it does not re-pop-up itself between page loads.
+  // Do not even pretend to support IE gracefully.
+  (function($) {
+
+    $(document).ready(function() {
+        var box = $("#editor-trap");
+        var klass = "toggled";
+        var storageKey = "toggled";
+
+        function toggle() {
+            box.toggleClass(klass);
+            // Store the toggle status in local storage as "has value string" or null
+            window.localStorage.setItem(storageKey, box.hasClass(klass) ? "toggled" : "not-toggled");
+        }
+
+        box.click(toggle);
+
+        // Check the persistent state of the editor pop-up
+        // Note that localStorage does not necessarily support boolean values (ugh!)
+        // http://stackoverflow.com/questions/3263161/cannot-set-boolean-values-in-localstorage
+        var v = window.localStorage.getItem(storageKey);
+        if(v == "toggled" || !v) {
+          box.addClass(klass);
+        }
+
+    });
+
+  })(jQuery);
+</script>
+<script type="text/javascript">
+
+  var _gaq = _gaq || [];
+  _gaq.push(['_setAccount', 'UA-2097050-4']);
+  _gaq.push(['_trackPageview']);
+
+  (function() {
+    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+  })();
+
+</script>
+{% endblock %}
+
+{% block rootrellink %}
+    <li><a href="http://code.google.com/p/psutil/"><img src="{{ pathto('_static/logo.png', 1) }}" style="height: 30px; vertical-align: middle; padding-right: 1em;" /> Project Homepage</a>{{ reldelim1 }}</li>
+	<li><a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}</li>
+{% endblock %}
+
+
+{% block footer %}
+<div class="footer">
+    &copy; Copyright {{ copyright|e }}.
+    <br />
+    Last updated on {{ last_updated|e }}.
+    <br />
+    Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version|e }}.
+</div>
+{% endblock %}

+ 187 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css

@@ -0,0 +1,187 @@
+@import url("default.css");
+
+body {
+    background-color: white;
+    margin-left: 1em;
+    margin-right: 1em;
+}
+
+div.related {
+    margin-bottom: 1.2em;
+    padding: 0.5em 0;
+    border-top: 1px solid #ccc;
+    margin-top: 0.5em;
+}
+
+div.related a:hover {
+    color: #0095C4;
+}
+
+div.related:first-child {
+    border-top: 0;
+    padding-top: 0;
+    border-bottom: 1px solid #ccc;
+}
+
+div.sphinxsidebar {
+    background-color: #eeeeee;
+    border-radius: 5px;
+    line-height: 130%;
+    font-size: smaller;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+    margin-top: 1.5em;
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+    margin-top: 0.2em;
+}
+
+div.sphinxsidebarwrapper > ul > li > ul > li {
+    margin-bottom: 0.4em;
+}
+
+div.sphinxsidebar a:hover {
+    color: #0095C4;
+}
+
+div.sphinxsidebar input {
+    font-family: 'Lucida Grande','Lucida Sans','DejaVu Sans',Arial,sans-serif;
+    border: 1px solid #999999;
+    font-size: smaller;
+    border-radius: 3px;
+}
+
+div.sphinxsidebar input[type=text] {
+    max-width: 150px;
+}
+
+div.body {
+    padding: 0 0 0 1.2em;
+}
+
+div.body p {
+    line-height: 140%;
+}
+
+div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 {
+    margin: 0;
+    border: 0;
+    padding: 0.3em 0;
+}
+
+div.body hr {
+    border: 0;
+    background-color: #ccc;
+    height: 1px;
+}
+
+div.body pre {
+    border-radius: 3px;
+    border: 1px solid #ac9;
+}
+
+div.body div.admonition, div.body div.impl-detail {
+    border-radius: 3px;
+}
+
+div.body div.impl-detail > p {
+    margin: 0;
+}
+
+div.body div.seealso {
+    border: 1px solid #dddd66;
+}
+
+div.body a {
+    color: #00608f;
+}
+
+div.body a:visited {
+    color: #30306f;
+}
+
+div.body a:hover {
+    color: #00B0E4;
+}
+
+tt, pre {
+    font-family: monospace, sans-serif;
+    font-size: 96.5%;
+}
+
+div.body tt {
+    border-radius: 3px;
+}
+
+div.body tt.descname {
+    font-size: 120%;
+}
+
+div.body tt.xref, div.body a tt {
+    font-weight: normal;
+}
+
+p.deprecated {
+    border-radius: 3px;
+}
+
+table.docutils {
+    border: 1px solid #ddd;
+    min-width: 20%;
+    border-radius: 3px;
+    margin-top: 10px;
+    margin-bottom: 10px;
+}
+
+table.docutils td, table.docutils th {
+    border: 1px solid #ddd !important;
+    border-radius: 3px;
+}
+
+table p, table li {
+    text-align: left !important;
+}
+
+table.docutils th {
+    background-color: #eee;
+    padding: 0.3em 0.5em;
+}
+
+table.docutils td {
+    background-color: white;
+    padding: 0.3em 0.5em;
+}
+
+table.footnote, table.footnote td {
+    border: 0 !important;
+}
+
+div.footer {
+    line-height: 150%;
+    margin-top: -2em;
+    text-align: right;
+    width: auto;
+    margin-right: 10px;
+}
+
+div.footer a:hover {
+    color: #0095C4;
+}
+
+div.body h1,
+div.body h2,
+div.body h3 {
+    background-color: #EAEAEA;
+    border-bottom: 1px solid #CCC;
+    padding-top: 2px;
+    padding-bottom: 2px;
+    padding-left: 5px;
+    margin-top: 5px;
+    margin-bottom: 5px;
+}
+
+div.body h2 {
+    padding-left:10px;
+}

+ 23 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf

@@ -0,0 +1,23 @@
+[theme]
+inherit = default
+stylesheet = pydoctheme.css
+pygments_style = sphinx
+
+[options]
+bodyfont = 'Lucida Grande', 'Lucida Sans', 'DejaVu Sans', Arial, sans-serif
+headfont = 'Lucida Grande', 'Lucida Sans', 'DejaVu Sans', Arial, sans-serif
+footerbgcolor = white
+footertextcolor = #555555
+relbarbgcolor = white
+relbartextcolor = #666666
+relbarlinkcolor = #444444
+sidebarbgcolor = white
+sidebartextcolor = #444444
+sidebarlinkcolor = #444444
+bgcolor = white
+textcolor = #222222
+linkcolor = #0090c0
+visitedlinkcolor = #00608f
+headtextcolor = #1a1a1a
+headbgcolor = white
+headlinkcolor = #aaaaaa

+ 253 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py

@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+#
+# psutil documentation build configuration file, created by
+# sphinx-quickstart.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import datetime
+import os
+
+
+PROJECT_NAME = u"psutil"
+AUTHOR = u"Giampaolo Rodola'"
+THIS_YEAR = str(datetime.datetime.now().year)
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+def get_version():
+    INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py'))
+    f = open(INIT, 'r')
+    try:
+        for line in f:
+            if line.startswith('__version__'):
+                ret = eval(line.strip().split(' = ')[1])
+                assert ret.count('.') == 2, ret
+                for num in ret.split('.'):
+                    assert num.isdigit(), ret
+                return ret
+        else:
+            raise ValueError("couldn't find version string")
+    finally:
+        f.close()
+
+VERSION = get_version()
+
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.coverage',
+              'sphinx.ext.pngmath',
+              'sphinx.ext.viewcode',
+              'sphinx.ext.intersphinx']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_template']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = PROJECT_NAME
+copyright = u'2009-%s, %s' % (THIS_YEAR, AUTHOR)
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = VERSION
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+autodoc_docstring_signature = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+html_theme = 'pydoctheme'
+html_theme_options = {'collapsiblesidebar': True}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ["_themes"]
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+html_title = "{project} {version} documentation".format(**locals())
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = 'logo.png'
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {
+    'index': 'indexsidebar.html',
+    '**': ['globaltoc.html',
+           'relations.html',
+           'sourcelink.html',
+           'searchbox.html']
+}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {
+#    'index': 'indexcontent.html',
+#}
+
+# If false, no module index is generated.
+html_domain_indices = False
+
+# If false, no index is generated.
+html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%s-doc' % PROJECT_NAME
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+    ('index', '%s.tex' % PROJECT_NAME,
+     u'%s documentation' % PROJECT_NAME, AUTHOR),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', PROJECT_NAME, u'%s documentation' % PROJECT_NAME, [AUTHOR], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False

+ 1247 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst

@@ -0,0 +1,1247 @@
+.. module:: psutil
+   :synopsis: psutil module
+.. moduleauthor:: Giampaolo Rodola' <grodola@gmail.com>
+
+.. warning::
+
+   This documentation refers to the new 2.X version of psutil.
+   Instructions on how to port existing 1.2.1 code are
+   `here <http://grodola.blogspot.com/2014/01/psutil-20-porting.html>`__.
+   Old 1.2.1 documentation is still available
+   `here <https://code.google.com/p/psutil/wiki/Documentation>`__.
+
+psutil documentation
+====================
+
+Quick links
+-----------
+
+* `Home page <http://code.google.com/p/psutil>`__
+* `Blog <http://grodola.blogspot.com/search/label/psutil>`__
+* `Download <https://pypi.python.org/pypi?:action=display&name=psutil#downloads>`__
+* `Forum <http://groups.google.com/group/psutil/topics>`__
+* `What's new <https://psutil.googlecode.com/hg/HISTORY>`__
+
+About
+-----
+
+From project's home page:
+
+  psutil (python system and process utilities) is a cross-platform library for
+  retrieving information on running
+  **processes** and **system utilization** (CPU, memory, disks, network) in
+  **Python**.
+  It is useful mainly for **system monitoring**, **profiling** and **limiting
+  process resources** and **management of running processes**.
+  It implements many functionalities offered by command line tools
+  such as: *ps, top, lsof, netstat, ifconfig, who, df, kill, free, nice,
+  ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap*.
+  It currently supports **Linux, Windows, OSX, FreeBSD** and **Sun Solaris**,
+  both **32-bit** and **64-bit** architectures, with Python versions from
+  **2.4** to **3.4**.
+  `Pypy <http://pypy.org/>`__ is also known to work.
+
+The psutil documentation you're reading is distributed as a single HTML page.
+
+System related functions
+========================
+
+CPU
+---
+
+.. function:: cpu_times(percpu=False)
+
+  Return system CPU times as a namedtuple.
+  Every attribute represents the seconds the CPU has spent in the given mode.
+  The availability of the attributes varies depending on the platform:
+
+  - **user**
+  - **system**
+  - **idle**
+  - **nice** *(UNIX)*
+  - **iowait** *(Linux)*
+  - **irq** *(Linux, FreeBSD)*
+  - **softirq** *(Linux)*
+  - **steal** *(Linux 2.6.11+)*
+  - **guest** *(Linux 2.6.24+)*
+  - **guest_nice** *(Linux 3.2.0+)*
+
+  When *percpu* is ``True`` return a list of namedtuples for each logical CPU
+  on the system.
+  First element of the list refers to first CPU, second element to second CPU
+  and so on.
+  The order of the list is consistent across calls.
+  Example output on Linux:
+
+    >>> import psutil
+    >>> psutil.cpu_times()
+    scputimes(user=17411.7, nice=77.99, system=3797.02, idle=51266.57, iowait=732.58, irq=0.01, softirq=142.43, steal=0.0, guest=0.0, guest_nice=0.0)
+
+.. function:: cpu_percent(interval=None, percpu=False)
+
+  Return a float representing the current system-wide CPU utilization as a
+  percentage. When *interval* is > ``0.0`` compares system CPU times elapsed
+  before and after the interval (blocking).
+  When *interval* is ``0.0`` or ``None`` compares system CPU times elapsed
+  since last call or module import, returning immediately.
+  That means the first time this is called it will return a meaningless ``0.0``
+  value which you are supposed to ignore.
+  In this case it is recommended for accuracy that this function be called with at
+  least ``0.1`` seconds between calls.
+  When *percpu* is ``True`` returns a list of floats representing the
+  utilization as a percentage for each CPU.
+  First element of the list refers to first CPU, second element to second CPU
+  and so on. The order of the list is consistent across calls.
+
+    >>> import psutil
+    >>> # blocking
+    >>> psutil.cpu_percent(interval=1)
+    2.0
+    >>> # non-blocking (percentage since last call)
+    >>> psutil.cpu_percent(interval=None)
+    2.9
+    >>> # blocking, per-cpu
+    >>> psutil.cpu_percent(interval=1, percpu=True)
+    [2.0, 1.0]
+    >>>
+
+  .. warning::
+
+    the first time this function is called with *interval* = ``0.0`` or ``None``
+    it will return a meaningless ``0.0`` value which you are supposed to
+    ignore.
+
+.. function:: cpu_times_percent(interval=None, percpu=False)
+
+  Same as :func:`cpu_percent()` but provides utilization percentages for each
+  specific CPU time as is returned by
+  :func:`psutil.cpu_times(percpu=True)<cpu_times()>`.
+  *interval* and
+  *percpu* arguments have the same meaning as in :func:`cpu_percent()`.
+
+  .. warning::
+
+    the first time this function is called with *interval* = ``0.0`` or
+    ``None`` it will return a meaningless ``0.0`` value which you are supposed
+    to ignore.
+
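+  A minimal usage sketch (the field names mirror :func:`cpu_times()`; the
+  numbers shown are illustrative):
+
+    >>> import psutil
+    >>> psutil.cpu_times_percent(interval=1)
+    scputimes(user=1.5, nice=0.0, system=0.5, idle=96.5, iowait=1.5, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+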
+.. function:: cpu_count(logical=True)
+
+    Return the number of logical CPUs in the system (same as
+    `os.cpu_count() <http://docs.python.org/3/library/os.html#os.cpu_count>`__
+    in Python 3.4).
+    If *logical* is ``False`` return the number of physical cores only (hyper
+    thread CPUs are excluded). Return ``None`` if undetermined.
+
+      >>> import psutil
+      >>> psutil.cpu_count()
+      4
+      >>> psutil.cpu_count(logical=False)
+      2
+      >>>
+
+Memory
+------
+
+.. function:: virtual_memory()
+
+  Return statistics about system memory usage as a namedtuple including the
+  following fields, expressed in bytes:
+
+  - **total**: total physical memory available.
+  - **available**: the actual amount of available memory that can be given
+    instantly to processes that request more memory in bytes; this is
+    calculated by summing different memory values depending on the platform
+    (e.g. free + buffers + cached on Linux) and it is supposed to be used to
+    monitor actual memory usage in a cross platform fashion.
+  - **percent**: the percentage usage calculated as
+    ``(total - available) / total * 100``.
+  - **used**: memory used, calculated differently depending on the platform and
+    designed for informational purposes only.
+  - **free**: memory not being used at all (zeroed) that is readily available;
+    note that this doesn't reflect the actual memory available (use 'available'
+    instead).
+
+  Platform-specific fields:
+
+  - **active**: (UNIX): memory currently in use or very recently used, and so
+    it is in RAM.
+  - **inactive**: (UNIX): memory that is marked as not used.
+  - **buffers**: (Linux, BSD): cache for things like file system metadata.
+  - **cached**: (Linux, BSD): cache for various things.
+  - **wired**: (BSD, OSX): memory that is marked to always stay in RAM. It is
+    never moved to disk.
+  - **shared**: (BSD): memory that may be simultaneously accessed by multiple
+    processes.
+
+  The sum of **used** and **available** does not necessarily equal **total**.
+  On Windows **available** and **free** are the same.
+  See `examples/meminfo.py <http://code.google.com/p/psutil/source/browse/examples/meminfo.py>`__
+  script providing an example on how to convert bytes to a human readable form.
+
+    >>> import psutil
+    >>> mem = psutil.virtual_memory()
+    >>> mem
+    svmem(total=8374149120L, available=1247768576L, percent=85.1, used=8246628352L, free=127520768L, active=3208777728, inactive=1133408256, buffers=342413312L, cached=777834496)
+    >>>
+    >>> THRESHOLD = 100 * 1024 * 1024  # 100MB
+    >>> if mem.available <= THRESHOLD:
+    ...     print("warning")
+    ...
+    >>>
+
+
+.. function:: swap_memory()
+
+  Return system swap memory statistics as a namedtuple including the following
+  fields:
+
+  * **total**: total swap memory in bytes
+  * **used**: used swap memory in bytes
+  * **free**: free swap memory in bytes
+  * **percent**: the percentage usage
+  * **sin**: the number of bytes the system has swapped in from disk
+    (cumulative)
+  * **sout**: the number of bytes the system has swapped out from disk
+    (cumulative)
+
+  **sin** and **sout** on Windows are meaningless and are always set to ``0``.
+  See `examples/meminfo.py <http://code.google.com/p/psutil/source/browse/examples/meminfo.py>`__
+  script providing an example on how to convert bytes to a human readable form.
+
+    >>> import psutil
+    >>> psutil.swap_memory()
+    sswap(total=2097147904L, used=886620160L, free=1210527744L, percent=42.3, sin=1050411008, sout=1906720768)
+
+Disks
+-----
+
+.. function:: disk_partitions(all=False)
+
+  Return all mounted disk partitions as a list of namedtuples including device,
+  mount point and filesystem type, similarly to "df" command on UNIX. If *all*
+  parameter is ``False`` return physical devices only (e.g. hard disks, cd-rom
+  drives, USB keys) and ignore all others (e.g. memory partitions such as
+  `/dev/shm <http://www.cyberciti.biz/tips/what-is-devshm-and-its-practical-usage.html>`__).
+  Namedtuple's **fstype** field is a string which varies depending on the
+  platform.
+  On Linux it can be one of the values found in /proc/filesystems (e.g.
+  ``'ext3'`` for an ext3 hard drive or ``'iso9660'`` for the CD-ROM drive).
+  On Windows it is determined via
+  `GetDriveType <http://msdn.microsoft.com/en-us/library/aa364939(v=vs.85).aspx>`__
+  and can be either ``"removable"``, ``"fixed"``, ``"remote"``, ``"cdrom"``,
+  ``"unmounted"`` or ``"ramdisk"``. On OSX and FreeBSD it is retrieved via
+  `getfsstat(2) <http://www.manpagez.com/man/2/getfsstat/>`__. See
+  `disk_usage.py <http://code.google.com/p/psutil/source/browse/examples/disk_usage.py>`__
+  script providing an example usage.
+
+    >>> import psutil
+    >>> psutil.disk_partitions()
+    [sdiskpart(device='/dev/sda3', mountpoint='/', fstype='ext4', opts='rw,errors=remount-ro'),
+     sdiskpart(device='/dev/sda7', mountpoint='/home', fstype='ext4', opts='rw')]
+
+.. function:: disk_usage(path)
+
+  Return disk usage statistics about the given *path* as a namedtuple including
+  **total**, **used** and **free** space expressed in bytes, plus the
+  **percentage** usage.
+  `OSError <http://docs.python.org/3/library/exceptions.html#OSError>`__ is
+  raised if *path* does not exist. See
+  `examples/disk_usage.py <http://code.google.com/p/psutil/source/browse/examples/disk_usage.py>`__
+  script providing an example usage. Starting from
+  `Python 3.3 <http://bugs.python.org/issue12442>`__ this is also
+  available as
+  `shutil.disk_usage() <http://docs.python.org/3/library/shutil.html#shutil.disk_usage>`__.
+
+    >>> import psutil
+    >>> psutil.disk_usage('/')
+    sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)
+
+.. function:: disk_io_counters(perdisk=False)
+
+  Return system-wide disk I/O statistics as a namedtuple including the
+  following fields:
+
+  - **read_count**: number of reads
+  - **write_count**: number of writes
+  - **read_bytes**: number of bytes read
+  - **write_bytes**: number of bytes written
+  - **read_time**: time spent reading from disk (in milliseconds)
+  - **write_time**: time spent writing to disk (in milliseconds)
+
+  If *perdisk* is ``True`` return the same information for every physical disk
+  installed on the system as a dictionary with partition names as the keys and
+  the namedtuple described above as the values.
+  See `examples/iotop.py <http://code.google.com/p/psutil/source/browse/examples/iotop.py>`__
+  for an example application.
+
+    >>> import psutil
+    >>> psutil.disk_io_counters()
+    sdiskio(read_count=8141, write_count=2431, read_bytes=290203, write_bytes=537676, read_time=5868, write_time=94922)
+    >>>
+    >>> psutil.disk_io_counters(perdisk=True)
+    {'sda1': sdiskio(read_count=920, write_count=1, read_bytes=2933248, write_bytes=512, read_time=6016, write_time=4),
+     'sda2': sdiskio(read_count=18707, write_count=8830, read_bytes=6060, write_bytes=3443, read_time=24585, write_time=1572),
+     'sdb1': sdiskio(read_count=161, write_count=0, read_bytes=786432, write_bytes=0, read_time=44, write_time=0)}
+
+Network
+-------
+
+.. function:: net_io_counters(pernic=False)
+
+  Return system-wide network I/O statistics as a namedtuple including the
+  following attributes:
+
+  - **bytes_sent**: number of bytes sent
+  - **bytes_recv**: number of bytes received
+  - **packets_sent**: number of packets sent
+  - **packets_recv**: number of packets received
+  - **errin**: total number of errors while receiving
+  - **errout**: total number of errors while sending
+  - **dropin**: total number of incoming packets which were dropped
+  - **dropout**: total number of outgoing packets which were dropped (always 0
+    on OSX and BSD)
+
+  If *pernic* is ``True`` return the same information for every network
+  interface installed on the system as a dictionary with network interface
+  names as the keys and the namedtuple described above as the values.
+  See `examples/nettop.py <http://code.google.com/p/psutil/source/browse/examples/nettop.py>`__
+  for an example application.
+
+    >>> import psutil
+    >>> psutil.net_io_counters()
+    snetio(bytes_sent=14508483, bytes_recv=62749361, packets_sent=84311, packets_recv=94888, errin=0, errout=0, dropin=0, dropout=0)
+    >>>
+    >>> psutil.net_io_counters(pernic=True)
+    {'lo': snetio(bytes_sent=547971, bytes_recv=547971, packets_sent=5075, packets_recv=5075, errin=0, errout=0, dropin=0, dropout=0),
+    'wlan0': snetio(bytes_sent=13921765, bytes_recv=62162574, packets_sent=79097, packets_recv=89648, errin=0, errout=0, dropin=0, dropout=0)}
+
+.. function:: net_connections(kind='inet')
+
+  Return system-wide socket connections as a list of namedtuples.
+  Every namedtuple provides 7 attributes:
+
+  - **fd**: the socket file descriptor, if retrievable, else ``-1``.
+    If the connection refers to the current process this may be passed to
+    `socket.fromfd() <http://docs.python.org/library/socket.html#socket.fromfd>`__
+    to obtain a usable socket object.
+  - **family**: the address family, either `AF_INET
+    <http://docs.python.org//library/socket.html#socket.AF_INET>`__,
+    `AF_INET6 <http://docs.python.org//library/socket.html#socket.AF_INET6>`__
+    or `AF_UNIX <http://docs.python.org//library/socket.html#socket.AF_UNIX>`__.
+  - **type**: the address type, either `SOCK_STREAM
+    <http://docs.python.org//library/socket.html#socket.SOCK_STREAM>`__ or
+    `SOCK_DGRAM
+    <http://docs.python.org//library/socket.html#socket.SOCK_DGRAM>`__.
+  - **laddr**: the local address as a ``(ip, port)`` tuple or a ``path``
+    in case of AF_UNIX sockets.
+  - **raddr**: the remote address as a ``(ip, port)`` tuple or an absolute
+    ``path`` in case of UNIX sockets.
+    When the remote endpoint is not connected you'll get an empty tuple
+    (AF_INET*) or ``None`` (AF_UNIX).
+    On Linux AF_UNIX sockets will always have this set to ``None``.
+  - **status**: represents the status of a TCP connection. The return value
+    is one of the :data:`psutil.CONN_* <psutil.CONN_ESTABLISHED>` constants
+    (a string).
+    For UDP and UNIX sockets this is always going to be
+    :const:`psutil.CONN_NONE`.
+  - **pid**: the PID of the process which opened the socket, if retrievable,
+    else ``None``. On some platforms (e.g. Linux) the availability of this
+    field changes depending on process privileges (root is needed).
+
+  The *kind* parameter is a string which filters for connections that fit the
+  following criteria:
+
+  .. table::
+
+   +----------------+-----------------------------------------------------+
+   | **Kind value** | **Connections using**                               |
+   +================+=====================================================+
+   | "inet"         | IPv4 and IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "inet4"        | IPv4                                                |
+   +----------------+-----------------------------------------------------+
+   | "inet6"        | IPv6                                                |
+   +----------------+-----------------------------------------------------+
+   | "tcp"          | TCP                                                 |
+   +----------------+-----------------------------------------------------+
+   | "tcp4"         | TCP over IPv4                                       |
+   +----------------+-----------------------------------------------------+
+   | "tcp6"         | TCP over IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "udp"          | UDP                                                 |
+   +----------------+-----------------------------------------------------+
+   | "udp4"         | UDP over IPv4                                       |
+   +----------------+-----------------------------------------------------+
+   | "udp6"         | UDP over IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "unix"         | UNIX socket (both UDP and TCP protocols)            |
+   +----------------+-----------------------------------------------------+
+   | "all"          | the sum of all the possible families and protocols  |
+   +----------------+-----------------------------------------------------+
+
+  To get per-process connections use :meth:`Process.connections`.
+  Also, see
+  `netstat.py sample script <https://code.google.com/p/psutil/source/browse/examples/netstat.py>`__.
+  Example:
+
+    >>> import psutil
+    >>> psutil.net_connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED', pid=1254),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING', pid=2987),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED', pid=None),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT', pid=None)
+     ...]
+
+  .. note:: (OSX) :class:`psutil.AccessDenied` is always raised unless running
+     as root (lsof does the same).
+  .. note:: (Solaris) UNIX sockets are not supported.
+
+  *New in 2.1.0*
+
+
+Other system info
+-----------------
+
+.. function:: users()
+
+  Return users currently connected on the system as a list of namedtuples
+  including the following fields:
+
+  - **user**: the name of the user.
+  - **terminal**: the tty or pseudo-tty associated with the user, if any,
+    else ``None``.
+  - **host**: the host name associated with the entry, if any.
+  - **started**: the creation time as a floating point number expressed in
+    seconds since the epoch.
+
+  Example::
+
+    >>> import psutil
+    >>> psutil.users()
+    [suser(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0),
+     suser(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0)]
+
+.. function:: boot_time()
+
+  Return the system boot time expressed in seconds since the epoch.
+  Example:
+
+  .. code-block:: python
+
+     >>> import psutil, datetime
+     >>> psutil.boot_time()
+     1389563460.0
+     >>> datetime.datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
+     '2014-01-12 22:51:00'
+
+Processes
+=========
+
+Functions
+---------
+
+.. function:: pids()
+
+  Return a list of currently running PIDs. To iterate over all processes
+  :func:`process_iter()` should be preferred.
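+
+  Example (the output is illustrative)::
+
+    >>> import psutil
+    >>> psutil.pids()
+    [1, 2, 3, 4, 5, 6, 7, 46, 48, 50, 51, 178, 182, ...]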
+
+.. function:: pid_exists(pid)
+
+  Check whether the given PID exists in the current process list. This is
+  faster than doing ``"pid in psutil.pids()"`` and should be preferred.
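+
+  Example (``2353`` is just an arbitrary PID)::
+
+    >>> import psutil
+    >>> psutil.pid_exists(2353)
+    False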
+
+.. function:: process_iter()
+
+  Return an iterator yielding a :class:`Process` class instance for all running
+  processes on the local machine.
+  Every instance is only created once and then cached into an internal table
+  which is updated every time an element is yielded.
+  Cached :class:`Process` instances are checked for identity so that you're
+  safe in case a PID has been reused by another process, in which case the
+  cached instance is updated.
+  This should be preferred over :func:`psutil.pids()` for iterating over
+  processes.
+  The sorting order in which processes are returned is
+  based on their PID. Example usage::
+
+    import psutil
+
+    for proc in psutil.process_iter():
+        try:
+            pinfo = proc.as_dict(attrs=['pid', 'name'])
+        except psutil.NoSuchProcess:
+            pass
+        else:
+            print(pinfo)
+
+.. function:: wait_procs(procs, timeout=None, callback=None)
+
+  Convenience function which waits for a list of :class:`Process` instances to
+  terminate. Return a ``(gone, alive)`` tuple indicating which processes are
+  gone and which ones are still alive. The *gone* ones will have a new
+  *returncode* attribute indicating process exit status (it may be ``None``).
+  ``callback`` is a function which gets called every time a process terminates
+  (a :class:`Process` instance is passed as callback argument). Function will
+  return as soon as all processes terminate or when the timeout occurs. Typical use
+  case is:
+
+  - send SIGTERM to a list of processes
+  - give them some time to terminate
+  - send SIGKILL to those ones which are still alive
+
+  Example::
+
+    import psutil
+
+    def on_terminate(proc):
+        print("process {} terminated".format(proc))
+
+    procs = [...]  # a list of Process instances
+    for p in procs:
+        p.terminate()
+    gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+    for p in alive:
+        p.kill()
+
+Exceptions
+----------
+
+.. class:: Error()
+
+  Base exception class. All other exceptions inherit from this one.
+
+.. class:: NoSuchProcess(pid, name=None, msg=None)
+
+   Raised by :class:`Process` class methods when no process with the given
+   *pid* is found in the current process list or when a process no longer
+   exists. "name" is the name the process had before disappearing
+   and gets set only if :meth:`Process.name()` was previously called.
+
+.. class:: AccessDenied(pid=None, name=None, msg=None)
+
+    Raised by :class:`Process` class methods when permission to perform an
+    action is denied. "name" is the name of the process (may be ``None``).
+
+.. class:: TimeoutExpired(seconds, pid=None, name=None, msg=None)
+
+    Raised by :meth:`Process.wait` if timeout expires and process is still
+    alive.
+
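+Since every other exception inherits from :class:`Error` it can be used as a
+catch-all. A minimal sketch (the PID used here is a placeholder)::
+
+    import psutil
+
+    pid = 1234          # placeholder: any PID of interest
+    try:
+        name = psutil.Process(pid).name()
+    except psutil.Error:
+        name = None
+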
+Process class
+-------------
+
+.. class:: Process(pid=None)
+
+  Represents an OS process with the given *pid*. If *pid* is omitted current
+  process *pid* (`os.getpid() <http://docs.python.org/library/os.html#os.getpid>`__)
+  is used.
+  Raise :class:`NoSuchProcess` if *pid* does not exist.
+  When accessing methods of this class always be prepared to catch
+  :class:`NoSuchProcess` and :class:`AccessDenied` exceptions.
+  `hash() <http://docs.python.org/2/library/functions.html#hash>`__ builtin can
+  be used against instances of this class in order to identify a process
+  uniquely over time (the hash is determined by mixing process PID
+  and creation time). As such it can also be used with
+  `set()s <http://docs.python.org/2/library/stdtypes.html#types-set>`__.
+
+  .. warning::
+
+    the way this class is bound to a process is uniquely via its **PID**.
+    That means that if the :class:`Process` instance is old enough and
+    the PID has been reused by another process in the meantime you might end up
+    interacting with another process.
+    The only exceptions for which process identity is pre-emptively checked
+    (via PID + creation time) and guaranteed are for
+    :meth:`nice` (set),
+    :meth:`ionice` (set),
+    :meth:`cpu_affinity` (set),
+    :meth:`rlimit` (set),
+    :meth:`children`,
+    :meth:`parent`,
+    :meth:`suspend`,
+    :meth:`resume`,
+    :meth:`send_signal`,
+    :meth:`terminate`, and
+    :meth:`kill`
+    methods.
+    To prevent this problem for all other methods you can use
+    :meth:`is_running()` before querying the process or use
+    :func:`process_iter()` in case you're iterating over all processes.
+
+  .. attribute:: pid
+
+     The process PID.
+
+  .. method:: ppid()
+
+     The process parent pid. On Windows the return value is cached after first
+     call.
+
+  .. method:: name()
+
+     The process name. The return value is cached after first call.
+
+  .. method:: exe()
+
+     The process executable as an absolute path.
+     On some systems this may also be an empty string.
+     The return value is cached after first call.
+
+  .. method:: cmdline()
+
+     The command line this process has been called with.
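+
+     A short interactive sketch covering :meth:`ppid`, :meth:`name`,
+     :meth:`exe` and :meth:`cmdline` (the values shown are illustrative only):
+
+        >>> import psutil
+        >>> p = psutil.Process()  # current process
+        >>> p.ppid()
+        29613
+        >>> p.name()
+        'python'
+        >>> p.exe()
+        '/usr/bin/python'
+        >>> p.cmdline()
+        ['python', 'script.py']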
+
+  .. method:: create_time()
+
+     The process creation time as a floating point number expressed in seconds
+     since the epoch, in
+     `UTC <http://en.wikipedia.org/wiki/Coordinated_universal_time>`__.
+     The return value is cached after first call.
+
+        >>> import psutil, datetime
+        >>> p = psutil.Process()
+        >>> p.create_time()
+        1307289803.47
+        >>> datetime.datetime.fromtimestamp(p.create_time()).strftime("%Y-%m-%d %H:%M:%S")
+        '2011-03-05 18:03:52'
+
+  .. method:: as_dict(attrs=[], ad_value=None)
+
+     Utility method returning process information as a dictionary.
+     If *attrs* is specified it must be a list of strings reflecting available
+     :class:`Process` class's attribute names (e.g. ``['cpu_times', 'name']``)
+     else all public (read only) attributes are assumed. *ad_value* is the
+     value which gets assigned to a dict key in case :class:`AccessDenied`
+     exception is raised when retrieving that particular process information.
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.as_dict(attrs=['pid', 'name', 'username'])
+        {'username': 'giampaolo', 'pid': 12366, 'name': 'python'}
+
+  .. method:: parent()
+
+     Utility method which returns the parent process as a :class:`Process`
+     object pre-emptively checking whether PID has been reused. If no parent
+     PID is known return ``None``.
+
+  .. method:: status()
+
+     The current process status as a string. The returned string is one of the
+     :data:`psutil.STATUS_*<psutil.STATUS_RUNNING>` constants.
+
+  .. method:: cwd()
+
+     The process current working directory as an absolute path.
+
+  .. method:: username()
+
+     The name of the user that owns the process. On UNIX this is calculated by
+     using the real process uid.
+
+  .. method:: uids()
+
+     The **real**, **effective** and **saved** user ids of this process as a
+     namedtuple. This is the same as
+     `os.getresuid() <http://docs.python.org//library/os.html#os.getresuid>`__
+     but can be used for every process PID.
+
+     Availability: UNIX
+
+  .. method:: gids()
+
+     The **real**, **effective** and **saved** group ids of this process as a
+     namedtuple. This is the same as
+     `os.getresgid() <http://docs.python.org//library/os.html#os.getresgid>`__
+     but can be used for every process PID.
+
+     Availability: UNIX
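+
+     Example for :meth:`uids` and :meth:`gids` (the ids and namedtuple shapes
+     shown are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.uids()
+      puids(real=1000, effective=1000, saved=1000)
+      >>> p.gids()
+      pgids(real=1000, effective=1000, saved=1000)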
+
+  .. method:: terminal()
+
+     The terminal associated with this process, if any, else ``None``. This is
+     similar to the "tty" command but can be used for every process PID.
+
+     Availability: UNIX
+
+  .. method:: nice(value=None)
+
+     Get or set process
+     `niceness <blogs.techrepublic.com.com/opensource/?p=140>`__ (priority).
+     On UNIX this is a number which usually goes from ``-20`` to ``20``.
+     The higher the nice value, the lower the priority of the process.
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.nice(10)  # set
+        >>> p.nice()  # get
+        10
+        >>>
+
+     On Windows this is available as well by using
+     `GetPriorityClass <http://msdn.microsoft.com/en-us/library/ms683211(v=vs.85).aspx>`__
+     and `SetPriorityClass <http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx>`__
+     and *value* is one of the
+     :data:`psutil.*_PRIORITY_CLASS <psutil.ABOVE_NORMAL_PRIORITY_CLASS>`
+     constants.
+     Example which increases process priority on Windows:
+
+        >>> p.nice(psutil.HIGH_PRIORITY_CLASS)
+
+     Starting from `Python 3.3 <http://bugs.python.org/issue10784>`__ this
+     same functionality is available as
+     `os.getpriority() <http://docs.python.org/3/library/os.html#os.getpriority>`__
+     and
+     `os.setpriority() <http://docs.python.org/3/library/os.html#os.setpriority>`__.
+
+  .. method:: ionice(ioclass=None, value=None)
+
+     Get or set
+     `process I/O niceness <http://friedcpu.wordpress.com/2007/07/17/why-arent-you-using-ionice-yet/>`__ (priority).
+     On Linux *ioclass* is one of the
+     :data:`psutil.IOPRIO_CLASS_*<psutil.IOPRIO_CLASS_NONE>` constants.
+     *value* is a number which goes from ``0`` to ``7``. The higher the value,
+     the lower the I/O priority of the process. On Windows only *ioclass* is
+     used and it can be set to ``2`` (normal), ``1`` (low) or ``0`` (very low).
+     The example below sets IDLE priority class for the current process,
+     meaning it will only get I/O time when no other process needs the disk:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set
+      >>> p.ionice()  # get
+      pionice(ioclass=3, value=0)
+      >>>
+
+     Availability: Linux and Windows > Vista
+
+  .. method:: rlimit(resource, limits=None)
+
+     Get or set process resource limits (see
+     `man prlimit <http://linux.die.net/man/2/prlimit>`__). *resource* is one of
+     the :data:`psutil.RLIMIT_* <psutil.RLIMIT_INFINITY>` constants.
+     *limits* is a ``(soft, hard)`` tuple.
+     This is the same as `resource.getrlimit() <http://docs.python.org/library/resource.html#resource.getrlimit>`__
+     and `resource.setrlimit() <http://docs.python.org/library/resource.html#resource.setrlimit>`__
+     but can be used for every process PID and only on Linux.
+     Example:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> # process may open no more than 128 file descriptors
+      >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 128))
+      >>> # process may create files no bigger than 1024 bytes
+      >>> p.rlimit(psutil.RLIMIT_FSIZE, (1024, 1024))
+      >>> # get
+      >>> p.rlimit(psutil.RLIMIT_FSIZE)
+      (1024, 1024)
+      >>>
+
+     Availability: Linux
+
+  .. method:: io_counters()
+
+     Return process I/O statistics as a namedtuple including the number of read
+     and write operations performed by the process and the amount of bytes read
+     and written. For Linux refer to
+     `/proc filesystem documentation <https://www.kernel.org/doc/Documentation/filesystems/proc.txt>`__.
+     On BSD there's apparently no way to retrieve bytes counters, hence ``-1``
+     is returned for **read_bytes** and **write_bytes** fields. OSX is not
+     supported.
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.io_counters()
+      pio(read_count=454556, write_count=3456, read_bytes=110592, write_bytes=0)
+
+     Availability: all platforms except OSX
+
+  .. method:: num_ctx_switches()
+
+     The number of voluntary and involuntary context switches performed by
+     this process.
+
+  .. method:: num_fds()
+
+     The number of file descriptors used by this process.
+
+     Availability: UNIX
+
+  .. method:: num_handles()
+
+     The number of handles used by this process.
+
+     Availability: Windows
+
+  .. method:: num_threads()
+
+     The number of threads currently used by this process.
+
+  .. method:: threads()
+
+     Return threads opened by process as a list of namedtuples including thread
+     id and thread CPU times (user/system).
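+
+     Example (thread ids, times and the namedtuple shape shown are
+     illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.num_threads()
+      2
+      >>> p.threads()
+      [pthread(id=5234, user_time=22.5, system_time=9.28),
+       pthread(id=5237, user_time=0.0, system_time=0.0)]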
+
+  .. method:: cpu_times()
+
+     Return a tuple whose values are the process CPU **user** and **system**
+     times, i.e. the amount of time, expressed in seconds, that the process
+     has spent in
+     `user / system mode <http://stackoverflow.com/questions/556405/what-do-real-user-and-sys-mean-in-the-output-of-time1>`__.
+     This is similar to
+     `os.times() <http://docs.python.org//library/os.html#os.times>`__
+     but can be used for every process PID.
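+
+     Example (the numbers and the namedtuple shape shown are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.cpu_times()
+      pcputimes(user=0.42, system=0.08)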
+
+  .. method:: cpu_percent(interval=None)
+
+     Return a float representing the process CPU utilization as a percentage.
+     When *interval* is > ``0.0`` compares process times to system CPU times
+     elapsed before and after the interval (blocking). When interval is ``0.0``
+     or ``None`` compares process times to system CPU times elapsed since last
+     call, returning immediately. That means the first time this is called it
+     will return a meaningless ``0.0`` value which you are supposed to ignore.
+     In this case it is recommended, for accuracy, that this function be
+     called a second time with at least ``0.1`` seconds between calls. Example:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>>
+      >>> # blocking
+      >>> p.cpu_percent(interval=1)
+      2.0
+      >>> # non-blocking (percentage since last call)
+      >>> p.cpu_percent(interval=None)
+      2.9
+      >>>
+
+     .. note::
+        a percentage > 100 is legitimate as it can result from a process with
+        multiple threads running on different CPU cores.
+
+     .. warning::
+        the first time this method is called with interval = ``0.0`` or
+        ``None`` it will return a meaningless ``0.0`` value which you are
+        supposed to ignore.
+
+  .. method:: cpu_affinity(cpus=None)
+
+     Get or set process current
+     `CPU affinity <http://www.linuxjournal.com/article/6799?page=0,0>`__.
+     CPU affinity consists in telling the OS to run a certain process on a
+     limited set of CPUs only. The number of eligible CPUs can be obtained with
+     ``list(range(psutil.cpu_count()))``.
+
+      >>> import psutil
+      >>> psutil.cpu_count()
+      4
+      >>> p = psutil.Process()
+      >>> p.cpu_affinity()  # get
+      [0, 1, 2, 3]
+      >>> p.cpu_affinity([0])  # set; from now on, process will run on CPU #0 only
+      >>>
+
+     Availability: Linux, Windows
+
+  .. method:: memory_info()
+
+     Return a tuple representing RSS (Resident Set Size) and VMS (Virtual
+     Memory Size) in bytes. On UNIX *rss* and *vms* are the same values shown
+     by ps. On Windows *rss* and *vms* refer to "Mem Usage" and "VM Size"
+     columns of taskmgr.exe. For more detailed memory stats use
+     :meth:`memory_info_ex`.
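+
+     Example (the numbers and the namedtuple shape shown are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.memory_info()
+      pmem(rss=15491072, vms=84025344)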
+
+  .. method:: memory_info_ex()
+
+     Return a namedtuple with variable fields depending on the platform
+     representing extended memory information about the process.
+     All numbers are expressed in bytes.
+
+     +--------+---------+-------+-------+--------------------+
+     | Linux  | OSX     | BSD   | SunOS | Windows            |
+     +========+=========+=======+=======+====================+
+     | rss    | rss     | rss   | rss   | num_page_faults    |
+     +--------+---------+-------+-------+--------------------+
+     | vms    | vms     | vms   | vms   | peak_wset          |
+     +--------+---------+-------+-------+--------------------+
+     | shared | pfaults | text  |       | wset               |
+     +--------+---------+-------+-------+--------------------+
+     | text   | pageins | data  |       | peak_paged_pool    |
+     +--------+---------+-------+-------+--------------------+
+     | lib    |         | stack |       | paged_pool         |
+     +--------+---------+-------+-------+--------------------+
+     | data   |         |       |       | peak_nonpaged_pool |
+     +--------+---------+-------+-------+--------------------+
+     | dirty  |         |       |       | nonpaged_pool      |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | pagefile           |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | peak_pagefile      |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | private            |
+     +--------+---------+-------+-------+--------------------+
+
+     Windows metrics are extracted from
+     `PROCESS_MEMORY_COUNTERS_EX <http://msdn.microsoft.com/en-us/library/windows/desktop/ms684874(v=vs.85).aspx>`__ structure.
+     Example on Linux:
+
+     >>> import psutil
+     >>> p = psutil.Process()
+     >>> p.memory_info_ex()
+     pextmem(rss=15491072, vms=84025344, shared=5206016, text=2555904, lib=0, data=9891840, dirty=0)
+
+  .. method:: memory_percent()
+
+     Compare physical system memory to process resident memory (RSS) and
+     calculate process memory utilization as a percentage.
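+
+     Example (the value shown is illustrative):
+
+      >>> import psutil
+      >>> psutil.Process().memory_percent()
+      0.7823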
+
+  .. method:: memory_maps(grouped=True)
+
+     Return process's mapped memory regions as a list of namedtuples whose
+     fields are variable depending on the platform. As such, portable
+     applications should rely on namedtuple's `path` and `rss` fields only.
+     This method is useful to obtain a detailed representation of process
+     memory usage as explained
+     `here <http://bmaurer.blogspot.it/2006/03/memory-usage-with-smaps.html>`__.
+     If *grouped* is ``True`` the mapped regions with the same *path* are
+     grouped together and the different memory fields are summed.  If *grouped*
+     is ``False`` every mapped region is shown as a single entity and the
+     namedtuple will also include the mapped region's address space (*addr*)
+     and permission set (*perms*).
+     See `examples/pmap.py <http://code.google.com/p/psutil/source/browse/examples/pmap.py>`__
+     for an example application.
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.memory_maps()
+      [pmmap_grouped(path='/lib/x8664-linux-gnu/libutil-2.15.so', rss=16384, anonymous=8192, swap=0),
+       pmmap_grouped(path='/lib/x8664-linux-gnu/libc-2.15.so', rss=6384, anonymous=15, swap=0),
+       pmmap_grouped(path='/lib/x8664-linux-gnu/libcrypto.so.0.1', rss=34124, anonymous=1245, swap=0),
+       pmmap_grouped(path='[heap]', rss=54653, anonymous=8192, swap=0),
+       pmmap_grouped(path='[stack]', rss=1542, anonymous=166, swap=0),
+       ...]
+      >>>
+
+  .. method:: children(recursive=False)
+
+     Return the children of this process as a list of :class:`Process` objects,
+     pre-emptively checking whether the PID has been reused. If *recursive* is
+     ``True`` return all descendants (children, grandchildren, and so on).
+     Example assuming *A == this process*:
+     ::
+
+          A ─┐
+             │
+             ├─ B (child) ─┐
+             │             └─ X (grandchild) ─┐
+             │                                └─ Y (great grandchild)
+             ├─ C (child)
+             └─ D (child)
+
+          >>> p.children()
+          B, C, D
+          >>> p.children(recursive=True)
+          B, X, Y, C, D
+
+     Note that in the example above, if process X disappears, process Y won't
+     be returned either as the reference to process X is lost.
+
+  .. method:: open_files()
+
+     Return regular files opened by process as a list of namedtuples including
+     the absolute file name and the file descriptor number (on Windows this is
+     always ``-1``). Example:
+
+      >>> import psutil
+      >>> f = open('file.ext', 'w')
+      >>> p = psutil.Process()
+      >>> p.open_files()
+      [popenfile(path='/home/giampaolo/svn/psutil/file.ext', fd=3)]
+
+  .. method:: connections(kind="inet")
+
+    Return socket connections opened by process as a list of namedtuples.
+    To get system-wide connections use :func:`psutil.net_connections()`.
+    Every namedtuple provides 6 attributes:
+
+    - **fd**: the socket file descriptor. This can be passed to
+      `socket.fromfd() <http://docs.python.org/library/socket.html#socket.fromfd>`__
+      to obtain a usable socket object.
+      This is only available on UNIX; on Windows ``-1`` is always returned.
+    - **family**: the address family, either `AF_INET
+      <http://docs.python.org//library/socket.html#socket.AF_INET>`__,
+      `AF_INET6 <http://docs.python.org//library/socket.html#socket.AF_INET6>`__
+      or `AF_UNIX <http://docs.python.org//library/socket.html#socket.AF_UNIX>`__.
+    - **type**: the address type, either `SOCK_STREAM
+      <http://docs.python.org//library/socket.html#socket.SOCK_STREAM>`__ or
+      `SOCK_DGRAM
+      <http://docs.python.org//library/socket.html#socket.SOCK_DGRAM>`__.
+    - **laddr**: the local address as a ``(ip, port)`` tuple or a ``path``
+      in case of AF_UNIX sockets.
+    - **raddr**: the remote address as a ``(ip, port)`` tuple or an absolute
+      ``path`` in case of UNIX sockets.
+      When the remote endpoint is not connected you'll get an empty tuple
+      (AF_INET) or ``None`` (AF_UNIX).
+      On Linux AF_UNIX sockets will always have this set to ``None``.
+    - **status**: represents the status of a TCP connection. The return value
+      is one of the :data:`psutil.CONN_* <psutil.CONN_ESTABLISHED>` constants.
+      For UDP and UNIX sockets this is always going to be
+      :const:`psutil.CONN_NONE`.
+
+    The *kind* parameter is a string which filters for connections that fit the
+    following criteria:
+
+    .. table::
+
+     +----------------+-----------------------------------------------------+
+     | **Kind value** | **Connections using**                               |
+     +================+=====================================================+
+     | "inet"         | IPv4 and IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "inet4"        | IPv4                                                |
+     +----------------+-----------------------------------------------------+
+     | "inet6"        | IPv6                                                |
+     +----------------+-----------------------------------------------------+
+     | "tcp"          | TCP                                                 |
+     +----------------+-----------------------------------------------------+
+     | "tcp4"         | TCP over IPv4                                       |
+     +----------------+-----------------------------------------------------+
+     | "tcp6"         | TCP over IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "udp"          | UDP                                                 |
+     +----------------+-----------------------------------------------------+
+     | "udp4"         | UDP over IPv4                                       |
+     +----------------+-----------------------------------------------------+
+     | "udp6"         | UDP over IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "unix"         | UNIX socket (both UDP and TCP protocols)            |
+     +----------------+-----------------------------------------------------+
+     | "all"          | the sum of all the possible families and protocols  |
+     +----------------+-----------------------------------------------------+
+
+    Example:
+
+      >>> import psutil
+      >>> p = psutil.Process(1694)
+      >>> p.name()
+      'firefox'
+      >>> p.connections()
+      [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED'),
+       pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING'),
+       pconn(fd=119, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED'),
+       pconn(fd=123, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT')]
+
+  .. method:: is_running()
+
+     Return whether this process is still running.
+     This is reliable even in case the process is gone and its PID has been
+     reused by another process, therefore it must be preferred over doing
+     ``psutil.pid_exists(p.pid)``.
+
+     .. note::
+      this will return ``True`` also if the process is a zombie
+      (``p.status() == psutil.STATUS_ZOMBIE``).
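+
+     A quick sketch:
+
+      >>> import psutil
+      >>> p = psutil.Process()  # current process
+      >>> p.is_running()
+      True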
+
+  .. method:: send_signal(signal)
+
+     Send a signal to process (see
+     `signal module <http://docs.python.org//library/signal.html>`__
+     constants) pre-emptively checking whether PID has been reused.
+     This is the same as ``os.kill(pid, sig)``.
+     On Windows only **SIGTERM** is valid and is treated as an alias for
+     :meth:`kill()`.
+
+  .. method:: suspend()
+
+     Suspend process execution with **SIGSTOP** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGSTOP)``.
+     On Windows this is done by suspending all process threads execution.
+
+  .. method:: resume()
+
+     Resume process execution with **SIGCONT** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGCONT)``.
+     On Windows this is done by resuming all process threads execution.
+
+  .. method:: terminate()
+
+     Terminate the process with **SIGTERM** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGTERM)``.
+     On Windows this is an alias for :meth:`kill`.
+
+  .. method:: kill()
+
+     Kill the current process by using **SIGKILL** signal pre-emptively
+     checking whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGKILL)``.
+     On Windows this is done by using
+     `TerminateProcess <http://msdn.microsoft.com/en-us/library/windows/desktop/ms686714(v=vs.85).aspx>`__.
+
+  .. method:: wait(timeout=None)
+
+     Wait for process termination and, if the process is a child of the
+     current one, also return the exit code, else ``None``. On Windows there's
+     no such limitation (exit code is always returned). If the process is
+     already terminated immediately return ``None`` instead of raising
+     :class:`NoSuchProcess`. If *timeout* is specified and process is still
+     alive raise :class:`TimeoutExpired` exception. It can also be used in a
+     non-blocking fashion by specifying ``timeout=0`` in which case it will
+     either return immediately or raise :class:`TimeoutExpired`.
+     To wait for multiple processes use :func:`psutil.wait_procs()`.
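+
+     Example (assumes a ``sleep`` executable is available; the exit code shown
+     is illustrative):
+
+      >>> import psutil, subprocess
+      >>> sub = subprocess.Popen(["sleep", "2"])
+      >>> p = psutil.Process(sub.pid)
+      >>> p.wait(timeout=5)
+      0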
+
+
+Popen class
+-----------
+
+.. class:: Popen(*args, **kwargs)
+
+  A more convenient interface to stdlib
+  `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__.
+  It starts a subprocess and deals with it exactly as when using
+  `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__
+  but in addition it also provides all the methods of
+  :class:`psutil.Process` class in a single interface.
+  For method names common to both classes such as
+  :meth:`send_signal() <psutil.Process.send_signal()>`,
+  :meth:`terminate() <psutil.Process.terminate()>` and
+  :meth:`kill() <psutil.Process.kill()>`
+  :class:`psutil.Process` implementation takes precedence.
+  For a complete documentation refer to
+  `subprocess module documentation <http://docs.python.org/library/subprocess.html>`__.
+
+  .. note::
+
+     Unlike `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__
+     this class pre-emptively checks whether the PID has been reused on
+     :meth:`send_signal() <psutil.Process.send_signal()>`,
+     :meth:`terminate() <psutil.Process.terminate()>` and
+     :meth:`kill() <psutil.Process.kill()>`
+     so that you don't accidentally terminate another process, fixing
+     http://bugs.python.org/issue6973.
+
+  >>> import psutil
+  >>> from subprocess import PIPE
+  >>>
+  >>> p = psutil.Popen(["/usr/bin/python", "-c", "print('hello')"], stdout=PIPE)
+  >>> p.name()
+  'python'
+  >>> p.username()
+  'giampaolo'
+  >>> p.communicate()
+  ('hello\n', None)
+  >>> p.wait(timeout=2)
+  0
+  >>>
+
+Constants
+=========
+
+.. _const-pstatus:
+.. data:: STATUS_RUNNING
+          STATUS_SLEEPING
+          STATUS_DISK_SLEEP
+          STATUS_STOPPED
+          STATUS_TRACING_STOP
+          STATUS_ZOMBIE
+          STATUS_DEAD
+          STATUS_WAKE_KILL
+          STATUS_WAKING
+          STATUS_IDLE
+          STATUS_LOCKED
+          STATUS_WAITING
+
+  A set of strings representing the status of a process.
+  Returned by :meth:`psutil.Process.status()`.
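+
+  For example, a sketch that collects zombie processes::
+
+    import psutil
+
+    zombies = []
+    for proc in psutil.process_iter():
+        try:
+            if proc.status() == psutil.STATUS_ZOMBIE:
+                zombies.append(proc)
+        except psutil.NoSuchProcess:
+            pass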
+
+.. _const-conn:
+.. data:: CONN_ESTABLISHED
+          CONN_SYN_SENT
+          CONN_SYN_RECV
+          CONN_FIN_WAIT1
+          CONN_FIN_WAIT2
+          CONN_TIME_WAIT
+          CONN_CLOSE
+          CONN_CLOSE_WAIT
+          CONN_LAST_ACK
+          CONN_LISTEN
+          CONN_CLOSING
+          CONN_NONE
+          CONN_DELETE_TCB (Windows)
+          CONN_IDLE (Solaris)
+          CONN_BOUND (Solaris)
+
+  A set of strings representing the status of a TCP connection.
+  Returned by :meth:`psutil.Process.connections()` (`status` field).
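+
+  For example, a sketch that lists this process's listening TCP sockets::
+
+    import psutil
+
+    p = psutil.Process()
+    listening = [c for c in p.connections(kind="tcp")
+                 if c.status == psutil.CONN_LISTEN]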
+
+.. _const-prio:
+.. data:: ABOVE_NORMAL_PRIORITY_CLASS
+          BELOW_NORMAL_PRIORITY_CLASS
+          HIGH_PRIORITY_CLASS
+          IDLE_PRIORITY_CLASS
+          NORMAL_PRIORITY_CLASS
+          REALTIME_PRIORITY_CLASS
+
+  A set of integers representing the priority of a process on Windows (see
+  `MSDN documentation <http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx>`__).
+  They can be used in conjunction with
+  :meth:`psutil.Process.nice()` to get or set process priority.
+
+  Availability: Windows
+
+.. _const-ioprio:
+.. data:: IOPRIO_CLASS_NONE
+          IOPRIO_CLASS_RT
+          IOPRIO_CLASS_BE
+          IOPRIO_CLASS_IDLE
+
+  A set of integers representing the I/O priority of a process on Linux. They
+  can be used in conjunction with :meth:`psutil.Process.ionice()` to get or set
+  process I/O priority.
+  *IOPRIO_CLASS_NONE* and *IOPRIO_CLASS_BE* (best effort) are the default for
+  any process that hasn't set a specific I/O priority.
+  *IOPRIO_CLASS_RT* (real time) means the process is given first access to the
+  disk, regardless of what else is going on in the system.
+  *IOPRIO_CLASS_IDLE* means the process will get I/O time when no-one else
+  needs the disk.
+  For further information refer to manuals of
+  `ionice <http://linux.die.net/man/1/ionice>`__
+  command line utility or
+  `ioprio_get <http://linux.die.net/man/2/ioprio_get>`__
+  system call.
+
+  Availability: Linux
+
+.. _const-rlimit:
+.. data:: RLIMIT_INFINITY
+          RLIMIT_AS
+          RLIMIT_CORE
+          RLIMIT_CPU
+          RLIMIT_DATA
+          RLIMIT_FSIZE
+          RLIMIT_LOCKS
+          RLIMIT_MEMLOCK
+          RLIMIT_MSGQUEUE
+          RLIMIT_NICE
+          RLIMIT_NOFILE
+          RLIMIT_NPROC
+          RLIMIT_RSS
+          RLIMIT_RTPRIO
+          RLIMIT_RTTIME
+          RLIMIT_SIGPENDING
+          RLIMIT_STACK
+
+  Constants used for getting and setting process resource limits to be used in
+  conjunction with :meth:`psutil.Process.rlimit()`. See
+  `man prlimit <http://linux.die.net/man/2/prlimit>`__ for further information.
+
+  Availability: Linux

+ 242 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat

@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\psutil.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\psutil.ghc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end

+ 63 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py

@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+List all mounted disk partitions a-la "df -h" command.
+
+$ python examples/disk_usage.py
+Device               Total     Used     Free  Use %      Type  Mount
+/dev/sdb3            18.9G    14.7G     3.3G    77%      ext4  /
+/dev/sda6           345.9G    83.8G   244.5G    24%      ext4  /home
+/dev/sda1           296.0M    43.1M   252.9M    14%      vfat  /boot/efi
+/dev/sda2           600.0M   312.4M   287.6M    52%   fuseblk  /media/Recovery
+"""
+
+import sys
+import os
+import psutil
+from psutil._compat import print_
+
+
+def bytes2human(n):
+    # http://code.activestate.com/recipes/578019
+    # >>> bytes2human(10000)
+    # '9.8K'
+    # >>> bytes2human(100001221)
+    # '95.4M'
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def main():
+    templ = "%-17s %8s %8s %8s %5s%% %9s  %s"
+    print_(templ % ("Device", "Total", "Used", "Free", "Use ", "Type",
+                    "Mount"))
+    for part in psutil.disk_partitions(all=False):
+        if os.name == 'nt':
+            if 'cdrom' in part.opts or part.fstype == '':
+                # skip cd-rom drives with no disk in it; they may raise
+                # ENOENT, pop-up a Windows GUI error for a non-ready
+                # partition or just hang.
+                continue
+        usage = psutil.disk_usage(part.mountpoint)
+        print_(templ % (
+            part.device,
+            bytes2human(usage.total),
+            bytes2human(usage.used),
+            bytes2human(usage.free),
+            int(usage.percent),
+            part.fstype,
+            part.mountpoint))
+
+if __name__ == '__main__':
+    sys.exit(main())

+ 42 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py

@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'free' cmdline utility.
+
+$ python examples/free.py
+             total       used       free     shared    buffers      cache
+Mem:      10125520    8625996    1499524          0     349500    3307836
+Swap:            0          0          0
+"""
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    virt = psutil.virtual_memory()
+    swap = psutil.swap_memory()
+    templ = "%-7s %10s %10s %10s %10s %10s %10s"
+    print_(templ % ('', 'total', 'used', 'free', 'shared', 'buffers', 'cache'))
+    print_(templ % (
+        'Mem:',
+        int(virt.total / 1024),
+        int(virt.used / 1024),
+        int(virt.free / 1024),
+        int(getattr(virt, 'shared', 0) / 1024),
+        int(getattr(virt, 'buffers', 0) / 1024),
+        int(getattr(virt, 'cached', 0) / 1024)))
+    print_(templ % (
+        'Swap:', int(swap.total / 1024),
+        int(swap.used / 1024),
+        int(swap.free / 1024),
+        '',
+        '',
+        ''))
+
+if __name__ == '__main__':
+    main()

+ 178 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py

@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of iotop (http://guichaz.free.fr/iotop/) showing real time
+disk I/O statistics.
+
+It works on Linux only (FreeBSD and OSX are missing support for IO
+counters).
+It doesn't work on Windows as curses module is required.
+
+Example output:
+
+$ python examples/iotop.py
+Total DISK READ: 0.00 B/s | Total DISK WRITE: 472.00 K/s
+PID   USER      DISK READ  DISK WRITE  COMMAND
+13155 giampao    0.00 B/s  428.00 K/s  /usr/bin/google-chrome-beta
+3260  giampao    0.00 B/s    0.00 B/s  bash
+3779  giampao    0.00 B/s    0.00 B/s  gnome-session --session=ubuntu
+3830  giampao    0.00 B/s    0.00 B/s  /usr/bin/dbus-launch
+3831  giampao    0.00 B/s    0.00 B/s  //bin/dbus-daemon --fork --print-pid 5
+3841  giampao    0.00 B/s    0.00 B/s  /usr/lib/at-spi-bus-launcher
+3845  giampao    0.00 B/s    0.00 B/s  /bin/dbus-daemon
+3848  giampao    0.00 B/s    0.00 B/s  /usr/lib/at-spi2-core/at-spi2-registryd
+3862  giampao    0.00 B/s    0.00 B/s  /usr/lib/gnome-settings-daemon
+
+Author: Giampaolo Rodola' <g.rodola@gmail.com>
+"""
+
+import os
+import sys
+import psutil
+if not hasattr(psutil.Process, 'io_counters') or os.name != 'posix':
+    sys.exit('platform not supported')
+import time
+import curses
+import atexit
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- /curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9.8 K/s'
+    >>> bytes2human(100001221)
+    '95.4 M/s'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.2f %s/s' % (value, s)
+    return '%.2f B/s' % (n)
+
+
+def poll(interval):
+    """Calculate IO usage by comparing IO statics before and
+    after the interval.
+    Return a tuple including all currently running processes
+    sorted by IO activity and total disks I/O activity.
+    """
+    # first get a list of all processes and disk io counters
+    procs = [p for p in psutil.process_iter()]
+    for p in procs[:]:
+        try:
+            p._before = p.io_counters()
+        except psutil.Error:
+            procs.remove(p)
+            continue
+    disks_before = psutil.disk_io_counters()
+
+    # sleep some time
+    time.sleep(interval)
+
+    # then retrieve the same info again
+    for p in procs[:]:
+        try:
+            p._after = p.io_counters()
+            p._cmdline = ' '.join(p.cmdline())
+            if not p._cmdline:
+                p._cmdline = p.name()
+            p._username = p.username()
+        except psutil.NoSuchProcess:
+            procs.remove(p)
+    disks_after = psutil.disk_io_counters()
+
+    # finally calculate results by comparing data before and
+    # after the interval
+    for p in procs:
+        p._read_per_sec = p._after.read_bytes - p._before.read_bytes
+        p._write_per_sec = p._after.write_bytes - p._before.write_bytes
+        p._total = p._read_per_sec + p._write_per_sec
+
+    disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
+    disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
+
+    # sort processes by total disk IO so that the more intensive
+    # ones get listed first
+    processes = sorted(procs, key=lambda p: p._total, reverse=True)
+
+    return (processes, disks_read_per_sec, disks_write_per_sec)
+
+
+def refresh_window(procs, disks_read, disks_write):
+    """Print results on screen by using curses."""
+    curses.endwin()
+    templ = "%-5s %-7s %11s %11s  %s"
+    win.erase()
+
+    disks_tot = "Total DISK READ: %s | Total DISK WRITE: %s" \
+                % (bytes2human(disks_read), bytes2human(disks_write))
+    print_line(disks_tot)
+
+    header = templ % ("PID", "USER", "DISK READ", "DISK WRITE", "COMMAND")
+    print_line(header, highlight=True)
+
+    for p in procs:
+        line = templ % (
+            p.pid,
+            p._username[:7],
+            bytes2human(p._read_per_sec),
+            bytes2human(p._write_per_sec),
+            p._cmdline)
+        try:
+            print_line(line)
+        except curses.error:
+            break
+    win.refresh()
+
+
+def main():
+    try:
+        interval = 0
+        while 1:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()

+ 32 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py

@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Kill a process by name.
+"""
+
+import os
+import sys
+import psutil
+
+
+def main():
+    if len(sys.argv) != 2:
+        sys.exit('usage: %s name' % __file__)
+    else:
+        NAME = sys.argv[1]
+
+    killed = []
+    for proc in psutil.process_iter():
+        if proc.name() == NAME and proc.pid != os.getpid():
+            proc.kill()
+            killed.append(proc.pid)
+    if not killed:
+        sys.exit('%s: no process found' % NAME)
+    else:
+        sys.exit(0)
+
+if __name__ == '__main__':
+    sys.exit(main())

+ 69 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py

@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Print system memory information.
+
+$ python examples/meminfo.py
+MEMORY
+------
+Total      :    9.7G
+Available  :    4.9G
+Percent    :    49.0
+Used       :    8.2G
+Free       :    1.4G
+Active     :    5.6G
+Inactive   :    2.1G
+Buffers    :  341.2M
+Cached     :    3.2G
+
+SWAP
+----
+Total      :      0B
+Used       :      0B
+Free       :      0B
+Percent    :     0.0
+Sin        :      0B
+Sout       :      0B
+"""
+
+import psutil
+from psutil._compat import print_
+
+
+def bytes2human(n):
+    # http://code.activestate.com/recipes/578019
+    # >>> bytes2human(10000)
+    # '9.8K'
+    # >>> bytes2human(100001221)
+    # '95.4M'
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def pprint_ntuple(nt):
+    for name in nt._fields:
+        value = getattr(nt, name)
+        if name != 'percent':
+            value = bytes2human(value)
+        print_('%-10s : %7s' % (name.capitalize(), value))
+
+
+def main():
+    print_('MEMORY\n------')
+    pprint_ntuple(psutil.virtual_memory())
+    print_('\nSWAP\n----')
+    pprint_ntuple(psutil.swap_memory())
+
+if __name__ == '__main__':
+    main()

+ 65 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py

@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'netstat -antp' on Linux.
+
+$ python examples/netstat.py
+Proto Local address      Remote address   Status        PID    Program name
+tcp   127.0.0.1:48256    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:47073    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:47072    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:45884    -                LISTEN        13651  GoogleTalkPlugi
+tcp   127.0.0.1:60948    -                LISTEN        13651  GoogleTalkPlugi
+tcp   172.17.42.1:49102  127.0.0.1:19305  CLOSE_WAIT    13651  GoogleTalkPlugi
+tcp   172.17.42.1:55797  127.0.0.1:443    CLOSE_WAIT    13651  GoogleTalkPlugi
+...
+"""
+
+import socket
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+import psutil
+from psutil._compat import print_
+
+
+AD = "-"
+AF_INET6 = getattr(socket, 'AF_INET6', object())
+proto_map = {
+    (AF_INET, SOCK_STREAM): 'tcp',
+    (AF_INET6, SOCK_STREAM): 'tcp6',
+    (AF_INET, SOCK_DGRAM): 'udp',
+    (AF_INET6, SOCK_DGRAM): 'udp6',
+}
+
+
+def main():
+    templ = "%-5s %-30s %-30s %-13s %-6s %s"
+    print_(templ % (
+        "Proto", "Local address", "Remote address", "Status", "PID",
+        "Program name"))
+    proc_names = {}
+    for p in psutil.process_iter():
+        try:
+            proc_names[p.pid] = p.name()
+        except psutil.Error:
+            pass
+    for c in psutil.net_connections(kind='inet'):
+        laddr = "%s:%s" % (c.laddr)
+        raddr = ""
+        if c.raddr:
+            raddr = "%s:%s" % (c.raddr)
+        print_(templ % (
+            proto_map[(c.family, c.type)],
+            laddr,
+            raddr or AD,
+            c.status,
+            c.pid or AD,
+            proc_names.get(c.pid, '?')[:15],
+        ))
+
+if __name__ == '__main__':
+    main()

+ 165 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py

@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# $Id: iotop.py 1160 2011-10-14 18:50:36Z g.rodola@gmail.com $
+#
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Shows real-time network statistics.
+
+Author: Giampaolo Rodola' <g.rodola@gmail.com>
+
+$ python examples/nettop.py
+-----------------------------------------------------------
+total bytes:           sent: 1.49 G       received: 4.82 G
+total packets:         sent: 7338724      received: 8082712
+
+wlan0                     TOTAL         PER-SEC
+-----------------------------------------------------------
+bytes-sent               1.29 G        0.00 B/s
+bytes-recv               3.48 G        0.00 B/s
+pkts-sent               7221782               0
+pkts-recv               6753724               0
+
+eth1                      TOTAL         PER-SEC
+-----------------------------------------------------------
+bytes-sent             131.77 M        0.00 B/s
+bytes-recv               1.28 G        0.00 B/s
+pkts-sent                     0               0
+pkts-recv               1214470               0
+"""
+
+import sys
+import os
+if os.name != 'posix':
+    sys.exit('platform not supported')
+import atexit
+import curses
+import time
+
+import psutil
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9.8 K'
+    >>> bytes2human(100001221)
+    '95.4 M'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.2f %s' % (value, s)
+    return '%.2f B' % (n)
+
+
+def poll(interval):
+    """Retrieve raw stats within an interval window."""
+    tot_before = psutil.net_io_counters()
+    pnic_before = psutil.net_io_counters(pernic=True)
+    # sleep some time
+    time.sleep(interval)
+    tot_after = psutil.net_io_counters()
+    pnic_after = psutil.net_io_counters(pernic=True)
+    return (tot_before, tot_after, pnic_before, pnic_after)
+
+
+def refresh_window(tot_before, tot_after, pnic_before, pnic_after):
+    """Print stats on screen."""
+    global lineno
+
+    # totals
+    print_line("total bytes:           sent: %-10s   received: %s" % (
+        bytes2human(tot_after.bytes_sent),
+        bytes2human(tot_after.bytes_recv))
+    )
+    print_line("total packets:         sent: %-10s   received: %s" % (
+        tot_after.packets_sent, tot_after.packets_recv))
+
+    # per-network interface details: let's sort network interfaces so
+    # that the ones which generated more traffic are shown first
+    print_line("")
+    nic_names = list(pnic_after.keys())
+    nic_names.sort(key=lambda x: sum(pnic_after[x]), reverse=True)
+    for name in nic_names:
+        stats_before = pnic_before[name]
+        stats_after = pnic_after[name]
+        templ = "%-15s %15s %15s"
+        print_line(templ % (name, "TOTAL", "PER-SEC"), highlight=True)
+        print_line(templ % (
+            "bytes-sent",
+            bytes2human(stats_after.bytes_sent),
+            bytes2human(
+                stats_after.bytes_sent - stats_before.bytes_sent) + '/s',
+        ))
+        print_line(templ % (
+            "bytes-recv",
+            bytes2human(stats_after.bytes_recv),
+            bytes2human(
+                stats_after.bytes_recv - stats_before.bytes_recv) + '/s',
+        ))
+        print_line(templ % (
+            "pkts-sent",
+            stats_after.packets_sent,
+            stats_after.packets_sent - stats_before.packets_sent,
+        ))
+        print_line(templ % (
+            "pkts-recv",
+            stats_after.packets_recv,
+            stats_after.packets_recv - stats_before.packets_recv,
+        ))
+        print_line("")
+    win.refresh()
+    lineno = 0
+
+
+def main():
+    try:
+        interval = 0
+        while True:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()

+ 58 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py

@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'pmap' utility on Linux, 'vmmap' on OSX and 'procstat -v' on BSD.
+Report memory map of a process.
+
+$ python examples/pmap.py 32402
+pid=32402, name=hg
+Address                 RSS  Mode    Mapping
+0000000000400000      1200K  r-xp    /usr/bin/python2.7
+0000000000838000         4K  r--p    /usr/bin/python2.7
+0000000000839000       304K  rw-p    /usr/bin/python2.7
+00000000008ae000        68K  rw-p    [anon]
+000000000275e000      5396K  rw-p    [heap]
+00002b29bb1e0000       124K  r-xp    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb203000         8K  rw-p    [anon]
+00002b29bb220000       528K  rw-p    [anon]
+00002b29bb2d8000       768K  rw-p    [anon]
+00002b29bb402000         4K  r--p    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb403000         8K  rw-p    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb405000        60K  r-xp    /lib/x86_64-linux-gnu/libpthread-2.17.so
+00002b29bb41d000         0K  ---p    /lib/x86_64-linux-gnu/libpthread-2.17.so
+00007fff94be6000        48K  rw-p    [stack]
+00007fff94dd1000         4K  r-xp    [vdso]
+ffffffffff600000         0K  r-xp    [vsyscall]
+...
+"""
+
+import sys
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    if len(sys.argv) != 2:
+        sys.exit('usage: pmap <pid>')
+    p = psutil.Process(int(sys.argv[1]))
+    print_("pid=%s, name=%s" % (p.pid, p.name()))
+    templ = "%-16s %10s  %-7s %s"
+    print_(templ % ("Address", "RSS", "Mode", "Mapping"))
+    total_rss = 0
+    for m in p.memory_maps(grouped=False):
+        total_rss += m.rss
+        print_(templ % (
+            m.addr.split('-')[0].zfill(16),
+            str(m.rss / 1024) + 'K',
+            m.perms,
+            m.path))
+    print_("-" * 33)
+    print_(templ % ("Total", str(total_rss / 1024) + 'K', '', ''))
+
+if __name__ == '__main__':
+    main()

+ 162 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py

@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Print detailed information about a process.
+Author: Giampaolo Rodola' <g.rodola@gmail.com>
+
+$ python examples/process_detail.py
+pid               820
+name              python
+exe               /usr/bin/python2.7
+parent            29613 (bash)
+cmdline           python examples/process_detail.py
+started           2014-41-27 03:41
+user              giampaolo
+uids              real=1000, effective=1000, saved=1000
+gids              real=1000, effective=1000, saved=1000
+terminal          /dev/pts/17
+cwd               /ssd/svn/psutil
+memory            0.1% (resident=10.6M, virtual=58.5M)
+cpu               0.0% (user=0.09, system=0.0)
+status            running
+niceness          0
+num threads       1
+I/O               bytes-read=0B, bytes-written=0B
+open files
+running threads   id=820, user-time=0.09, sys-time=0.0
+"""
+
+import datetime
+import os
+import socket
+import sys
+
+import psutil
+
+
+POSIX = os.name == 'posix'
+
+
+def convert_bytes(n):
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def print_(a, b):
+    if sys.stdout.isatty() and POSIX:
+        fmt = '\x1b[1;32m%-17s\x1b[0m %s' % (a, b)
+    else:
+        fmt = '%-15s %s' % (a, b)
+    # python 2/3 compatibility layer
+    sys.stdout.write(fmt + '\n')
+    sys.stdout.flush()
+
+
+def run(pid):
+    ACCESS_DENIED = ''
+    try:
+        p = psutil.Process(pid)
+        pinfo = p.as_dict(ad_value=ACCESS_DENIED)
+    except psutil.NoSuchProcess:
+        sys.exit(str(sys.exc_info()[1]))
+
+    try:
+        parent = p.parent()
+        if parent:
+            parent = '(%s)' % parent.name()
+        else:
+            parent = ''
+    except psutil.Error:
+        parent = ''
+    started = datetime.datetime.fromtimestamp(
+        pinfo['create_time']).strftime('%Y-%m-%d %H:%M')  # %m is month; %M would be minutes
+    io = pinfo.get('io_counters', ACCESS_DENIED)
+    mem = '%s%% (resident=%s, virtual=%s) ' % (
+        round(pinfo['memory_percent'], 1),
+        convert_bytes(pinfo['memory_info'].rss),
+        convert_bytes(pinfo['memory_info'].vms))
+    children = p.children()
+
+    print_('pid', pinfo['pid'])
+    print_('name', pinfo['name'])
+    print_('exe', pinfo['exe'])
+    print_('parent', '%s %s' % (pinfo['ppid'], parent))
+    print_('cmdline', ' '.join(pinfo['cmdline']))
+    print_('started', started)
+    print_('user', pinfo['username'])
+    if POSIX and pinfo['uids'] and pinfo['gids']:
+        print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
+    if POSIX and pinfo['gids']:
+        print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
+    if POSIX:
+        print_('terminal', pinfo['terminal'] or '')
+    if hasattr(p, 'getcwd'):
+        print_('cwd', pinfo['cwd'])
+    print_('memory', mem)
+    print_('cpu', '%s%% (user=%s, system=%s)' % (
+        pinfo['cpu_percent'],
+        getattr(pinfo['cpu_times'], 'user', '?'),
+        getattr(pinfo['cpu_times'], 'system', '?')))
+    print_('status', pinfo['status'])
+    print_('niceness', pinfo['nice'])
+    print_('num threads', pinfo['num_threads'])
+    if io != ACCESS_DENIED:
+        print_('I/O', 'bytes-read=%s, bytes-written=%s' % (
+            convert_bytes(io.read_bytes),
+            convert_bytes(io.write_bytes)))
+    if children:
+        print_('children', '')
+        for child in children:
+            print_('', 'pid=%s name=%s' % (child.pid, child.name()))
+
+    if pinfo['open_files'] != ACCESS_DENIED:
+        print_('open files', '')
+        for file in pinfo['open_files']:
+            print_('', 'fd=%s %s ' % (file.fd, file.path))
+
+    if pinfo['threads']:
+        print_('running threads', '')
+        for thread in pinfo['threads']:
+            print_('', 'id=%s, user-time=%s, sys-time=%s' % (
+                thread.id, thread.user_time, thread.system_time))
+    if pinfo['connections'] not in (ACCESS_DENIED, []):
+        print_('open connections', '')
+        for conn in pinfo['connections']:
+            if conn.type == socket.SOCK_STREAM:
+                type = 'TCP'
+            elif conn.type == socket.SOCK_DGRAM:
+                type = 'UDP'
+            else:
+                type = 'UNIX'
+            lip, lport = conn.laddr
+            if not conn.raddr:
+                rip, rport = '*', '*'
+            else:
+                rip, rport = conn.raddr
+            print_('', '%s:%s -> %s:%s type=%s status=%s' % (
+                lip, lport, rip, rport, type, conn.status))
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+    if len(argv) == 1:
+        sys.exit(run(os.getpid()))
+    elif len(argv) == 2:
+        sys.exit(run(int(argv[1])))
+    else:
+        sys.exit('usage: %s [pid]' % __file__)
+
+if __name__ == '__main__':
+    sys.exit(main())

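Most of the robustness of the script above comes from as_dict(ad_value=...): any
field that raises AccessDenied is replaced by the placeholder value instead of
aborting the whole report. A minimal sketch of that pattern (not part of the
patch; the attribute list is only an example):

    import psutil

    p = psutil.Process()
    # Fields we are not allowed to read come back as None instead of raising.
    info = p.as_dict(attrs=['pid', 'name', 'username', 'memory_percent'],
                     ad_value=None)
    print(info)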
+ 232 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py

@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of top / htop.
+
+Author: Giampaolo Rodola' <g.rodola@gmail.com>
+
+$ python examples/top.py
+ CPU0  [|                                       ]   4.9%
+ CPU1  [|||                                     ]   7.8%
+ CPU2  [                                        ]   2.0%
+ CPU3  [|||||                                   ]  13.9%
+ Mem   [|||||||||||||||||||                     ]  49.8%  4920M/9888M
+ Swap  [                                        ]   0.0%     0M/0M
+ Processes: 287 (running=1 sleeping=286)
+ Load average: 0.34 0.54 0.46  Uptime: 3 days, 10:16:37
+
+PID    USER       NI  VIRT   RES   CPU% MEM%     TIME+  NAME
+------------------------------------------------------------
+989    giampaol    0   66M   12M    7.4  0.1   0:00.61  python
+2083   root        0  506M  159M    6.5  1.6   0:29.26  Xorg
+4503   giampaol    0  599M   25M    6.5  0.3   3:32.60  gnome-terminal
+3868   giampaol    0  358M    8M    2.8  0.1  23:12.60  pulseaudio
+3936   giampaol    0    1G  111M    2.8  1.1  33:41.67  compiz
+4401   giampaol    0  536M  141M    2.8  1.4  35:42.73  skype
+4047   giampaol    0  743M   76M    1.8  0.8  42:03.33  unity-panel-service
+13155  giampaol    0    1G  280M    1.8  2.8  41:57.34  chrome
+10     root        0    0B    0B    0.9  0.0   4:01.81  rcu_sched
+339    giampaol    0    1G  113M    0.9  1.1   8:15.73  chrome
+...
+"""
+
+import os
+import sys
+if os.name != 'posix':
+    sys.exit('platform not supported')
+import atexit
+import curses
+import time
+from datetime import datetime, timedelta
+
+import psutil
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- /curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9K'
+    >>> bytes2human(100001221)
+    '95M'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = int(float(n) / prefix[s])
+            return '%s%s' % (value, s)
+    return "%sB" % n
+
+
+def poll(interval):
+    # sleep some time
+    time.sleep(interval)
+    procs = []
+    procs_status = {}
+    for p in psutil.process_iter():
+        try:
+            p.dict = p.as_dict(['username', 'nice', 'memory_info',
+                                'memory_percent', 'cpu_percent',
+                                'cpu_times', 'name', 'status'])
+            try:
+                procs_status[p.dict['status']] += 1
+            except KeyError:
+                procs_status[p.dict['status']] = 1
+        except psutil.NoSuchProcess:
+            pass
+        else:
+            procs.append(p)
+
+    # return processes sorted by CPU percent usage
+    processes = sorted(procs, key=lambda p: p.dict['cpu_percent'],
+                       reverse=True)
+    return (processes, procs_status)
+
+
+def print_header(procs_status, num_procs):
+    """Print system-related info, above the process list."""
+
+    def get_dashes(perc):
+        dashes = "|" * int((float(perc) / 10 * 4))
+        empty_dashes = " " * (40 - len(dashes))
+        return dashes, empty_dashes
+
+    # cpu usage
+    percs = psutil.cpu_percent(interval=0, percpu=True)
+    for cpu_num, perc in enumerate(percs):
+        dashes, empty_dashes = get_dashes(perc)
+        print_line(" CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes,
+                                              perc))
+    mem = psutil.virtual_memory()
+    dashes, empty_dashes = get_dashes(mem.percent)
+    used = mem.total - mem.available
+    line = " Mem   [%s%s] %5s%% %6s/%s" % (
+        dashes, empty_dashes,
+        mem.percent,
+        str(int(used / 1024 / 1024)) + "M",
+        str(int(mem.total / 1024 / 1024)) + "M"
+    )
+    print_line(line)
+
+    # swap usage
+    swap = psutil.swap_memory()
+    dashes, empty_dashes = get_dashes(swap.percent)
+    line = " Swap  [%s%s] %5s%% %6s/%s" % (
+        dashes, empty_dashes,
+        swap.percent,
+        str(int(swap.used / 1024 / 1024)) + "M",
+        str(int(swap.total / 1024 / 1024)) + "M"
+    )
+    print_line(line)
+
+    # processes number and status
+    st = []
+    for x, y in procs_status.items():
+        if y:
+            st.append("%s=%s" % (x, y))
+    st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=1)
+    print_line(" Processes: %s (%s)" % (num_procs, ' '.join(st)))
+    # load average, uptime
+    uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
+    av1, av2, av3 = os.getloadavg()
+    line = " Load average: %.2f %.2f %.2f  Uptime: %s" \
+        % (av1, av2, av3, str(uptime).split('.')[0])
+    print_line(line)
+
+
+def refresh_window(procs, procs_status):
+    """Print results on screen by using curses."""
+    curses.endwin()
+    templ = "%-6s %-8s %4s %5s %5s %6s %4s %9s  %2s"
+    win.erase()
+    header = templ % ("PID", "USER", "NI", "VIRT", "RES", "CPU%", "MEM%",
+                      "TIME+", "NAME")
+    print_header(procs_status, len(procs))
+    print_line("")
+    print_line(header, highlight=True)
+    for p in procs:
+        # TIME+ column shows process CPU cumulative time and it
+        # is expressed as: "mm:ss.ms"
+        if p.dict['cpu_times'] is not None:
+            ctime = timedelta(seconds=sum(p.dict['cpu_times']))
+            ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60,
+                                  str((ctime.seconds % 60)).zfill(2),
+                                  str(ctime.microseconds)[:2])
+        else:
+            ctime = ''
+        if p.dict['memory_percent'] is not None:
+            p.dict['memory_percent'] = round(p.dict['memory_percent'], 1)
+        else:
+            p.dict['memory_percent'] = ''
+        if p.dict['cpu_percent'] is None:
+            p.dict['cpu_percent'] = ''
+        if p.dict['username']:
+            username = p.dict['username'][:8]
+        else:
+            username = ""
+        line = templ % (p.pid,
+                        username,
+                        p.dict['nice'],
+                        bytes2human(getattr(p.dict['memory_info'], 'vms', 0)),
+                        bytes2human(getattr(p.dict['memory_info'], 'rss', 0)),
+                        p.dict['cpu_percent'],
+                        p.dict['memory_percent'],
+                        ctime,
+                        p.dict['name'] or '',
+                        )
+        try:
+            print_line(line)
+        except curses.error:
+            break
+        win.refresh()
+
+
+def main():
+    try:
+        interval = 0
+        while 1:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()

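The poll() function above relies on psutil's two-step cpu_percent() protocol: a
first non-blocking call primes the per-process counters, and a later call reports
utilization measured since then. A standalone sketch of the same idea without
curses (not part of the patch):

    import time

    import psutil

    # First pass primes the counters; the returned values are discarded.
    for p in psutil.process_iter():
        try:
            p.cpu_percent(interval=None)
        except psutil.NoSuchProcess:
            pass
    time.sleep(1)
    # Second pass reports CPU usage over the elapsed second; show the top 5.
    snapshot = []
    for p in psutil.process_iter():
        try:
            snapshot.append((p.cpu_percent(interval=None), p.pid, p.name()))
        except psutil.NoSuchProcess:
            pass
    for cpu, pid, name in sorted(snapshot, reverse=True)[:5]:
        print("%5.1f%%  %-6s %s" % (cpu, pid, name))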
+ 34 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py

@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of the 'who' command; prints information about users who are
+currently logged in.
+
+$ python examples/who.py
+giampaolo       tty7            2014-02-23 17:25  (:0)
+giampaolo       pts/7           2014-02-24 18:25  (:192.168.1.56)
+giampaolo       pts/8           2014-02-24 18:25  (:0)
+giampaolo       pts/9           2014-02-27 01:32  (:0)
+"""
+
+from datetime import datetime
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    users = psutil.users()
+    for user in users:
+        print_("%-15s %-15s %s  (%s)" % (
+            user.name,
+            user.terminal or '-',
+            datetime.fromtimestamp(user.started).strftime("%Y-%m-%d %H:%M"),
+            user.host))
+
+if __name__ == '__main__':
+    main()

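psutil.users() returns (name, terminal, host, started) namedtuples, where
'started' is a POSIX timestamp; the script above simply formats those fields.
A one-glance sketch of the same call (not part of the patch):

    from datetime import datetime

    import psutil

    for u in psutil.users():
        print("%s logged in on %s from %s since %s" % (
            u.name, u.terminal or '?', u.host or 'localhost',
            datetime.fromtimestamp(u.started).strftime("%Y-%m-%d %H:%M")))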
+ 176 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat

@@ -0,0 +1,176 @@
+@echo off
+
+rem ==========================================================================
+rem Shortcuts for various tasks, emulating UNIX "make" on Windows.
+rem It is primarily intended as a shortcut for compiling / installing
+rem psutil ("make.bat build", "make.bat install") and running tests
+rem ("make.bat test").
+rem
+rem This script is modeled after my Windows installation which uses:
+rem - mingw32 for Python 2.4 and 2.5
+rem - Visual studio 2008 for Python 2.6, 2.7, 3.2
+rem - Visual studio 2010 for Python 3.3+
+rem
+rem By default C:\Python27\python.exe is used.
+rem To compile for a specific Python version run:
+rem
+rem     set PYTHON=C:\Python24\python.exe & make.bat build
+rem
+rem If you compile by using mingw on Python 2.4 and 2.5 you need to patch
+rem distutils first: http://stackoverflow.com/questions/13592192
+rem ==========================================================================
+
+if "%PYTHON%" == "" (
+    set PYTHON=C:\Python27\python.exe
+)
+if "%TSCRIPT%" == "" (
+    set TSCRIPT=test\test_psutil.py
+)
+
+rem Needed to compile using Mingw.
+set PATH=C:\MinGW\bin;%PATH%
+
+rem Needed to locate the .pypirc file and upload exes on PYPI.
+set HOME=%USERPROFILE%
+
+rem ==========================================================================
+
+if "%1" == "help" (
+    :help
+    echo Run `make ^<target^>` where ^<target^> is one of:
+    echo   build         compile without installing
+    echo   build-exes    create exe installers in dist directory
+    echo   clean         clean build files
+    echo   install       compile and install
+    echo   memtest       run memory leak tests
+    echo   test          run tests
+    echo   test-process  run process related tests
+    echo   test-system   run system APIs related tests
+    echo   uninstall     uninstall
+    echo   upload-exes   upload exe installers on pypi
+    goto :eof
+)
+
+if "%1" == "clean" (
+    :clean
+    for /r %%R in (__pycache__) do if exist %%R (rmdir /S /Q %%R)
+    for /r %%R in (*.pyc) do if exist %%R (del /s %%R)
+    for /r %%R in (*.pyd) do if exist %%R (del /s %%R)
+    for /r %%R in (*.orig) do if exist %%R (del /s %%R)
+    for /r %%R in (*.bak) do if exist %%R (del /s %%R)
+    for /r %%R in (*.rej) do if exist %%R (del /s %%R)
+    if exist psutil.egg-info (rmdir /S /Q psutil.egg-info)
+    if exist build (rmdir /S /Q build)
+    if exist dist (rmdir /S /Q dist)
+    goto :eof
+)
+
+if "%1" == "build" (
+    :build
+    if %PYTHON%==C:\Python24\python.exe (
+        %PYTHON% setup.py build -c mingw32
+    ) else if %PYTHON%==C:\Python25\python.exe (
+        %PYTHON% setup.py build -c mingw32
+    ) else (
+        %PYTHON% setup.py build
+    )
+    if %errorlevel% neq 0 goto :error
+    goto :eof
+)
+
+if "%1" == "install" (
+    :install
+    if %PYTHON%==C:\Python24\python.exe (
+        %PYTHON% setup.py build -c mingw32 install
+    ) else if %PYTHON%==C:\Python25\python.exe (
+        %PYTHON% setup.py build -c mingw32 install
+    ) else (
+        %PYTHON% setup.py build install
+    )
+    goto :eof
+)
+
+if "%1" == "uninstall" (
+    :uninstall
+    for %%A in ("%PYTHON%") do (
+        set folder=%%~dpA
+    )
+    for /F "delims=" %%i in ('dir /b %folder%\Lib\site-packages\*psutil*') do (
+        rmdir /S /Q %folder%\Lib\site-packages\%%i
+    )
+    goto :eof
+)
+
+if "%1" == "test" (
+    :test
+    call :install
+    %PYTHON% %TSCRIPT%
+    goto :eof
+)
+
+if "%1" == "test-process" (
+    :test-process
+    call :install
+    %PYTHON% -m unittest -v test.test_psutil.TestProcess
+    goto :eof
+)
+
+if "%1" == "test-system" (
+    :test-system
+    call :install
+    %PYTHON% -m unittest -v test.test_psutil.TestSystem
+    goto :eof
+)
+
+if "%1" == "memtest" (
+    :memtest
+    call :install
+    %PYTHON% test\test_memory_leaks.py
+    goto :eof
+)
+
+if "%1" == "build-exes" (
+    :build-exes
+    rem mingw 32 versions
+    C:\Python24\python.exe setup.py build -c mingw32 bdist_wininst || goto :error
+    C:\Python25\python.exe setup.py build -c mingw32 bdist_wininst || goto :error
+    rem "standard" 32 bit versions, using VS 2008 (2.6, 2.7) or VS 2010 (3.3+)
+    C:\Python26\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python27\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python33\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python34\python.exe setup.py build bdist_wininst || goto :error
+    rem 64 bit versions
+    rem Python 2.7 + VS 2008 requires vcvars64.bat to be run first:
+    rem http://stackoverflow.com/questions/11072521/
+    rem Windows SDK and .NET Framework 3.5 SP1 also need to be installed (sigh)
+    "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars64.bat"
+    C:\Python27-64\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python33-64\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python34-64\python.exe setup.py build bdist_wininst || goto :error
+    echo OK
+    goto :eof
+)
+
+if "%1" == "upload-exes" (
+    :upload-exes
+    rem mingw 32 versions
+    C:\Python25\python.exe setup.py build -c mingw32 bdist_wininst upload || goto :error
+    rem "standard" 32 bit versions, using VS 2008 (2.6, 2.7) or VS 2010 (3.3+)
+    C:\Python26\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python27\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python33\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python34\python.exe setup.py bdist_wininst upload || goto :error
+    rem 64 bit versions
+    C:\Python27-64\python.exe setup.py build bdist_wininst upload || goto :error
+    C:\Python33-64\python.exe setup.py build bdist_wininst upload || goto :error
+    C:\Python34-64\python.exe setup.py build bdist_wininst upload || goto :error
+    echo OK
+    goto :eof
+)
+
+goto :help
+
+:error
+    echo last command exited with error code %errorlevel%
+    exit /b %errorlevel%
+    goto :eof

+ 1987 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py

@@ -0,0 +1,1987 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network)
+in Python.
+"""
+
+from __future__ import division
+
+__author__ = "Giampaolo Rodola'"
+__version__ = "2.1.1"
+version_info = tuple([int(num) for num in __version__.split('.')])
+
+__all__ = [
+    # exceptions
+    "Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
+    # constants
+    "version_info", "__version__",
+    "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
+    "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
+    "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
+    # classes
+    "Process", "Popen",
+    # functions
+    "pid_exists", "pids", "process_iter", "wait_procs",             # proc
+    "virtual_memory", "swap_memory",                                # memory
+    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",   # cpu
+    "net_io_counters", "net_connections",                           # network
+    "disk_io_counters", "disk_partitions", "disk_usage",            # disk
+    "users", "boot_time",                                           # others
+]
+
+import sys
+import os
+import time
+import signal
+import warnings
+import errno
+import subprocess
+try:
+    import pwd
+except ImportError:
+    pwd = None
+
+from psutil._common import memoize
+from psutil._compat import property, callable, defaultdict
+from psutil._compat import (wraps as _wraps,
+                            PY3 as _PY3)
+from psutil._common import (deprecated_method as _deprecated_method,
+                            deprecated as _deprecated,
+                            sdiskio as _nt_sys_diskio,
+                            snetio as _nt_sys_netio)
+
+from psutil._common import (STATUS_RUNNING,
+                            STATUS_SLEEPING,
+                            STATUS_DISK_SLEEP,
+                            STATUS_STOPPED,
+                            STATUS_TRACING_STOP,
+                            STATUS_ZOMBIE,
+                            STATUS_DEAD,
+                            STATUS_WAKING,
+                            STATUS_LOCKED,
+                            STATUS_IDLE,  # bsd
+                            STATUS_WAITING,  # bsd
+                            STATUS_LOCKED)  # bsd
+
+from psutil._common import (CONN_ESTABLISHED,
+                            CONN_SYN_SENT,
+                            CONN_SYN_RECV,
+                            CONN_FIN_WAIT1,
+                            CONN_FIN_WAIT2,
+                            CONN_TIME_WAIT,
+                            CONN_CLOSE,
+                            CONN_CLOSE_WAIT,
+                            CONN_LAST_ACK,
+                            CONN_LISTEN,
+                            CONN_CLOSING,
+                            CONN_NONE)
+
+if sys.platform.startswith("linux"):
+    import psutil._pslinux as _psplatform
+    from psutil._pslinux import (phymem_buffers,
+                                 cached_phymem)
+
+    from psutil._pslinux import (IOPRIO_CLASS_NONE,
+                                 IOPRIO_CLASS_RT,
+                                 IOPRIO_CLASS_BE,
+                                 IOPRIO_CLASS_IDLE)
+    # Linux >= 2.6.36
+    if _psplatform.HAS_PRLIMIT:
+        from _psutil_linux import (RLIM_INFINITY,
+                                   RLIMIT_AS,
+                                   RLIMIT_CORE,
+                                   RLIMIT_CPU,
+                                   RLIMIT_DATA,
+                                   RLIMIT_FSIZE,
+                                   RLIMIT_LOCKS,
+                                   RLIMIT_MEMLOCK,
+                                   RLIMIT_NOFILE,
+                                   RLIMIT_NPROC,
+                                   RLIMIT_RSS,
+                                   RLIMIT_STACK)
+        # Kinda ugly but considerably faster than using hasattr() and
+        # setattr() against the module object (we are at import time:
+        # speed matters).
+        import _psutil_linux
+        try:
+            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
+        except AttributeError:
+            pass
+        del _psutil_linux
+
+elif sys.platform.startswith("win32"):
+    import psutil._pswindows as _psplatform
+    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,
+                                 BELOW_NORMAL_PRIORITY_CLASS,
+                                 HIGH_PRIORITY_CLASS,
+                                 IDLE_PRIORITY_CLASS,
+                                 NORMAL_PRIORITY_CLASS,
+                                 REALTIME_PRIORITY_CLASS)
+    from psutil._pswindows import CONN_DELETE_TCB
+
+elif sys.platform.startswith("darwin"):
+    import psutil._psosx as _psplatform
+
+elif sys.platform.startswith("freebsd"):
+    import psutil._psbsd as _psplatform
+
+elif sys.platform.startswith("sunos"):
+    import psutil._pssunos as _psplatform
+    from psutil._pssunos import (CONN_IDLE,
+                                 CONN_BOUND)
+
+else:
+    raise NotImplementedError('platform %s is not supported' % sys.platform)
+
+__all__.extend(_psplatform.__extra__all__)
+
+
+_TOTAL_PHYMEM = None
+_POSIX = os.name == 'posix'
+_WINDOWS = os.name == 'nt'
+_timer = getattr(time, 'monotonic', time.time)
+
+
+# =====================================================================
+# --- exceptions
+# =====================================================================
+
+class Error(Exception):
+    """Base exception class. All other psutil exceptions inherit
+    from this one.
+    """
+
+
+class NoSuchProcess(Error):
+    """Exception raised when a process with a certain PID doesn't
+    or no longer exists (zombie).
+    """
+
+    def __init__(self, pid, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if name:
+                details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
+            else:
+                details = "(pid=%s)" % self.pid
+            self.msg = "process no longer exists " + details
+
+    def __str__(self):
+        return self.msg
+
+
+class AccessDenied(Error):
+    """Exception raised when permission to perform an action is denied."""
+
+    def __init__(self, pid=None, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if (pid is not None) and (name is not None):
+                self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
+            elif (pid is not None):
+                self.msg = "(pid=%s)" % self.pid
+            else:
+                self.msg = ""
+
+    def __str__(self):
+        return self.msg
+
+
+class TimeoutExpired(Error):
+    """Raised on Process.wait(timeout) if timeout expires and process
+    is still alive.
+    """
+
+    def __init__(self, seconds, pid=None, name=None):
+        Error.__init__(self)
+        self.seconds = seconds
+        self.pid = pid
+        self.name = name
+        self.msg = "timeout after %s seconds" % seconds
+        if (pid is not None) and (name is not None):
+            self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
+        elif (pid is not None):
+            self.msg += " (pid=%s)" % self.pid
+
+    def __str__(self):
+        return self.msg
+
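+# Illustrative usage sketch (not part of the upstream psutil source): the
+# exception classes above are typically handled together when querying a
+# process that may have exited or may be privileged, e.g.:
+#
+#     try:
+#         name = psutil.Process(pid).name()
+#     except psutil.NoSuchProcess:
+#         name = None          # the PID is gone (or never existed)
+#     except psutil.AccessDenied:
+#         name = '<denied>'    # insufficient privileges
+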
+# push exception classes into platform specific module namespace
+_psplatform.NoSuchProcess = NoSuchProcess
+_psplatform.AccessDenied = AccessDenied
+_psplatform.TimeoutExpired = TimeoutExpired
+
+
+# =====================================================================
+# --- Process class
+# =====================================================================
+
+def _assert_pid_not_reused(fun):
+    """Decorator which raises NoSuchProcess in case a process is no
+    longer running or its PID has been reused.
+    """
+    @_wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        if not self.is_running():
+            raise NoSuchProcess(self.pid, self._name)
+        return fun(self, *args, **kwargs)
+    return wrapper
+
+
+class Process(object):
+    """Represents an OS process with the given PID.
+    If PID is omitted current process PID (os.getpid()) is used.
+    Raise NoSuchProcess if PID does not exist.
+
+    Note that most of the methods of this class do not make sure
+    that the PID of the process being queried has not been reused over
+    time. That means you might end up retrieving information that
+    actually refers to another process, in case the original process
+    this instance refers to is gone in the meantime.
+
+    The only exceptions for which process identity is pre-emptively
+    checked and guaranteed are:
+
+     - parent()
+     - children()
+     - nice() (set)
+     - ionice() (set)
+     - rlimit() (set)
+     - cpu_affinity (set)
+     - suspend()
+     - resume()
+     - send_signal()
+     - terminate()
+     - kill()
+
+    To prevent this problem for all other methods you can:
+      - use is_running() before querying the process
+      - if you're continuously iterating over a set of Process
+        instances use process_iter() which pre-emptively checks
+        process identity for every yielded instance
+    """
+
+    def __init__(self, pid=None):
+        self._init(pid)
+
+    def _init(self, pid, _ignore_nsp=False):
+        if pid is None:
+            pid = os.getpid()
+        else:
+            if not _PY3 and not isinstance(pid, (int, long)):
+                raise TypeError('pid must be an integer (got %r)' % pid)
+            if pid < 0:
+                raise ValueError('pid must be a positive integer (got %s)'
+                                 % pid)
+        self._pid = pid
+        self._name = None
+        self._exe = None
+        self._create_time = None
+        self._gone = False
+        self._hash = None
+        # used for caching on Windows only (on POSIX ppid may change)
+        self._ppid = None
+        # platform-specific modules define an _psplatform.Process
+        # implementation class
+        self._proc = _psplatform.Process(pid)
+        self._last_sys_cpu_times = None
+        self._last_proc_cpu_times = None
+        # cache creation time for later use in is_running() method
+        try:
+            self.create_time()
+        except AccessDenied:
+            # we should never get here as AFAIK we're able to get
+            # process creation time on all platforms even as a
+            # limited user
+            pass
+        except NoSuchProcess:
+            if not _ignore_nsp:
+                msg = 'no process found with pid %s' % pid
+                raise NoSuchProcess(pid, None, msg)
+            else:
+                self._gone = True
+        # This pair is supposed to identify a Process instance
+        # uniquely over time (the PID alone is not enough as
+        # it might refer to a process whose PID has been reused).
+        # This will be used later in __eq__() and is_running().
+        self._ident = (self.pid, self._create_time)
+
+    def __str__(self):
+        try:
+            pid = self.pid
+            name = repr(self.name())
+        except NoSuchProcess:
+            details = "(pid=%s (terminated))" % self.pid
+        except AccessDenied:
+            details = "(pid=%s)" % (self.pid)
+        else:
+            details = "(pid=%s, name=%s)" % (pid, name)
+        return "%s.%s%s" % (self.__class__.__module__,
+                            self.__class__.__name__, details)
+
+    def __repr__(self):
+        return "<%s at %s>" % (self.__str__(), id(self))
+
+    def __eq__(self, other):
+        # Test for equality with another Process object based
+        # on PID and creation time.
+        if not isinstance(other, Process):
+            return NotImplemented
+        return self._ident == other._ident
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self._hash is None:
+            self._hash = hash(self._ident)
+        return self._hash
+
+    # --- utility methods
+
+    def as_dict(self, attrs=[], ad_value=None):
+        """Utility method returning process information as a
+        dictionary.
+
+        If 'attrs' is specified it must be a list of strings
+        reflecting available Process class' attribute names
+        (e.g. ['cpu_times', 'name']) else all public (read
+        only) attributes are assumed.
+
+        'ad_value' is the value which gets assigned in case an
+        AccessDenied exception is raised when retrieving that
+        particular process information.
+        """
+        excluded_names = set(
+            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
+             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
+        retdict = dict()
+        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])
+        for name in ls:
+            if name.startswith('_'):
+                continue
+            if name.startswith('set_'):
+                continue
+            if name.startswith('get_'):
+                msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = name[4:]
+                if name in ls:
+                    continue
+            if name == 'getcwd':
+                msg = "getcwd() is deprecated; use cwd() instead"
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = 'cwd'
+                if name in ls:
+                    continue
+
+            if name in excluded_names:
+                continue
+            try:
+                attr = getattr(self, name)
+                if callable(attr):
+                    ret = attr()
+                else:
+                    ret = attr
+            except AccessDenied:
+                ret = ad_value
+            except NotImplementedError:
+                # in case of not implemented functionality (may happen
+                # on old or exotic systems) we want to crash only if
+                # the user explicitly asked for that particular attr
+                if attrs:
+                    raise
+                continue
+            retdict[name] = ret
+        return retdict
+
+    def parent(self):
+        """Return the parent process as a Process object pre-emptively
+        checking whether PID has been reused.
+        If no parent is known return None.
+        """
+        ppid = self.ppid()
+        if ppid is not None:
+            try:
+                parent = Process(ppid)
+                if parent.create_time() <= self.create_time():
+                    return parent
+                # ...else ppid has been reused by another process
+            except NoSuchProcess:
+                pass
+
+    def is_running(self):
+        """Return whether this process is running.
+        It also checks if PID has been reused by another process in
+        which case return False.
+        """
+        if self._gone:
+            return False
+        try:
+            # Checking if PID is alive is not enough as the PID might
+            # have been reused by another process: we also want to
+            # check process identity.
+            # Process identity / uniqueness over time is guaranteed by
+            # (PID + creation time) and that is verified in __eq__.
+            return self == Process(self.pid)
+        except NoSuchProcess:
+            self._gone = True
+            return False
+
+    # --- actual API
+
+    @property
+    def pid(self):
+        """The process PID."""
+        return self._pid
+
+    def ppid(self):
+        """The process parent PID.
+        On Windows the return value is cached after first call.
+        """
+        # On POSIX we don't want to cache the ppid as it may unexpectedly
+        # change to 1 (init) in case this process turns into a zombie:
+        # https://code.google.com/p/psutil/issues/detail?id=321
+        # http://stackoverflow.com/questions/356722/
+
+        # XXX should we check creation time here rather than in
+        # Process.parent()?
+        if _POSIX:
+            return self._proc.ppid()
+        else:
+            if self._ppid is None:
+                self._ppid = self._proc.ppid()
+            return self._ppid
+
+    def name(self):
+        """The process name. The return value is cached after first call."""
+        if self._name is None:
+            name = self._proc.name()
+            if _POSIX and len(name) >= 15:
+                # On UNIX the name gets truncated to the first 15 characters.
+                # If it matches the first part of the cmdline we return that
+                # one instead because it's usually more descriptive.
+                # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
+                try:
+                    cmdline = self.cmdline()
+                except AccessDenied:
+                    pass
+                else:
+                    if cmdline:
+                        extended_name = os.path.basename(cmdline[0])
+                        if extended_name.startswith(name):
+                            name = extended_name
+            self._proc._name = name
+            self._name = name
+        return self._name
+
+    def exe(self):
+        """The process executable as an absolute path.
+        May also be an empty string.
+        The return value is cached after first call.
+        """
+        def guess_it(fallback):
+            # try to guess exe from cmdline[0] in absence of a native
+            # exe representation
+            cmdline = self.cmdline()
+            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
+                exe = cmdline[0]  # the possible exe
+                # Attempt to guess only in case of an absolute path.
+                # It is not safe otherwise as the process might have
+                # changed cwd.
+                if (os.path.isabs(exe)
+                        and os.path.isfile(exe)
+                        and os.access(exe, os.X_OK)):
+                    return exe
+            if isinstance(fallback, AccessDenied):
+                raise fallback
+            return fallback
+
+        if self._exe is None:
+            try:
+                exe = self._proc.exe()
+            except AccessDenied:
+                err = sys.exc_info()[1]
+                return guess_it(fallback=err)
+            else:
+                if not exe:
+                    # underlying implementation can legitimately return an
+                    # empty string; if that's the case we don't want to
+                    # raise AD while guessing from the cmdline
+                    try:
+                        exe = guess_it(fallback=exe)
+                    except AccessDenied:
+                        pass
+                self._exe = exe
+        return self._exe
+
+    def cmdline(self):
+        """The command line this process has been called with."""
+        return self._proc.cmdline()
+
+    def status(self):
+        """The process current status as a STATUS_* constant."""
+        return self._proc.status()
+
+    def username(self):
+        """The name of the user that owns the process.
+        On UNIX this is calculated by using *real* process uid.
+        """
+        if _POSIX:
+            if pwd is None:
+                # might happen if python was installed from sources
+                raise ImportError(
+                    "requires pwd module shipped with standard python")
+            return pwd.getpwuid(self.uids().real).pw_name
+        else:
+            return self._proc.username()
+
+    def create_time(self):
+        """The process creation time as a floating point number
+        expressed in seconds since the epoch, in UTC.
+        The return value is cached after first call.
+        """
+        if self._create_time is None:
+            self._create_time = self._proc.create_time()
+        return self._create_time
+
+    def cwd(self):
+        """Process current working directory as an absolute path."""
+        return self._proc.cwd()
+
+    def nice(self, value=None):
+        """Get or set process niceness (priority)."""
+        if value is None:
+            return self._proc.nice_get()
+        else:
+            if not self.is_running():
+                raise NoSuchProcess(self.pid, self._name)
+            self._proc.nice_set(value)
+
+    if _POSIX:
+
+        def uids(self):
+            """Return process UIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.uids()
+
+        def gids(self):
+            """Return process GIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.gids()
+
+        def terminal(self):
+            """The terminal associated with this process, if any,
+            else None.
+            """
+            return self._proc.terminal()
+
+        def num_fds(self):
+            """Return the number of file descriptors opened by this
+            process (POSIX only).
+            """
+            return self._proc.num_fds()
+
+    # Linux, BSD and Windows only
+    if hasattr(_psplatform.Process, "io_counters"):
+
+        def io_counters(self):
+            """Return process I/O statistics as a
+            (read_count, write_count, read_bytes, write_bytes)
+            namedtuple.
+            Those are the number of read/write calls performed and the
+            amount of bytes read and written by the process.
+            """
+            return self._proc.io_counters()
+
+    # Linux and Windows >= Vista only
+    if hasattr(_psplatform.Process, "ionice_get"):
+
+        def ionice(self, ioclass=None, value=None):
+            """Get or set process I/O niceness (priority).
+
+            On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
+            'value' is a number which goes from 0 to 7. The higher the
+            value, the lower the I/O priority of the process.
+
+            On Windows only 'ioclass' is used and it can be set to 2
+            (normal), 1 (low) or 0 (very low).
+
+            Available on Linux and Windows > Vista only.
+            """
+            if ioclass is None:
+                if value is not None:
+                    raise ValueError("'ioclass' must be specified")
+                return self._proc.ionice_get()
+            else:
+                return self._proc.ionice_set(ioclass, value)
+
+    # Linux only
+    if hasattr(_psplatform.Process, "rlimit"):
+
+        def rlimit(self, resource, limits=None):
+            """Get or set process resource limits as a (soft, hard)
+            tuple.
+
+            'resource' is one of the RLIMIT_* constants.
+            'limits' is supposed to be a (soft, hard) tuple.
+
+            See "man prlimit" for further info.
+            Available on Linux only.
+            """
+            if limits is None:
+                return self._proc.rlimit(resource)
+            else:
+                return self._proc.rlimit(resource, limits)
+
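+    # Illustrative usage sketch (not part of the upstream psutil source),
+    # Linux only, mirroring the rlimit() docstring above; values are examples:
+    #
+    #     >>> p = psutil.Process()
+    #     >>> p.rlimit(psutil.RLIMIT_NOFILE)               # get (soft, hard)
+    #     (1024, 4096)
+    #     >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 4096))  # set
+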
+    # Windows and Linux only
+    if hasattr(_psplatform.Process, "cpu_affinity_get"):
+
+        def cpu_affinity(self, cpus=None):
+            """Get or set process CPU affinity.
+            If specified 'cpus' must be a list of CPUs for which you
+            want to set the affinity (e.g. [0, 1]).
+            """
+            if cpus is None:
+                return self._proc.cpu_affinity_get()
+            else:
+                self._proc.cpu_affinity_set(cpus)
+
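+    # Illustrative usage sketch (not part of the upstream psutil source);
+    # the CPU numbers are examples:
+    #
+    #     >>> p = psutil.Process()
+    #     >>> p.cpu_affinity()          # e.g. [0, 1, 2, 3]
+    #     >>> p.cpu_affinity([0, 1])    # pin the process to the first two CPUs
+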
+    if _WINDOWS:
+
+        def num_handles(self):
+            """Return the number of handles opened by this process
+            (Windows only).
+            """
+            return self._proc.num_handles()
+
+    def num_ctx_switches(self):
+        """Return the number of voluntary and involuntary context
+        switches performed by this process.
+        """
+        return self._proc.num_ctx_switches()
+
+    def num_threads(self):
+        """Return the number of threads used by this process."""
+        return self._proc.num_threads()
+
+    def threads(self):
+        """Return threads opened by process as a list of
+        (id, user_time, system_time) namedtuples representing
+        thread id and thread CPU times (user/system).
+        """
+        return self._proc.threads()
+
+    @_assert_pid_not_reused
+    def children(self, recursive=False):
+        """Return the children of this process as a list of Process
+        instances, pre-emptively checking whether PID has been reused.
+        If recursive is True return all the descendants of this process.
+
+        Example (A == this process):
+
+         A ─┐
+            │
+            ├─ B (child) ─┐
+            │             └─ X (grandchild) ─┐
+            │                                └─ Y (great grandchild)
+            ├─ C (child)
+            └─ D (child)
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.children()
+        B, C, D
+        >>> p.children(recursive=True)
+        B, X, Y, C, D
+
+        Note that in the example above if process X disappears
+        process Y won't be listed as the reference to process A
+        is lost.
+        """
+        if hasattr(_psplatform, 'ppid_map'):
+            # Windows only: obtain a {pid:ppid, ...} dict for all running
+            # processes in one shot (faster).
+            ppid_map = _psplatform.ppid_map()
+        else:
+            ppid_map = None
+
+        ret = []
+        if not recursive:
+            if ppid_map is None:
+                # 'slow' version, common to all platforms except Windows
+                for p in process_iter():
+                    try:
+                        if p.ppid() == self.pid:
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= p.create_time():
+                                ret.append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                # Windows only (faster)
+                for pid, ppid in ppid_map.items():
+                    if ppid == self.pid:
+                        try:
+                            child = Process(pid)
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= child.create_time():
+                                ret.append(child)
+                        except NoSuchProcess:
+                            pass
+        else:
+            # construct a dict where 'values' are all the processes
+            # having 'key' as their parent
+            table = defaultdict(list)
+            if ppid_map is None:
+                for p in process_iter():
+                    try:
+                        table[p.ppid()].append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                for pid, ppid in ppid_map.items():
+                    try:
+                        p = Process(pid)
+                        table[ppid].append(p)
+                    except NoSuchProcess:
+                        pass
+            # At this point we have a mapping table where table[self.pid]
+            # are the current process' children.
+            # Below, we look for all descendants recursively, similarly
+            # to a recursive function call.
+            checkpids = [self.pid]
+            for pid in checkpids:
+                for child in table[pid]:
+                    try:
+                        # if child happens to be older than its parent
+                        # (self) it means child's PID has been reused
+                        intime = self.create_time() <= child.create_time()
+                    except NoSuchProcess:
+                        pass
+                    else:
+                        if intime:
+                            ret.append(child)
+                            if child.pid not in checkpids:
+                                checkpids.append(child.pid)
+        return ret
+
+    def cpu_percent(self, interval=None):
+        """Return a float representing the current process CPU
+        utilization as a percentage.
+
+        When interval is 0.0 or None (default) compares process times
+        to system CPU times elapsed since last call, returning
+        immediately (non-blocking). That means the first call returns
+        0.0, which should be ignored (there is no earlier sample yet).
+
+        When interval is > 0.0 compares process times to system CPU
+        times elapsed before and after the interval (blocking).
+
+        In this case it is recommended, for accuracy, that this function
+        be called with at least 0.1 seconds between calls.
+
+        Examples:
+
+          >>> import psutil
+          >>> p = psutil.Process(os.getpid())
+          >>> # blocking
+          >>> p.cpu_percent(interval=1)
+          2.0
+          >>> # non-blocking (percentage since last call)
+          >>> p.cpu_percent(interval=None)
+          2.9
+          >>>
+        """
+        blocking = interval is not None and interval > 0.0
+        num_cpus = cpu_count()
+        if _POSIX:
+            timer = lambda: _timer() * num_cpus
+        else:
+            timer = lambda: sum(cpu_times())
+        if blocking:
+            st1 = timer()
+            pt1 = self._proc.cpu_times()
+            time.sleep(interval)
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+        else:
+            st1 = self._last_sys_cpu_times
+            pt1 = self._last_proc_cpu_times
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+            if st1 is None or pt1 is None:
+                self._last_sys_cpu_times = st2
+                self._last_proc_cpu_times = pt2
+                return 0.0
+
+        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
+        delta_time = st2 - st1
+        # reset values for next call in case of interval == None
+        self._last_sys_cpu_times = st2
+        self._last_proc_cpu_times = pt2
+
+        try:
+            # The utilization split between all CPUs.
+            # Note: a percentage > 100 is legitimate as it can result
+            # from a process with multiple threads running on different
+            # CPU cores, see:
+            # http://stackoverflow.com/questions/1032357
+            # https://code.google.com/p/psutil/issues/detail?id=474
+            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
+        except ZeroDivisionError:
+            # interval was too low
+            return 0.0
+        else:
+            return round(overall_percent, 1)
+
+    def cpu_times(self):
+        """Return a (user, system) namedtuple representing the
+        accumulated process time, in seconds.
+        This is the same as os.times() but per-process.
+        """
+        return self._proc.cpu_times()
+
+    def memory_info(self):
+        """Return a tuple representing RSS (Resident Set Size) and VMS
+        (Virtual Memory Size) in bytes.
+
+        On UNIX RSS and VMS are the same values shown by 'ps'.
+
+        On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
+        columns of taskmgr.exe.
+        """
+        return self._proc.memory_info()
+
+    def memory_info_ex(self):
+        """Return a namedtuple with variable fields depending on the
+        platform representing extended memory information about
+        this process. All numbers are expressed in bytes.
+        """
+        return self._proc.memory_info_ex()
+
+    def memory_percent(self):
+        """Compare physical system memory to process resident memory
+        (RSS) and calculate process memory utilization as a percentage.
+        """
+        rss = self._proc.memory_info()[0]
+        # use cached value if available
+        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
+        try:
+            return (rss / float(total_phymem)) * 100
+        except ZeroDivisionError:
+            return 0.0
+
+    def memory_maps(self, grouped=True):
+        """Return process' mapped memory regions as a list of namedtuples
+        whose fields are variable depending on the platform.
+
+        If 'grouped' is True the mapped regions with the same 'path'
+        are grouped together and the different memory fields are summed.
+
+        If 'grouped' is False every mapped region is shown as a single
+        entity and the namedtuple will also include the mapped region's
+        address space ('addr') and permission set ('perms').
+        """
+        it = self._proc.memory_maps()
+        if grouped:
+            d = {}
+            for tupl in it:
+                path = tupl[2]
+                nums = tupl[3:]
+                try:
+                    d[path] = map(lambda x, y: x + y, d[path], nums)
+                except KeyError:
+                    d[path] = nums
+            nt = _psplatform.pmmap_grouped
+            return [nt(path, *d[path]) for path in d]
+        else:
+            nt = _psplatform.pmmap_ext
+            return [nt(*x) for x in it]
+
+    def open_files(self):
+        """Return files opened by process as a list of
+        (path, fd) namedtuples including the absolute file name
+        and file descriptor number.
+        """
+        return self._proc.open_files()
+
+    def connections(self, kind='inet'):
+        """Return connections opened by process as a list of
+        (fd, family, type, laddr, raddr, status) namedtuples.
+        The 'kind' parameter filters for connections that match the
+        following criteria:
+
+        Kind Value      Connections using
+        inet            IPv4 and IPv6
+        inet4           IPv4
+        inet6           IPv6
+        tcp             TCP
+        tcp4            TCP over IPv4
+        tcp6            TCP over IPv6
+        udp             UDP
+        udp4            UDP over IPv4
+        udp6            UDP over IPv6
+        unix            UNIX socket (both UDP and TCP protocols)
+        all             the sum of all the possible families and protocols
+        """
+        return self._proc.connections(kind)
+
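+    # Illustrative usage sketch (not part of the upstream psutil source): the
+    # 'kind' filter documented above narrows the result, e.g. TCP sockets only;
+    # the statuses shown are examples:
+    #
+    #     >>> p = psutil.Process()
+    #     >>> [c.status for c in p.connections(kind='tcp')]
+    #     ['ESTABLISHED', 'LISTEN']
+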
+    if _POSIX:
+        def _send_signal(self, sig):
+            try:
+                os.kill(self.pid, sig)
+            except OSError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ESRCH:
+                    self._gone = True
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno == errno.EPERM:
+                    raise AccessDenied(self.pid, self._name)
+                raise
+
+    @_assert_pid_not_reused
+    def send_signal(self, sig):
+        """Send a signal to the process, pre-emptively checking whether
+        its PID has been reused (see signal module constants).
+        On Windows only SIGTERM is valid and is treated as an alias
+        for kill().
+        """
+        if _POSIX:
+            self._send_signal(sig)
+        else:
+            if sig == signal.SIGTERM:
+                self._proc.kill()
+            else:
+                raise ValueError("only SIGTERM is supported on Windows")
+
+    @_assert_pid_not_reused
+    def suspend(self):
+        """Suspend process execution with SIGSTOP pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of suspending all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGSTOP)
+        else:
+            self._proc.suspend()
+
+    @_assert_pid_not_reused
+    def resume(self):
+        """Resume process execution with SIGCONT pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of resuming all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGCONT)
+        else:
+            self._proc.resume()
+
+    @_assert_pid_not_reused
+    def terminate(self):
+        """Terminate the process with SIGTERM pre-emptively checking
+        whether PID has been reused.
+        On Windows this is an alias for kill().
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGTERM)
+        else:
+            self._proc.kill()
+
+    @_assert_pid_not_reused
+    def kill(self):
+        """Kill the current process with SIGKILL pre-emptively checking
+        whether PID has been reused.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGKILL)
+        else:
+            self._proc.kill()
+
+    def wait(self, timeout=None):
+        """Wait for process to terminate and, if process is a children
+        of os.getpid(), also return its exit code, else None.
+
+        If the process is already terminated immediately return None
+        instead of raising NoSuchProcess.
+
+        If timeout (in seconds) is specified and process is still alive
+        raise TimeoutExpired.
+
+        To wait for multiple Process(es) use psutil.wait_procs().
+        """
+        if timeout is not None and not timeout >= 0:
+            raise ValueError("timeout must be a positive integer")
+        return self._proc.wait(timeout)
+
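Editor's note: an illustrative sketch of the wait()/TimeoutExpired contract described above (not part of this commit); it assumes a POSIX 'sleep' binary is available to spawn a throwaway child:

    import subprocess
    import psutil

    child = subprocess.Popen(['sleep', '10'])
    p = psutil.Process(child.pid)
    try:
        rc = p.wait(timeout=3)        # exit code, since the child is ours
    except psutil.TimeoutExpired:
        p.kill()                      # escalate if it did not exit in time
        rc = p.wait()
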
+    # --- deprecated APIs
+
+    _locals = set(locals())
+
+    @_deprecated_method(replacement='children')
+    def get_children(self):
+        pass
+
+    @_deprecated_method(replacement='connections')
+    def get_connections(self):
+        pass
+
+    if "cpu_affinity" in _locals:
+        @_deprecated_method(replacement='cpu_affinity')
+        def get_cpu_affinity(self):
+            pass
+
+        @_deprecated_method(replacement='cpu_affinity')
+        def set_cpu_affinity(self, cpus):
+            pass
+
+    @_deprecated_method(replacement='cpu_percent')
+    def get_cpu_percent(self):
+        pass
+
+    @_deprecated_method(replacement='cpu_times')
+    def get_cpu_times(self):
+        pass
+
+    @_deprecated_method(replacement='cwd')
+    def getcwd(self):
+        pass
+
+    @_deprecated_method(replacement='memory_info_ex')
+    def get_ext_memory_info(self):
+        pass
+
+    if "io_counters" in _locals:
+        @_deprecated_method(replacement='io_counters')
+        def get_io_counters(self):
+            pass
+
+    if "ionice" in _locals:
+        @_deprecated_method(replacement='ionice')
+        def get_ionice(self):
+            pass
+
+        @_deprecated_method(replacement='ionice')
+        def set_ionice(self, ioclass, value=None):
+            pass
+
+    @_deprecated_method(replacement='memory_info')
+    def get_memory_info(self):
+        pass
+
+    @_deprecated_method(replacement='memory_maps')
+    def get_memory_maps(self):
+        pass
+
+    @_deprecated_method(replacement='memory_percent')
+    def get_memory_percent(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def get_nice(self):
+        pass
+
+    @_deprecated_method(replacement='num_ctx_switches')
+    def get_num_ctx_switches(self):
+        pass
+
+    if 'num_fds' in _locals:
+        @_deprecated_method(replacement='num_fds')
+        def get_num_fds(self):
+            pass
+
+    if 'num_handles' in _locals:
+        @_deprecated_method(replacement='num_handles')
+        def get_num_handles(self):
+            pass
+
+    @_deprecated_method(replacement='num_threads')
+    def get_num_threads(self):
+        pass
+
+    @_deprecated_method(replacement='open_files')
+    def get_open_files(self):
+        pass
+
+    if "rlimit" in _locals:
+        @_deprecated_method(replacement='rlimit')
+        def get_rlimit(self):
+            pass
+
+        @_deprecated_method(replacement='rlimit')
+        def set_rlimit(self, resource, limits):
+            pass
+
+    @_deprecated_method(replacement='threads')
+    def get_threads(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def set_nice(self, value):
+        pass
+
+    del _locals
+
+
+# =====================================================================
+# --- Popen class
+# =====================================================================
+
+class Popen(Process):
+    """A more convenient interface to stdlib subprocess module.
+    It starts a subprocess and deals with it exactly as when using
+    the subprocess.Popen class, but in addition also provides all the
+    properties and methods of the psutil.Process class as a unified
+    interface:
+
+      >>> import psutil
+      >>> from subprocess import PIPE
+      >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
+      >>> p.name()
+      'python'
+      >>> p.uids()
+      user(real=1000, effective=1000, saved=1000)
+      >>> p.username()
+      'giampaolo'
+      >>> p.communicate()
+      ('hi\n', None)
+      >>> p.terminate()
+      >>> p.wait(timeout=2)
+      0
+      >>>
+
+    For method names common to both classes such as kill(), terminate()
+    and wait(), psutil.Process implementation takes precedence.
+
+    Unlike subprocess.Popen this class pre-emptively checks whether the
+    PID has been reused on send_signal(), terminate() and kill() so that
+    you don't accidentally terminate another process, fixing
+    http://bugs.python.org/issue6973.
+
+    For complete documentation refer to:
+    http://docs.python.org/library/subprocess.html
+    """
+
+    def __init__(self, *args, **kwargs):
+        # Explicitly avoid raising NoSuchProcess in case the process
+        # spawned by subprocess.Popen terminates too quickly, see:
+        # https://code.google.com/p/psutil/issues/detail?id=193
+        self.__subproc = subprocess.Popen(*args, **kwargs)
+        self._init(self.__subproc.pid, _ignore_nsp=True)
+
+    def __dir__(self):
+        return sorted(set(dir(Popen) + dir(subprocess.Popen)))
+
+    def __getattribute__(self, name):
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            try:
+                return object.__getattribute__(self.__subproc, name)
+            except AttributeError:
+                raise AttributeError("%s instance has no attribute '%s'"
+                                     % (self.__class__.__name__, name))
+
+    def wait(self, timeout=None):
+        if self.__subproc.returncode is not None:
+            return self.__subproc.returncode
+        ret = super(Popen, self).wait(timeout)
+        self.__subproc.returncode = ret
+        return ret
+
+
+# =====================================================================
+# --- system processes related functions
+# =====================================================================
+
+def pids():
+    """Return a list of current running PIDs."""
+    return _psplatform.pids()
+
+
+def pid_exists(pid):
+    """Return True if given PID exists in the current process list.
+    This is faster than doing "pid in psutil.pids()" and
+    should be preferred.
+    """
+    if pid < 0:
+        return False
+    elif pid == 0 and _POSIX:
+        # On POSIX we use os.kill() to determine PID existence.
+        # According to "man 2 kill" PID 0 has a special meaning
+        # though: it refers to <<every process in the process
+        # group of the calling process>> and that is not what we want
+        # to do here.
+        return pid in pids()
+    else:
+        return _psplatform.pid_exists(pid)
+
+
+_pmap = {}
+
+def process_iter():
+    """Return a generator yielding a Process instance for all
+    running processes.
+
+    Every new Process instance is only created once and then cached
+    into an internal table which is updated every time this is used.
+
+    Cached Process instances are checked for identity so that you're
+    safe in case a PID has been reused by another process, in which
+    case the cached instance is updated.
+
+    The sorting order in which processes are yielded is based on
+    their PIDs.
+    """
+    def add(pid):
+        proc = Process(pid)
+        _pmap[proc.pid] = proc
+        return proc
+
+    def remove(pid):
+        _pmap.pop(pid, None)
+
+    a = set(pids())
+    b = set(_pmap.keys())
+    new_pids = a - b
+    gone_pids = b - a
+
+    for pid in gone_pids:
+        remove(pid)
+    for pid, proc in sorted(list(_pmap.items()) +
+                            list(dict.fromkeys(new_pids).items())):
+        try:
+            if proc is None:  # new process
+                yield add(pid)
+            else:
+                # use is_running() to check whether PID has been reused by
+                # another process in which case yield a new Process instance
+                if proc.is_running():
+                    yield proc
+                else:
+                    yield add(pid)
+        except NoSuchProcess:
+            remove(pid)
+        except AccessDenied:
+            # Process creation time can't be determined hence there's
+            # no way to tell whether the pid of the cached process
+            # has been reused. Just return the cached version.
+            yield proc
+
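Editor's note: a small usage sketch for process_iter() (not part of this commit); the chosen attrs are only examples:

    import psutil

    for proc in psutil.process_iter():
        try:
            info = proc.as_dict(attrs=['pid', 'name', 'username'])
        except psutil.NoSuchProcess:
            continue                  # the process vanished mid-iteration
        print(info)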
+
+def wait_procs(procs, timeout=None, callback=None):
+    """Convenience function which waits for a list of processes to
+    terminate.
+
+    Return a (gone, alive) tuple indicating which processes
+    are gone and which ones are still alive.
+
+    The gone ones will have a new 'returncode' attribute indicating
+    process exit status (may be None).
+
+    'callback' is a function which gets called every time a process
+    terminates (a Process instance is passed as callback argument).
+
+    The function returns as soon as all processes terminate or when the
+    timeout occurs.
+
+    Typical use case is:
+
+     - send SIGTERM to a list of processes
+     - give them some time to terminate
+     - send SIGKILL to those ones which are still alive
+
+    Example:
+
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> for p in procs:
+    ...    p.terminate()
+    ...
+    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+    >>> for p in alive:
+    ...     p.kill()
+    """
+    def check_gone(proc, timeout):
+        try:
+            returncode = proc.wait(timeout=timeout)
+        except TimeoutExpired:
+            pass
+        else:
+            if returncode is not None or not proc.is_running():
+                proc.returncode = returncode
+                gone.add(proc)
+                if callback is not None:
+                    callback(proc)
+
+    if timeout is not None and not timeout >= 0:
+        msg = "timeout must be a positive integer, got %s" % timeout
+        raise ValueError(msg)
+    gone = set()
+    alive = set(procs)
+    if callback is not None and not callable(callback):
+        raise TypeError("callback %r is not a callable" % callable)
+    if timeout is not None:
+        deadline = _timer() + timeout
+
+    while alive:
+        if timeout is not None and timeout <= 0:
+            break
+        for proc in alive:
+            # Make sure that every complete iteration (all processes)
+            # will last max 1 sec.
+            # We do this because we don't want to wait too long on a
+            # single process: in case it terminates too late other
+            # processes may disappear in the meantime and their PID
+            # reused.
+            max_timeout = 1.0 / len(alive)
+            if timeout is not None:
+                timeout = min((deadline - _timer()), max_timeout)
+                if timeout <= 0:
+                    break
+                check_gone(proc, timeout)
+            else:
+                check_gone(proc, max_timeout)
+        alive = alive - gone
+
+    if alive:
+        # Last attempt over processes survived so far.
+        # timeout == 0 won't make this function wait any further.
+        for proc in alive:
+            check_gone(proc, 0)
+        alive = alive - gone
+
+    return (list(gone), list(alive))
+
+
+# =====================================================================
+# --- CPU related functions
+# =====================================================================
+
+@memoize
+def cpu_count(logical=True):
+    """Return the number of logical CPUs in the system (same as
+    os.cpu_count() in Python 3.4).
+
+    If logical is False return the number of physical cores only
+    (hyper thread CPUs are excluded).
+
+    Return None if undetermined.
+
+    The return value is cached after first call.
+    If desired, the cache can be cleared like this:
+
+    >>> psutil.cpu_count.cache_clear()
+    """
+    if logical:
+        return _psplatform.cpu_count_logical()
+    else:
+        return _psplatform.cpu_count_physical()
+
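Editor's note: a quick sketch of the logical/physical distinction made above (not part of this commit):

    import psutil

    logical = psutil.cpu_count()                 # e.g. 8 with hyper-threading
    physical = psutil.cpu_count(logical=False)   # e.g. 4; may be None
    psutil.cpu_count.cache_clear()               # drop the memoized value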
+
+def cpu_times(percpu=False):
+    """Return system-wide CPU times as a namedtuple.
+    Every CPU time represents the seconds the CPU has spent in the given mode.
+    The availability of the namedtuple's fields varies depending on the
+    platform:
+     - user
+     - system
+     - idle
+     - nice (UNIX)
+     - iowait (Linux)
+     - irq (Linux, FreeBSD)
+     - softirq (Linux)
+     - steal (Linux >= 2.6.11)
+     - guest (Linux >= 2.6.24)
+     - guest_nice (Linux >= 3.2.0)
+
+    When percpu is True return a list of namedtuples for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+    """
+    if not percpu:
+        return _psplatform.cpu_times()
+    else:
+        return _psplatform.per_cpu_times()
+
+
+_last_cpu_times = cpu_times()
+_last_per_cpu_times = cpu_times(percpu=True)
+
+def cpu_percent(interval=None, percpu=False):
+    """Return a float representing the current system-wide CPU
+    utilization as a percentage.
+
+    When interval is > 0.0 compares system CPU times elapsed before
+    and after the interval (blocking).
+
+    When interval is 0.0 or None compares system CPU times elapsed
+    since last call or module import, returning immediately (non
+    blocking). That means the first time this is called it will
+    return a meaningless 0.0 value which you should ignore.
+    In this case it is recommended, for accuracy, that this function be
+    called with at least 0.1 seconds between calls.
+
+    When percpu is True returns a list of floats representing the
+    utilization as a percentage for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+
+    Examples:
+
+      >>> # blocking, system-wide
+      >>> psutil.cpu_percent(interval=1)
+      2.0
+      >>>
+      >>> # blocking, per-cpu
+      >>> psutil.cpu_percent(interval=1, percpu=True)
+      [2.0, 1.0]
+      >>>
+      >>> # non-blocking (percentage since last call)
+      >>> psutil.cpu_percent(interval=None)
+      2.9
+      >>>
+    """
+    global _last_cpu_times
+    global _last_per_cpu_times
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        t1_all = sum(t1)
+        t1_busy = t1_all - t1.idle
+
+        t2_all = sum(t2)
+        t2_busy = t2_all - t2.idle
+
+        # this usually indicates a float precision issue
+        if t2_busy <= t1_busy:
+            return 0.0
+
+        busy_delta = t2_busy - t1_busy
+        all_delta = t2_all - t1_all
+        busy_perc = (busy_delta / all_delta) * 100
+        return round(busy_perc, 1)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times
+        _last_cpu_times = cpu_times()
+        return calculate(t1, _last_cpu_times)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times
+        _last_per_cpu_times = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times):
+            ret.append(calculate(t1, t2))
+        return ret
+
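Editor's note: a sampling-loop sketch illustrating the non-blocking mode and its meaningless first value, as warned in the docstring (not part of this commit):

    import time
    import psutil

    psutil.cpu_percent(interval=None)            # first call: meaningless 0.0
    for _ in range(5):
        time.sleep(1)                            # keep >= 0.1s between calls
        print(psutil.cpu_percent(interval=None))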
+
+# Use separate global vars for cpu_times_percent() so that it's
+# independent from cpu_percent() and they can both be used within
+# the same program.
+_last_cpu_times_2 = _last_cpu_times
+_last_per_cpu_times_2 = _last_per_cpu_times
+
+def cpu_times_percent(interval=None, percpu=False):
+    """Same as cpu_percent() but provides utilization percentages
+    for each specific CPU time as is returned by cpu_times().
+    For instance, on Linux we'll get:
+
+      >>> cpu_times_percent()
+      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
+                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+      >>>
+
+    interval and percpu arguments have the same meaning as in
+    cpu_percent().
+    """
+    global _last_cpu_times_2
+    global _last_per_cpu_times_2
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        nums = []
+        all_delta = sum(t2) - sum(t1)
+        for field in t1._fields:
+            field_delta = getattr(t2, field) - getattr(t1, field)
+            try:
+                field_perc = (100 * field_delta) / all_delta
+            except ZeroDivisionError:
+                field_perc = 0.0
+            field_perc = round(field_perc, 1)
+            if _WINDOWS:
+                # XXX
+                # Work around:
+                # https://code.google.com/p/psutil/issues/detail?id=392
+                # CPU times are always supposed to increase over time
+                # or at least remain the same and that's because time
+                # cannot go backwards.
+                # Surprisingly sometimes this might not be the case on
+                # Windows where 'system' CPU time can be smaller
+                # compared to the previous call, resulting in corrupted
+                # percentages (< 0 or > 100).
+                # I really don't know what to do about that except
+                # forcing the value to 0 or 100.
+                if field_perc > 100.0:
+                    field_perc = 100.0
+                elif field_perc < 0.0:
+                    field_perc = 0.0
+            nums.append(field_perc)
+        return _psplatform.scputimes(*nums)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times_2
+        _last_cpu_times_2 = cpu_times()
+        return calculate(t1, _last_cpu_times_2)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times_2
+        _last_per_cpu_times_2 = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times_2):
+            ret.append(calculate(t1, t2))
+        return ret
+
+
+# =====================================================================
+# --- system memory related functions
+# =====================================================================
+
+def virtual_memory():
+    """Return statistics about system memory usage as a namedtuple
+    including the following fields, expressed in bytes:
+
+     - total:
+       total physical memory available.
+
+     - available:
+       the actual amount of available memory that can be given
+       instantly to processes that request more memory in bytes; this
+       is calculated by summing different memory values depending on
+       the platform (e.g. free + buffers + cached on Linux) and it is
+       supposed to be used to monitor actual memory usage in a cross
+       platform fashion.
+
+     - percent:
+       the percentage usage calculated as (total - available) / total * 100
+
+     - used:
+       memory used, calculated differently depending on the platform and
+       designed for informational purposes only:
+        OSX: active + inactive + wired
+        BSD: active + wired + cached
+        LINUX: total - free
+
+     - free:
+       memory not being used at all (zeroed) that is readily available;
+       note that this doesn't reflect the actual memory available
+       (use 'available' instead)
+
+    Platform-specific fields:
+
+     - active (UNIX):
+       memory currently in use or very recently used, and so it is in RAM.
+
+     - inactive (UNIX):
+       memory that is marked as not used.
+
+     - buffers (BSD, Linux):
+       cache for things like file system metadata.
+
+     - cached (BSD, OSX):
+       cache for various things.
+
+     - wired (OSX, BSD):
+       memory that is marked to always stay in RAM. It is never moved to disk.
+
+     - shared (BSD):
+       memory that may be simultaneously accessed by multiple processes.
+
+    The sum of 'used' and 'available' does not necessarily equal total.
+    On Windows 'available' and 'free' are the same.
+    """
+    global _TOTAL_PHYMEM
+    ret = _psplatform.virtual_memory()
+    # cached for later use in Process.memory_percent()
+    _TOTAL_PHYMEM = ret.total
+    return ret
+
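Editor's note: a minimal sketch reading the fields documented above (not part of this commit):

    import psutil

    mem = psutil.virtual_memory()
    print(mem.total, mem.available, mem.percent)
    # 'percent' is (total - available) / total * 100, as described above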
+
+def swap_memory():
+    """Return system swap memory statistics as a namedtuple including
+    the following fields:
+
+     - total:   total swap memory in bytes
+     - used:    used swap memory in bytes
+     - free:    free swap memory in bytes
+     - percent: the percentage usage
+     - sin:     no. of bytes the system has swapped in from disk (cumulative)
+     - sout:    no. of bytes the system has swapped out from disk (cumulative)
+
+    'sin' and 'sout' on Windows are meaningless and always set to 0.
+    """
+    return _psplatform.swap_memory()
+
+
+# =====================================================================
+# --- disks/partitions related functions
+# =====================================================================
+
+def disk_usage(path):
+    """Return disk usage statistics about the given path as a namedtuple
+    including total, used and free space expressed in bytes plus the
+    percentage usage.
+    """
+    return _psplatform.disk_usage(path)
+
+
+def disk_partitions(all=False):
+    """Return mounted partitions as a list of
+    (device, mountpoint, fstype, opts) namedtuple.
+    'opts' field is a raw string separated by commas indicating mount
+    options which may vary depending on the platform.
+
+    If "all" parameter is False return physical devices only and ignore
+    all others.
+    """
+    return _psplatform.disk_partitions(all)
+
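Editor's note: a df-like sketch combining disk_partitions() and disk_usage() (not part of this commit):

    import psutil

    for part in psutil.disk_partitions(all=False):   # physical devices only
        usage = psutil.disk_usage(part.mountpoint)
        print(part.device, part.fstype, usage.percent)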
+
+def disk_io_counters(perdisk=False):
+    """Return system disk I/O statistics as a namedtuple including
+    the following fields:
+
+     - read_count:  number of reads
+     - write_count: number of writes
+     - read_bytes:  number of bytes read
+     - write_bytes: number of bytes written
+     - read_time:   time spent reading from disk (in milliseconds)
+     - write_time:  time spent writing to disk (in milliseconds)
+
+    If perdisk is True return the same information for every
+    physical disk installed on the system as a dictionary
+    with partition names as the keys and the namedtuple
+    described above as the values.
+
+    On recent Windows versions the 'diskperf -y' command may need to
+    be executed first, otherwise this function won't find any disk.
+    """
+    rawdict = _psplatform.disk_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any physical disk")
+    if perdisk:
+        for disk, fields in rawdict.items():
+            rawdict[disk] = _nt_sys_diskio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])
+
+
+# =====================================================================
+# --- network related functions
+# =====================================================================
+
+def net_io_counters(pernic=False):
+    """Return network I/O statistics as a namedtuple including
+    the following fields:
+
+     - bytes_sent:   number of bytes sent
+     - bytes_recv:   number of bytes received
+     - packets_sent: number of packets sent
+     - packets_recv: number of packets received
+     - errin:        total number of errors while receiving
+     - errout:       total number of errors while sending
+     - dropin:       total number of incoming packets which were dropped
+     - dropout:      total number of outgoing packets which were dropped
+                     (always 0 on OSX and BSD)
+
+    If pernic is True return the same information for every
+    network interface installed on the system as a dictionary
+    with network interface names as the keys and the namedtuple
+    described above as the values.
+    """
+    rawdict = _psplatform.net_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any network interface")
+    if pernic:
+        for nic, fields in rawdict.items():
+            rawdict[nic] = _nt_sys_netio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])
+
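Editor's note: a short sketch of the system-wide vs. per-NIC forms described above (not part of this commit):

    import psutil

    total = psutil.net_io_counters()             # all interfaces combined
    print(total.bytes_sent, total.bytes_recv)
    for name, nic in psutil.net_io_counters(pernic=True).items():
        print(name, nic.packets_sent, nic.packets_recv)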
+
+def net_connections(kind='inet'):
+    """Return system-wide connections as a list of
+    (fd, family, type, laddr, raddr, status, pid) namedtuples.
+    In case of limited privileges 'fd' and 'pid' may be set to -1
+    and None respectively.
+    The 'kind' parameter filters for connections that fit the
+    following criteria:
+
+    Kind Value      Connections using
+    inet            IPv4 and IPv6
+    inet4           IPv4
+    inet6           IPv6
+    tcp             TCP
+    tcp4            TCP over IPv4
+    tcp6            TCP over IPv6
+    udp             UDP
+    udp4            UDP over IPv4
+    udp6            UDP over IPv6
+    unix            UNIX socket (both UDP and TCP protocols)
+    all             the sum of all the possible families and protocols
+    """
+    return _psplatform.net_connections(kind)
+
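Editor's note: an illustrative sketch filtering for listening TCP sockets (not part of this commit); it assumes psutil re-exports CONN_LISTEN from _common.py, and it may require elevated privileges on some platforms:

    import psutil

    listening = [c for c in psutil.net_connections(kind='tcp')
                 if c.status == psutil.CONN_LISTEN]
    for c in listening:
        print(c.laddr, c.pid)
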
+# =====================================================================
+# --- other system related functions
+# =====================================================================
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch.
+    This is also available as psutil.BOOT_TIME.
+    """
+    # Note: we are not caching this because it is subject to
+    # system clock updates.
+    return _psplatform.boot_time()
+
+
+def users():
+    """Return users currently connected on the system as a list of
+    namedtuples including the following fields:
+
+     - user: the name of the user
+     - terminal: the tty or pseudo-tty associated with the user, if any.
+     - host: the host name associated with the entry, if any.
+     - started: the creation time as a floating point number expressed in
+       seconds since the epoch.
+    """
+    return _psplatform.users()
+
+
+# =====================================================================
+# --- deprecated functions
+# =====================================================================
+
+@_deprecated(replacement="psutil.pids()")
+def get_pid_list():
+    return pids()
+
+
+@_deprecated(replacement="list(process_iter())")
+def get_process_list():
+    return list(process_iter())
+
+
+@_deprecated(replacement="psutil.users()")
+def get_users():
+    return users()
+
+
+@_deprecated(replacement="psutil.virtual_memory()")
+def phymem_usage():
+    """Return the amount of total, used and free physical memory
+    on the system in bytes plus the percentage usage.
+    Deprecated; use psutil.virtual_memory() instead.
+    """
+    return virtual_memory()
+
+
+@_deprecated(replacement="psutil.swap_memory()")
+def virtmem_usage():
+    return swap_memory()
+
+
+@_deprecated(replacement="psutil.phymem_usage().free")
+def avail_phymem():
+    return phymem_usage().free
+
+
+@_deprecated(replacement="psutil.phymem_usage().used")
+def used_phymem():
+    return phymem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().total")
+def total_virtmem():
+    return virtmem_usage().total
+
+
+@_deprecated(replacement="psutil.virtmem_usage().used")
+def used_virtmem():
+    return virtmem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().free")
+def avail_virtmem():
+    return virtmem_usage().free
+
+
+@_deprecated(replacement="psutil.net_io_counters()")
+def network_io_counters(pernic=False):
+    return net_io_counters(pernic)
+
+
+def test():
+    """List info of all currently running processes emulating ps aux
+    output.
+    """
+    import datetime
+    from psutil._compat import print_
+
+    today_day = datetime.date.today()
+    templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s"
+    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
+             'create_time', 'memory_info']
+    if _POSIX:
+        attrs.append('uids')
+        attrs.append('terminal')
+    print_(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
+                    "START", "TIME", "COMMAND"))
+    for p in process_iter():
+        try:
+            pinfo = p.as_dict(attrs, ad_value='')
+        except NoSuchProcess:
+            pass
+        else:
+            if pinfo['create_time']:
+                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
+                if ctime.date() == today_day:
+                    ctime = ctime.strftime("%H:%M")
+                else:
+                    ctime = ctime.strftime("%b%d")
+            else:
+                ctime = ''
+            cputime = time.strftime("%M:%S",
+                                    time.localtime(sum(pinfo['cpu_times'])))
+            try:
+                user = p.username()
+            except KeyError:
+                if _POSIX:
+                    if pinfo['uids']:
+                        user = str(pinfo['uids'].real)
+                    else:
+                        user = ''
+                else:
+                    raise
+            except Error:
+                user = ''
+            if _WINDOWS and '\\' in user:
+                user = user.split('\\')[1]
+            vms = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].vms / 1024) or '?'
+            rss = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].rss / 1024) or '?'
+            memp = pinfo['memory_percent'] and \
+                round(pinfo['memory_percent'], 1) or '?'
+            print_(templ % (user[:10],
+                            pinfo['pid'],
+                            pinfo['cpu_percent'],
+                            memp,
+                            vms,
+                            rss,
+                            pinfo.get('terminal', '') or '?',
+                            ctime,
+                            cputime,
+                            pinfo['name'].strip() or '?'))
+
+
+def _replace_module():
+    """Dirty hack to replace the module object in order to access
+    deprecated module constants, see:
+    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html
+    """
+    class ModuleWrapper(object):
+
+        def __repr__(self):
+            return repr(self._module)
+        __str__ = __repr__
+
+        @property
+        def NUM_CPUS(self):
+            msg = "NUM_CPUS constant is deprecated; use cpu_count() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return cpu_count()
+
+        @property
+        def BOOT_TIME(self):
+            msg = "BOOT_TIME constant is deprecated; use boot_time() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return boot_time()
+
+        @property
+        def TOTAL_PHYMEM(self):
+            msg = "TOTAL_PHYMEM constant is deprecated; " \
+                  "use virtual_memory().total instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return virtual_memory().total
+
+    mod = ModuleWrapper()
+    mod.__dict__ = globals()
+    mod._module = sys.modules[__name__]
+    sys.modules[__name__] = mod
+
+
+_replace_module()
+del property, memoize, division, _replace_module
+if sys.version_info < (3, 0):
+    del num
+
+if __name__ == "__main__":
+    test()

+ 258 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py

@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common objects shared by all _ps* modules."""
+
+from __future__ import division
+import errno
+import os
+import socket
+import stat
+import sys
+import warnings
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+from psutil._compat import namedtuple, wraps
+
+# --- constants
+
+AF_INET6 = getattr(socket, 'AF_INET6', None)
+AF_UNIX = getattr(socket, 'AF_UNIX', None)
+
+STATUS_RUNNING = "running"
+STATUS_SLEEPING = "sleeping"
+STATUS_DISK_SLEEP = "disk-sleep"
+STATUS_STOPPED = "stopped"
+STATUS_TRACING_STOP = "tracing-stop"
+STATUS_ZOMBIE = "zombie"
+STATUS_DEAD = "dead"
+STATUS_WAKE_KILL = "wake-kill"
+STATUS_WAKING = "waking"
+STATUS_IDLE = "idle"  # BSD
+STATUS_LOCKED = "locked"  # BSD
+STATUS_WAITING = "waiting"  # BSD
+
+CONN_ESTABLISHED = "ESTABLISHED"
+CONN_SYN_SENT = "SYN_SENT"
+CONN_SYN_RECV = "SYN_RECV"
+CONN_FIN_WAIT1 = "FIN_WAIT1"
+CONN_FIN_WAIT2 = "FIN_WAIT2"
+CONN_TIME_WAIT = "TIME_WAIT"
+CONN_CLOSE = "CLOSE"
+CONN_CLOSE_WAIT = "CLOSE_WAIT"
+CONN_LAST_ACK = "LAST_ACK"
+CONN_LISTEN = "LISTEN"
+CONN_CLOSING = "CLOSING"
+CONN_NONE = "NONE"
+
+
+# --- functions
+
+def usage_percent(used, total, _round=None):
+    """Calculate percentage usage of 'used' against 'total'."""
+    try:
+        ret = (used / total) * 100
+    except ZeroDivisionError:
+        ret = 0
+    if _round is not None:
+        return round(ret, _round)
+    else:
+        return ret
+
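Editor's note: two quick checks of the helper above (not part of this commit):

    from psutil._common import usage_percent

    usage_percent(512, 1024, _round=1)   # -> 50.0
    usage_percent(1, 0)                  # -> 0; ZeroDivisionError is swallowed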
+
+def memoize(fun):
+    """A simple memoize decorator for functions supporting (hashable)
+    positional arguments.
+    It also provides a cache_clear() function for clearing the cache:
+
+    >>> @memoize
+    ... def foo():
+    ...     return 1
+    ...
+    >>> foo()
+    1
+    >>> foo.cache_clear()
+    >>>
+    """
+    @wraps(fun)
+    def wrapper(*args, **kwargs):
+        key = (args, frozenset(sorted(kwargs.items())))
+        lock.acquire()
+        try:
+            try:
+                return cache[key]
+            except KeyError:
+                ret = cache[key] = fun(*args, **kwargs)
+        finally:
+            lock.release()
+        return ret
+
+    def cache_clear():
+        """Clear cache."""
+        lock.acquire()
+        try:
+            cache.clear()
+        finally:
+            lock.release()
+
+    lock = threading.RLock()
+    cache = {}
+    wrapper.cache_clear = cache_clear
+    return wrapper
+
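Editor's note: a small sketch exercising the decorator above (not part of this commit); it caches on hashable positional/keyword arguments and is guarded by an RLock:

    from psutil._common import memoize

    @memoize
    def square(x):
        print("computing", x)
        return x * x

    square(3)              # prints "computing 3", returns 9
    square(3)              # served from the cache, nothing printed
    square.cache_clear()   # empties the cache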
+
+# http://code.activestate.com/recipes/577819-deprecated-decorator/
+def deprecated(replacement=None):
+    """A decorator which can be used to mark functions as deprecated."""
+    def outer(fun):
+        msg = "psutil.%s is deprecated" % fun.__name__
+        if replacement is not None:
+            msg += "; use %s instead" % replacement
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return fun(*args, **kwargs)
+
+        return inner
+    return outer
+
+
+def deprecated_method(replacement):
+    """A decorator which can be used to mark a method as deprecated
+    'replcement' is the method name which will be called instead.
+    """
+    def outer(fun):
+        msg = "%s() is deprecated; use %s() instead" % (
+            fun.__name__, replacement)
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(self, *args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return getattr(self, replacement)(*args, **kwargs)
+        return inner
+    return outer
+
+
+def isfile_strict(path):
+    """Same as os.path.isfile() but does not swallow EACCES / EPERM
+    exceptions, see:
+    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
+    """
+    try:
+        st = os.stat(path)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno in (errno.EPERM, errno.EACCES):
+            raise
+        return False
+    else:
+        return stat.S_ISREG(st.st_mode)
+
+
+# --- Process.connections() 'kind' parameter mapping
+
+conn_tmap = {
+    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
+    "tcp4": ([AF_INET], [SOCK_STREAM]),
+    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
+    "udp4": ([AF_INET], [SOCK_DGRAM]),
+    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+}
+
+if AF_INET6 is not None:
+    conn_tmap.update({
+        "tcp6": ([AF_INET6], [SOCK_STREAM]),
+        "udp6": ([AF_INET6], [SOCK_DGRAM]),
+    })
+
+if AF_UNIX is not None:
+    conn_tmap.update({
+        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    })
+
+del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
+
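Editor's note: a brief sketch showing how the 'kind' strings above map to the (families, types) pairs consumed by the platform modules (not part of this commit):

    import socket
    from psutil._common import conn_tmap

    families, types = conn_tmap['tcp4']
    assert families == [socket.AF_INET]
    assert types == [socket.SOCK_STREAM]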
+
+# --- namedtuples for psutil.* system-related functions
+
+# psutil.swap_memory()
+sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
+                             'sout'])
+# psutil.disk_usage()
+sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
+# psutil.disk_io_counters()
+sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+                                 'read_bytes', 'write_bytes',
+                                 'read_time', 'write_time'])
+# psutil.disk_partitions()
+sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
+# psutil.net_io_counters()
+snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
+                               'packets_sent', 'packets_recv',
+                               'errin', 'errout',
+                               'dropin', 'dropout'])
+# psutil.users()
+suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
+# psutil.net_connections()
+sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
+                             'status', 'pid'])
+
+
+# --- namedtuples for psutil.Process methods
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes', ['user', 'system'])
+# psutil.Process.open_files()
+popenfile = namedtuple('popenfile', ['path', 'fd'])
+# psutil.Process.threads()
+pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
+# psutil.Process.uids()
+puids = namedtuple('puids', ['real', 'effective', 'saved'])
+# psutil.Process.gids()
+pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+                         'read_bytes', 'write_bytes'])
+# psutil.Process.ionice()
+pionice = namedtuple('pionice', ['ioclass', 'value'])
+# psutil.Process.ctx_switches()
+pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
+
+
+# --- misc
+
+# backward compatibility layer for Process.connections() ntuple
+class pconn(
+    namedtuple('pconn',
+               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
+    __slots__ = ()
+
+    @property
+    def local_address(self):
+        warnings.warn("'local_address' field is deprecated; use 'laddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.laddr
+
+    @property
+    def remote_address(self):
+        warnings.warn("'remote_address' field is deprecated; use 'raddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.raddr

+ 433 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py

@@ -0,0 +1,433 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module which provides compatibility with older Python versions."""
+
+__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable", "namedtuple",
+           "property", "wraps", "defaultdict", "update_wrapper", "lru_cache"]
+
+import sys
+try:
+    import __builtin__
+except ImportError:
+    import builtins as __builtin__  # py3
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    int = int
+    long = int
+    xrange = range
+    unicode = str
+    exec_ = getattr(__builtin__, "exec")
+    print_ = getattr(__builtin__, "print")
+
+    def u(s):
+        return s
+
+    def b(s):
+        return s.encode("latin-1")
+else:
+    int = int
+    long = long
+    xrange = xrange
+    unicode = unicode
+
+    def u(s):
+        return unicode(s, "unicode_escape")
+
+    def b(s):
+        return s
+
+    def exec_(code, globs=None, locs=None):
+        if globs is None:
+            # use the module-level 'sys' import; '_sys' is only bound inside
+            # the namedtuple fallback further below
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        exec("""exec code in globs, locs""")
+
+    def print_(s):
+        sys.stdout.write(s + '\n')
+        sys.stdout.flush()
+
+
+# removed in 3.0, reintroduced in 3.2
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+# --- stdlib additions
+
+# py 2.6 collections.namedtuple
+# Taken from: http://code.activestate.com/recipes/500261/
+# Credits: Raymond Hettinger
+try:
+    from collections import namedtuple
+except ImportError:
+    from operator import itemgetter as _itemgetter
+    from keyword import iskeyword as _iskeyword
+    import sys as _sys
+
+    def namedtuple(typename, field_names, verbose=False, rename=False):
+        """A collections.namedtuple implementation, see:
+        http://docs.python.org/library/collections.html#namedtuple
+        """
+        if isinstance(field_names, basestring):
+            field_names = field_names.replace(',', ' ').split()
+        field_names = tuple(map(str, field_names))
+        if rename:
+            names = list(field_names)
+            seen = set()
+            for i, name in enumerate(names):
+                if ((not min(c.isalnum() or c == '_' for c in name)
+                        or _iskeyword(name)
+                        or not name or name[0].isdigit()
+                        or name.startswith('_')
+                        or name in seen)):
+                    names[i] = '_%d' % i
+                seen.add(name)
+            field_names = tuple(names)
+        for name in (typename,) + field_names:
+            if not min(c.isalnum() or c == '_' for c in name):
+                raise ValueError('Type names and field names can only contain '
+                                 'alphanumeric characters and underscores: %r'
+                                 % name)
+            if _iskeyword(name):
+                raise ValueError('Type names and field names cannot be a '
+                                 'keyword: %r' % name)
+            if name[0].isdigit():
+                raise ValueError('Type names and field names cannot start '
+                                 'with a number: %r' % name)
+        seen_names = set()
+        for name in field_names:
+            if name.startswith('_') and not rename:
+                raise ValueError(
+                    'Field names cannot start with an underscore: %r' % name)
+            if name in seen_names:
+                raise ValueError('Encountered duplicate field name: %r' % name)
+            seen_names.add(name)
+
+        numfields = len(field_names)
+        argtxt = repr(field_names).replace("'", "")[1:-1]
+        reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+        template = '''class %(typename)s(tuple):
+        '%(typename)s(%(argtxt)s)' \n
+        __slots__ = () \n
+        _fields = %(field_names)r \n
+        def __new__(_cls, %(argtxt)s):
+            return _tuple.__new__(_cls, (%(argtxt)s)) \n
+        @classmethod
+        def _make(cls, iterable, new=tuple.__new__, len=len):
+            'Make a new %(typename)s object from a sequence or iterable'
+            result = new(cls, iterable)
+            if len(result) != %(numfields)d:
+                raise TypeError(
+                    'Expected %(numfields)d arguments, got %%d' %% len(result))
+            return result \n
+        def __repr__(self):
+            return '%(typename)s(%(reprtxt)s)' %% self \n
+        def _asdict(self):
+            'Return a new dict which maps field names to their values'
+            return dict(zip(self._fields, self)) \n
+        def _replace(_self, **kwds):
+            result = _self._make(map(kwds.pop, %(field_names)r, _self))
+            if kwds:
+                raise ValueError(
+                    'Got unexpected field names: %%r' %% kwds.keys())
+            return result \n
+        def __getnewargs__(self):
+            return tuple(self) \n\n''' % locals()
+        for i, name in enumerate(field_names):
+            template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
+        if verbose:
+            sys.stdout.write(template + '\n')
+            sys.stdout.flush()
+
+        namespace = dict(
+            _itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+            _property=property, _tuple=tuple)
+        try:
+            exec_(template, namespace)
+        except SyntaxError:
+            e = sys.exc_info()[1]
+            raise SyntaxError(e.message + ':\n' + template)
+        result = namespace[typename]
+        try:
+            result.__module__ = _sys._getframe(
+                1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            pass
+
+        return result
+
+
+# hack to support property getter/setter/deleter on python < 2.6
+# http://docs.python.org/library/functions.html?highlight=property#property
+if hasattr(property, 'setter'):
+    property = property
+else:
+    class property(__builtin__.property):
+        __metaclass__ = type
+
+        def __init__(self, fget, *args, **kwargs):
+            super(property, self).__init__(fget, *args, **kwargs)
+            self.__doc__ = fget.__doc__
+
+        def getter(self, method):
+            return property(method, self.fset, self.fdel)
+
+        def setter(self, method):
+            return property(self.fget, method, self.fdel)
+
+        def deleter(self, method):
+            return property(self.fget, self.fset, method)
+
+
+# py 2.5 collections.defaultdict
+# Taken from:
+# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
+# Credits: Jason Kirtland
+try:
+    from collections import defaultdict
+except ImportError:
+    class defaultdict(dict):
+        """Dict subclass that calls a factory function to supply
+        missing values:
+        http://docs.python.org/library/collections.html#collections.defaultdict
+        """
+
+        def __init__(self, default_factory=None, *a, **kw):
+            if ((default_factory is not None and
+                    not hasattr(default_factory, '__call__'))):
+                raise TypeError('first argument must be callable')
+            dict.__init__(self, *a, **kw)
+            self.default_factory = default_factory
+
+        def __getitem__(self, key):
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return self.__missing__(key)
+
+        def __missing__(self, key):
+            if self.default_factory is None:
+                raise KeyError(key)
+            self[key] = value = self.default_factory()
+            return value
+
+        def __reduce__(self):
+            if self.default_factory is None:
+                args = tuple()
+            else:
+                args = self.default_factory,
+            return type(self), args, None, None, self.items()
+
+        def copy(self):
+            return self.__copy__()
+
+        def __copy__(self):
+            return type(self)(self.default_factory, self)
+
+        def __deepcopy__(self, memo):
+            import copy
+            return type(self)(self.default_factory,
+                              copy.deepcopy(self.items()))
+
+        def __repr__(self):
+            return 'defaultdict(%s, %s)' % (self.default_factory,
+                                            dict.__repr__(self))
+
+
+# py 2.5 functools.wraps
+try:
+    from functools import wraps
+except ImportError:
+    def wraps(original):
+        def inner(fn):
+            for attribute in ['__module__', '__name__', '__doc__']:
+                setattr(fn, attribute, getattr(original, attribute))
+            for attribute in ['__dict__']:
+                if hasattr(fn, attribute):
+                    getattr(fn, attribute).update(getattr(original, attribute))
+                else:
+                    setattr(fn, attribute,
+                            getattr(original, attribute).copy())
+            return fn
+        return inner
+
+
+# py 2.5 functools.update_wrapper
+try:
+    from functools import update_wrapper
+except ImportError:
+    WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+    WRAPPER_UPDATES = ('__dict__',)
+
+    def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS,
+                       updated=WRAPPER_UPDATES):
+        """Update a wrapper function to look like the wrapped function, see:
+        http://docs.python.org/library/functools.html#functools.update_wrapper
+        """
+        for attr in assigned:
+            setattr(wrapper, attr, getattr(wrapped, attr))
+        for attr in updated:
+            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+        return wrapper
+
+
+# py 3.2 functools.lru_cache
+# Taken from: http://code.activestate.com/recipes/578078
+# Credit: Raymond Hettinger
+try:
+    from functools import lru_cache
+except ImportError:
+    try:
+        from threading import RLock
+    except ImportError:
+        from dummy_threading import RLock
+
+    _CacheInfo = namedtuple("CacheInfo",
+                            ["hits", "misses", "maxsize", "currsize"])
+
+    class _HashedSeq(list):
+        __slots__ = 'hashvalue'
+
+        def __init__(self, tup, hash=hash):
+            self[:] = tup
+            self.hashvalue = hash(tup)
+
+        def __hash__(self):
+            return self.hashvalue
+
+    def _make_key(args, kwds, typed,
+                  kwd_mark=(object(), ),
+                  fasttypes=set((int, str, frozenset, type(None))),
+                  sorted=sorted, tuple=tuple, type=type, len=len):
+        key = args
+        if kwds:
+            sorted_items = sorted(kwds.items())
+            key += kwd_mark
+            for item in sorted_items:
+                key += item
+        if typed:
+            key += tuple(type(v) for v in args)
+            if kwds:
+                key += tuple(type(v) for k, v in sorted_items)
+        elif len(key) == 1 and type(key[0]) in fasttypes:
+            return key[0]
+        return _HashedSeq(key)
+
+    def lru_cache(maxsize=100, typed=False):
+        """Least-recently-used cache decorator, see:
+        http://docs.python.org/3/library/functools.html#functools.lru_cache
+        """
+        def decorating_function(user_function):
+            cache = dict()
+            stats = [0, 0]
+            HITS, MISSES = 0, 1
+            make_key = _make_key
+            cache_get = cache.get
+            _len = len
+            lock = RLock()
+            root = []
+            root[:] = [root, root, None, None]
+            nonlocal_root = [root]
+            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
+            if maxsize == 0:
+                def wrapper(*args, **kwds):
+                    result = user_function(*args, **kwds)
+                    stats[MISSES] += 1
+                    return result
+            elif maxsize is None:
+                def wrapper(*args, **kwds):
+                    key = make_key(args, kwds, typed)
+                    result = cache_get(key, root)
+                    if result is not root:
+                        stats[HITS] += 1
+                        return result
+                    result = user_function(*args, **kwds)
+                    cache[key] = result
+                    stats[MISSES] += 1
+                    return result
+            else:
+                def wrapper(*args, **kwds):
+                    if kwds or typed:
+                        key = make_key(args, kwds, typed)
+                    else:
+                        key = args
+                    lock.acquire()
+                    try:
+                        link = cache_get(key)
+                        if link is not None:
+                            root, = nonlocal_root
+                            link_prev, link_next, key, result = link
+                            link_prev[NEXT] = link_next
+                            link_next[PREV] = link_prev
+                            last = root[PREV]
+                            last[NEXT] = root[PREV] = link
+                            link[PREV] = last
+                            link[NEXT] = root
+                            stats[HITS] += 1
+                            return result
+                    finally:
+                        lock.release()
+                    result = user_function(*args, **kwds)
+                    lock.acquire()
+                    try:
+                        root, = nonlocal_root
+                        if key in cache:
+                            pass
+                        elif _len(cache) >= maxsize:
+                            oldroot = root
+                            oldroot[KEY] = key
+                            oldroot[RESULT] = result
+                            root = nonlocal_root[0] = oldroot[NEXT]
+                            oldkey = root[KEY]
+                            root[KEY] = root[RESULT] = None
+                            del cache[oldkey]
+                            cache[key] = oldroot
+                        else:
+                            last = root[PREV]
+                            link = [last, root, key, result]
+                            last[NEXT] = root[PREV] = cache[key] = link
+                        stats[MISSES] += 1
+                    finally:
+                        lock.release()
+                    return result
+
+            def cache_info():
+                """Report cache statistics"""
+                lock.acquire()
+                try:
+                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
+                                      len(cache))
+                finally:
+                    lock.release()
+
+            def cache_clear():
+                """Clear the cache and cache statistics"""
+                lock.acquire()
+                try:
+                    cache.clear()
+                    root = nonlocal_root[0]
+                    root[:] = [root, root, None, None]
+                    stats[:] = [0, 0]
+                finally:
+                    lock.release()
+
+            wrapper.__wrapped__ = user_function
+            wrapper.cache_info = cache_info
+            wrapper.cache_clear = cache_clear
+            return update_wrapper(wrapper, user_function)
+
+        return decorating_function

+ 389 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py

@@ -0,0 +1,389 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent
+from psutil._compat import namedtuple, wraps
+import _psutil_bsd as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PROC_STATUSES = {
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SWAIT: _common.STATUS_WAITING,
+    cext.SLOCK: _common.STATUS_LOCKED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# extend base mem ntuple with BSD-specific memory metrics
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+scputimes = namedtuple(
+    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+def virtual_memory():
+    """System virtual memory as a namedutple."""
+    mem = cext.virtual_mem()
+    total, free, active, inactive, wired, cached, buffers, shared = mem
+    avail = inactive + cached + free
+    used = active + wired + cached
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+    """System swap memory as (total, used, free, sin, sout) namedtuple."""
+    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+def cpu_times():
+    """Return system per-CPU times as a named tuple"""
+    user, nice, system, idle, irq = cext.cpu_times()
+    return scputimes(user, nice, system, idle, irq)
+
+
+if hasattr(cext, "per_cpu_times"):
+    def per_cpu_times():
+        """Return system CPU times as a named tuple"""
+        ret = []
+        for cpu_t in cext.per_cpu_times():
+            user, nice, system, idle, irq = cpu_t
+            item = scputimes(user, nice, system, idle, irq)
+            ret.append(item)
+        return ret
+else:
+    # XXX
+    # Ok, this is very dirty.
+    # On FreeBSD < 8 we cannot gather per-cpu information, see:
+    # http://code.google.com/p/psutil/issues/detail?id=226
+    # If num cpus > 1, on first call we return single cpu times to avoid a
+    # crash at psutil import time.
+    # Next calls will fail with NotImplementedError
+    def per_cpu_times():
+        if cpu_count_logical() == 1:
+            return [cpu_times()]
+        if per_cpu_times.__called__:
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+        per_cpu_times.__called__ = True
+        return [cpu_times()]
+
+    per_cpu_times.__called__ = False
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    # From the C module we'll get an XML string similar to this:
+    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+    # We may get None in case "sysctl kern.sched.topology_spec"
+    # is not supported on this BSD version, in which case we'll mimic
+    # os.cpu_count() and return None.
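+    # Simplified sketch of the XML shape being parsed (illustrative only;
+    # real output carries extra attributes such as cache-level and mask):
+    #   <groups>
+    #    <group level="1">
+    #     <children>
+    #      <group level="2"><cpu count="2">0, 1</cpu></group>
+    #      <group level="2"><cpu count="2">2, 3</cpu></group>
+    #     </children>
+    #    </group>
+    #   </groups>
+    # for which findall('group/children/group/cpu') yields 2 entries.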
+    s = cext.cpu_count_phys()
+    if s is not None:
+        # get rid of padding chars appended at the end of the string
+        index = s.rfind("</groups>")
+        if index != -1:
+            s = s[:index + 9]
+            if sys.version_info >= (2, 5):
+                import xml.etree.ElementTree as ET
+                root = ET.fromstring(s)
+                return len(root.findall('group/children/group/cpu')) or None
+            else:
+                s = s[s.find('<children>'):]
+                return s.count("<cpu") or None
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind):
+    if kind not in _common.conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                        % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    ret = []
+    rawlist = cext.net_connections()
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        # TODO: apply filter at C level
+        if fam in families and type in types:
+            status = TCP_STATUSES[status]
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+            ret.append(nt)
+    return ret
+
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*cext.proc_memory_info(self.pid))
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        if code in PROC_STATUSES:
+            return PROC_STATUSES[code]
+        # XXX is this legit? will we even ever get here?
+        return "?"
+
+    @wrap_exceptions
+    def io_counters(self):
+        rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+        return _common.pio(rc, wc, rb, wb)
+
+    nt_mmap_grouped = namedtuple(
+        'mmap', 'path rss, private, ref_count, shadow_count')
+    nt_mmap_ext = namedtuple(
+        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
+
+    # FreeBSD < 8 does not support functions based on kinfo_getfile()
+    # and kinfo_getvmmap()
+    if hasattr(cext, 'proc_open_files'):
+
+        @wrap_exceptions
+        def open_files(self):
+            """Return files opened by process as a list of namedtuples."""
+            rawlist = cext.proc_open_files(self.pid)
+            return [_common.popenfile(path, fd) for path, fd in rawlist]
+
+        @wrap_exceptions
+        def cwd(self):
+            """Return process current working directory."""
+            # sometimes we get an empty string, in which case we turn
+            # it into None
+            return cext.proc_cwd(self.pid) or None
+
+        @wrap_exceptions
+        def memory_maps(self):
+            return cext.proc_memory_maps(self.pid)
+
+        @wrap_exceptions
+        def num_fds(self):
+            """Return the number of file descriptors opened by this process."""
+            return cext.proc_num_fds(self.pid)
+
+    else:
+        def _not_implemented(self):
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+
+        open_files = _not_implemented
+        proc_cwd = _not_implemented
+        memory_maps = _not_implemented
+        num_fds = _not_implemented

+ 1225 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py

@@ -0,0 +1,1225 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux platform implementation."""
+
+from __future__ import division
+
+import base64
+import errno
+import os
+import re
+import socket
+import struct
+import sys
+import warnings
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (isfile_strict, usage_percent, deprecated)
+from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
+import _psutil_linux as cext
+import _psutil_posix
+
+
+__extra__all__ = [
+    # io prio constants
+    "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
+    "IOPRIO_CLASS_IDLE",
+    # connection status constants
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
+    # other
+    "phymem_buffers", "cached_phymem"]
+
+
+# --- constants
+
+HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
+
+# RLIMIT_* constants, not guaranteed to be present on all kernels
+if HAS_PRLIMIT:
+    for name in dir(cext):
+        if name.startswith('RLIM'):
+            __extra__all__.append(name)
+
+# Number of clock ticks per second
+CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+BOOT_TIME = None  # set later
+DEFAULT_ENCODING = sys.getdefaultencoding()
+
+# ioprio_* constants http://linux.die.net/man/2/ioprio_get
+IOPRIO_CLASS_NONE = 0
+IOPRIO_CLASS_RT = 1
+IOPRIO_CLASS_BE = 2
+IOPRIO_CLASS_IDLE = 3
+
+# taken from /fs/proc/array.c
+PROC_STATUSES = {
+    "R": _common.STATUS_RUNNING,
+    "S": _common.STATUS_SLEEPING,
+    "D": _common.STATUS_DISK_SLEEP,
+    "T": _common.STATUS_STOPPED,
+    "t": _common.STATUS_TRACING_STOP,
+    "Z": _common.STATUS_ZOMBIE,
+    "X": _common.STATUS_DEAD,
+    "x": _common.STATUS_DEAD,
+    "K": _common.STATUS_WAKE_KILL,
+    "W": _common.STATUS_WAKING
+}
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    "01": _common.CONN_ESTABLISHED,
+    "02": _common.CONN_SYN_SENT,
+    "03": _common.CONN_SYN_RECV,
+    "04": _common.CONN_FIN_WAIT1,
+    "05": _common.CONN_FIN_WAIT2,
+    "06": _common.CONN_TIME_WAIT,
+    "07": _common.CONN_CLOSE,
+    "08": _common.CONN_CLOSE_WAIT,
+    "09": _common.CONN_LAST_ACK,
+    "0A": _common.CONN_LISTEN,
+    "0B": _common.CONN_CLOSING
+}
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- named tuples
+
+def _get_cputimes_fields():
+    """Return a namedtuple of variable fields depending on the
+    CPU times available on this Linux kernel version which may be:
+    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
+     [guest_nice]]])
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()[1:]
+    finally:
+        f.close()
+    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
+    vlen = len(values)
+    if vlen >= 8:
+        # Linux >= 2.6.11
+        fields.append('steal')
+    if vlen >= 9:
+        # Linux >= 2.6.24
+        fields.append('guest')
+    if vlen >= 10:
+        # Linux >= 3.2.0
+        fields.append('guest_nice')
+    return fields
+
+
+scputimes = namedtuple('scputimes', _get_cputimes_fields())
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached'])
+
+pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
+                      'shared_dirty', 'private_clean', 'private_dirty',
+                      'referenced', 'anonymous', 'swap'])
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# --- system memory
+
+def virtual_memory():
+    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
+    cached = active = inactive = None
+    f = open('/proc/meminfo', 'rb')
+    CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
+    try:
+        for line in f:
+            if line.startswith(CACHED):
+                cached = int(line.split()[1]) * 1024
+            elif line.startswith(ACTIVE):
+                active = int(line.split()[1]) * 1024
+            elif line.startswith(INACTIVE):
+                inactive = int(line.split()[1]) * 1024
+            if (cached is not None
+                    and active is not None
+                    and inactive is not None):
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            cached = active = inactive = 0
+    finally:
+        f.close()
+    avail = free + buffers + cached
+    used = total - free
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached)
+
+
+def swap_memory():
+    _, _, _, _, total, free = cext.linux_sysinfo()
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    # get pgin/pgouts
+    f = open("/proc/vmstat", "rb")
+    SIN, SOUT = b('pswpin'), b('pswpout')
+    sin = sout = None
+    try:
+        for line in f:
+            # values are expressed in 4 kB units; we want bytes instead
+            if line.startswith(SIN):
+                sin = int(line.split(b(' '))[1]) * 4 * 1024
+            elif line.startswith(SOUT):
+                sout = int(line.split(b(' '))[1]) * 4 * 1024
+            if sin is not None and sout is not None:
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'sin' and 'sout' swap memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            sin = sout = 0
+    finally:
+        f.close()
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+@deprecated(replacement='psutil.virtual_memory().cached')
+def cached_phymem():
+    return virtual_memory().cached
+
+
+@deprecated(replacement='psutil.virtual_memory().buffers')
+def phymem_buffers():
+    return virtual_memory().buffers
+
+
+# --- CPUs
+
+def cpu_times():
+    """Return a named tuple representing the following system-wide
+    CPU times:
+    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
+     [guest_nice]]])
+    Last 3 fields may not be available on all Linux kernel versions.
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()
+    finally:
+        f.close()
+    fields = values[1:len(scputimes._fields) + 1]
+    fields = [float(x) / CLOCK_TICKS for x in fields]
+    return scputimes(*fields)
+
+
+def per_cpu_times():
+    """Return a list of namedtuple representing the CPU times
+    for every CPU available on the system.
+    """
+    cpus = []
+    f = open('/proc/stat', 'rb')
+    try:
+        # get rid of the first line which refers to system wide CPU stats
+        f.readline()
+        CPU = b('cpu')
+        for line in f:
+            if line.startswith(CPU):
+                values = line.split()
+                fields = values[1:len(scputimes._fields) + 1]
+                fields = [float(x) / CLOCK_TICKS for x in fields]
+                entry = scputimes(*fields)
+                cpus.append(entry)
+        return cpus
+    finally:
+        f.close()
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # as a second fallback we try to parse /proc/cpuinfo
+        num = 0
+        f = open('/proc/cpuinfo', 'rb')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        PROCESSOR = b('processor')
+        for line in lines:
+            if line.lower().startswith(PROCESSOR):
+                num += 1
+
+    # unknown format (e.g. armel/sparc architectures), see:
+    # http://code.google.com/p/psutil/issues/detail?id=200
+    # try to parse /proc/stat as a last resort
+    if num == 0:
+        f = open('/proc/stat', 'rt')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        search = re.compile('cpu\d')
+        for line in lines:
+            line = line.split(' ')[0]
+            if search.match(line):
+                num += 1
+
+    if num == 0:
+        # mimic os.cpu_count()
+        return None
+    return num
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    f = open('/proc/cpuinfo', 'rb')
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    found = set()
+    PHYSICAL_ID = b('physical id')
+    for line in lines:
+        if line.lower().startswith(PHYSICAL_ID):
+            found.add(line.strip())
+    if found:
+        return len(found)
+    else:
+        return None  # mimic os.cpu_count()
+
+
+# --- other system functions
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname == ':0.0':
+            hostname = 'localhost'
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch."""
+    global BOOT_TIME
+    f = open('/proc/stat', 'rb')
+    try:
+        BTIME = b('btime')
+        for line in f:
+            if line.startswith(BTIME):
+                ret = float(line.strip().split()[1])
+                BOOT_TIME = ret
+                return ret
+        raise RuntimeError("line 'btime' not found")
+    finally:
+        f.close()
+
+
+# --- processes
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir(b('/proc')) if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check For the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+# --- network
+
+class Connections:
+    """A wrapper on top of /proc/net/* files, retrieving per-process
+    and system-wide open connections (TCP, UDP, UNIX) similarly to
+    "netstat -an".
+
+    Note: in case of UNIX sockets we're only able to determine the
+    local endpoint/path, not the one it's connected to.
+    According to [1] it would be possible but not easily.
+
+    [1] http://serverfault.com/a/417946
+    """
+
+    def __init__(self):
+        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
+        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
+        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
+        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
+        unix = ("unix", socket.AF_UNIX, None)
+        self.tmap = {
+            "all": (tcp4, tcp6, udp4, udp6, unix),
+            "tcp": (tcp4, tcp6),
+            "tcp4": (tcp4,),
+            "tcp6": (tcp6,),
+            "udp": (udp4, udp6),
+            "udp4": (udp4,),
+            "udp6": (udp6,),
+            "unix": (unix,),
+            "inet": (tcp4, tcp6, udp4, udp6),
+            "inet4": (tcp4, udp4),
+            "inet6": (tcp6, udp6),
+        }
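+        # e.g. retrieve(kind='tcp4') only scans /proc/net/tcp, while
+        # kind='inet' covers the four tcp*/udp* files but not /proc/net/unix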
+
+    def get_proc_inodes(self, pid):
+        inodes = defaultdict(list)
+        for fd in os.listdir("/proc/%s/fd" % pid):
+            try:
+                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
+            except OSError:
+                # the fd was closed in the meantime, or we lack the
+                # permission to read the link; either way, skip it
+                continue
+            else:
+                if inode.startswith('socket:['):
+                    # the process is using a socket
+                    inode = inode[8:][:-1]
+                    inodes[inode].append((pid, int(fd)))
+        return inodes
+
+    def get_all_inodes(self):
+        inodes = {}
+        for pid in pids():
+            try:
+                inodes.update(self.get_proc_inodes(pid))
+            except OSError:
+                # os.listdir() is gonna raise a lot of access denied
+                # exceptions in case of unprivileged user; that's fine
+                # as we'll just end up returning a connection with PID
+                # and fd set to None anyway.
+                # Both netstat -an and lsof do the same, so it's
+                # unlikely we can do any better.
+                # ENOENT just means a PID disappeared on us.
+                err = sys.exc_info()[1]
+                if err.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES):
+                    raise
+        return inodes
+
+    def decode_address(self, addr, family):
+        """Accept an "ip:port" address as displayed in /proc/net/*
+        and convert it into a human readable form, like:
+
+        "0500000A:0016" -> ("10.0.0.5", 22)
+        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
+
+        The IP address portion is a four-byte hexadecimal number in
+        host byte order; on little-endian hosts the least significant
+        byte is listed first, so the bytes have to be reversed to
+        convert it to an IP address.
+        The port is represented as a two-byte hexadecimal number.
+
+        Reference:
+        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
+        """
+        ip, port = addr.split(':')
+        port = int(port, 16)
+        if PY3:
+            ip = ip.encode('ascii')
+        # this usually refers to a local socket in listen mode with
+        # no end-points connected
+        if not port:
+            return ()
+        if family == socket.AF_INET:
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
+            else:
+                ip = socket.inet_ntop(family, base64.b16decode(ip))
+        else:  # IPv6
+            # old version - let's keep it, just in case...
+            # ip = ip.decode('hex')
+            # return socket.inet_ntop(socket.AF_INET6,
+            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
+            ip = base64.b16decode(ip)
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('>4I', *struct.unpack('<4I', ip)))
+            else:
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('<4I', *struct.unpack('<4I', ip)))
+        return (ip, port)
+
+    def process_inet(self, file, family, type_, inodes, filter_pid=None):
+        """Parse /proc/net/tcp* and /proc/net/udp* files."""
+        if file.endswith('6') and not os.path.exists(file):
+            # IPv6 not supported
+            return
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                _, laddr, raddr, status, _, _, _, _, _, inode = \
+                    line.split()[:10]
+                if inode in inodes:
+                    # We assume inet sockets are unique, so we error
+                    # out if there are multiple references to the
+                    # same inode. We won't do this for UNIX sockets.
+                    if len(inodes[inode]) > 1 and type_ != socket.AF_UNIX:
+                        raise ValueError("ambiguos inode with multiple "
+                                         "PIDs references")
+                    pid, fd = inodes[inode][0]
+                else:
+                    pid, fd = None, -1
+                if filter_pid is not None and filter_pid != pid:
+                    continue
+                else:
+                    if type_ == socket.SOCK_STREAM:
+                        status = TCP_STATUSES[status]
+                    else:
+                        status = _common.CONN_NONE
+                    laddr = self.decode_address(laddr, family)
+                    raddr = self.decode_address(raddr, family)
+                    yield (fd, family, type_, laddr, raddr, status, pid)
+        finally:
+            f.close()
+
+    def process_unix(self, file, family, inodes, filter_pid=None):
+        """Parse /proc/net/unix files."""
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                tokens = line.split()
+                _, _, _, _, type_, _, inode = tokens[0:7]
+                if inode in inodes:
+                    # With UNIX sockets we can have a single inode
+                    # referencing many file descriptors.
+                    pairs = inodes[inode]
+                else:
+                    pairs = [(None, -1)]
+                for pid, fd in pairs:
+                    if filter_pid is not None and filter_pid != pid:
+                        continue
+                    else:
+                        if len(tokens) == 8:
+                            path = tokens[-1]
+                        else:
+                            path = ""
+                        type_ = int(type_)
+                        raddr = None
+                        status = _common.CONN_NONE
+                        yield (fd, family, type_, path, raddr, status, pid)
+        finally:
+            f.close()
+
+    def retrieve(self, kind, pid=None):
+        if kind not in self.tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in self.tmap])))
+        if pid is not None:
+            inodes = self.get_proc_inodes(pid)
+            if not inodes:
+                # no connections for this process
+                return []
+        else:
+            inodes = self.get_all_inodes()
+        ret = []
+        for f, family, type_ in self.tmap[kind]:
+            if family in (socket.AF_INET, socket.AF_INET6):
+                ls = self.process_inet(
+                    "/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
+            else:
+                ls = self.process_unix(
+                    "/proc/net/%s" % f, family, inodes, filter_pid=pid)
+            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
+                if pid:
+                    conn = _common.pconn(fd, family, type_, laddr, raddr,
+                                         status)
+                else:
+                    conn = _common.sconn(fd, family, type_, laddr, raddr,
+                                         status, bound_pid)
+                ret.append(conn)
+        return ret
+
+
+_connections = Connections()
+
+
+def net_connections(kind='inet'):
+    """Return system-wide open connections."""
+    return _connections.retrieve(kind)
+
+
+def net_io_counters():
+    """Return network I/O statistics for every network interface
+    installed on the system as a dict of raw tuples.
+    """
+    f = open("/proc/net/dev", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+
+    retdict = {}
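+    # After the interface name, each /proc/net/dev line lists 8 receive
+    # counters (bytes, packets, errs, drop, fifo, frame, compressed,
+    # multicast) followed by 8 transmit counters (bytes, packets, errs,
+    # drop, ...), hence the indexes 0-3 and 8-11 picked out below.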
+    for line in lines[2:]:
+        colon = line.rfind(':')
+        assert colon > 0, repr(line)
+        name = line[:colon].strip()
+        fields = line[colon + 1:].strip().split()
+        bytes_recv = int(fields[0])
+        packets_recv = int(fields[1])
+        errin = int(fields[2])
+        dropin = int(fields[3])
+        bytes_sent = int(fields[8])
+        packets_sent = int(fields[9])
+        errout = int(fields[10])
+        dropout = int(fields[11])
+        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
+                         errin, errout, dropin, dropout)
+    return retdict
+
+
+# --- disks
+
+def disk_io_counters():
+    """Return disk I/O statistics for every disk installed on the
+    system as a dict of raw tuples.
+    """
+    # man iostat states that sectors are equivalent to blocks and
+    # have a size of 512 bytes since 2.4 kernels. This value is
+    # needed to calculate the amount of disk I/O in bytes.
+    SECTOR_SIZE = 512
+
+    # determine partitions we want to look for
+    partitions = []
+    f = open("/proc/partitions", "rt")
+    try:
+        lines = f.readlines()[2:]
+    finally:
+        f.close()
+    for line in reversed(lines):
+        _, _, _, name = line.split()
+        if name[-1].isdigit():
+            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
+            # also be around but we want to omit it
+            partitions.append(name)
+        else:
+            if not partitions or not partitions[-1].startswith(name):
+                # we're dealing with a disk entity for which no
+                # partitions have been defined (e.g. 'sda' but
+                # 'sda1' was not around), see:
+                # http://code.google.com/p/psutil/issues/detail?id=338
+                partitions.append(name)
+    #
+    retdict = {}
+    f = open("/proc/diskstats", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    for line in lines:
+        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
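+        # field layout per iostats.txt: major, minor, device name, reads
+        # completed, reads merged, sectors read, ms spent reading, writes
+        # completed, writes merged, sectors written, ms spent writing, ...
+        # which is what the unpacking below relies on.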
+        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
+            line.split()[:11]
+        if name in partitions:
+            rbytes = int(rbytes) * SECTOR_SIZE
+            wbytes = int(wbytes) * SECTOR_SIZE
+            reads = int(reads)
+            writes = int(writes)
+            rtime = int(rtime)
+            wtime = int(wtime)
+            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
+    return retdict
+
+
+def disk_partitions(all=False):
+    """Return mounted disk partitions as a list of nameduples"""
+    phydevs = []
+    f = open("/proc/filesystems", "r")
+    try:
+        for line in f:
+            if not line.startswith("nodev"):
+                phydevs.append(line.strip())
+    finally:
+        f.close()
+
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if device == '' or fstype not in phydevs:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+disk_usage = _psposix.disk_usage
+
+
+# --- decorators
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and IOError exceptions
+    into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if
+            # process is gone in meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Linux process implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        fname = "/proc/%s/stat" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            name = f.read().split(' ')[1].replace('(', '').replace(')', '')
+        finally:
+            f.close()
+        # XXX - gets changed later and probably needs refactoring
+        return name
+
+    def exe(self):
+        try:
+            exe = os.readlink("/proc/%s/exe" % self.pid)
+        except (OSError, IOError):
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                # no such file error; might be raised also if the
+                # path actually exists for system processes with
+                # low pids (about 0-20)
+                if os.path.lexists("/proc/%s" % self.pid):
+                    return ""
+                else:
+                    # ok, it is a process which has gone away
+                    raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+
+        # readlink() might return paths containing null bytes ('\x00').
+        # Certain names have ' (deleted)' appended. Usually this is
+        # bogus as the file actually exists. Either way that's not
+        # important as we don't want to discriminate executables which
+        # have been deleted.
+        exe = exe.split('\x00')[0]
+        if exe.endswith(' (deleted)') and not os.path.exists(exe):
+            exe = exe[:-10]
+        return exe
+
+    @wrap_exceptions
+    def cmdline(self):
+        fname = "/proc/%s/cmdline" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            # return the args as a list
+            return [x for x in f.read().split('\x00') if x]
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def terminal(self):
+        tmap = _psposix._get_terminal_map()
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            tty_nr = int(f.read().split(b(' '))[6])
+        finally:
+            f.close()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    if os.path.exists('/proc/%s/io' % os.getpid()):
+        @wrap_exceptions
+        def io_counters(self):
+            fname = "/proc/%s/io" % self.pid
+            f = open(fname, 'rb')
+            SYSCR, SYSCW = b("syscr"), b("syscw")
+            READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
+            try:
+                rcount = wcount = rbytes = wbytes = None
+                for line in f:
+                    if rcount is None and line.startswith(SYSCR):
+                        rcount = int(line.split()[1])
+                    elif wcount is None and line.startswith(SYSCW):
+                        wcount = int(line.split()[1])
+                    elif rbytes is None and line.startswith(READ_BYTES):
+                        rbytes = int(line.split()[1])
+                    elif wbytes is None and line.startswith(WRITE_BYTES):
+                        wbytes = int(line.split()[1])
+                for x in (rcount, wcount, rbytes, wbytes):
+                    if x is None:
+                        raise NotImplementedError(
+                            "couldn't read all necessary info from %r" % fname)
+                return _common.pio(rcount, wcount, rbytes, wbytes)
+            finally:
+                f.close()
+    else:
+        def io_counters(self):
+            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
+                                      "too old?)" % self.pid)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.find(b(')')) + 2:]
+        values = st.split(b(' '))
+        utime = float(values[11]) / CLOCK_TICKS
+        stime = float(values[12]) / CLOCK_TICKS
+        return _common.pcputimes(utime, stime)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def create_time(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.rfind(b(')')) + 2:]
+        values = st.split(b(' '))
+        # According to the documentation, starttime is in field 21 and the
+        # unit is jiffies (clock ticks).
+        # We first divide it by the clock ticks and then add the boot time,
+        # returning seconds since the epoch, in UTC.
+        # Also use cached value if available.
+        bt = BOOT_TIME or boot_time()
+        return (float(values[19]) / CLOCK_TICKS) + bt
+
+    @wrap_exceptions
+    def memory_info(self):
+        f = open("/proc/%s/statm" % self.pid, 'rb')
+        try:
+            vms, rss = f.readline().split()[:2]
+            return _common.pmem(int(rss) * PAGESIZE,
+                                int(vms) * PAGESIZE)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        #  ============================================================
+        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
+        #  ============================================================
+        # | rss    | resident set size                   |      | RES  |
+        # | vms    | total program size                  | size | VIRT |
+        # | shared | shared pages (from shared mappings) |      | SHR  |
+        # | text   | text ('code')                       | trs  | CODE |
+        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
+        # | data   | data + stack                        | drs  | DATA |
+        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
+        #  ============================================================
+        f = open("/proc/%s/statm" % self.pid, "rb")
+        try:
+            vms, rss, shared, text, lib, data, dirty = \
+                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
+        finally:
+            f.close()
+        return pextmem(rss, vms, shared, text, lib, data, dirty)
+
+    if os.path.exists('/proc/%s/smaps' % os.getpid()):
+        def memory_maps(self):
+            """Return process's mapped memory regions as a list of nameduples.
+            Fields are explained in 'man proc'; here is an updated (Apr 2012)
+            version: http://goo.gl/fmebo
+            """
+            f = None
+            try:
+                f = open("/proc/%s/smaps" % self.pid, "rt")
+                first_line = f.readline()
+                current_block = [first_line]
+
+                def get_blocks():
+                    data = {}
+                    for line in f:
+                        fields = line.split(None, 5)
+                        if not fields[0].endswith(':'):
+                            # new block section
+                            yield (current_block.pop(), data)
+                            current_block.append(line)
+                        else:
+                            try:
+                                data[fields[0]] = int(fields[1]) * 1024
+                            except ValueError:
+                                if fields[0].startswith('VmFlags:'):
+                                    # see issue #369
+                                    continue
+                                else:
+                                    raise ValueError("don't know how to inte"
+                                                     "rpret line %r" % line)
+                    yield (current_block.pop(), data)
+
+                if first_line:  # smaps file can be empty
+                    for header, data in get_blocks():
+                        hfields = header.split(None, 5)
+                        try:
+                            addr, perms, offset, dev, inode, path = hfields
+                        except ValueError:
+                            addr, perms, offset, dev, inode, path = \
+                                hfields + ['']
+                        if not path:
+                            path = '[anon]'
+                        else:
+                            path = path.strip()
+                        yield (addr, perms, path,
+                               data['Rss:'],
+                               data.get('Size:', 0),
+                               data.get('Pss:', 0),
+                               data.get('Shared_Clean:', 0),
+                               data.get('Shared_Dirty:', 0),
+                               data.get('Private_Clean:', 0),
+                               data.get('Private_Dirty:', 0),
+                               data.get('Referenced:', 0),
+                               data.get('Anonymous:', 0),
+                               data.get('Swap:', 0))
+                f.close()
+            except EnvironmentError:
+                # XXX - Can't use wrap_exceptions decorator as we're
+                # returning a generator;  this probably needs some
+                # refactoring in order to avoid this code duplication.
+                if f is not None:
+                    f.close()
+                err = sys.exc_info()[1]
+                if err.errno in (errno.ENOENT, errno.ESRCH):
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno in (errno.EPERM, errno.EACCES):
+                    raise AccessDenied(self.pid, self._name)
+                raise
+            except:
+                if f is not None:
+                    f.close()
+                raise
+            f.close()
+
+    else:
+        def memory_maps(self, ext):
+            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or "  \
+                  "CONFIG_MMU kernel configuration option is not enabled" \
+                  % self.pid
+            raise NotImplementedError(msg)
+
+    @wrap_exceptions
+    def cwd(self):
+        # readlink() might return paths containing null bytes causing
+        # problems when used with other fs-related functions (os.*,
+        # open(), ...)
+        path = os.readlink("/proc/%s/cwd" % self.pid)
+        return path.replace('\x00', '')
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        vol = unvol = None
+        f = open("/proc/%s/status" % self.pid, "rb")
+        VOLUNTARY = b("voluntary_ctxt_switches")
+        NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
+        try:
+            for line in f:
+                if line.startswith(VOLUNTARY):
+                    vol = int(line.split()[1])
+                elif line.startswith(NON_VOLUNTARY):
+                    unvol = int(line.split()[1])
+                if vol is not None and unvol is not None:
+                    return _common.pctxsw(vol, unvol)
+            raise NotImplementedError(
+                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
+                "fields were not found in /proc/%s/status; the kernel is "
+                "probably older than 2.6.23" % self.pid)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def num_threads(self):
+        f = open("/proc/%s/status" % self.pid, "rb")
+        try:
+            THREADS = b("Threads:")
+            for line in f:
+                if line.startswith(THREADS):
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def threads(self):
+        thread_ids = os.listdir("/proc/%s/task" % self.pid)
+        thread_ids.sort()
+        retlist = []
+        hit_enoent = False
+        for thread_id in thread_ids:
+            try:
+                f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
+            except EnvironmentError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    # no such file or directory; it means thread
+                    # disappeared on us
+                    hit_enoent = True
+                    continue
+                raise
+            try:
+                st = f.read().strip()
+            finally:
+                f.close()
+            # ignore the first two values ("pid (exe)")
+            st = st[st.find(b(')')) + 2:]
+            values = st.split(b(' '))
+            utime = float(values[11]) / CLOCK_TICKS
+            stime = float(values[12]) / CLOCK_TICKS
+            ntuple = _common.pthread(int(thread_id), utime, stime)
+            retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def nice_get(self):
+        #f = open('/proc/%s/stat' % self.pid, 'r')
+        # try:
+        #   data = f.read()
+        #   return int(data.split()[18])
+        # finally:
+        #   f.close()
+
+        # Use C implementation
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
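+        # e.g. an affinity bitmask of 0b1011 maps to CPUs [0, 1, 3]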
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, cpus):
+        try:
+            cext.proc_cpu_affinity_set(self.pid, cpus)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINVAL:
+                allcpus = tuple(range(len(per_cpu_times())))
+                for cpu in cpus:
+                    if cpu not in allcpus:
+                        raise ValueError("invalid CPU #%i (choose between %s)"
+                                         % (cpu, allcpus))
+            raise
+
+    # only starting from kernel 2.6.13
+    if hasattr(cext, "proc_ioprio_get"):
+
+        @wrap_exceptions
+        def ionice_get(self):
+            ioclass, value = cext.proc_ioprio_get(self.pid)
+            return _common.pionice(ioclass, value)
+
+        @wrap_exceptions
+        def ionice_set(self, ioclass, value):
+            if ioclass in (IOPRIO_CLASS_NONE, None):
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_NONE"
+                    raise ValueError(msg)
+                ioclass = IOPRIO_CLASS_NONE
+                value = 0
+            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
+                if value is None:
+                    value = 4
+            elif ioclass == IOPRIO_CLASS_IDLE:
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
+                    raise ValueError(msg)
+                value = 0
+            else:
+                value = 0
+            if not 0 <= value <= 8:
+                raise ValueError(
+                    "value argument range expected is between 0 and 8")
+            return cext.proc_ioprio_set(self.pid, ioclass, value)
+
+    if HAS_PRLIMIT:
+        @wrap_exceptions
+        def rlimit(self, resource, limits=None):
+            # if pid is 0 prlimit() applies to the calling process and
+            # we don't want that
+            if self.pid == 0:
+                raise ValueError("can't use prlimit() against PID 0 process")
+            if limits is None:
+                # get
+                return cext.linux_prlimit(self.pid, resource)
+            else:
+                # set
+                if len(limits) != 2:
+                    raise ValueError(
+                        "second argument must be a (soft, hard) tuple")
+                soft, hard = limits
+                cext.linux_prlimit(self.pid, resource, soft, hard)
+
+    @wrap_exceptions
+    def status(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            STATE = b("State:")
+            for line in f:
+                if line.startswith(STATE):
+                    letter = line.split()[1]
+                    if PY3:
+                        letter = letter.decode()
+                    # XXX is '?' legit? (we're not supposed to return
+                    # it anyway)
+                    return PROC_STATUSES.get(letter, '?')
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        files = os.listdir("/proc/%s/fd" % self.pid)
+        hit_enoent = False
+        for fd in files:
+            file = "/proc/%s/fd/%s" % (self.pid, fd)
+            if os.path.islink(file):
+                try:
+                    file = os.readlink(file)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    # If file is not an absolute path there's no way
+                    # to tell whether it's a regular file or not,
+                    # so we skip it. A regular file is always supposed
+                    # to be absolutized though.
+                    if file.startswith('/') and isfile_strict(file):
+                        ntuple = _common.popenfile(file, int(fd))
+                        retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = _connections.retrieve(kind, self.pid)
+        # raise NSP if the process disappeared on us
+        os.stat('/proc/%s' % self.pid)
+        return ret
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def ppid(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            PPID = b("PPid:")
+            for line in f:
+                if line.startswith(PPID):
+                    # PPid: nnnn
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def uids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            UID = b('Uid:')
+            for line in f:
+                if line.startswith(UID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.puids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def gids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            GID = b('Gid:')
+            for line in f:
+                if line.startswith(GID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.pgids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()

+ 341 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py

@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""OSX platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import namedtuple, wraps
+import _psutil_osx as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PROC_STATUSES = {
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'wired'])
+
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped',
+    'path rss private swapped dirtied ref_count shadow_depth')
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- functions
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    total, active, inactive, wired, free = cext.virtual_mem()
+    avail = inactive + free
+    used = active + inactive + wired
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, wired)
+
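To make the derivation above concrete, here is a worked sketch with invented byte counts standing in for what cext.virtual_mem() would report; only the arithmetic is the point.

# Invented values, already in bytes.
total, active, inactive, wired, free = (
    8 << 30, 3 << 30, 2 << 30, 1 << 30, 2 << 30)

avail = inactive + free              # 4 GiB treated as available
used = active + inactive + wired     # 6 GiB
percent = round((total - avail) * 100.0 / total, 1)
print(avail, used, percent)          # -> 4294967296 6442450944 50.0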
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    total, used, free, sin, sout = cext.swap_mem()
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+def cpu_times():
+    """Return system CPU times as a namedtuple."""
+    user, nice, system, idle = cext.cpu_times()
+    return scputimes(user, nice, system, idle)
+
+
+def per_cpu_times():
+    """Return system CPU times as a named tuple"""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, nice, system, idle = cpu_t
+        item = scputimes(user, nice, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        if not tstamp:
+            continue
+        nt = _common.suser(user, tty or None, hostname or None, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind='inet'):
+    # Note: on OSX this will fail with AccessDenied unless
+    # the process is owned by root.
+    ret = []
+    for pid in pids():
+        try:
+            cons = Process(pid).connections(kind)
+        except NoSuchProcess:
+            continue
+        else:
+            if cons:
+                for c in cons:
+                    c = list(c) + [pid]
+                    ret.append(_common.sconn(*c))
+    return ret
+
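A usage sketch for the system-wide aggregation above that tolerates the AccessDenied case called out in the comment. It assumes the vendored package exposes the usual public psutil entry points (net_connections and the AccessDenied exception).

import psutil  # assumes the vendored copy is on sys.path

try:
    for conn in psutil.net_connections(kind="inet"):
        print(conn.laddr, conn.raddr, conn.status, conn.pid)
except psutil.AccessDenied:
    print("re-run as root: OSX requires it for system-wide connections")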
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
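The decorator above is the seam where bare OSError values from the C layer become process-level exceptions. A toy, self-contained sketch of the same translation; NoSuchProcessDemo, translate_errors and Demo are stand-ins invented for this example, not psutil names.

import errno
import functools

class NoSuchProcessDemo(Exception):
    pass  # stand-in for psutil.NoSuchProcess

def translate_errors(fun):
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError as err:
            if err.errno == errno.ESRCH:
                raise NoSuchProcessDemo(self.pid)
            raise
    return wrapper

class Demo(object):
    def __init__(self, pid):
        self.pid = pid

    @translate_errors
    def name(self):
        # simulate the C layer reporting a vanished process
        raise OSError(errno.ESRCH, "no such process")

try:
    Demo(99999).name()
except NoSuchProcessDemo as exc:
    print("translated into:", exc)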
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        if not pid_exists(self.pid):
+            raise NoSuchProcess(self.pid, self._name)
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        return cext.proc_cwd(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)
+        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid == 0:
+            return []
+        files = []
+        rawlist = cext.proc_open_files(self.pid)
+        for path, fd in rawlist:
+            if isfile_strict(path):
+                ntuple = _common.popenfile(path, fd)
+                files.append(ntuple)
+        return files
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def num_fds(self):
+        if self.pid == 0:
+            return 0
+        return cext.proc_num_fds(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def memory_maps(self):
+        return cext.proc_memory_maps(self.pid)

+ 157 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py

@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Routines common to all posix systems."""
+
+import errno
+import glob
+import os
+import sys
+import time
+
+from psutil._common import sdiskusage, usage_percent, memoize
+from psutil._compat import PY3, unicode
+
+
+class TimeoutExpired(Exception):
+    pass
+
+
+def pid_exists(pid):
+    """Check whether pid exists in the current process table."""
+    if pid == 0:
+        # According to "man 2 kill" PID 0 has a special meaning:
+        # it refers to <<every process in the process group of the
+        # calling process>> so we don't want to go any further.
+        # If we get here it means this UNIX platform *does* have
+        # a process with id 0.
+        return True
+    try:
+        os.kill(pid, 0)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno == errno.ESRCH:
+            # ESRCH == No such process
+            return False
+        elif err.errno == errno.EPERM:
+            # EPERM clearly means there's a process to deny access to
+            return True
+        else:
+            # According to "man 2 kill" possible error values are
+            # (EINVAL, EPERM, ESRCH) therefore we should never get
+            # here. If we do let's be explicit in considering this
+            # an error.
+            raise err
+    else:
+        return True
+
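The kill(pid, 0) probe above is the standard POSIX idiom for testing liveness without delivering a signal; a minimal standalone sketch (pid_alive is a name used only here).

import errno
import os

def pid_alive(pid):
    # Signal 0 triggers only the existence/permission checks.
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:    # no such process
            return False
        if err.errno == errno.EPERM:    # exists, owned by another user
            return True
        raise
    return True

print(pid_alive(os.getpid()))  # True for the current process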
+
+def wait_pid(pid, timeout=None):
+    """Wait for process with pid 'pid' to terminate and return its
+    exit status code as an integer.
+
+    If pid is not a child of os.getpid() (the current process), just
+    wait until the process disappears and return None.
+
+    If pid does not exist at all, return None immediately.
+
+    Raise TimeoutExpired if the timeout expires.
+    """
+    def check_timeout(delay):
+        if timeout is not None:
+            if timer() >= stop_at:
+                raise TimeoutExpired()
+        time.sleep(delay)
+        return min(delay * 2, 0.04)
+
+    timer = getattr(time, 'monotonic', time.time)
+    if timeout is not None:
+        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
+        stop_at = timer() + timeout
+    else:
+        waitcall = lambda: os.waitpid(pid, 0)
+
+    delay = 0.0001
+    while 1:
+        try:
+            retpid, status = waitcall()
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINTR:
+                delay = check_timeout(delay)
+                continue
+            elif err.errno == errno.ECHILD:
+                # This has two meanings:
+                # - pid is not a child of os.getpid() in which case
+                #   we keep polling until it's gone
+                # - pid never existed in the first place
+                # In both cases we'll eventually return None as we
+                # can't determine its exit status code.
+                while 1:
+                    if pid_exists(pid):
+                        delay = check_timeout(delay)
+                    else:
+                        return
+            else:
+                raise
+        else:
+            if retpid == 0:
+                # WNOHANG was used, pid is still running
+                delay = check_timeout(delay)
+                continue
+            # process exited due to a signal; return the integer of
+            # that signal
+            if os.WIFSIGNALED(status):
+                return os.WTERMSIG(status)
+            # process exited using exit(2) system call; return the
+            # integer exit(2) system call has been called with
+            elif os.WIFEXITED(status):
+                return os.WEXITSTATUS(status)
+            else:
+                # should never happen
+                raise RuntimeError("unknown process exit status")
+
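A small usage sketch for the polling loop above: spawn a short-lived child and collect its exit status through wait_pid. It assumes a POSIX system with a 'sleep' binary and that the vendored module is importable as psutil._psposix.

import subprocess

from psutil import _psposix  # vendored module; import path assumed

child = subprocess.Popen(["sleep", "0.2"])
# Returns the child's exit status (0 here) once it terminates, or raises
# _psposix.TimeoutExpired if the timeout elapses first.
status = _psposix.wait_pid(child.pid, timeout=5)
print("child exited with status", status)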
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        st = os.statvfs(path)
+    except UnicodeEncodeError:
+        if not PY3 and isinstance(path, unicode):
+            # this is a bug with os.statvfs() and unicode on
+            # Python 2, see:
+            # - https://code.google.com/p/psutil/issues/detail?id=416
+            # - http://bugs.python.org/issue18695
+            try:
+                path = path.encode(sys.getfilesystemencoding())
+            except UnicodeEncodeError:
+                pass
+            st = os.statvfs(path)
+        else:
+            raise
+    free = (st.f_bavail * st.f_frsize)
+    total = (st.f_blocks * st.f_frsize)
+    used = (st.f_blocks - st.f_bfree) * st.f_frsize
+    percent = usage_percent(used, total, _round=1)
+    # NB: the percentage is about 5% lower than what is shown by df
+    # due to reserved blocks that we are currently not considering:
+    # http://goo.gl/sWGbH
+    return sdiskusage(total, used, free, percent)
+
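For reference, the statvfs arithmetic above run over invented figures; the last comment shows how the reserved-block note plays out (df divides by used plus the space available to non-root users, so its percentage comes out higher).

# Invented statvfs-style numbers: 4 KiB fragments, 1,000,000 blocks,
# 200,000 blocks free overall, 150,000 of them available to non-root.
f_frsize, f_blocks, f_bfree, f_bavail = 4096, 1000000, 200000, 150000

total = f_blocks * f_frsize                 # 4,096,000,000 bytes
free = f_bavail * f_frsize                  #   614,400,000 bytes
used = (f_blocks - f_bfree) * f_frsize      # 3,276,800,000 bytes
percent = round(used * 100.0 / total, 1)    # 80.0; df would report ~84%
print(total, used, free, percent)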
+
+@memoize
+def _get_terminal_map():
+    ret = {}
+    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
+    for name in ls:
+        assert name not in ret
+        try:
+            ret[os.stat(name).st_rdev] = name
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno != errno.ENOENT:
+                raise
+    return ret

+ 533 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py

@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import os
+import socket
+import subprocess
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (conn_tmap, usage_percent, isfile_strict)
+from psutil._compat import namedtuple, PY3
+import _psutil_posix
+import _psutil_sunos as cext
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
+
+PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SONPROC: _common.STATUS_RUNNING,  # same as run
+    cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific
+    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific
+}
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple('pextmem', ['rss', 'vms'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+# --- functions
+
+disk_io_counters = cext.disk_io_counters
+net_io_counters = cext.net_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def virtual_memory():
+    # we could have done this with kstat, but imho this is good enough
+    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+    # note: there's no difference on Solaris
+    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+    sin, sout = cext.swap_mem()
+    # XXX
+    # we are supposed to get total/free by doing so:
+    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+    #     usr/src/cmd/swap/swap.c
+    # ...nevertheless I can't manage to obtain the same numbers as the
+    # 'swap' cmdline utility, so let's parse its output (sigh!)
+    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)
+    stdout, stderr = p.communicate()
+    if PY3:
+        stdout = stdout.decode(sys.stdout.encoding)
+    if p.returncode != 0:
+        raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
+
+    lines = stdout.strip().split('\n')[1:]
+    if not lines:
+        raise RuntimeError('no swap device(s) configured')
+    total = free = 0
+    for line in lines:
+        line = line.split()
+        t, f = line[-2:]
+        t = t.replace('K', '')
+        f = f.replace('K', '')
+        total += int(int(t) * 1024)
+        free += int(int(f) * 1024)
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent,
+                         sin * PAGE_SIZE, sout * PAGE_SIZE)
+
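The parsing above is tied to the column layout of 'swap -l -k'; this sketch applies the same arithmetic to an invented two-entry sample (the sample text is an assumption, not output captured from a real Solaris host).

sample = """\
swapfile                  dev  swaplo   blocks     free
/dev/zvol/dsk/rpool/swap  256,1     8  2097144  2097144
/dev/zvol/dsk/rpool/swap2 256,2     8  1048568   524284
"""

total = free = 0
for line in sample.strip().split('\n')[1:]:
    t, f = line.split()[-2:]
    total += int(t.replace('K', '')) * 1024
    free += int(f.replace('K', '')) * 1024
used = total - free
print(total, used, free)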
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir('/proc') if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check for the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+def cpu_times():
+    """Return system-wide CPU times as a named tuple"""
+    ret = cext.per_cpu_times()
+    return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples"""
+    ret = cext.per_cpu_times()
+    return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # mimic os.cpu_count() behavior
+        return None
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    localhost = (':0.0', ':0')
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname in localhost:
+            hostname = 'localhost'
+        nt = _common.suser(user, tty, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def disk_partitions(all=False):
+    """Return system disk partitions."""
+    # TODO - the filtering logic should be better checked so that
+    # it tries to reflect 'df' as much as possible
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            # Unlike, say, Linux, we don't have a list of common fs
+            # types, so the best we can do, AFAIK, is to keep only
+            # the filesystems whose total size is > 0.
+            if not disk_usage(mountpoint).total:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    Only INET sockets are returned (UNIX are not).
+    """
+    cmap = _common.conn_tmap.copy()
+    if _pid == -1:
+        cmap.pop('unix', 0)
+    if kind not in cmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in cmap])))
+    families, types = _common.conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type_, laddr, raddr, status, pid = item
+        if fam not in families:
+            continue
+        if type_ not in types:
+            continue
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+        ret.append(nt)
+    return ret
+
+
+def wrap_exceptions(fun):
+    """Call callable into a try/except clause and translate ENOENT,
+    EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+    """
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if
+            # process is gone in meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        # note: max len == 15
+        return cext.proc_name_and_args(self.pid)[0]
+
+    @wrap_exceptions
+    def exe(self):
+        # Will be guessed later from cmdline, but we want to explicitly
+        # invoke cmdline here in order to get an AccessDenied
+        # exception if the user does not have enough privileges.
+        self.cmdline()
+        return ""
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_name_and_args(self.pid)[1].split(' ')
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_basic_info(self.pid)[3]
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_basic_info(self.pid)[5]
+
+    @wrap_exceptions
+    def nice_get(self):
+        # For some reason getpriority(3) returns ESRCH (no such process)
+        # for certain low-pid processes, no matter what (even as root).
+        # The process actually exists though, as it has a name,
+        # creation time, etc.
+        # The best thing we can do here appears to be raising
+        # AccessDenied.
+        # Note: tested on Solaris 11; on Open Solaris 5 everything is
+        # fine.
+        try:
+            return _psutil_posix.getpriority(self.pid)
+        except EnvironmentError:
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                if pid_exists(self.pid):
+                    raise AccessDenied(self.pid, self._name)
+            raise
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        if self.pid in (2, 3):
+            # Special case PIDs: internally setpriority(3) returns ESRCH
+            # (no such process), no matter what.
+            # The process actually exists though, as it has a name,
+            # creation time, etc.
+            raise AccessDenied(self.pid, self._name)
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_basic_info(self.pid)[0]
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        _, _, _, real, effective, saved = cext.proc_cred(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def terminal(self):
+        hit_enoent = False
+        tty = cext.proc_basic_info(self.pid)[0]
+        if tty != cext.PRNODEV:
+            for x in (0, 1, 2, 255):
+                try:
+                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        # /proc/PID/path/cwd may not be resolved by readlink() even if
+        # it exists (ls shows it). If that's the case and the process
+        # is still alive return None (we can return None also on BSD).
+        # Reference: http://goo.gl/55XgO
+        try:
+            return os.readlink("/proc/%s/path/cwd" % self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                os.stat("/proc/%s" % self.pid)
+                return None
+            raise
+
+    @wrap_exceptions
+    def memory_info(self):
+        ret = cext.proc_basic_info(self.pid)
+        rss, vms = ret[1] * 1024, ret[2] * 1024
+        return _common.pmem(rss, vms)
+
+    # it seems Solaris uses rss and vms only
+    memory_info_ex = memory_info
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_basic_info(self.pid)[6]
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        ret = []
+        tids = os.listdir('/proc/%d/lwp' % self.pid)
+        hit_enoent = False
+        for tid in tids:
+            tid = int(tid)
+            try:
+                utime, stime = cext.query_process_thread(
+                    self.pid, tid)
+            except EnvironmentError:
+                # ENOENT == thread gone in meantime
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    hit_enoent = True
+                    continue
+                raise
+            else:
+                nt = _common.pthread(tid, utime, stime)
+                ret.append(nt)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return ret
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        hit_enoent = False
+        pathdir = '/proc/%d/path' % self.pid
+        for fd in os.listdir('/proc/%d/fd' % self.pid):
+            path = os.path.join(pathdir, fd)
+            if os.path.islink(path):
+                try:
+                    file = os.readlink(path)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    if isfile_strict(file):
+                        retlist.append(_common.popenfile(file, int(fd)))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    def _get_unix_sockets(self, pid):
+        """Get UNIX sockets used by process by parsing 'pfiles' output."""
+        # TODO: rewrite this in C (...but the damn netstat source code
+        # does not include this part! Argh!!)
+        cmd = "pfiles %s" % pid
+        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        if PY3:
+            stdout, stderr = [x.decode(sys.stdout.encoding)
+                              for x in (stdout, stderr)]
+        if p.returncode != 0:
+            if 'permission denied' in stderr.lower():
+                raise AccessDenied(self.pid, self._name)
+            if 'no such process' in stderr.lower():
+                raise NoSuchProcess(self.pid, self._name)
+            raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+        lines = stdout.split('\n')[2:]
+        for i, line in enumerate(lines):
+            line = line.lstrip()
+            if line.startswith('sockname: AF_UNIX'):
+                path = line.split(' ', 2)[2]
+                type = lines[i - 2].strip()
+                if type == 'SOCK_STREAM':
+                    type = socket.SOCK_STREAM
+                elif type == 'SOCK_DGRAM':
+                    type = socket.SOCK_DGRAM
+                else:
+                    type = -1
+                yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
+
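Likewise, _get_unix_sockets() leans on the textual layout of 'pfiles'; the sketch below runs the same scan over an invented fragment shaped to match the parser's expectations (real pfiles output may differ).

import socket

lines = [
    "   3: S_IFSOCK mode:0666 dev:543,0 ino:46228 uid:0 gid:0 size:0",
    "      O_RDWR",
    "        SOCK_STREAM",
    "        SO_SNDBUF(16384),SO_RCVBUF(5120)",
    "        sockname: AF_UNIX /var/run/demo.sock",
]

for i, line in enumerate(lines):
    line = line.lstrip()
    if line.startswith("sockname: AF_UNIX"):
        path = line.split(' ', 2)[2]
        kind = lines[i - 2].strip()   # two lines up in this layout
        sock_type = (socket.SOCK_STREAM if kind == 'SOCK_STREAM'
                     else socket.SOCK_DGRAM)
        print(path, sock_type)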
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = net_connections(kind, _pid=self.pid)
+        # The underlying C implementation retrieves all OS connections
+        # and filters them by PID.  At this point we can't tell whether
+        # an empty list means there were no connections for the process
+        # or the process is no longer active, so we force NoSuchProcess
+        # in case the PID is no longer there.
+        if not ret:
+            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone
+
+        # UNIX sockets
+        if kind in ('all', 'unix'):
+            ret.extend([_common.pconn(*conn) for conn in
+                        self._get_unix_sockets(self.pid)])
+        return ret
+
+    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+    @wrap_exceptions
+    def memory_maps(self):
+        def toaddr(start, end):
+            return '%s-%s' % (hex(start)[2:].strip('L'),
+                              hex(end)[2:].strip('L'))
+
+        retlist = []
+        rawlist = cext.proc_memory_maps(self.pid)
+        hit_enoent = False
+        for item in rawlist:
+            addr, addrsize, perm, name, rss, anon, locked = item
+            addr = toaddr(addr, addrsize)
+            if not name.startswith('['):
+                try:
+                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        # sometimes the link may not be resolved by
+                        # readlink() even if it exists (ls shows it).
+                        # If that's the case we just return the
+                        # unresolved link path.
+                        # This seems to be an inconsistency with /proc,
+                        # similar to: http://goo.gl/55XgO
+                        name = '/proc/%s/path/%s' % (self.pid, name)
+                        hit_enoent = True
+                    else:
+                        raise
+            retlist.append((addr, perm, name, rss, anon, locked))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)

+ 2212 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c

@@ -0,0 +1,2212 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * FreeBSD platform-specific module methods for _psutil_bsd
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <paths.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/file.h>
+#include <net/route.h>
+
+#include <sys/socket.h>
+#include <sys/socketvar.h>    // for struct xsocket
+#include <sys/un.h>
+#include <sys/unpcb.h>
+// for xinpcb struct
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/tcp_var.h>   // for struct xtcpcb
+#include <netinet/tcp_fsm.h>   // for TCP connection states
+#include <arpa/inet.h>         // for inet_ntop()
+
+#if __FreeBSD_version < 900000
+#include <utmp.h>         // system users
+#else
+#include <utmpx.h>
+#endif
+#include <devstat.h>      // get io counters
+#include <sys/vmmeter.h>  // needed for vmtotal struct
+#include <libutil.h>      // process open files, shared libs (kinfo_getvmmap)
+#include <sys/mount.h>
+
+#include <net/if.h>       // net io counters
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>   // process open files/connections
+#include <sys/un.h>
+
+#include "_psutil_bsd.h"
+#include "_psutil_common.h"
+#include "arch/bsd/process_info.h"
+
+
+// convert a timeval struct to a double
+#define TV2DOUBLE(t)    ((t).tv_sec + (t).tv_usec / 1000000.0)
+
+
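TV2DOUBLE simply flattens a timeval (tv_sec, tv_usec) pair into fractional seconds; the same conversion in Python terms, with made-up values:

tv_sec, tv_usec = 5, 250000
seconds = tv_sec + tv_usec / 1000000.0   # 5.25
print(seconds)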
+/*
+ * Utility function which fills a kinfo_proc struct based on process pid
+ */
+static int
+psutil_kinfo_proc(const pid_t pid, struct kinfo_proc *proc)
+{
+    int mib[4];
+    size_t size;
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID;
+    mib[3] = pid;
+
+    size = sizeof(struct kinfo_proc);
+
+    if (sysctl((int *)mib, 4, proc, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return -1;
+    }
+
+    // sysctl stores 0 in the size if we can't find the process information.
+    if (size == 0) {
+        NoSuchProcess();
+        return -1;
+    }
+    return 0;
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    kinfo_proc *proclist = NULL;
+    kinfo_proc *orig_address = NULL;
+    size_t num_processes;
+    size_t idx;
+    PyObject *retlist = PyList_New(0);
+    PyObject *pid = NULL;
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    if (psutil_get_proc_list(&proclist, &num_processes) != 0) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to retrieve process list.");
+        goto error;
+    }
+
+    if (num_processes > 0) {
+        orig_address = proclist; // save so we can free it after we're done
+        for (idx = 0; idx < num_processes; idx++) {
+            pid = Py_BuildValue("i", proclist->ki_pid);
+            if (!pid)
+                goto error;
+            if (PyList_Append(retlist, pid))
+                goto error;
+            Py_DECREF(pid);
+            proclist++;
+        }
+        free(orig_address);
+    }
+
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (orig_address != NULL) {
+        free(orig_address);
+    }
+    return NULL;
+}
+
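On the Python side this routine backs the module-level pids() call; a short usage sketch, assuming the vendored psutil package (with its compiled extension) is importable.

import psutil  # assumes the vendored copy, with its C extension, is built

all_pids = psutil.pids()   # list of ints produced by the platform routine
print(len(all_pids), "processes; lowest pids:", sorted(all_pids)[:5])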
+
+/*
+ * Return a Python float indicating the system boot time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    // fetch sysctl "kern.boottime"
+    static int request[2] = { CTL_KERN, KERN_BOOTTIME };
+    struct timeval boottime;
+    size_t len = sizeof(boottime);
+
+    if (sysctl(request, 2, &boottime, &len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    return Py_BuildValue("d", (double)boottime.tv_sec);
+}
+
+
+/*
+ * Return process name from kinfo_proc as a Python string.
+ */
+static PyObject *
+psutil_proc_name(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("s", kp.ki_comm);
+}
+
+
+/*
+ * Return process pathname executable.
+ * Thanks to Robert N. M. Watson:
+ * http://fxr.googlebit.com/source/usr.bin/procstat/procstat_bin.c?v=8-CURRENT
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args)
+{
+    long pid;
+    char pathname[PATH_MAX];
+    int error;
+    int mib[4];
+    size_t size;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PATHNAME;
+    mib[3] = pid;
+
+    size = sizeof(pathname);
+    error = sysctl(mib, 4, pathname, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    if (size == 0 || strlen(pathname) == 0) {
+        if (psutil_pid_exists(pid) == 0) {
+            return NoSuchProcess();
+        }
+        else {
+            strcpy(pathname, "");
+        }
+    }
+    return Py_BuildValue("s", pathname);
+}
+
+
+/*
+ * Return process cmdline as a Python list of cmdline arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *arglist = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // get the commandline, defined in arch/bsd/process_info.c
+    arglist = psutil_get_arg_list(pid);
+
+    // psutil_get_arg_list() returns NULL only if psutil_cmd_args
+    // failed with ESRCH (no process with that PID)
+    if (NULL == arglist) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    return Py_BuildValue("N", arglist);
+}
+
+
+/*
+ * Return process parent pid from kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_ppid(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.ki_ppid);
+}
+
+
+/*
+ * Return process status as a Python integer.
+ */
+static PyObject *
+psutil_proc_status(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", (int)kp.ki_stat);
+}
+
+
+/*
+ * Return process real, effective and saved user ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_uids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.ki_ruid,
+                         (long)kp.ki_uid,
+                         (long)kp.ki_svuid);
+}
+
+
+/*
+ * Return process real, effective and saved group ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_gids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.ki_rgid,
+                         (long)kp.ki_groups[0],
+                         (long)kp.ki_svgid);
+}
+
+
+/*
+ * Return the process controlling terminal (tty) device number from
+ * kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_tty_nr(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", kp.ki_tdev);
+}
+
+
+/*
+ * Return the number of context switches performed by process as a tuple.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("(ll)",
+                         kp.ki_rusage.ru_nvcsw,
+                         kp.ki_rusage.ru_nivcsw);
+}
+
+
+/*
+ * Return number of threads used by process as a Python integer.
+ */
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.ki_numthreads);
+}
+
+
+/*
+ * Retrieves all threads used by process returning a list of tuples
+ * including thread id, user time and system time.
+ * Thanks to Robert N. M. Watson:
+ * http://fxr.googlebit.com/source/usr.bin/procstat/
+ *     procstat_threads.c?v=8-CURRENT
+ */
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    int mib[4];
+    struct kinfo_proc *kip = NULL;
+    struct kinfo_proc *kipp = NULL;
+    int error;
+    unsigned int i;
+    size_t size;
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+
+    // we need to re-query for thread information, so don't use *kipp
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID | KERN_PROC_INC_THREAD;
+    mib[3] = pid;
+
+    size = 0;
+    error = sysctl(mib, 4, NULL, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    if (size == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+
+    kip = malloc(size);
+    if (kip == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    error = sysctl(mib, 4, kip, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    if (size == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+
+    for (i = 0; i < size / sizeof(*kipp); i++) {
+        kipp = &kip[i];
+        pyTuple = Py_BuildValue("Idd",
+                                kipp->ki_tid,
+                                TV2DOUBLE(kipp->ki_rusage.ru_utime),
+                                TV2DOUBLE(kipp->ki_rusage.ru_stime));
+        if (pyTuple == NULL)
+            goto error;
+        if (PyList_Append(retList, pyTuple))
+            goto error;
+        Py_DECREF(pyTuple);
+    }
+    free(kip);
+    return retList;
+
+error:
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (kip != NULL) {
+        free(kip);
+    }
+    return NULL;
+}
+
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long pid;
+    double user_t, sys_t;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    // convert from microseconds to seconds
+    user_t = TV2DOUBLE(kp.ki_rusage.ru_utime);
+    sys_t = TV2DOUBLE(kp.ki_rusage.ru_stime);
+    return Py_BuildValue("(dd)", user_t, sys_t);
+}
+
+
+/*
+ * Return the number of logical CPUs in the system.
+ * XXX this could be shared with OSX
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    int ncpu;
+    size_t len;
+
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("i", ncpu);
+    }
+}
+
+
+/*
+ * Return an XML string from which we'll determine the number of
+ * physical CPU cores in the system.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    void *topology = NULL;
+    size_t size = 0;
+
+    if (sysctlbyname("kern.sched.topology_spec", NULL, &size, NULL, 0))
+        goto error;
+
+    topology = malloc(size);
+    if (!topology) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    if (sysctlbyname("kern.sched.topology_spec", topology, &size, NULL, 0))
+        goto error;
+
+    return Py_BuildValue("s", topology);
+
+error:
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("d", TV2DOUBLE(kp.ki_start));
+}
+
+
+/*
+ * Return process I/O counters as a Python tuple
+ * (read count, write count, -1, -1); byte counts are not available.
+ */
+static PyObject *
+psutil_proc_io_counters(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    // there's apparently no way to determine the byte counts, hence return -1.
+    return Py_BuildValue("(llll)",
+                         kp.ki_rusage.ru_inblock,
+                         kp.ki_rusage.ru_oublock,
+                         -1,
+                         -1);
+}
+
+
+/*
+ * Return extended memory info for a process as a Python tuple.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("(lllll)",
+                         ptoa(kp.ki_rssize),    // rss
+                         (long)kp.ki_size,      // vms
+                         ptoa(kp.ki_tsize),     // text
+                         ptoa(kp.ki_dsize),     // data
+                         ptoa(kp.ki_ssize));    // stack
+}
+
+
+/*
+ * Return virtual memory usage statistics.
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+    unsigned int   total, active, inactive, wired, cached, free;
+    size_t         size = sizeof(total);
+    struct vmtotal vm;
+    int            mib[] = {CTL_VM, VM_METER};
+    long           pagesize = getpagesize();
+#if __FreeBSD_version > 702101
+    long buffers;
+#else
+    int buffers;
+#endif
+    size_t buffers_size = sizeof(buffers);
+
+    if (sysctlbyname("vm.stats.vm.v_page_count", &total, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_active_count", &active, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_inactive_count",
+                     &inactive, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_wire_count", &wired, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_cache_count", &cached, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_free_count", &free, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vfs.bufspace", &buffers, &buffers_size, NULL, 0))
+        goto error;
+
+    size = sizeof(vm);
+    if (sysctl(mib, 2, &vm, &size, NULL, 0) != 0)
+        goto error;
+
+    return Py_BuildValue("KKKKKKKK",
+        (unsigned long long) total    * pagesize,
+        (unsigned long long) free     * pagesize,
+        (unsigned long long) active   * pagesize,
+        (unsigned long long) inactive * pagesize,
+        (unsigned long long) wired    * pagesize,
+        (unsigned long long) cached   * pagesize,
+        (unsigned long long) buffers,
+        (unsigned long long) (vm.t_vmshr + vm.t_rmshr) * pagesize  // shared
+    );
+
+error:
+    PyErr_SetFromErrno(PyExc_OSError);
+    return NULL;
+}
+
+
+#ifndef _PATH_DEVNULL
+#define _PATH_DEVNULL "/dev/null"
+#endif
+
+/*
+ * Return swap memory stats (see 'swapinfo' cmdline tool)
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+    kvm_t *kd;
+    struct kvm_swap kvmsw[1];
+    unsigned int swapin, swapout, nodein, nodeout;
+    size_t size = sizeof(unsigned int);
+
+    kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open failed");
+    if (kd == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "kvm_open failed");
+        return NULL;
+    }
+
+    if (kvm_getswapinfo(kd, kvmsw, 1, 0) < 0) {
+        kvm_close(kd);
+        PyErr_SetString(PyExc_RuntimeError, "kvm_getswapinfo failed");
+        return NULL;
+    }
+
+    kvm_close(kd);
+
+    if (sysctlbyname("vm.stats.vm.v_swapin", &swapin, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_swapout", &swapout, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_vnodein", &nodein, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_vnodeout", &nodeout, &size, NULL, 0) == -1)
+        goto sbn_error;
+
+    return Py_BuildValue("(iiiII)",
+                         kvmsw[0].ksw_total,                     // total
+                         kvmsw[0].ksw_used,                      // used
+                         kvmsw[0].ksw_total - kvmsw[0].ksw_used, // free
+                         swapin + swapout,                       // swap in
+                         nodein + nodeout);                      // swap out
+
+sbn_error:
+    PyErr_SetFromErrno(PyExc_OSError);
+    return NULL;
+}
+
+
+/*
+ * Return a Python tuple representing user, kernel and idle CPU times
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    long cpu_time[CPUSTATES];
+    size_t size;
+
+    size = sizeof(cpu_time);
+
+    if (sysctlbyname("kern.cp_time", &cpu_time, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+
+    return Py_BuildValue("(ddddd)",
+                         (double)cpu_time[CP_USER] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_NICE] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_SYS] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_IDLE] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_INTR] / CLOCKS_PER_SEC
+                        );
+}
+
+
+/*
+ * XXX
+ * These functions are available on FreeBSD 8 only.
+ * In the upper python layer we do various tricks to avoid crashing
+ * and/or to provide alternatives where possible.
+ */
+
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+/*
+ * Return files opened by process as a list of (path, fd) tuples
+ */
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long pid;
+    int i, cnt;
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    struct kinfo_proc kipp;
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        goto error;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        kif = &freep[i];
+        if ((kif->kf_type == KF_TYPE_VNODE) &&
+                (kif->kf_vnode_type == KF_VTYPE_VREG))
+        {
+            tuple = Py_BuildValue("(si)", kif->kf_path, kif->kf_fd);
+            if (tuple == NULL)
+                goto error;
+            if (PyList_Append(retList, tuple))
+                goto error;
+            Py_DECREF(tuple);
+        }
+    }
+    free(freep);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(retList);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+
+
+/*
+ * Return the number of file descriptors opened by process.
+ */
+static PyObject *
+psutil_proc_num_fds(PyObject *self, PyObject *args)
+{
+    long pid;
+    int cnt;
+
+    struct kinfo_file *freep;
+    struct kinfo_proc kipp;
+
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        return NULL;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        return NULL;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        return NULL;
+    }
+    free(freep);
+
+    return Py_BuildValue("i", cnt);
+}
+
+
+/*
+ * Return process current working directory.
+ */
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *path = NULL;
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    struct kinfo_proc kipp;
+
+    int i, cnt;
+
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        goto error;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        kif = &freep[i];
+        if (kif->kf_fd == KF_FD_TYPE_CWD) {
+            path = Py_BuildValue("s", kif->kf_path);
+            if (!path)
+                goto error;
+            break;
+        }
+    }
+    /*
+     * For lower pids it seems we can't retrieve any information
+     * (lsof can't do it either).  Since this happens even
+     * as root we return an empty string instead of AccessDenied.
+     */
+    if (path == NULL) {
+        path = Py_BuildValue("s", "");
+    }
+    free(freep);
+    return path;
+
+error:
+    Py_XDECREF(path);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+
+
+// The tcplist fetching and walking is borrowed from netstat/inet.c.
+static char *
+psutil_fetch_tcplist(void)
+{
+    char *buf;
+    size_t len;
+    int error;
+
+    for (;;) {
+        if (sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0) < 0) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            return NULL;
+        }
+        buf = malloc(len);
+        if (buf == NULL) {
+            PyErr_NoMemory();
+            return NULL;
+        }
+        if (sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0) < 0) {
+            free(buf);
+            PyErr_SetFromErrno(PyExc_OSError);
+            return NULL;
+        }
+        return buf;
+    }
+}
+
+static int
+psutil_sockaddr_port(int family, struct sockaddr_storage *ss)
+{
+    struct sockaddr_in6 *sin6;
+    struct sockaddr_in *sin;
+
+    if (family == AF_INET) {
+        sin = (struct sockaddr_in *)ss;
+        return (sin->sin_port);
+    } else {
+        sin6 = (struct sockaddr_in6 *)ss;
+        return (sin6->sin6_port);
+    }
+}
+
+static void *
+psutil_sockaddr_addr(int family, struct sockaddr_storage *ss)
+{
+    struct sockaddr_in6 *sin6;
+    struct sockaddr_in *sin;
+
+    if (family == AF_INET) {
+        sin = (struct sockaddr_in *)ss;
+        return (&sin->sin_addr);
+    } else {
+        sin6 = (struct sockaddr_in6 *)ss;
+        return (&sin6->sin6_addr);
+    }
+}
+
+static socklen_t
+psutil_sockaddr_addrlen(int family)
+{
+    if (family == AF_INET)
+        return (sizeof(struct in_addr));
+    else
+        return (sizeof(struct in6_addr));
+}
+
+static int
+psutil_sockaddr_matches(int family, int port, void *pcb_addr,
+                        struct sockaddr_storage *ss)
+{
+    if (psutil_sockaddr_port(family, ss) != port)
+        return (0);
+    return (memcmp(psutil_sockaddr_addr(family, ss), pcb_addr,
+                   psutil_sockaddr_addrlen(family)) == 0);
+}
+
+static struct tcpcb *
+psutil_search_tcplist(char *buf, struct kinfo_file *kif)
+{
+    struct tcpcb *tp;
+    struct inpcb *inp;
+    struct xinpgen *xig, *oxig;
+    struct xsocket *so;
+
+    oxig = xig = (struct xinpgen *)buf;
+    for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
+            xig->xig_len > sizeof(struct xinpgen);
+            xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
+        tp = &((struct xtcpcb *)xig)->xt_tp;
+        inp = &((struct xtcpcb *)xig)->xt_inp;
+        so = &((struct xtcpcb *)xig)->xt_socket;
+
+        if (so->so_type != kif->kf_sock_type ||
+                so->xso_family != kif->kf_sock_domain ||
+                so->xso_protocol != kif->kf_sock_protocol)
+            continue;
+
+        if (kif->kf_sock_domain == AF_INET) {
+            if (!psutil_sockaddr_matches(
+                    AF_INET, inp->inp_lport, &inp->inp_laddr,
+                    &kif->kf_sa_local))
+                continue;
+            if (!psutil_sockaddr_matches(
+                    AF_INET, inp->inp_fport, &inp->inp_faddr,
+                    &kif->kf_sa_peer))
+                continue;
+        } else {
+            if (!psutil_sockaddr_matches(
+                    AF_INET6, inp->inp_lport, &inp->in6p_laddr,
+                    &kif->kf_sa_local))
+                continue;
+            if (!psutil_sockaddr_matches(
+                    AF_INET6, inp->inp_fport, &inp->in6p_faddr,
+                    &kif->kf_sa_peer))
+                continue;
+        }
+
+        return (tp);
+    }
+    return NULL;
+}
+
+
+// a sentinel value for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
+
+/*
+ * Return connections opened by process.
+ */
+static PyObject *
+psutil_proc_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int i, cnt;
+
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    char *tcplist = NULL;
+    struct tcpcb *tcp;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+    PyObject *_family = NULL;
+    PyObject *_type = NULL;
+
+    if (retList == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        goto error;
+    }
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    tcplist = psutil_fetch_tcplist();
+    if (tcplist == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        int lport, rport, state;
+        char lip[200], rip[200];
+        char path[PATH_MAX];
+        int inseq;
+        tuple = NULL;
+        laddr = NULL;
+        raddr = NULL;
+
+        kif = &freep[i];
+        if (kif->kf_type == KF_TYPE_SOCKET)
+        {
+            // apply filters
+            _family = PyLong_FromLong((long)kif->kf_sock_domain);
+            inseq = PySequence_Contains(af_filter, _family);
+            Py_DECREF(_family);
+            if (inseq == 0) {
+                continue;
+            }
+            _type = PyLong_FromLong((long)kif->kf_sock_type);
+            inseq = PySequence_Contains(type_filter, _type);
+            Py_DECREF(_type);
+            if (inseq == 0) {
+                continue;
+            }
+
+            // IPv4 / IPv6 socket
+            if ((kif->kf_sock_domain == AF_INET) ||
+                    (kif->kf_sock_domain == AF_INET6)) {
+                // fill status
+                state = PSUTIL_CONN_NONE;
+                if (kif->kf_sock_type == SOCK_STREAM) {
+                    tcp = psutil_search_tcplist(tcplist, kif);
+                    if (tcp != NULL)
+                        state = (int)tcp->t_state;
+                }
+
+                // build addr and port
+                inet_ntop(
+                    kif->kf_sock_domain,
+                    psutil_sockaddr_addr(kif->kf_sock_domain,
+                                         &kif->kf_sa_local),
+                    lip,
+                    sizeof(lip));
+                inet_ntop(
+                    kif->kf_sock_domain,
+                    psutil_sockaddr_addr(kif->kf_sock_domain,
+                                         &kif->kf_sa_peer),
+                    rip,
+                    sizeof(rip));
+                lport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+                                                   &kif->kf_sa_local));
+                rport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+                                                   &kif->kf_sa_peer));
+
+                // construct python tuple/list
+                laddr = Py_BuildValue("(si)", lip, lport);
+                if (!laddr)
+                    goto error;
+                if (rport != 0) {
+                    raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    raddr = Py_BuildValue("()");
+                }
+                if (!raddr)
+                    goto error;
+                tuple = Py_BuildValue("(iiiNNi)",
+                                      kif->kf_fd,
+                                      kif->kf_sock_domain,
+                                      kif->kf_sock_type,
+                                      laddr,
+                                      raddr,
+                                      state);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+            // UNIX socket
+            else if (kif->kf_sock_domain == AF_UNIX) {
+                struct sockaddr_un *sun;
+
+                sun = (struct sockaddr_un *)&kif->kf_sa_local;
+                snprintf(
+                    path, sizeof(path), "%.*s",
+                    (sun->sun_len - (sizeof(*sun) - sizeof(sun->sun_path))),
+                    sun->sun_path);
+
+                tuple = Py_BuildValue("(iiisOi)",
+                                      kif->kf_fd,
+                                      kif->kf_sock_domain,
+                                      kif->kf_sock_type,
+                                      path,
+                                      Py_None,
+                                      PSUTIL_CONN_NONE);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+                Py_INCREF(Py_None);
+            }
+        }
+    }
+    free(freep);
+    free(tcplist);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    Py_DECREF(retList);
+    if (freep != NULL)
+        free(freep);
+    if (tcplist != NULL)
+        free(tcplist);
+    return NULL;
+}
+
+
+/*
+ * Return a Python list of tuple representing per-cpu times
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    static int maxcpus;
+    int mib[2];
+    int ncpu;
+    size_t len;
+    size_t size;
+    int i;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // retrieve maxcpus value
+    size = sizeof(maxcpus);
+    if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
+        Py_DECREF(py_retlist);
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    long cpu_time[maxcpus][CPUSTATES];
+
+    // retrieve the number of cpus
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    // per-cpu info
+    size = sizeof(cpu_time);
+    if (sysctlbyname("kern.cp_times", &cpu_time, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < ncpu; i++) {
+        py_cputime = Py_BuildValue(
+            "(ddddd)",
+            (double)cpu_time[i][CP_USER] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_NICE] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_SYS] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_IDLE] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_INTR] / CLOCKS_PER_SEC);
+        if (!py_cputime)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+    }
+
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
+
+// remove spaces from string
+void remove_spaces(char *str) {
+    char *p1 = str;
+    char *p2 = str;
+    do {
+        while (*p2 == ' ')
+            p2++;
+    } while ((*p1++ = *p2++));
+}
+
+
+/*
+ * Return a list of tuples for every process memory maps.
+ * 'procstat' cmdline utility has been used as an example.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ptrwidth;
+    int i, cnt;
+    char addr[30];
+    char perms[4];
+    const char *path;
+    struct kinfo_proc kp;
+    struct kinfo_vmentry *freep = NULL;
+    struct kinfo_vmentry *kve;
+    ptrwidth = 2 * sizeof(void *);
+    PyObject *pytuple = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        goto error;
+    }
+
+    freep = kinfo_getvmmap(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+    for (i = 0; i < cnt; i++) {
+        pytuple = NULL;
+        kve = &freep[i];
+        addr[0] = '\0';
+        perms[0] = '\0';
+        sprintf(addr, "%#*jx-%#*jx", ptrwidth, (uintmax_t)kve->kve_start,
+                ptrwidth, (uintmax_t)kve->kve_end);
+        remove_spaces(addr);
+        strlcat(perms, kve->kve_protection & KVME_PROT_READ ? "r" : "-",
+                sizeof(perms));
+        strlcat(perms, kve->kve_protection & KVME_PROT_WRITE ? "w" : "-",
+                sizeof(perms));
+        strlcat(perms, kve->kve_protection & KVME_PROT_EXEC ? "x" : "-",
+                sizeof(perms));
+
+        if (strlen(kve->kve_path) == 0) {
+            switch (kve->kve_type) {
+            case KVME_TYPE_NONE:
+                path = "[none]";
+                break;
+            case KVME_TYPE_DEFAULT:
+                path = "[default]";
+                break;
+            case KVME_TYPE_VNODE:
+                path = "[vnode]";
+                break;
+            case KVME_TYPE_SWAP:
+                path = "[swap]";
+                break;
+            case KVME_TYPE_DEVICE:
+                path = "[device]";
+                break;
+            case KVME_TYPE_PHYS:
+                path = "[phys]";
+                break;
+            case KVME_TYPE_DEAD:
+                path = "[dead]";
+                break;
+            case KVME_TYPE_SG:
+                path = "[sg]";
+                break;
+            case KVME_TYPE_UNKNOWN:
+                path = "[unknown]";
+                break;
+            default:
+                path = "[?]";
+                break;
+            }
+        }
+        else {
+            path = kve->kve_path;
+        }
+
+        pytuple = Py_BuildValue("sssiiii",
+            addr,                       // "start-end" address
+            perms,                      // "rwx" permissions
+            path,                       // path
+            kve->kve_resident,          // rss
+            kve->kve_private_resident,  // private
+            kve->kve_ref_count,         // ref count
+            kve->kve_shadow_count);     // shadow count
+        if (!pytuple)
+            goto error;
+        if (PyList_Append(retlist, pytuple))
+            goto error;
+        Py_DECREF(pytuple);
+    }
+    free(freep);
+    return retlist;
+
+error:
+    Py_XDECREF(pytuple);
+    Py_DECREF(retlist);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+#endif
+
+
+/*
+ * Return a list of tuples including device, mount point and fs type
+ * for all partitions mounted on the system.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    int num;
+    int i;
+    long len;
+    uint64_t flags;
+    char opts[200];
+    struct statfs *fs = NULL;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // get the number of mount points
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(NULL, 0, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    len = sizeof(*fs) * num;
+    fs = malloc(len);
+    if (fs == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(fs, len, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < num; i++) {
+        py_tuple = NULL;
+        opts[0] = 0;
+        flags = fs[i].f_flags;
+
+        // see sys/mount.h
+        if (flags & MNT_RDONLY)
+            strlcat(opts, "ro", sizeof(opts));
+        else
+            strlcat(opts, "rw", sizeof(opts));
+        if (flags & MNT_SYNCHRONOUS)
+            strlcat(opts, ",sync", sizeof(opts));
+        if (flags & MNT_NOEXEC)
+            strlcat(opts, ",noexec", sizeof(opts));
+        if (flags & MNT_NOSUID)
+            strlcat(opts, ",nosuid", sizeof(opts));
+        if (flags & MNT_UNION)
+            strlcat(opts, ",union", sizeof(opts));
+        if (flags & MNT_ASYNC)
+            strlcat(opts, ",async", sizeof(opts));
+        if (flags & MNT_SUIDDIR)
+            strlcat(opts, ",suiddir", sizeof(opts));
+        if (flags & MNT_SOFTDEP)
+            strlcat(opts, ",softdep", sizeof(opts));
+        if (flags & MNT_NOSYMFOLLOW)
+            strlcat(opts, ",nosymfollow", sizeof(opts));
+        if (flags & MNT_GJOURNAL)
+            strlcat(opts, ",gjournal", sizeof(opts));
+        if (flags & MNT_MULTILABEL)
+            strlcat(opts, ",multilabel", sizeof(opts));
+        if (flags & MNT_ACLS)
+            strlcat(opts, ",acls", sizeof(opts));
+        if (flags & MNT_NOATIME)
+            strlcat(opts, ",noatime", sizeof(opts));
+        if (flags & MNT_NOCLUSTERR)
+            strlcat(opts, ",noclusterr", sizeof(opts));
+        if (flags & MNT_NOCLUSTERW)
+            strlcat(opts, ",noclusterw", sizeof(opts));
+        if (flags & MNT_NFS4ACLS)
+            strlcat(opts, ",nfs4acls", sizeof(opts));
+
+        py_tuple = Py_BuildValue("(ssss)",
+                                 fs[i].f_mntfromname,  // device
+                                 fs[i].f_mntonname,    // mount point
+                                 fs[i].f_fstypename,   // fs type
+                                 opts);                // options
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+
+    free(fs);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (fs != NULL)
+        free(fs);
+    return NULL;
+}
+
+
+/*
+ * Return a Python list of named tuples with overall network I/O information
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    char *buf = NULL, *lim, *next;
+    struct if_msghdr *ifm;
+    int mib[6];
+    size_t len;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+    if (py_retdict == NULL)
+        return NULL;
+
+    mib[0] = CTL_NET;          // networking subsystem
+    mib[1] = PF_ROUTE;         // type of information
+    mib[2] = 0;                // protocol (IPPROTO_xxx)
+    mib[3] = 0;                // address family
+    mib[4] = NET_RT_IFLIST;   // operation
+    mib[5] = 0;
+
+    if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    buf = malloc(len);
+    if (buf == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    lim = buf + len;
+
+    for (next = buf; next < lim; ) {
+        py_ifc_info = NULL;
+        ifm = (struct if_msghdr *)next;
+        next += ifm->ifm_msglen;
+
+        if (ifm->ifm_type == RTM_IFINFO) {
+            struct if_msghdr *if2m = (struct if_msghdr *)ifm;
+            struct sockaddr_dl *sdl = (struct sockaddr_dl *)(if2m + 1);
+            char ifc_name[32];
+
+            strncpy(ifc_name, sdl->sdl_data, sdl->sdl_nlen);
+            ifc_name[sdl->sdl_nlen] = 0;
+            // XXX: ignore usbus interfaces:
+            // http://lists.freebsd.org/pipermail/freebsd-current/
+            //     2011-October/028752.html
+            // 'ifconfig -a' doesn't show them, nor do we.
+            if (strncmp(ifc_name, "usbus", 5) == 0) {
+                continue;
+            }
+
+            py_ifc_info = Py_BuildValue("(kkkkkkki)",
+                                        if2m->ifm_data.ifi_obytes,
+                                        if2m->ifm_data.ifi_ibytes,
+                                        if2m->ifm_data.ifi_opackets,
+                                        if2m->ifm_data.ifi_ipackets,
+                                        if2m->ifm_data.ifi_ierrors,
+                                        if2m->ifm_data.ifi_oerrors,
+                                        if2m->ifm_data.ifi_iqdrops,
+                                        0);  // dropout not supported
+            if (!py_ifc_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, ifc_name, py_ifc_info))
+                goto error;
+            Py_DECREF(py_ifc_info);
+        }
+        else {
+            continue;
+        }
+    }
+
+    free(buf);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (buf != NULL)
+        free(buf);
+    return NULL;
+}
+
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    int i;
+    struct statinfo stats = { .dinfo = NULL };  // keep the error path's free() safe
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+    if (py_retdict == NULL)
+        return NULL;
+
+    if (devstat_checkversion(NULL) < 0) {
+        PyErr_Format(PyExc_RuntimeError, "devstat_checkversion() failed");
+        goto error;
+    }
+
+    stats.dinfo = (struct devinfo *)malloc(sizeof(struct devinfo));
+    if (stats.dinfo == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    bzero(stats.dinfo, sizeof(struct devinfo));
+
+    if (devstat_getdevs(NULL, &stats) == -1) {
+        PyErr_Format(PyExc_RuntimeError, "devstat_getdevs() failed");
+        goto error;
+    }
+
+    for (i = 0; i < stats.dinfo->numdevs; i++) {
+        py_disk_info = NULL;
+        struct devstat current;
+        char disk_name[128];
+        current = stats.dinfo->devices[i];
+        snprintf(disk_name, sizeof(disk_name), "%s%d",
+                 current.device_name,
+                 current.unit_number);
+
+        py_disk_info = Py_BuildValue(
+            "(KKKKLL)",
+            current.operations[DEVSTAT_READ],   // no reads
+            current.operations[DEVSTAT_WRITE],  // no writes
+            current.bytes[DEVSTAT_READ],        // bytes read
+            current.bytes[DEVSTAT_WRITE],       // bytes written
+            (long long)devstat_compute_etime(
+                &current.duration[DEVSTAT_READ], NULL),  // r time
+            (long long)devstat_compute_etime(
+                &current.duration[DEVSTAT_WRITE], NULL));  // w time
+        if (!py_disk_info)
+            goto error;
+        if (PyDict_SetItemString(py_retdict, disk_name, py_disk_info))
+            goto error;
+        Py_DECREF(py_disk_info);
+    }
+
+    if (stats.dinfo->mem_ptr) {
+        free(stats.dinfo->mem_ptr);
+    }
+    free(stats.dinfo);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (stats.dinfo != NULL)
+        free(stats.dinfo);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+
+#if __FreeBSD_version < 900000
+    struct utmp ut;
+    FILE *fp;
+
+    fp = fopen(_PATH_UTMP, "r");
+    if (fp == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    while (fread(&ut, sizeof(ut), 1, fp) == 1) {
+        if (*ut.ut_name == '\0')
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            ut.ut_name,         // username
+            ut.ut_line,         // tty
+            ut.ut_host,         // hostname
+            (float)ut.ut_time);  // start time
+        if (!tuple) {
+            fclose(fp);
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            fclose(fp);
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    fclose(fp);
+#else
+    struct utmpx *utx;
+
+    while ((utx = getutxent()) != NULL) {
+        if (utx->ut_type != USER_PROCESS)
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            utx->ut_user,  // username
+            utx->ut_line,  // tty
+            utx->ut_host,  // hostname
+            (float)utx->ut_tv.tv_sec  // start time
+        );
+
+        if (!tuple) {
+            endutxent();
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            endutxent();
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    endutxent();
+#endif
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    return NULL;
+}
+
+
+
+/*
+ * System-wide open connections.
+ */
+
+#define HASHSIZE 1009
+static struct xfile *psutil_xfiles;
+static int psutil_nxfiles;
+
+int
+psutil_populate_xfiles()
+{
+    size_t len;
+
+    if ((psutil_xfiles = malloc(len = sizeof *psutil_xfiles)) == NULL) {
+        PyErr_NoMemory();
+        return 0;
+    }
+    while (sysctlbyname("kern.file", psutil_xfiles, &len, 0, 0) == -1) {
+        if (errno != ENOMEM) {
+            PyErr_SetFromErrno(0);
+            return 0;
+        }
+        len *= 2;
+        if ((psutil_xfiles = realloc(psutil_xfiles, len)) == NULL) {
+            PyErr_NoMemory();
+            return 0;
+        }
+    }
+    if (len > 0 && psutil_xfiles->xf_size != sizeof *psutil_xfiles) {
+        PyErr_Format(PyExc_RuntimeError, "struct xfile size mismatch");
+        return 0;
+    }
+    psutil_nxfiles = len / sizeof *psutil_xfiles;
+    return 1;
+}
+
+int
+psutil_get_pid_from_sock(int sock_hash)
+{
+    struct xfile *xf;
+    int hash, n;
+    for (xf = psutil_xfiles, n = 0; n < psutil_nxfiles; ++n, ++xf) {
+        if (xf->xf_data == NULL)
+            continue;
+        hash = (int)((uintptr_t)xf->xf_data % HASHSIZE);
+        if (sock_hash == hash) {
+            return xf->xf_pid;
+        }
+    }
+    return -1;
+}
+
+
+int psutil_gather_inet(int proto, PyObject *py_retlist)
+{
+    struct xinpgen *xig, *exig;
+    struct xinpcb *xip;
+    struct xtcpcb *xtp;
+    struct inpcb *inp;
+    struct xsocket *so;
+    struct sock *sock;
+    const char *varname;
+    size_t len, bufsize;
+    void *buf;
+    int hash, retry, vflag, type;
+
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+
+    switch (proto) {
+    case IPPROTO_TCP:
+        varname = "net.inet.tcp.pcblist";
+        type = SOCK_STREAM;
+        break;
+    case IPPROTO_UDP:
+        varname = "net.inet.udp.pcblist";
+        type = SOCK_DGRAM;
+        break;
+    }
+
+    buf = NULL;
+    bufsize = 8192;
+    retry = 5;
+    do {
+        for (;;) {
+            buf = realloc(buf, bufsize);
+            if (buf == NULL) {
+                // XXX
+                continue;
+            }
+            len = bufsize;
+            if (sysctlbyname(varname, buf, &len, NULL, 0) == 0)
+                break;
+            if (errno != ENOMEM) {
+                PyErr_SetFromErrno(0);
+                goto error;
+            }
+            bufsize *= 2;
+        }
+        xig = (struct xinpgen *)buf;
+        exig = (struct xinpgen *)(void *)((char *)buf + len - sizeof *exig);
+        if (xig->xig_len != sizeof *xig || exig->xig_len != sizeof *exig) {
+            PyErr_Format(PyExc_RuntimeError, "struct xinpgen size mismatch");
+            goto error;
+        }
+    } while (xig->xig_gen != exig->xig_gen && retry--);
+
+
+    for (;;) {
+        xig = (struct xinpgen *)(void *)((char *)xig + xig->xig_len);
+        if (xig >= exig)
+            break;
+
+        switch (proto) {
+        case IPPROTO_TCP:
+            xtp = (struct xtcpcb *)xig;
+            if (xtp->xt_len != sizeof *xtp) {
+                PyErr_Format(PyExc_RuntimeError, "struct xtcpcb size mismatch");
+                goto error;
+            }
+            break;
+        case IPPROTO_UDP:
+            xip = (struct xinpcb *)xig;
+            if (xip->xi_len != sizeof *xip) {
+                PyErr_Format(PyExc_RuntimeError, "struct xinpcb size mismatch");
+                goto error;
+            }
+            inp = &xip->xi_inp;
+            so = &xip->xi_socket;
+            break;
+        }
+
+        if (proto == IPPROTO_TCP) {
+            inp = &xtp->xt_inp;
+            so = &xtp->xt_socket;
+        }
+        // for IPPROTO_UDP, inp and so were already set in the switch above
+        char lip[200], rip[200];
+        int family, lport, rport, pid, status;
+
+        hash = (int)((uintptr_t)so->xso_so % HASHSIZE);
+        pid = psutil_get_pid_from_sock(hash);
+        if (pid < 0)
+            continue;
+        lport = ntohs(inp->inp_lport);
+        rport = ntohs(inp->inp_fport);
+        // UDP sockets carry no TCP state
+        status = (proto == IPPROTO_TCP) ? xtp->xt_tp.t_state : PSUTIL_CONN_NONE;
+
+        if (inp->inp_vflag & INP_IPV4) {
+            family = AF_INET;
+            inet_ntop(AF_INET, &inp->inp_laddr.s_addr, lip, sizeof(lip));
+            inet_ntop(AF_INET, &inp->inp_faddr.s_addr, rip, sizeof(rip));
+        }
+        else if (inp->inp_vflag & INP_IPV6) {
+            family = AF_INET6;
+            inet_ntop(AF_INET6, &inp->in6p_laddr.s6_addr, lip, sizeof(lip));
+            inet_ntop(AF_INET6, &inp->in6p_faddr.s6_addr, rip, sizeof(rip));
+        }
+
+        // construct python tuple/list
+        laddr = Py_BuildValue("(si)", lip, lport);
+        if (!laddr)
+            goto error;
+        if (rport != 0) {
+            raddr = Py_BuildValue("(si)", rip, rport);
+        }
+        else {
+            raddr = Py_BuildValue("()");
+        }
+        if (!raddr)
+            goto error;
+        tuple = Py_BuildValue("(iiiNNii)", -1, family, type, laddr, raddr,
+                                               status, pid);
+        if (!tuple)
+            goto error;
+        if (PyList_Append(py_retlist, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+
+    free(buf);
+    return 1;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    free(buf);
+    return 0;
+}
+
+
+int psutil_gather_unix(int proto, PyObject *py_retlist)
+{
+    struct xunpgen *xug, *exug;
+    struct xunpcb *xup;
+    struct sock *sock;
+    const char *varname, *protoname;
+    size_t len, bufsize;
+    void *buf;
+    int hash, retry;
+    int family, lport, rport, pid;
+    struct sockaddr_un *sun;
+    char path[PATH_MAX];
+
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+
+    switch (proto) {
+    case SOCK_STREAM:
+        varname = "net.local.stream.pcblist";
+        protoname = "stream";
+        break;
+    case SOCK_DGRAM:
+        varname = "net.local.dgram.pcblist";
+        protoname = "dgram";
+        break;
+    }
+
+    buf = NULL;
+    bufsize = 8192;
+    retry = 5;
+
+    do {
+        for (;;) {
+            buf = realloc(buf, bufsize);
+            if (buf == NULL) {
+                PyErr_NoMemory();
+                goto error;
+            }
+            len = bufsize;
+            if (sysctlbyname(varname, buf, &len, NULL, 0) == 0)
+                break;
+            if (errno != ENOMEM) {
+                PyErr_SetFromErrno(0);
+                goto error;
+            }
+            bufsize *= 2;
+        }
+        xug = (struct xunpgen *)buf;
+        exug = (struct xunpgen *)(void *)
+            ((char *)buf + len - sizeof *exug);
+        if (xug->xug_len != sizeof *xug || exug->xug_len != sizeof *exug) {
+            PyErr_Format(PyExc_RuntimeError, "struct xinpgen size mismatch");
+            goto error;
+        }
+    } while (xug->xug_gen != exug->xug_gen && retry--);
+
+    for (;;) {
+        xug = (struct xunpgen *)(void *)((char *)xug + xug->xug_len);
+        if (xug >= exug)
+            break;
+        xup = (struct xunpcb *)xug;
+        if (xup->xu_len != sizeof *xup) {
+            warnx("struct xunpgen size mismatch");
+            goto error;
+        }
+
+        hash = (int)((uintptr_t) xup->xu_socket.xso_so % HASHSIZE);
+        pid = psutil_get_pid_from_sock(hash);
+        if (pid < 0)
+            continue;
+
+        sun = (struct sockaddr_un *)&xup->xu_addr;
+        snprintf(path, sizeof(path), "%.*s",
+                 (sun->sun_len - (sizeof(*sun) - sizeof(sun->sun_path))),
+                 sun->sun_path);
+
+        tuple = Py_BuildValue("(iiisOii)", -1, AF_UNIX, proto, path, Py_None,
+                                               PSUTIL_CONN_NONE, pid);
+        if (!tuple)
+            goto error;
+        if (PyList_Append(py_retlist, tuple))
+            goto error;
+        Py_DECREF(tuple);
+        Py_INCREF(Py_None);
+    }
+
+    free(buf);
+    return 1;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    free(buf);
+    return 0;
+}
+
+
+/*
+ * Return system-wide open connections.
+ */
+static PyObject*
+psutil_net_connections(PyObject* self, PyObject* args)
+{
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+    PyObject *py_retlist = PyList_New(0);
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    if (psutil_populate_xfiles() != 1)
+        goto error;
+
+    if (psutil_gather_inet(IPPROTO_TCP, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_inet(IPPROTO_UDP, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_unix(SOCK_STREAM, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_unix(SOCK_DGRAM, py_retlist) == 0)
+        goto error;
+
+    free(psutil_xfiles);
+    return py_retlist;
+
+error:
+    Py_DECREF(py_retlist);
+    free(psutil_xfiles);
+    return NULL;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_name", psutil_proc_name, METH_VARARGS,
+     "Return process name"},
+    {"proc_connections", psutil_proc_connections, METH_VARARGS,
+     "Return connections opened by process"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return process pathname executable"},
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_ppid", psutil_proc_ppid, METH_VARARGS,
+     "Return process ppid as an integer"},
+    {"proc_uids", psutil_proc_uids, METH_VARARGS,
+     "Return process real effective and saved user ids as a Python tuple"},
+    {"proc_gids", psutil_proc_gids, METH_VARARGS,
+     "Return process real effective and saved group ids as a Python tuple"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return extended memory info for a process as a Python tuple."},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return number of threads used by process"},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads"},
+    {"proc_status", psutil_proc_status, METH_VARARGS,
+     "Return process status as an integer"},
+    {"proc_io_counters", psutil_proc_io_counters, METH_VARARGS,
+     "Return process IO counters"},
+    {"proc_tty_nr", psutil_proc_tty_nr, METH_VARARGS,
+     "Return process tty (terminal) number"},
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process as a list of (path, fd) tuples"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory."},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return a list of tuples for every process's memory map"},
+    {"proc_num_fds", psutil_proc_num_fds, METH_VARARGS,
+     "Return the number of file descriptors opened by this process"},
+#endif
+
+    // --- system-related functions
+
+    {"pids", psutil_pids, METH_VARARGS,
+     "Returns a list of PIDs currently running on the system"},
+    {"cpu_count_logical", psutil_cpu_count_logical, METH_VARARGS,
+     "Return number of logical CPUs on the system"},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return an XML string to determine the number physical CPUs."},
+    {"virtual_mem", psutil_virtual_mem, METH_VARARGS,
+     "Return system virtual memory usage statistics"},
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return swap mem stats"},
+    {"cpu_times", psutil_cpu_times, METH_VARARGS,
+     "Return system cpu times as a tuple (user, system, nice, idle, irc)"},
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-cpu times as a list of tuples"},
+#endif
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return the system boot time expressed in seconds since the epoch."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return a list of tuples including device, mount point and "
+     "fs type for all partitions mounted on the system."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return dict of tuples of networks I/O information."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for disk I/O information"},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+    {"net_connections", psutil_net_connections, METH_VARARGS,
+     "Return system-wide open connections."},
+
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_bsd_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_bsd_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef
+        moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_bsd",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_bsd_traverse,
+    psutil_bsd_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_bsd(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_bsd(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_bsd", PsutilMethods);
+#endif
+    // process status constants
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SWAIT", SWAIT);
+    PyModule_AddIntConstant(module, "SLOCK", SLOCK);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    // connection status constants
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RECEIVED", TCPS_SYN_RECEIVED);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
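
A minimal usage sketch (editorial, not part of the commit): how the extension built from this file might be driven from Python. The module name comes from the Py_InitModule()/PyInit__psutil_bsd() calls above, the filter arguments follow the PyArg_ParseTuple("lOO")/PySequence_Contains() logic in psutil_proc_connections(), and the state constants are the ones registered with PyModule_AddIntConstant(); the helper names themselves are illustrative.

    import socket
    import _psutil_bsd as cext  # assumes the extension was built on FreeBSD

    # Map the integer state emitted by proc_connections() to a readable name
    # using the TCPS_* / PSUTIL_CONN_NONE constants exposed by the module.
    TCP_STATES = {
        cext.TCPS_ESTABLISHED: "ESTABLISHED",
        cext.TCPS_SYN_SENT: "SYN_SENT",
        cext.TCPS_SYN_RECEIVED: "SYN_RECV",
        cext.TCPS_FIN_WAIT_1: "FIN_WAIT1",
        cext.TCPS_FIN_WAIT_2: "FIN_WAIT2",
        cext.TCPS_TIME_WAIT: "TIME_WAIT",
        cext.TCPS_CLOSED: "CLOSE",
        cext.TCPS_CLOSE_WAIT: "CLOSE_WAIT",
        cext.TCPS_LAST_ACK: "LAST_ACK",
        cext.TCPS_LISTEN: "LISTEN",
        cext.TCPS_CLOSING: "CLOSING",
        cext.PSUTIL_CONN_NONE: "NONE",
    }

    def connections(pid):
        # both filters must be sequences (see the PySequence_Check() calls)
        families = [socket.AF_INET, socket.AF_INET6, socket.AF_UNIX]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for fd, fam, kind, laddr, raddr, state in cext.proc_connections(
                pid, families, types):
            yield fd, fam, kind, laddr, raddr, TCP_STATES.get(state, "NONE")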

+ 51 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h

@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// --- per-process functions
+
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_gids(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_fds(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ppid(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_status(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_tty_nr(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_uids(PyObject* self, PyObject* args);
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+#endif
+
+// --- system-related functions
+
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+#endif
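
The #if __FreeBSD_version >= 800000 guards above mean that on older FreeBSD builds functions such as proc_open_files(), proc_cwd(), proc_num_fds(), proc_memory_maps() and per_cpu_times() are simply missing from the compiled module. A hedged sketch of the kind of guard the "upper python layer" comment in _psutil_bsd.c alludes to (the real psutil wrapper is not part of this section, so the fallback shown is illustrative):

    import _psutil_bsd as cext

    def proc_open_files(pid):
        # only present when the extension was compiled on FreeBSD >= 8
        if not hasattr(cext, "proc_open_files"):
            raise NotImplementedError("requires FreeBSD >= 8")
        return [(path, fd) for path, fd in cext.proc_open_files(pid)]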

+ 37 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Routines common to all platforms.
+ */
+
+#include <Python.h>
+
+
+/*
+ * Set OSError(errno=ESRCH, strerror="No such process") Python exception.
+ */
+PyObject *
+NoSuchProcess(void) {
+    PyObject *exc;
+    char *msg = strerror(ESRCH);
+    exc = PyObject_CallFunction(PyExc_OSError, "(is)", ESRCH, msg);
+    PyErr_SetObject(PyExc_OSError, exc);
+    Py_XDECREF(exc);
+    return NULL;
+}
+
+
+/*
+ * Set OSError(errno=EACCES, strerror="Permission denied") Python exception.
+ */
+PyObject *
+AccessDenied(void) {
+    PyObject *exc;
+    char *msg = strerror(EACCES);
+    exc = PyObject_CallFunction(PyExc_OSError, "(is)", EACCES, msg);
+    PyErr_SetObject(PyExc_OSError, exc);
+    Py_XDECREF(exc);
+    return NULL;
+}
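
Both helpers set an OSError carrying errno ESRCH or EACCES and return NULL, so the calling C code simply propagates the exception; on the Python side the failure surfaces as a plain OSError. A hedged sketch of how a caller might translate it (the exception classes here are illustrative, not part of the commit):

    import errno

    class NoSuchProcess(Exception):
        pass

    class AccessDenied(Exception):
        pass

    def call_cext(fun, *args):
        # Translate OSError(errno=ESRCH) / OSError(errno=EACCES) raised via
        # the helpers in _psutil_common.c into more specific exceptions.
        try:
            return fun(*args)
        except OSError as err:
            if err.errno == errno.ESRCH:
                raise NoSuchProcess()
            if err.errno == errno.EACCES:
                raise AccessDenied()
            raise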

+ 10 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h

@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+PyObject* AccessDenied(void);
+PyObject* NoSuchProcess(void);

+ 510 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c

@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Linux-specific functions.
+ */
+
+#ifndef _GNU_SOURCE
+    #define _GNU_SOURCE 1
+#endif
+#include <Python.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <mntent.h>
+#include <features.h>
+#include <utmp.h>
+#include <sched.h>
+#include <linux/version.h>
+#include <sys/syscall.h>
+#include <sys/sysinfo.h>
+
+#include "_psutil_linux.h"
+
+
+// Linux >= 2.6.13
+#define PSUTIL_HAVE_IOPRIO defined(__NR_ioprio_get) && defined(__NR_ioprio_set)
+
+// Linux >= 2.6.36 (supposedly) and glibc >= 2.13
+#define PSUTIL_HAVE_PRLIMIT \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) && \
+    (__GLIBC__ >= 2 && __GLIBC_MINOR__ >= 13) && \
+    defined(__NR_prlimit64)
+
+#if PSUTIL_HAVE_PRLIMIT
+    #define _FILE_OFFSET_BITS 64
+    #include <time.h>
+    #include <sys/resource.h>
+#endif
+
+
+#if PSUTIL_HAVE_IOPRIO
+enum {
+    IOPRIO_WHO_PROCESS = 1,
+};
+
+static inline int
+ioprio_get(int which, int who)
+{
+    return syscall(__NR_ioprio_get, which, who);
+}
+
+static inline int
+ioprio_set(int which, int who, int ioprio)
+{
+    return syscall(__NR_ioprio_set, which, who, ioprio);
+}
+
+#define IOPRIO_CLASS_SHIFT 13
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
+
+
+/*
+ * Return a (ioclass, iodata) Python tuple representing process I/O priority.
+ */
+static PyObject *
+psutil_proc_ioprio_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ioprio, ioclass, iodata;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    ioprio = ioprio_get(IOPRIO_WHO_PROCESS, pid);
+    if (ioprio == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    ioclass = IOPRIO_PRIO_CLASS(ioprio);
+    iodata = IOPRIO_PRIO_DATA(ioprio);
+    return Py_BuildValue("ii", ioclass, iodata);
+}
+
+
+/*
+ * A wrapper around ioprio_set(); sets process I/O priority.
+ * ioclass can be either IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE
+ * or 0. iodata goes from 0 to 7 depending on ioclass specified.
+ */
+static PyObject *
+psutil_proc_ioprio_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ioprio, ioclass, iodata;
+    int retval;
+
+    if (! PyArg_ParseTuple(args, "lii", &pid, &ioclass, &iodata)) {
+        return NULL;
+    }
+    ioprio = IOPRIO_PRIO_VALUE(ioclass, iodata);
+    retval = ioprio_set(IOPRIO_WHO_PROCESS, pid, ioprio);
+    if (retval == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+#endif
+
+
+#if PSUTIL_HAVE_PRLIMIT
+/*
+ * A wrapper around prlimit(2); sets process resource limits.
+ * This can be used for both get and set, in which case extra
+ * 'soft' and 'hard' args must be provided.
+ */
+static PyObject *
+psutil_linux_prlimit(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ret, resource;
+    struct rlimit old, new;
+    struct rlimit *newp = NULL;
+    PyObject *soft = NULL;
+    PyObject *hard = NULL;
+
+    if (! PyArg_ParseTuple(args, "li|OO", &pid, &resource, &soft, &hard)) {
+        return NULL;
+    }
+
+    // get
+    if (soft == NULL && hard == NULL) {
+        ret = prlimit(pid, resource, NULL, &old);
+        if (ret == -1)
+            return PyErr_SetFromErrno(PyExc_OSError);
+#if defined(PSUTIL_HAVE_LONG_LONG)
+        if (sizeof(old.rlim_cur) > sizeof(long)) {
+            return Py_BuildValue("LL",
+                                 (PY_LONG_LONG)old.rlim_cur,
+                                 (PY_LONG_LONG)old.rlim_max);
+        }
+#endif
+        return Py_BuildValue("ll", (long)old.rlim_cur, (long)old.rlim_max);
+    }
+
+    // set
+    else {
+#if defined(PSUTIL_HAVE_LARGEFILE_SUPPORT)
+        new.rlim_cur = PyLong_AsLongLong(soft);
+        if (new.rlim_cur == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+        new.rlim_max = PyLong_AsLongLong(hard);
+        if (new.rlim_max == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+#else
+        new.rlim_cur = PyLong_AsLong(soft);
+        if (new.rlim_cur == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+        new.rlim_max = PyLong_AsLong(hard);
+        if (new.rlim_max == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+#endif
+        newp = &new;
+        ret = prlimit(pid, resource, newp, &old);
+        if (ret == -1)
+            return PyErr_SetFromErrno(PyExc_OSError);
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+}
+#endif
+
+
+/*
+ * Return disk mounted partitions as a list of tuples including device,
+ * mount point and filesystem type
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    FILE *file = NULL;
+    struct mntent *entry;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // MOUNTED constant comes from mntent.h and it's == '/etc/mtab'
+    Py_BEGIN_ALLOW_THREADS
+    file = setmntent(MOUNTED, "r");
+    Py_END_ALLOW_THREADS
+    if (file == NULL) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, MOUNTED);
+        goto error;
+    }
+
+    while ((entry = getmntent(file))) {
+        if (entry == NULL) {
+            PyErr_Format(PyExc_RuntimeError, "getmntent() failed");
+            goto error;
+        }
+        py_tuple = Py_BuildValue("(ssss)",
+                                 entry->mnt_fsname,  // device
+                                 entry->mnt_dir,     // mount point
+                                 entry->mnt_type,    // fs type
+                                 entry->mnt_opts);   // options
+        if (! py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+    endmntent(file);
+    return py_retlist;
+
+error:
+    if (file != NULL)
+        endmntent(file);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
+
+/*
+ * A wrapper around sysinfo(), return system memory usage statistics.
+ */
+static PyObject *
+psutil_linux_sysinfo(PyObject *self, PyObject *args)
+{
+    struct sysinfo info;
+    if (sysinfo(&info) != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    // note: boot time might also be determined from here
+    return Py_BuildValue(
+        "(KKKKKK)",
+        (unsigned long long)info.totalram  * info.mem_unit,   // total
+        (unsigned long long)info.freeram   * info.mem_unit,   // free
+        (unsigned long long)info.bufferram * info.mem_unit,   // buffer
+        (unsigned long long)info.sharedram * info.mem_unit,   // shared
+        (unsigned long long)info.totalswap * info.mem_unit,   // swap tot
+        (unsigned long long)info.freeswap  * info.mem_unit);  // swap free
+}
+
+
+/*
+ * Return process CPU affinity as a Python long (the bitmask)
+ */
+static PyObject *
+psutil_proc_cpu_affinity_get(PyObject *self, PyObject *args)
+{
+    unsigned long mask;
+    unsigned int len = sizeof(mask);
+    long pid;
+
+    if (!PyArg_ParseTuple(args, "i", &pid)) {
+        return NULL;
+    }
+    if (sched_getaffinity(pid, len, (cpu_set_t *)&mask) < 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    return Py_BuildValue("l", mask);
+}
+
+
+/*
+ * Set process CPU affinity; expects a sequence of CPU numbers
+ */
+static PyObject *
+psutil_proc_cpu_affinity_set(PyObject *self, PyObject *args)
+{
+    cpu_set_t cpu_set;
+    size_t len;
+    long pid;
+    int i, seq_len;
+    PyObject *py_cpu_set;
+    PyObject *py_cpu_seq = NULL;
+
+    if (!PyArg_ParseTuple(args, "lO", &pid, &py_cpu_set)) {
+        goto error;
+    }
+
+    if (!PySequence_Check(py_cpu_set)) {
+        // does not work on Python 2.4
+        // PyErr_Format(PyExc_TypeError, "sequence argument expected, got %s",
+        //              Py_TYPE(py_cpu_set)->tp_name);
+        PyErr_Format(PyExc_TypeError, "sequence argument expected");
+        goto error;
+    }
+
+    py_cpu_seq = PySequence_Fast(py_cpu_set, "expected a sequence or integer");
+    if (!py_cpu_seq) {
+        goto error;
+    }
+    seq_len = PySequence_Fast_GET_SIZE(py_cpu_seq);
+    CPU_ZERO(&cpu_set);
+    for (i = 0; i < seq_len; i++) {
+        PyObject *item = PySequence_Fast_GET_ITEM(py_cpu_seq, i);
+#if PY_MAJOR_VERSION >= 3
+        long value = PyLong_AsLong(item);
+#else
+        long value = PyInt_AsLong(item);
+#endif
+        if (value == -1 && PyErr_Occurred()) {
+            goto error;
+        }
+        CPU_SET(value, &cpu_set);
+    }
+
+    len = sizeof(cpu_set);
+    if (sched_setaffinity(pid, len, &cpu_set)) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    Py_DECREF(py_cpu_seq);
+    Py_INCREF(Py_None);
+    return Py_None;
+
+error:
+    if (py_cpu_seq != NULL)
+        Py_DECREF(py_cpu_seq);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *user_proc = NULL;
+    struct utmp *ut;
+
+    if (ret_list == NULL)
+        return NULL;
+    setutent();
+    while (NULL != (ut = getutent())) {
+        tuple = NULL;
+        user_proc = NULL;
+        if (ut->ut_type == USER_PROCESS)
+            user_proc = Py_True;
+        else
+            user_proc = Py_False;
+        tuple = Py_BuildValue(
+            "(sssfO)",
+            ut->ut_user,              // username
+            ut->ut_line,              // tty
+            ut->ut_host,              // hostname
+            (float)ut->ut_tv.tv_sec,  // tstamp
+            user_proc                 // (bool) user process
+        );
+        if (! tuple)
+            goto error;
+        if (PyList_Append(ret_list, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+    endutent();
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(user_proc);
+    Py_DECREF(ret_list);
+    endutent();
+    return NULL;
+}
+
+
+/*
+ * Define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+#if PSUTIL_HAVE_IOPRIO
+    {"proc_ioprio_get", psutil_proc_ioprio_get, METH_VARARGS,
+     "Get process I/O priority"},
+    {"proc_ioprio_set", psutil_proc_ioprio_set, METH_VARARGS,
+     "Set process I/O priority"},
+#endif
+    {"proc_cpu_affinity_get", psutil_proc_cpu_affinity_get, METH_VARARGS,
+     "Return process CPU affinity as a Python long (the bitmask)."},
+    {"proc_cpu_affinity_set", psutil_proc_cpu_affinity_set, METH_VARARGS,
+     "Set process CPU affinity; expects a bitmask."},
+
+    // --- system related functions
+
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return disk mounted partitions as a list of tuples including "
+     "device, mount point and filesystem type"},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+
+    // --- linux specific
+
+    {"linux_sysinfo", psutil_linux_sysinfo, METH_VARARGS,
+     "A wrapper around sysinfo(), return system memory usage statistics"},
+#if PSUTIL_HAVE_PRLIMIT
+    {"linux_prlimit", psutil_linux_prlimit, METH_VARARGS,
+     "Get or set process resource limits."},
+#endif
+
+
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_linux_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_linux_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef
+        moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_linux",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_linux_traverse,
+    psutil_linux_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_linux(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_linux(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_linux", PsutilMethods);
+#endif
+
+    if (module == NULL) {
+        INITERROR;
+    }
+
+#if PSUTIL_HAVE_PRLIMIT
+    PyModule_AddIntConstant(module, "RLIM_INFINITY", RLIM_INFINITY);
+    PyModule_AddIntConstant(module, "RLIMIT_AS", RLIMIT_AS);
+    PyModule_AddIntConstant(module, "RLIMIT_CORE", RLIMIT_CORE);
+    PyModule_AddIntConstant(module, "RLIMIT_CPU", RLIMIT_CPU);
+    PyModule_AddIntConstant(module, "RLIMIT_DATA", RLIMIT_DATA);
+    PyModule_AddIntConstant(module, "RLIMIT_FSIZE", RLIMIT_FSIZE);
+    PyModule_AddIntConstant(module, "RLIMIT_LOCKS", RLIMIT_LOCKS);
+    PyModule_AddIntConstant(module, "RLIMIT_MEMLOCK", RLIMIT_MEMLOCK);
+    PyModule_AddIntConstant(module, "RLIMIT_NOFILE", RLIMIT_NOFILE);
+    PyModule_AddIntConstant(module, "RLIMIT_NPROC", RLIMIT_NPROC);
+    PyModule_AddIntConstant(module, "RLIMIT_RSS", RLIMIT_RSS);
+    PyModule_AddIntConstant(module, "RLIMIT_STACK", RLIMIT_STACK);
+#ifdef RLIMIT_MSGQUEUE
+    PyModule_AddIntConstant(module, "RLIMIT_MSGQUEUE", RLIMIT_MSGQUEUE);
+#endif
+#ifdef RLIMIT_NICE
+    PyModule_AddIntConstant(module, "RLIMIT_NICE", RLIMIT_NICE);
+#endif
+#ifdef RLIMIT_RTPRIO
+    PyModule_AddIntConstant(module, "RLIMIT_RTPRIO", RLIMIT_RTPRIO);
+#endif
+#ifdef RLIMIT_RTTIME
+    PyModule_AddIntConstant(module, "RLIMIT_RTTIME", RLIMIT_RTTIME);
+#endif
+#ifdef RLIMIT_SIGPENDING
+    PyModule_AddIntConstant(module, "RLIMIT_SIGPENDING", RLIMIT_SIGPENDING);
+#endif
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
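For orientation, a minimal Python sketch of how the methods registered above could be exercised once the extension is built. This is illustrative only: psutil's own Python layer normally wraps these calls, and the pid argument for the per-process functions is inferred from the method table and docstrings rather than stated in this diff.

    # Hypothetical smoke test for the compiled _psutil_linux extension (sketch only).
    import os
    import _psutil_linux as ext

    # users() builds (username, tty, hostname, tstamp, is_user_process) tuples,
    # mirroring the Py_BuildValue("(sssfO)", ...) call above.
    for user, tty, host, started, is_user_proc in ext.users():
        print(user, tty, host, started, is_user_proc)

    # proc_cpu_affinity_get() is documented to return the CPU affinity bitmask
    # as a long; passing the caller's own pid here is an assumption.
    print(bin(ext.proc_cpu_affinity_get(os.getpid())))

    # When built with prlimit() support, the RLIMIT_* constants are attached to
    # the module by the init function above.
    print(getattr(ext, "RLIMIT_NOFILE", None))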

+ 20 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h

@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// process
+
+static PyObject* psutil_proc_cpu_affinity_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ioprio_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ioprio_set(PyObject* self, PyObject* args);
+
+// system
+
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_linux_sysinfo(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);

+ 1881 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c

@@ -0,0 +1,1881 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * OS X platform-specific module methods for _psutil_osx
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <utmpx.h>
+#include <sys/sysctl.h>
+#include <sys/vmmeter.h>
+#include <libproc.h>
+#include <sys/proc_info.h>
+#include <netinet/tcp_fsm.h>
+#include <arpa/inet.h>
+#include <net/if_dl.h>
+#include <pwd.h>
+
+#include <mach/mach.h>
+#include <mach/task.h>
+#include <mach/mach_init.h>
+#include <mach/host_info.h>
+#include <mach/mach_host.h>
+#include <mach/mach_traps.h>
+#include <mach/mach_vm.h>
+#include <mach/shared_region.h>
+
+#include <mach-o/loader.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <IOKit/IOKitLib.h>
+#include <IOKit/storage/IOBlockStorageDriver.h>
+#include <IOKit/storage/IOMedia.h>
+#include <IOKit/IOBSD.h>
+
+#include "_psutil_osx.h"
+#include "_psutil_common.h"
+#include "arch/osx/process_info.h"
+
+
+/*
+ * A wrapper around host_statistics() invoked with HOST_VM_INFO.
+ */
+int
+psutil_sys_vminfo(vm_statistics_data_t *vmstat)
+{
+    kern_return_t ret;
+    mach_msg_type_number_t count = sizeof(*vmstat) / sizeof(integer_t);
+    mach_port_t mport = mach_host_self();
+
+    ret = host_statistics(mport, HOST_VM_INFO, (host_info_t)vmstat, &count);
+    if (ret != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "host_statistics() failed: %s", mach_error_string(ret));
+        return 0;
+    }
+    mach_port_deallocate(mach_task_self(), mport);
+    return 1;
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    kinfo_proc *proclist = NULL;
+    kinfo_proc *orig_address = NULL;
+    size_t num_processes;
+    size_t idx;
+    PyObject *pid = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL)
+        return NULL;
+
+    if (psutil_get_proc_list(&proclist, &num_processes) != 0) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to retrieve process list.");
+        goto error;
+    }
+
+    if (num_processes > 0) {
+        // save the address of proclist so we can free it later
+        orig_address = proclist;
+        for (idx = 0; idx < num_processes; idx++) {
+            pid = Py_BuildValue("i", proclist->kp_proc.p_pid);
+            if (!pid)
+                goto error;
+            if (PyList_Append(retlist, pid))
+                goto error;
+            Py_DECREF(pid);
+            proclist++;
+        }
+        free(orig_address);
+    }
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (orig_address != NULL)
+        free(orig_address);
+    return NULL;
+}
+
+
+/*
+ * Return process name from kinfo_proc as a Python string.
+ */
+static PyObject *
+psutil_proc_name(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("s", kp.kp_proc.p_comm);
+}
+
+
+/*
+ * Return process current working directory.
+ */
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_vnodepathinfo pathinfo;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, &pathinfo,
+                              sizeof(pathinfo)))
+    {
+        return NULL;
+    }
+    return Py_BuildValue("s", pathinfo.pvi_cdir.vip_path);
+}
+
+
+/*
+ * Return path of the process executable.
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args)
+{
+    long pid;
+    char buf[PATH_MAX];
+    int ret;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    ret = proc_pidpath(pid, &buf, sizeof(buf));
+    if (ret == 0) {
+        if (! psutil_pid_exists(pid)) {
+            return NoSuchProcess();
+        }
+        else {
+            return AccessDenied();
+        }
+    }
+    return Py_BuildValue("s", buf);
+}
+
+
+/*
+ * Return process cmdline as a Python list of cmdline arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *arglist = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // get the commandline, defined in arch/osx/process_info.c
+    arglist = psutil_get_arg_list(pid);
+    return arglist;
+}
+
+
+/*
+ * Return process parent pid from kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_ppid(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.kp_eproc.e_ppid);
+}
+
+
+/*
+ * Return process real, effective and saved user ids from kinfo_proc as a Python tuple.
+ */
+static PyObject *
+psutil_proc_uids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.kp_eproc.e_pcred.p_ruid,
+                         (long)kp.kp_eproc.e_ucred.cr_uid,
+                         (long)kp.kp_eproc.e_pcred.p_svuid);
+}
+
+
+/*
+ * Return process real, effective and saved group ids from kinfo_proc as a Python tuple.
+ */
+static PyObject *
+psutil_proc_gids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.kp_eproc.e_pcred.p_rgid,
+                         (long)kp.kp_eproc.e_ucred.cr_groups[0],
+                         (long)kp.kp_eproc.e_pcred.p_svgid);
+}
+
+
+/*
+ * Return process controlling terminal number as an integer.
+ */
+static PyObject *
+psutil_proc_tty_nr(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", kp.kp_eproc.e_tdev);
+}
+
+
+/*
+ * Return a list of tuples, one for each memory map of the process.
+ * The 'procstat' cmdline utility has been used as a reference.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    char buf[PATH_MAX];
+    char addr_str[34];
+    char perms[8];
+    int pagesize = getpagesize();
+    long pid;
+    kern_return_t err = KERN_SUCCESS;
+    mach_port_t task = MACH_PORT_NULL;
+    uint32_t depth = 1;
+    vm_address_t address = 0;
+    vm_size_t size = 0;
+
+    PyObject *py_tuple = NULL;
+    PyObject *py_list = PyList_New(0);
+
+    if (py_list == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
+    err = task_for_pid(mach_task_self(), pid, &task);
+
+    if (err != KERN_SUCCESS) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+        }
+        else {
+            // pid exists, so return AccessDenied error since task_for_pid()
+            // failed
+            AccessDenied();
+        }
+        goto error;
+    }
+
+    while (1) {
+        py_tuple = NULL;
+        struct vm_region_submap_info_64 info;
+        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+        err = vm_region_recurse_64(task, &address, &size, &depth,
+                                   (vm_region_info_64_t)&info, &count);
+
+        if (err == KERN_INVALID_ADDRESS) {
+            break;
+        }
+
+        if (info.is_submap) {
+            depth++;
+        }
+        else {
+            // Free/Reset the char[]s to avoid weird paths
+            memset(buf, 0, sizeof(buf));
+            memset(addr_str, 0, sizeof(addr_str));
+            memset(perms, 0, sizeof(perms));
+
+            sprintf(addr_str, "%016lx-%016lx", address, address + size);
+            sprintf(perms, "%c%c%c/%c%c%c",
+                    (info.protection & VM_PROT_READ) ? 'r' : '-',
+                    (info.protection & VM_PROT_WRITE) ? 'w' : '-',
+                    (info.protection & VM_PROT_EXECUTE) ? 'x' : '-',
+                    (info.max_protection & VM_PROT_READ) ? 'r' : '-',
+                    (info.max_protection & VM_PROT_WRITE) ? 'w' : '-',
+                    (info.max_protection & VM_PROT_EXECUTE) ? 'x' : '-');
+
+            err = proc_regionfilename(pid, address, buf, sizeof(buf));
+
+            if (info.share_mode == SM_COW && info.ref_count == 1) {
+                // Treat single reference SM_COW as SM_PRIVATE
+                info.share_mode = SM_PRIVATE;
+            }
+
+            if (strlen(buf) == 0) {
+                switch (info.share_mode) {
+                /*
+                case SM_LARGE_PAGE:
+                    // Treat SM_LARGE_PAGE the same as SM_PRIVATE
+                    // since they are not shareable and are wired.
+                */
+                case SM_COW:
+                    strcpy(buf, "[cow]");
+                    break;
+                case SM_PRIVATE:
+                    strcpy(buf, "[prv]");
+                    break;
+                case SM_EMPTY:
+                    strcpy(buf, "[nul]");
+                    break;
+                case SM_SHARED:
+                case SM_TRUESHARED:
+                    strcpy(buf, "[shm]");
+                    break;
+                case SM_PRIVATE_ALIASED:
+                    strcpy(buf, "[ali]");
+                    break;
+                case SM_SHARED_ALIASED:
+                    strcpy(buf, "[s/a]");
+                    break;
+                default:
+                    strcpy(buf, "[???]");
+                }
+            }
+
+            py_tuple = Py_BuildValue(
+                "sssIIIIIH",
+                addr_str,                                 // "start-end" address
+                perms,                                    // "rwx" permissions
+                buf,                                      // path
+                info.pages_resident * pagesize,           // rss
+                info.pages_shared_now_private * pagesize, // private
+                info.pages_swapped_out * pagesize,        // swapped
+                info.pages_dirtied * pagesize,            // dirtied
+                info.ref_count,                           // ref count
+                info.shadow_depth                         // shadow depth
+            );
+            if (!py_tuple)
+                goto error;
+            if (PyList_Append(py_list, py_tuple))
+                goto error;
+            Py_DECREF(py_tuple);
+        }
+
+        // increment address for the next map/file
+        address += size;
+    }
+
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+
+    return py_list;
+
+error:
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_list);
+    return NULL;
+}
+
+
+/*
+ * Return the number of logical CPUs in the system.
+ * XXX this could be shared with BSD.
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    int ncpu;
+    size_t len;
+
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("i", ncpu);
+    }
+}
+
+
+/*
+ * Return the number of physical CPUs in the system.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    int num;
+    size_t size = sizeof(int);
+    if (sysctlbyname("hw.physicalcpu", &num, &size, NULL, 0)) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    return Py_BuildValue("i", num);
+}
+
+
+#define TV2DOUBLE(t)    ((t).tv_sec + (t).tv_usec / 1000000.0)
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+    return Py_BuildValue("(dd)",
+                         (float)pti.pti_total_user / 1000000000.0,
+                         (float)pti.pti_total_system / 1000000000.0);
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("d", TV2DOUBLE(kp.kp_proc.p_starttime));
+}
+
+
+/*
+ * Return extended memory info about a process.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+
+    // Note: determining other memory stats on OSX is a mess:
+    // http://www.opensource.apple.com/source/top/top-67/libtop.c?txt
+    // I just give up...
+    // struct proc_regioninfo pri;
+    // psutil_proc_pidinfo(pid, PROC_PIDREGIONINFO, &pri, sizeof(pri))
+    return Py_BuildValue(
+        "(KKkk)",
+        pti.pti_resident_size,  // resident memory size (rss)
+        pti.pti_virtual_size,   // virtual memory size (vms)
+        pti.pti_faults,         // number of page faults (pages)
+        pti.pti_pageins         // number of actual pageins (pages)
+    );
+}
+
+
+/*
+ * Return number of threads used by process as a Python integer.
+ */
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+    return Py_BuildValue("k", pti.pti_threadnum);
+}
+
+
+/*
+ * Return the number of context switches performed by process.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+    // involuntary value seems not to be available;
+    // pti.pti_csw probably refers to the sum of the two (getrusage()
+    // numbers seem to confirm this theory).
+    return Py_BuildValue("ki", pti.pti_csw, 0);
+}
+
+
+/*
+ * Return system virtual memory stats
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+
+    int      mib[2];
+    uint64_t total;
+    size_t   len = sizeof(total);
+    vm_statistics_data_t vm;
+    int pagesize = getpagesize();
+
+    // physical mem
+    mib[0] = CTL_HW;
+    mib[1] = HW_MEMSIZE;
+    if (sysctl(mib, 2, &total, &len, NULL, 0)) {
+        if (errno != 0)
+            PyErr_SetFromErrno(PyExc_OSError);
+        else
+            PyErr_Format(PyExc_RuntimeError, "sysctl(HW_MEMSIZE) failed");
+        return NULL;
+    }
+
+    // vm
+    if (!psutil_sys_vminfo(&vm)) {
+        return NULL;
+    }
+
+    return Py_BuildValue(
+        "KKKKK",
+        total,
+        (unsigned long long) vm.active_count * pagesize,
+        (unsigned long long) vm.inactive_count * pagesize,
+        (unsigned long long) vm.wire_count * pagesize,
+        (unsigned long long) vm.free_count * pagesize
+    );
+}
+
+
+/*
+ * Return stats about swap memory.
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    size_t size;
+    struct xsw_usage totals;
+    vm_statistics_data_t vmstat;
+    int pagesize = getpagesize();
+
+    mib[0] = CTL_VM;
+    mib[1] = VM_SWAPUSAGE;
+    size = sizeof(totals);
+    if (sysctl(mib, 2, &totals, &size, NULL, 0) == -1) {
+        if (errno != 0)
+            PyErr_SetFromErrno(PyExc_OSError);
+        else
+            PyErr_Format(PyExc_RuntimeError, "sysctl(VM_SWAPUSAGE) failed");
+        return NULL;
+    }
+    if (!psutil_sys_vminfo(&vmstat)) {
+        return NULL;
+    }
+
+    return Py_BuildValue(
+        "LLLKK",
+        totals.xsu_total,
+        totals.xsu_used,
+        totals.xsu_avail,
+        (unsigned long long)vmstat.pageins * pagesize,
+        (unsigned long long)vmstat.pageouts * pagesize);
+}
+
+
+/*
+ * Return a Python tuple representing user, kernel and idle CPU times
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
+    kern_return_t error;
+    host_cpu_load_info_data_t r_load;
+
+    mach_port_t host_port = mach_host_self();
+    error = host_statistics(host_port, HOST_CPU_LOAD_INFO,
+                            (host_info_t)&r_load, &count);
+    if (error != KERN_SUCCESS) {
+        return PyErr_Format(PyExc_RuntimeError,
+                            "Error in host_statistics(): %s",
+                            mach_error_string(error));
+    }
+    mach_port_deallocate(mach_task_self(), host_port);
+
+    return Py_BuildValue(
+        "(dddd)",
+        (double)r_load.cpu_ticks[CPU_STATE_USER] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_NICE] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_SYSTEM] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_IDLE] / CLK_TCK
+    );
+}
+
+
+/*
+ * Return a Python list of tuples representing per-cpu times
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    natural_t cpu_count;
+    processor_info_array_t info_array;
+    mach_msg_type_number_t info_count;
+    kern_return_t error;
+    processor_cpu_load_info_data_t *cpu_load_info = NULL;
+    int i, ret;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    mach_port_t host_port = mach_host_self();
+    error = host_processor_info(host_port, PROCESSOR_CPU_LOAD_INFO,
+                                &cpu_count, &info_array, &info_count);
+    if (error != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError, "Error in host_processor_info(): %s",
+                     mach_error_string(error));
+        goto error;
+    }
+    mach_port_deallocate(mach_task_self(), host_port);
+
+    cpu_load_info = (processor_cpu_load_info_data_t *) info_array;
+
+    for (i = 0; i < cpu_count; i++) {
+        py_cputime = Py_BuildValue(
+            "(dddd)",
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_USER] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_NICE] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_SYSTEM] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_IDLE] / CLK_TCK
+        );
+        if (!py_cputime)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+    }
+
+    ret = vm_deallocate(mach_task_self(), (vm_address_t)info_array,
+                        info_count * sizeof(int));
+    if (ret != KERN_SUCCESS) {
+        PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+    }
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    if (cpu_load_info != NULL) {
+        ret = vm_deallocate(mach_task_self(), (vm_address_t)info_array,
+                            info_count * sizeof(int));
+        if (ret != KERN_SUCCESS) {
+            PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+        }
+    }
+    return NULL;
+}
+
+
+/*
+ * Return a Python float indicating the system boot time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    // fetch sysctl "kern.boottime"
+    static int request[2] = { CTL_KERN, KERN_BOOTTIME };
+    struct timeval result;
+    size_t result_len = sizeof result;
+    time_t boot_time = 0;
+
+    if (sysctl(request, 2, &result, &result_len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    boot_time = result.tv_sec;
+    return Py_BuildValue("f", (float)boot_time);
+}
+
+
+/*
+ * Return a list of tuples including device, mount point and fs type
+ * for all partitions mounted on the system.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    int num;
+    int i;
+    long len;
+    uint64_t flags;
+    char opts[400];
+    struct statfs *fs = NULL;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // get the number of mount points
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(NULL, 0, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    len = sizeof(*fs) * num;
+    fs = malloc(len);
+    if (fs == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(fs, len, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < num; i++) {
+        opts[0] = 0;
+        flags = fs[i].f_flags;
+
+        // see sys/mount.h
+        if (flags & MNT_RDONLY)
+            strlcat(opts, "ro", sizeof(opts));
+        else
+            strlcat(opts, "rw", sizeof(opts));
+        if (flags & MNT_SYNCHRONOUS)
+            strlcat(opts, ",sync", sizeof(opts));
+        if (flags & MNT_NOEXEC)
+            strlcat(opts, ",noexec", sizeof(opts));
+        if (flags & MNT_NOSUID)
+            strlcat(opts, ",nosuid", sizeof(opts));
+        if (flags & MNT_UNION)
+            strlcat(opts, ",union", sizeof(opts));
+        if (flags & MNT_ASYNC)
+            strlcat(opts, ",async", sizeof(opts));
+        if (flags & MNT_EXPORTED)
+            strlcat(opts, ",exported", sizeof(opts));
+        if (flags & MNT_QUARANTINE)
+            strlcat(opts, ",quarantine", sizeof(opts));
+        if (flags & MNT_LOCAL)
+            strlcat(opts, ",local", sizeof(opts));
+        if (flags & MNT_QUOTA)
+            strlcat(opts, ",quota", sizeof(opts));
+        if (flags & MNT_ROOTFS)
+            strlcat(opts, ",rootfs", sizeof(opts));
+        if (flags & MNT_DOVOLFS)
+            strlcat(opts, ",dovolfs", sizeof(opts));
+        if (flags & MNT_DONTBROWSE)
+            strlcat(opts, ",dontbrowse", sizeof(opts));
+        if (flags & MNT_IGNORE_OWNERSHIP)
+            strlcat(opts, ",ignore-ownership", sizeof(opts));
+        if (flags & MNT_AUTOMOUNTED)
+            strlcat(opts, ",automounted", sizeof(opts));
+        if (flags & MNT_JOURNALED)
+            strlcat(opts, ",journaled", sizeof(opts));
+        if (flags & MNT_NOUSERXATTR)
+            strlcat(opts, ",nouserxattr", sizeof(opts));
+        if (flags & MNT_DEFWRITE)
+            strlcat(opts, ",defwrite", sizeof(opts));
+        if (flags & MNT_MULTILABEL)
+            strlcat(opts, ",multilabel", sizeof(opts));
+        if (flags & MNT_NOATIME)
+            strlcat(opts, ",noatime", sizeof(opts));
+        if (flags & MNT_UPDATE)
+            strlcat(opts, ",update", sizeof(opts));
+        if (flags & MNT_RELOAD)
+            strlcat(opts, ",reload", sizeof(opts));
+        if (flags & MNT_FORCE)
+            strlcat(opts, ",force", sizeof(opts));
+        if (flags & MNT_CMDFLAGS)
+            strlcat(opts, ",cmdflags", sizeof(opts));
+
+        py_tuple = Py_BuildValue(
+            "(ssss)", fs[i].f_mntfromname,  // device
+            fs[i].f_mntonname,    // mount point
+            fs[i].f_fstypename,   // fs type
+            opts);                // options
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+
+    free(fs);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (fs != NULL)
+        free(fs);
+    return NULL;
+}
+
+
+/*
+ * Return process status as a Python integer.
+ */
+static PyObject *
+psutil_proc_status(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", (int)kp.kp_proc.p_stat);
+}
+
+
+/*
+ * Return process threads
+ */
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    int err, j, ret;
+    kern_return_t kr;
+    unsigned int info_count = TASK_BASIC_INFO_COUNT;
+    mach_port_t task = MACH_PORT_NULL;
+    struct task_basic_info tasks_info;
+    thread_act_port_array_t thread_list = NULL;
+    thread_info_data_t thinfo_basic;
+    thread_basic_info_t basic_info_th;
+    mach_msg_type_number_t thread_count, thread_info_count;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    // the argument passed should be a process id
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
+    // task_for_pid() requires special privileges
+    err = task_for_pid(mach_task_self(), pid, &task);
+    if (err != KERN_SUCCESS) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+        }
+        else {
+            AccessDenied();
+        }
+        goto error;
+    }
+
+    info_count = TASK_BASIC_INFO_COUNT;
+    err = task_info(task, TASK_BASIC_INFO, (task_info_t)&tasks_info,
+                    &info_count);
+    if (err != KERN_SUCCESS) {
+        // errcode 4 is "invalid argument" (access denied)
+        if (err == 4) {
+            AccessDenied();
+        }
+        else {
+            // otherwise throw a runtime error with appropriate error code
+            PyErr_Format(PyExc_RuntimeError,
+                         "task_info(TASK_BASIC_INFO) failed");
+        }
+        goto error;
+    }
+
+    err = task_threads(task, &thread_list, &thread_count);
+    if (err != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError, "task_threads() failed");
+        goto error;
+    }
+
+    for (j = 0; j < thread_count; j++) {
+        pyTuple = NULL;
+        thread_info_count = THREAD_INFO_MAX;
+        kr = thread_info(thread_list[j], THREAD_BASIC_INFO,
+                         (thread_info_t)thinfo_basic, &thread_info_count);
+        if (kr != KERN_SUCCESS) {
+            PyErr_Format(PyExc_RuntimeError,
+                         "thread_info() with flag THREAD_BASIC_INFO failed");
+            goto error;
+        }
+
+        basic_info_th = (thread_basic_info_t)thinfo_basic;
+        pyTuple = Py_BuildValue(
+            "Iff",
+            j + 1,
+            (float)basic_info_th->user_time.microseconds / 1000000.0,
+            (float)basic_info_th->system_time.microseconds / 1000000.0
+        );
+        if (!pyTuple)
+            goto error;
+        if (PyList_Append(retList, pyTuple))
+            goto error;
+        Py_DECREF(pyTuple);
+    }
+
+    ret = vm_deallocate(task, (vm_address_t)thread_list,
+                        thread_count * sizeof(int));
+    if (ret != KERN_SUCCESS) {
+        PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+    }
+
+    mach_port_deallocate(mach_task_self(), task);
+
+    return retList;
+
+error:
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (thread_list != NULL) {
+        ret = vm_deallocate(task, (vm_address_t)thread_list,
+                            thread_count * sizeof(int));
+        if (ret != KERN_SUCCESS) {
+            PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+        }
+    }
+    return NULL;
+}
+
+
+/*
+ * Return process open files as a Python list of tuples.
+ * References:
+ * - lsof source code: http://goo.gl/SYW79 and http://goo.gl/m78fd
+ * - /usr/include/sys/proc_info.h
+ */
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int iterations;
+    int i;
+    int nb;
+
+    struct proc_fdinfo *fds_pointer = NULL;
+    struct proc_fdinfo *fdp_pointer;
+    struct vnode_fdinfowithpath vi;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        // may be ignored later if errno != 0
+        PyErr_Format(PyExc_RuntimeError,
+                     "proc_pidinfo(PROC_PIDLISTFDS) failed");
+        goto error;
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+    if (pidinfo_result <= 0) {
+        // may be ignored later if errno != 0
+        PyErr_Format(PyExc_RuntimeError,
+                     "proc_pidinfo(PROC_PIDLISTFDS) failed");
+        goto error;
+    }
+
+    iterations = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+
+    for (i = 0; i < iterations; i++) {
+        tuple = NULL;
+        fdp_pointer = &fds_pointer[i];
+
+        if (fdp_pointer->proc_fdtype == PROX_FDTYPE_VNODE)
+        {
+            nb = proc_pidfdinfo(pid,
+                                fdp_pointer->proc_fd,
+                                PROC_PIDFDVNODEPATHINFO,
+                                &vi,
+                                sizeof(vi));
+
+            // --- errors checking
+            if (nb <= 0) {
+                if ((errno == ENOENT) || (errno == EBADF)) {
+                    // no such file or directory or bad file descriptor;
+                    // let's assume the file has been closed or removed
+                    continue;
+                }
+                // may be ignored later if errno != 0
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed");
+                goto error;
+            }
+            if (nb < sizeof(vi)) {
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed "
+                             "(buffer mismatch)");
+                goto error;
+            }
+            // --- /errors checking
+
+            // --- construct python list
+            tuple = Py_BuildValue("(si)",
+                                  vi.pvip.vip_path,
+                                  (int)fdp_pointer->proc_fd);
+            if (!tuple)
+                goto error;
+            if (PyList_Append(retList, tuple))
+                goto error;
+            Py_DECREF(tuple);
+            // --- /construct python list
+        }
+    }
+
+    free(fds_pointer);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(retList);
+    if (fds_pointer != NULL) {
+        free(fds_pointer);
+    }
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    else if (! psutil_pid_exists(pid)) {
+        return NoSuchProcess();
+    }
+    else {
+        // exception has already been set earlier
+        return NULL;
+    }
+}
+
+
+// a signaler for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
+
+/*
+ * Return process TCP and UDP connections as a list of tuples.
+ * References:
+ * - lsof source code: http://goo.gl/SYW79 and http://goo.gl/wNrC0
+ * - /usr/include/sys/proc_info.h
+ */
+static PyObject *
+psutil_proc_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int iterations;
+    int i;
+    int nb;
+
+    struct proc_fdinfo *fds_pointer = NULL;
+    struct proc_fdinfo *fdp_pointer;
+    struct socket_fdinfo si;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        goto error;
+    }
+
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    if (pid == 0) {
+        return retList;
+    }
+
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        goto error;
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+
+    if (pidinfo_result <= 0) {
+        goto error;
+    }
+
+    iterations = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+
+    for (i = 0; i < iterations; i++) {
+        tuple = NULL;
+        laddr = NULL;
+        raddr = NULL;
+        errno = 0;
+        fdp_pointer = &fds_pointer[i];
+
+        if (fdp_pointer->proc_fdtype == PROX_FDTYPE_SOCKET)
+        {
+            nb = proc_pidfdinfo(pid, fdp_pointer->proc_fd,
+                                PROC_PIDFDSOCKETINFO, &si, sizeof(si));
+
+            // --- errors checking
+            if (nb <= 0) {
+                if (errno == EBADF) {
+                    // let's assume socket has been closed
+                    continue;
+                }
+                if (errno != 0) {
+                    PyErr_SetFromErrno(PyExc_OSError);
+                }
+                else {
+                    PyErr_Format(
+                        PyExc_RuntimeError,
+                        "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed");
+                }
+                goto error;
+            }
+            if (nb < sizeof(si)) {
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed "
+                             "(buffer mismatch)");
+                goto error;
+            }
+            // --- /errors checking
+
+            //
+            int fd, family, type, lport, rport, state;
+            char lip[200], rip[200];
+            int inseq;
+            PyObject *_family;
+            PyObject *_type;
+
+            fd = (int)fdp_pointer->proc_fd;
+            family = si.psi.soi_family;
+            type = si.psi.soi_type;
+
+            // apply filters
+            _family = PyLong_FromLong((long)family);
+            inseq = PySequence_Contains(af_filter, _family);
+            Py_DECREF(_family);
+            if (inseq == 0) {
+                continue;
+            }
+            _type = PyLong_FromLong((long)type);
+            inseq = PySequence_Contains(type_filter, _type);
+            Py_DECREF(_type);
+            if (inseq == 0) {
+                continue;
+            }
+
+            if (errno != 0) {
+                PyErr_SetFromErrno(PyExc_OSError);
+                goto error;
+            }
+
+            if ((family == AF_INET) || (family == AF_INET6)) {
+                if (family == AF_INET) {
+                    inet_ntop(AF_INET,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_laddr.ina_46.i46a_addr4,
+                              lip,
+                              sizeof(lip));
+                    inet_ntop(AF_INET,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_faddr. \
+                                  ina_46.i46a_addr4,
+                              rip,
+                              sizeof(rip));
+                }
+                else {
+                    inet_ntop(AF_INET6,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_laddr.ina_6,
+                              lip, sizeof(lip));
+                    inet_ntop(AF_INET6,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_faddr.ina_6,
+                              rip, sizeof(rip));
+                }
+
+                // check for inet_ntop failures
+                if (errno != 0) {
+                    PyErr_SetFromErrno(PyExc_OSError);
+                    goto error;
+                }
+
+                lport = ntohs(si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_lport);
+                rport = ntohs(si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_fport);
+                if (type == SOCK_STREAM) {
+                    state = (int)si.psi.soi_proto.pri_tcp.tcpsi_state;
+                }
+                else {
+                    state = PSUTIL_CONN_NONE;
+                }
+
+                laddr = Py_BuildValue("(si)", lip, lport);
+                if (!laddr)
+                    goto error;
+                if (rport != 0) {
+                    raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    raddr = Py_BuildValue("()");
+                }
+                if (!raddr)
+                    goto error;
+
+                // construct the python list
+                tuple = Py_BuildValue("(iiiNNi)", fd, family, type, laddr,
+                                      raddr, state);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+            else if (family == AF_UNIX) {
+                // construct the python list
+                tuple = Py_BuildValue(
+                    "(iiissi)",
+                    fd, family, type,
+                    si.psi.soi_proto.pri_un.unsi_addr.ua_sun.sun_path,
+                    si.psi.soi_proto.pri_un.unsi_caddr.ua_sun.sun_path,
+                    PSUTIL_CONN_NONE);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+        }
+    }
+
+    free(fds_pointer);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    Py_DECREF(retList);
+
+    if (fds_pointer != NULL) {
+        free(fds_pointer);
+    }
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    else if (! psutil_pid_exists(pid) ) {
+        return NoSuchProcess();
+    }
+    else {
+        return PyErr_Format(PyExc_RuntimeError,
+                            "proc_pidinfo(PROC_PIDLISTFDS) failed");
+    }
+}
+
+
+/*
+ * Return number of file descriptors opened by process.
+ */
+static PyObject *
+psutil_proc_num_fds(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int num;
+    struct proc_fdinfo *fds_pointer;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        return PyErr_NoMemory();
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+    if (pidinfo_result <= 0) {
+        free(fds_pointer);
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    num = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+    free(fds_pointer);
+    return Py_BuildValue("i", num);
+}
+
+
+/*
+ * Return a Python dict of tuples with per-interface network I/O information.
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    char *buf = NULL, *lim, *next;
+    struct if_msghdr *ifm;
+    int mib[6];
+    size_t len;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+
+    mib[0] = CTL_NET;          // networking subsystem
+    mib[1] = PF_ROUTE;         // type of information
+    mib[2] = 0;                // protocol (IPPROTO_xxx)
+    mib[3] = 0;                // address family
+    mib[4] = NET_RT_IFLIST2;   // operation
+    mib[5] = 0;
+
+    if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    buf = malloc(len);
+    if (buf == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    lim = buf + len;
+
+    for (next = buf; next < lim; ) {
+        ifm = (struct if_msghdr *)next;
+        next += ifm->ifm_msglen;
+
+        if (ifm->ifm_type == RTM_IFINFO2) {
+            py_ifc_info = NULL;
+            struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
+            struct sockaddr_dl *sdl = (struct sockaddr_dl *)(if2m + 1);
+            char ifc_name[32];
+
+            strncpy(ifc_name, sdl->sdl_data, sdl->sdl_nlen);
+            ifc_name[sdl->sdl_nlen] = 0;
+
+            py_ifc_info = Py_BuildValue(
+                "(KKKKKKKi)",
+                if2m->ifm_data.ifi_obytes,
+                if2m->ifm_data.ifi_ibytes,
+                if2m->ifm_data.ifi_opackets,
+                if2m->ifm_data.ifi_ipackets,
+                if2m->ifm_data.ifi_ierrors,
+                if2m->ifm_data.ifi_oerrors,
+                if2m->ifm_data.ifi_iqdrops,
+                0);  // dropout not supported
+
+            if (!py_ifc_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, ifc_name, py_ifc_info))
+                goto error;
+            Py_DECREF(py_ifc_info);
+        }
+        else {
+            continue;
+        }
+    }
+
+    free(buf);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (buf != NULL)
+        free(buf);
+    return NULL;
+}
+
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    CFDictionaryRef parent_dict;
+    CFDictionaryRef props_dict;
+    CFDictionaryRef stats_dict;
+    io_registry_entry_t parent;
+    io_registry_entry_t disk;
+    io_iterator_t disk_list;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+
+    // Get list of disks
+    if (IOServiceGetMatchingServices(kIOMasterPortDefault,
+                                     IOServiceMatching(kIOMediaClass),
+                                     &disk_list) != kIOReturnSuccess) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "unable to get the list of disks.");
+        goto error;
+    }
+
+    // Iterate over disks
+    while ((disk = IOIteratorNext(disk_list)) != 0) {
+        py_disk_info = NULL;
+        parent_dict = NULL;
+        props_dict = NULL;
+        stats_dict = NULL;
+
+        if (IORegistryEntryGetParentEntry(disk, kIOServicePlane, &parent)
+                != kIOReturnSuccess) {
+            PyErr_SetString(PyExc_RuntimeError,
+                            "unable to get the disk's parent.");
+            IOObjectRelease(disk);
+            goto error;
+        }
+
+        if (IOObjectConformsTo(parent, "IOBlockStorageDriver")) {
+            if (IORegistryEntryCreateCFProperties(
+                    disk,
+                    (CFMutableDictionaryRef *) &parent_dict,
+                    kCFAllocatorDefault,
+                    kNilOptions
+                ) != kIOReturnSuccess)
+            {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "unable to get the parent's properties.");
+                IOObjectRelease(disk);
+                IOObjectRelease(parent);
+                goto error;
+            }
+
+            if (IORegistryEntryCreateCFProperties(
+                    parent,
+                    (CFMutableDictionaryRef *) &props_dict,
+                    kCFAllocatorDefault,
+                    kNilOptions
+                ) != kIOReturnSuccess)
+            {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "unable to get the disk properties.");
+                CFRelease(props_dict);
+                IOObjectRelease(disk);
+                IOObjectRelease(parent);
+                goto error;
+            }
+
+            const int kMaxDiskNameSize = 64;
+            CFStringRef disk_name_ref = (CFStringRef)CFDictionaryGetValue(
+                parent_dict, CFSTR(kIOBSDNameKey));
+            char disk_name[kMaxDiskNameSize];
+
+            CFStringGetCString(disk_name_ref,
+                               disk_name,
+                               kMaxDiskNameSize,
+                               CFStringGetSystemEncoding());
+
+            stats_dict = (CFDictionaryRef)CFDictionaryGetValue(
+                props_dict, CFSTR(kIOBlockStorageDriverStatisticsKey));
+
+            if (stats_dict == NULL) {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "Unable to get disk stats.");
+                goto error;
+            }
+
+            CFNumberRef number;
+            int64_t reads = 0;
+            int64_t writes = 0;
+            int64_t read_bytes = 0;
+            int64_t write_bytes = 0;
+            int64_t read_time = 0;
+            int64_t write_time = 0;
+
+            // Get disk reads/writes
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsReadsKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &reads);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsWritesKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &writes);
+            }
+
+            // Get disk bytes read/written
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &read_bytes);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &write_bytes);
+            }
+
+            // Get disk time spent reading/writing (nanoseconds)
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &read_time);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &write_time);
+            }
+
+            // Read/Write time on OS X comes back in nanoseconds and in psutil
+            // we've standardized on milliseconds so do the conversion.
+            py_disk_info = Py_BuildValue(
+                "(KKKKKK)",
+                reads,
+                writes,
+                read_bytes,
+                write_bytes,
+                read_time / 1000 / 1000,
+                write_time / 1000 / 1000);
+            if (!py_disk_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, disk_name, py_disk_info))
+                goto error;
+            Py_DECREF(py_disk_info);
+
+            CFRelease(parent_dict);
+            IOObjectRelease(parent);
+            CFRelease(props_dict);
+            IOObjectRelease(disk);
+        }
+    }
+
+    IOObjectRelease (disk_list);
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    struct utmpx *utx;
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+    while ((utx = getutxent()) != NULL) {
+        if (utx->ut_type != USER_PROCESS)
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            utx->ut_user,             // username
+            utx->ut_line,             // tty
+            utx->ut_host,             // hostname
+            (float)utx->ut_tv.tv_sec  // start time
+        );
+        if (!tuple) {
+            endutxent();
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            endutxent();
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    endutxent();
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    return NULL;
+}
+
+
+/*
+ * Define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_name", psutil_proc_name, METH_VARARGS,
+     "Return process name"},
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return path of the process executable"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory."},
+    {"proc_ppid", psutil_proc_ppid, METH_VARARGS,
+     "Return process ppid as an integer"},
+    {"proc_uids", psutil_proc_uids, METH_VARARGS,
+     "Return process real user id as an integer"},
+    {"proc_gids", psutil_proc_gids, METH_VARARGS,
+     "Return process real group id as an integer"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return memory information about a process"},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return number of threads used by process"},
+    {"proc_status", psutil_proc_status, METH_VARARGS,
+     "Return process status as an integer"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads as a list of tuples"},
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process as a list of tuples"},
+    {"proc_num_fds", psutil_proc_num_fds, METH_VARARGS,
+     "Return the number of fds opened by process."},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+    {"proc_connections", psutil_proc_connections, METH_VARARGS,
+     "Get process TCP and UDP connections as a list of tuples"},
+    {"proc_tty_nr", psutil_proc_tty_nr, METH_VARARGS,
+     "Return process tty number as an integer"},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return a list of tuples for every process's memory map"},
+
+    // --- system-related functions
+
+    {"pids", psutil_pids, METH_VARARGS,
+     "Returns a list of PIDs currently running on the system"},
+    {"cpu_count_logical", psutil_cpu_count_logical, METH_VARARGS,
+     "Return number of logical CPUs on the system"},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return number of physical CPUs on the system"},
+    {"virtual_mem", psutil_virtual_mem, METH_VARARGS,
+     "Return system virtual memory stats"},
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return stats about swap memory, in bytes"},
+    {"cpu_times", psutil_cpu_times, METH_VARARGS,
+     "Return system cpu times as a tuple (user, system, nice, idle, irc)"},
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-cpu times as a list of tuples"},
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return the system boot time expressed in seconds since the epoch."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return a list of tuples including device, mount point and "
+     "fs type for all partitions mounted on the system."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return dict of tuples of networks I/O information."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return dict of tuples of disks I/O information."},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+
+    {NULL, NULL, 0, NULL}
+};
+
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_osx_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_osx_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_osx",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_osx_traverse,
+    psutil_osx_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_osx(void)
+
+#else
+#define INITERROR return
+
+void
+init_psutil_osx(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_osx", PsutilMethods);
+#endif
+    // process status constants, defined in:
+    // http://fxr.watson.org/fxr/source/bsd/sys/proc.h?v=xnu-792.6.70#L149
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    // connection status constants
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RECEIVED", TCPS_SYN_RECEIVED);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}

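The module_state / GETSTATE / PyModuleDef / INITERROR block above is boilerplate that every platform extension in this patch repeats so the same source builds under both Python 2 and Python 3. A minimal sketch of the same pattern, using a made-up single-method module called "toy" (not part of this commit), is:

/* toy.c -- minimal sketch of the dual Python 2/3 init pattern used above */
#include <Python.h>

static PyObject *
toy_ping(PyObject *self, PyObject *args)
{
    // takes no arguments, returns the string "pong"
    if (! PyArg_ParseTuple(args, ""))
        return NULL;
    return Py_BuildValue("s", "pong");
}

static PyMethodDef ToyMethods[] = {
    {"ping", toy_ping, METH_VARARGS, "Return 'pong'"},
    {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef toy_moduledef = {
    PyModuleDef_HEAD_INIT, "toy", NULL, -1, ToyMethods,
    NULL, NULL, NULL, NULL
};

PyMODINIT_FUNC
PyInit_toy(void)
{
    return PyModule_Create(&toy_moduledef);   // Python 3 entry point
}
#else
void
inittoy(void)
{
    Py_InitModule("toy", ToyMethods);         // Python 2 entry point
}
#endif
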
+ 41 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h

@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// --- per-process functions
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_gids(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_fds(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ppid(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_status(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_tty_nr(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_uids(PyObject* self, PyObject* args);
+
+// --- system-related functions
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);

+ 128 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c

@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Functions specific to all POSIX compliant platforms.
+ */
+
+#include <Python.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+
+#include "_psutil_posix.h"
+
+
+/*
+ * Given a PID return process priority as a Python integer.
+ */
+static PyObject *
+psutil_posix_getpriority(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
+    errno = 0;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    priority = getpriority(PRIO_PROCESS, pid);
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    return Py_BuildValue("i", priority);
+}
+
+
+/*
+ * Given a PID and a value change process priority.
+ */
+static PyObject *
+psutil_posix_setpriority(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
+    int retval;
+    if (! PyArg_ParseTuple(args, "li", &pid, &priority)) {
+        return NULL;
+    }
+    retval = setpriority(PRIO_PROCESS, pid, priority);
+    if (retval == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    {"getpriority", psutil_posix_getpriority, METH_VARARGS,
+     "Return process priority"},
+    {"setpriority", psutil_posix_setpriority, METH_VARARGS,
+     "Set process priority"},
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_posix_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_posix_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_posix",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_posix_traverse,
+    psutil_posix_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_posix(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_posix(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_posix", PsutilMethods);
+#endif
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}

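psutil_posix_getpriority() above clears errno before calling getpriority() because -1 is both the error indicator and a legitimate nice value; only errno can tell the two apart. A standalone sketch of that idiom (not part of the commit, illustrative only):

/* nice_check.c -- illustrates the errno idiom used by psutil_posix_getpriority */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
    errno = 0;                                    // reset before the call
    int prio = getpriority(PRIO_PROCESS, getpid());
    if (prio == -1 && errno != 0) {               // -1 alone is not an error
        fprintf(stderr, "getpriority: %s\n", strerror(errno));
        return 1;
    }
    printf("current nice value: %d\n", prio);
    return 0;
}
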
+ 10 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h

@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+static PyObject* psutil_posix_getpriority(PyObject* self, PyObject* args);
+static PyObject* psutil_posix_setpriority(PyObject* self, PyObject* args);

+ 1290 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c

@@ -0,0 +1,1290 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Functions specific to Sun OS Solaris platforms.
+ *
+ * Thanks to Justin Venus, who originally wrote a substantial part of
+ * this in Cython, which I later translated into C.
+ */
+
+
+#include <Python.h>
+
+// fix for "Cannot use procfs in the large file compilation environment"
+// error, see:
+// http://sourceware.org/ml/gdb-patches/2010-11/msg00336.html
+#undef _FILE_OFFSET_BITS
+#define _STRUCTURED_PROC 1
+
+// fix compilation issue on SunOS 5.10, see:
+// https://code.google.com/p/psutil/issues/detail?id=421
+#define NEW_MIB_COMPLIANT
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/swap.h>
+#include <sys/sysinfo.h>
+#include <sys/mntent.h>  // for MNTTAB
+#include <sys/mnttab.h>
+#include <sys/procfs.h>
+#include <fcntl.h>
+#include <utmpx.h>
+#include <kstat.h>
+#include <sys/ioctl.h>
+#include <sys/tihdr.h>
+#include <stropts.h>
+#include <inet/tcp.h>
+#include <arpa/inet.h>
+
+#include "_psutil_sunos.h"
+
+
+#define TV2DOUBLE(t)   (((t).tv_nsec * 0.000000001) + (t).tv_sec)
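+// e.g. a timestruc_t of {tv_sec: 5, tv_nsec: 250000000} maps to 5.25 seconds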
+
+/*
+ * Read a file's content and fill a C structure with it.
+ */
+int
+psutil_file_to_struct(char *path, void *fstruct, size_t size)
+{
+    int fd;
+    ssize_t nbytes;
+    fd = open(path, O_RDONLY);
+    if (fd == -1) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
+        return 0;
+    }
+    nbytes = read(fd, fstruct, size);
+    if (nbytes <= 0) {
+        close(fd);
+        PyErr_SetFromErrno(PyExc_OSError);
+        return 0;
+    }
+    if (nbytes != size) {
+        close(fd);
+        PyErr_SetString(PyExc_RuntimeError, "structure size mismatch");
+        return 0;
+    }
+    close(fd);
+    return nbytes;
+}
+
+
+/*
+ * Return process ppid, rss, vms, ctime, nice, nthreads, status and tty
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_basic_info(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    psinfo_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/psinfo", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("ikkdiiik",
+                         info.pr_ppid,              // parent pid
+                         info.pr_rssize,            // rss
+                         info.pr_size,              // vms
+                         TV2DOUBLE(info.pr_start),  // create time
+                         info.pr_lwp.pr_nice,       // nice
+                         info.pr_nlwp,              // no. of threads
+                         info.pr_lwp.pr_state,      // status code
+                         info.pr_ttydev             // tty nr
+                        );
+}
+
+
+/*
+ * Return process name and args as a Python tuple.
+ */
+static PyObject *
+psutil_proc_name_and_args(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    psinfo_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/psinfo", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("ss", info.pr_fname, info.pr_psargs);
+}
+
+
+/*
+ * Return process user and system CPU times as a Python tuple.
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    pstatus_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/status", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    // results are more precise than os.times()
+    return Py_BuildValue("dd",
+                         TV2DOUBLE(info.pr_utime),
+                         TV2DOUBLE(info.pr_stime));
+}
+
+
+/*
+ * Return process uids/gids as a Python tuple.
+ */
+static PyObject *
+psutil_proc_cred(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    prcred_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/cred", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("iiiiii",
+                         info.pr_ruid, info.pr_euid, info.pr_suid,
+                         info.pr_rgid, info.pr_egid, info.pr_sgid);
+}
+
+
+/*
+ * Return the number of voluntary and involuntary context switches
+ * performed by the process as a Python tuple.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    prusage_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/usage", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("kk", info.pr_vctx, info.pr_ictx);
+}
+
+
+/*
+ * Process IO counters.
+ *
+ * Commented out and left here as a reminder.  Apparently we cannot
+ * retrieve process IO stats because:
+ * - 'pr_ioch' is a sum of chars read and written, with no distinction
+ * - 'pr_inblk' and 'pr_oublk', which should be the number of bytes
+ *    read and written, hardly increase and according to:
+ *    http://www.brendangregg.com/Perf/paper_diskubyp1.pdf
+ *    ...they should be meaningless anyway.
+ *
+static PyObject*
+proc_io_counters(PyObject* self, PyObject* args)
+{
+    int pid;
+    char path[100];
+    prusage_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid)) {
+        return NULL;
+    }
+    sprintf(path, "/proc/%i/usage", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info))) {
+        return NULL;
+    }
+
+    // On Solaris we only have 'pr_ioch' which accounts for bytes read
+    // *and* written.
+    // 'pr_inblk' and 'pr_oublk' should be expressed in blocks of
+    // 8KB according to:
+    // http://www.brendangregg.com/Perf/paper_diskubyp1.pdf  (pag. 8)
+    return Py_BuildValue("kkkk",
+                         info.pr_ioch,
+                         info.pr_ioch,
+                         info.pr_inblk,
+                         info.pr_oublk);
+}
+ */
+
+
+/*
+ * Return information about a given process thread.
+ */
+static PyObject *
+psutil_proc_query_thread(PyObject *self, PyObject *args)
+{
+    int pid, tid;
+    char path[100];
+    lwpstatus_t info;
+
+    if (! PyArg_ParseTuple(args, "ii", &pid, &tid))
+        return NULL;
+    sprintf(path, "/proc/%i/lwp/%i/lwpstatus", pid, tid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("dd",
+                         TV2DOUBLE(info.pr_utime),
+                         TV2DOUBLE(info.pr_stime));
+}
+
+
+/*
+ * Return information about system swap memory (pages swapped in/out).
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+// XXX (arghhh!)
+// total/free swap mem: commented out as for some reason I can't
+// manage to get the same results shown by "swap -l", even though the
+// code below is exactly the same as:
+// http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/
+//    cmd/swap/swap.c
+// We're going to parse "swap -l" output from Python (sigh!)
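+// (the totals therefore come from the Python layer parsing "swap -l";
+// this function only returns cumulative pages swapped in/out, summed
+// over the per-CPU "cpu_stat" kstats below)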
+
+/*
+    struct swaptable     *st;
+    struct swapent    *swapent;
+    int    i;
+    struct stat64 statbuf;
+    char *path;
+    char fullpath[MAXPATHLEN+1];
+    int    num;
+
+    if ((num = swapctl(SC_GETNSWP, NULL)) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    if (num == 0) {
+        PyErr_SetString(PyExc_RuntimeError, "no swap devices configured");
+        return NULL;
+    }
+    if ((st = malloc(num * sizeof(swapent_t) + sizeof (int))) == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "malloc failed");
+        return NULL;
+    }
+    if ((path = malloc(num * MAXPATHLEN)) == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "malloc failed");
+        return NULL;
+    }
+    swapent = st->swt_ent;
+    for (i = 0; i < num; i++, swapent++) {
+        swapent->ste_path = path;
+        path += MAXPATHLEN;
+    }
+    st->swt_n = num;
+    if ((num = swapctl(SC_LIST, st)) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+
+    swapent = st->swt_ent;
+    long t = 0, f = 0;
+    for (i = 0; i < num; i++, swapent++) {
+        int diskblks_per_page =(int)(sysconf(_SC_PAGESIZE) >> DEV_BSHIFT);
+        t += (long)swapent->ste_pages;
+        f += (long)swapent->ste_free;
+    }
+
+    free(st);
+    return Py_BuildValue("(kk)", t, f);
+*/
+
+    kstat_ctl_t *kc;
+    kstat_t     *k;
+    cpu_stat_t  *cpu;
+    int         cpu_count = 0;
+    int         flag = 0;
+    uint_t      sin = 0;
+    uint_t      sout = 0;
+
+    kc = kstat_open();
+    if (kc == NULL) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    k = kc->kc_chain;
+    while (k != NULL) {
+        if ((strncmp(k->ks_name, "cpu_stat", 8) == 0) && \
+                (kstat_read(kc, k, NULL) != -1) )
+        {
+            flag = 1;
+            cpu = (cpu_stat_t *) k->ks_data;
+            sin += cpu->cpu_vminfo.pgswapin;    // num pages swapped in
+            sout += cpu->cpu_vminfo.pgswapout;  // num pages swapped out
+        }
+        cpu_count += 1;
+        k = k->ks_next;
+    }
+    kstat_close(kc);
+    if (!flag) {
+        PyErr_SetString(PyExc_RuntimeError, "no swap device was found");
+        return NULL;
+    }
+    return Py_BuildValue("(II)", sin, sout);
+}
+
+
+/*
+ * Return users currently connected on the system.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    struct utmpx *ut;
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *user_proc = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+
+    while (NULL != (ut = getutxent())) {
+        if (ut->ut_type == USER_PROCESS)
+            user_proc = Py_True;
+        else
+            user_proc = Py_False;
+        tuple = Py_BuildValue(
+            "(sssfO)",
+            ut->ut_user,              // username
+            ut->ut_line,              // tty
+            ut->ut_host,              // hostname
+            (float)ut->ut_tv.tv_sec,  // tstamp
+            user_proc);               // (bool) user process
+        if (tuple == NULL)
+            goto error;
+        if (PyList_Append(ret_list, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+    endutent();
+
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    if (ut != NULL)
+        endutent();
+    return NULL;
+}
+
+
+/*
+ * Return disk mounted partitions as a list of tuples including device,
+ * mount point and filesystem type.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    FILE *file;
+    struct mnttab mt;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    file = fopen(MNTTAB, "rb");
+    if (file == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    while (getmntent(file, &mt) == 0) {
+        py_tuple = Py_BuildValue(
+            "(ssss)",
+            mt.mnt_special,   // device
+            mt.mnt_mountp,    // mount point
+            mt.mnt_fstype,    // fs type
+            mt.mnt_mntopts);  // options
+        if (py_tuple == NULL)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+
+    }
+    fclose(file);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (file != NULL)
+        fclose(file);
+    return NULL;
+}
+
+
+/*
+ * Return per-CPU times for every CPU on the system.
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    cpu_stat_t cs;
+    int numcpus;
+    int i;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    kc = kstat_open();
+    if (kc == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    numcpus = sysconf(_SC_NPROCESSORS_ONLN) - 1;
+    for (i = 0; i <= numcpus; i++) {
+        ksp = kstat_lookup(kc, "cpu_stat", i, NULL);
+        if (ksp == NULL) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+        if (kstat_read(kc, ksp, &cs) == -1) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+
+        py_cputime = Py_BuildValue("ffff",
+                                   (float)cs.cpu_sysinfo.cpu[CPU_USER],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_KERNEL],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_IDLE],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_WAIT]);
+        if (py_cputime == NULL)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+
+    }
+
+    kstat_close(kc);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+/*
+ * Return disk IO statistics.
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    kstat_io_t kio;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+    kc = kstat_open();
+    if (kc == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    ksp = kc->kc_chain;
+    while (ksp != NULL) {
+        if (ksp->ks_type == KSTAT_TYPE_IO) {
+            if (strcmp(ksp->ks_class, "disk") == 0) {
+                if (kstat_read(kc, ksp, &kio) == -1) {
+                    kstat_close(kc);
+                    return PyErr_SetFromErrno(PyExc_OSError);
+                }
+                py_disk_info = Py_BuildValue(
+                    "(IIKKLL)",
+                    kio.reads,
+                    kio.writes,
+                    kio.nread,
+                    kio.nwritten,
+                    kio.rtime / 1000 / 1000,  // from nano to milli secs
+                    kio.wtime / 1000 / 1000   // from nano to milli secs
+                );
+                if (!py_disk_info)
+                    goto error;
+                if (PyDict_SetItemString(py_retdict, ksp->ks_name,
+                                         py_disk_info))
+                    goto error;
+                Py_DECREF(py_disk_info);
+            }
+        }
+        ksp = ksp->ks_next;
+    }
+    kstat_close(kc);
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+/*
+ * Return process memory mappings.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    int pid;
+    int fd = -1;
+    char path[100];
+    char perms[10];
+    char *name;
+    struct stat st;
+    pstatus_t status;
+
+    prxmap_t *xmap = NULL, *p;
+    off_t size;
+    size_t nread;
+    int nmap;
+    uintptr_t pr_addr_sz;
+    uintptr_t stk_base_sz, brk_base_sz;
+
+    PyObject *pytuple = NULL;
+    PyObject *py_retlist = PyList_New(0);
+
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "i", &pid)) {
+        goto error;
+    }
+
+    sprintf(path, "/proc/%i/status", pid);
+    if (! psutil_file_to_struct(path, (void *)&status, sizeof(status))) {
+        goto error;
+    }
+
+    sprintf(path, "/proc/%i/xmap", pid);
+    if (stat(path, &st) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    size = st.st_size;
+
+    fd = open(path, O_RDONLY);
+    if (fd == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    xmap = (prxmap_t *)malloc(size);
+    if (xmap == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    nread = pread(fd, xmap, size, 0);
+    nmap = nread / sizeof(prxmap_t);
+    p = xmap;
+
+    while (nmap) {
+        nmap -= 1;
+        if (p == NULL) {
+            p += 1;
+            continue;
+        }
+
+        perms[0] = '\0';
+        pr_addr_sz = p->pr_vaddr + p->pr_size;
+
+        // perms
+        sprintf(perms, "%c%c%c%c%c%c", p->pr_mflags & MA_READ ? 'r' : '-',
+                p->pr_mflags & MA_WRITE ? 'w' : '-',
+                p->pr_mflags & MA_EXEC ? 'x' : '-',
+                p->pr_mflags & MA_SHARED ? 's' : '-',
+                p->pr_mflags & MA_NORESERVE ? 'R' : '-',
+                p->pr_mflags & MA_RESERVED1 ? '*' : ' ');
+
+        // name
+        if (strlen(p->pr_mapname) > 0) {
+            name = p->pr_mapname;
+        }
+        else {
+            if ((p->pr_mflags & MA_ISM) || (p->pr_mflags & MA_SHM)) {
+                name = "[shmid]";
+            }
+            else {
+                stk_base_sz = status.pr_stkbase + status.pr_stksize;
+                brk_base_sz = status.pr_brkbase + status.pr_brksize;
+
+                if ((pr_addr_sz > status.pr_stkbase) &&
+                        (p->pr_vaddr < stk_base_sz)) {
+                    name = "[stack]";
+                }
+                else if ((p->pr_mflags & MA_ANON) && \
+                         (pr_addr_sz > status.pr_brkbase) && \
+                         (p->pr_vaddr < brk_base_sz)) {
+                    name = "[heap]";
+                }
+                else {
+                    name = "[anon]";
+                }
+            }
+        }
+
+        pytuple = Py_BuildValue("iisslll",
+                                p->pr_vaddr,
+                                pr_addr_sz,
+                                perms,
+                                name,
+                                (long)p->pr_rss * p->pr_pagesize,
+                                (long)p->pr_anon * p->pr_pagesize,
+                                (long)p->pr_locked * p->pr_pagesize);
+        if (!pytuple)
+            goto error;
+        if (PyList_Append(py_retlist, pytuple))
+            goto error;
+        Py_DECREF(pytuple);
+
+        // increment pointer
+        p += 1;
+    }
+
+    close(fd);
+    free(xmap);
+    return py_retlist;
+
+error:
+    if (fd != -1)
+        close(fd);
+    Py_XDECREF(pytuple);
+    Py_DECREF(py_retlist);
+    if (xmap != NULL)
+        free(xmap);
+    return NULL;
+}
+
+
+/*
+ * Return a list of tuples for network I/O statistics.
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t    *kc = NULL;
+    kstat_t *ksp;
+    kstat_named_t *rbytes, *wbytes, *rpkts, *wpkts, *ierrs, *oerrs;
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+    kc = kstat_open();
+    if (kc == NULL)
+        goto error;
+
+    ksp = kc->kc_chain;
+    while (ksp != NULL) {
+        if (ksp->ks_type != KSTAT_TYPE_NAMED)
+            goto next;
+        if (strcmp(ksp->ks_class, "net") != 0)
+            goto next;
+        /*
+        // XXX "lo" (localhost) interface makes kstat_data_lookup() fail
+        // (maybe because "ifconfig -a" says it's a virtual interface?).
+        if ((strcmp(ksp->ks_module, "link") != 0) &&
+            (strcmp(ksp->ks_module, "lo") != 0)) {
+            goto skip;
+        */
+        if ((strcmp(ksp->ks_module, "link") != 0)) {
+            goto next;
+        }
+
+        if (kstat_read(kc, ksp, NULL) == -1) {
+            errno = 0;
+            goto next;  // advance to ks_next rather than re-reading the same entry
+        }
+
+        rbytes = (kstat_named_t *)kstat_data_lookup(ksp, "rbytes");
+        wbytes = (kstat_named_t *)kstat_data_lookup(ksp, "obytes");
+        rpkts = (kstat_named_t *)kstat_data_lookup(ksp, "ipackets");
+        wpkts = (kstat_named_t *)kstat_data_lookup(ksp, "opackets");
+        ierrs = (kstat_named_t *)kstat_data_lookup(ksp, "ierrors");
+        oerrs = (kstat_named_t *)kstat_data_lookup(ksp, "oerrors");
+
+        if ((rbytes == NULL) || (wbytes == NULL) || (rpkts == NULL) ||
+                (wpkts == NULL) || (ierrs == NULL) || (oerrs == NULL))
+        {
+            PyErr_SetString(PyExc_RuntimeError, "kstat_data_lookup() failed");
+            goto error;
+        }
+
+#if defined(_INT64_TYPE)
+        py_ifc_info = Py_BuildValue("(KKKKkkii)",
+                                    rbytes->value.ui64,
+                                    wbytes->value.ui64,
+                                    rpkts->value.ui64,
+                                    wpkts->value.ui64,
+                                    ierrs->value.ui32,
+                                    oerrs->value.ui32,
+#else
+        py_ifc_info = Py_BuildValue("(kkkkkkii)",
+                                    rbytes->value.ui32,
+                                    wbytes->value.ui32,
+                                    rpkts->value.ui32,
+                                    wpkts->value.ui32,
+                                    ierrs->value.ui32,
+                                    oerrs->value.ui32,
+#endif
+                                    0,  // dropin not supported
+                                    0   // dropout not supported
+                                   );
+        if (!py_ifc_info)
+            goto error;
+        if (PyDict_SetItemString(py_retdict, ksp->ks_name, py_ifc_info))
+            goto error;
+        Py_DECREF(py_ifc_info);
+        goto next;
+
+next:
+        ksp = ksp->ks_next;
+    }
+
+    kstat_close(kc);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+#ifndef EXPER_IP_AND_ALL_IRES
+#define EXPER_IP_AND_ALL_IRES   (1024+4)
+#endif
+
+// a signaler for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
+
+/*
+ * Return TCP and UDP connections opened by process.
+ * UNIX sockets are excluded.
+ *
+ * Thanks to:
+ * https://github.com/DavidGriffith/finx/blob/master/
+ *     nxsensor-3.5.0-1/src/sysdeps/solaris.c
+ * ...and:
+ * https://hg.java.net/hg/solaris~on-src/file/tip/usr/src/cmd/
+ *     cmd-inet/usr.bin/netstat/netstat.c
+ */
+static PyObject *
+psutil_net_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int sd = -1;
+    mib2_tcpConnEntry_t *tp = NULL;
+    mib2_udpEntry_t     *ude;
+#if defined(AF_INET6)
+    mib2_tcp6ConnEntry_t *tp6;
+    mib2_udp6Entry_t     *ude6;
+#endif
+    char buf[512];
+    int i, flags, getcode, num_ent, state;
+    char lip[200], rip[200];
+    int lport, rport;
+    int processed_pid;
+    struct strbuf ctlbuf, databuf;
+    struct T_optmgmt_req *tor = (struct T_optmgmt_req *)buf;
+    struct T_optmgmt_ack *toa = (struct T_optmgmt_ack *)buf;
+    struct T_error_ack   *tea = (struct T_error_ack *)buf;
+    struct opthdr        *mibhdr;
+
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+    PyObject *py_laddr = NULL;
+    PyObject *py_raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter))
+        goto error;
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    sd = open("/dev/arp", O_RDWR);
+    if (sd == -1) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/dev/arp");
+        goto error;
+    }
+
+    /*
+    XXX - These 2 are used in ifconfig.c but they seem unnecessary
+    ret = ioctl(sd, I_PUSH, "tcp");
+    if (ret == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    ret = ioctl(sd, I_PUSH, "udp");
+    if (ret == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    */
+
+    // OK, this mess is basically copied and pasted from nxsensor project
+    // which copied and pasted it from netstat source code, mibget()
+    // function.  Also see:
+    // http://stackoverflow.com/questions/8723598/
+    tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
+    tor->OPT_offset = sizeof (struct T_optmgmt_req);
+    tor->OPT_length = sizeof (struct opthdr);
+    tor->MGMT_flags = T_CURRENT;
+    mibhdr = (struct opthdr *)&tor[1];
+    mibhdr->level = EXPER_IP_AND_ALL_IRES;
+    mibhdr->name  = 0;
+    mibhdr->len   = 0;
+
+    ctlbuf.buf = buf;
+    ctlbuf.len = tor->OPT_offset + tor->OPT_length;
+    flags = 0;  // request to be sent in non-priority
+
+    if (putmsg(sd, &ctlbuf, (struct strbuf *)0, flags) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    mibhdr = (struct opthdr *)&toa[1];
+    ctlbuf.maxlen = sizeof (buf);
+
+    for (;;) {
+        flags = 0;
+        getcode = getmsg(sd, &ctlbuf, (struct strbuf *)0, &flags);
+
+        if (getcode != MOREDATA ||
+                ctlbuf.len < sizeof (struct T_optmgmt_ack) ||
+                toa->PRIM_type != T_OPTMGMT_ACK ||
+                toa->MGMT_flags != T_SUCCESS)
+        {
+            break;
+        }
+        if (ctlbuf.len >= sizeof (struct T_error_ack) &&
+                tea->PRIM_type == T_ERROR_ACK)
+        {
+            PyErr_SetString(PyExc_RuntimeError, "ERROR_ACK");
+            goto error;
+        }
+        if (getcode == 0 &&
+                ctlbuf.len >= sizeof (struct T_optmgmt_ack) &&
+                toa->PRIM_type == T_OPTMGMT_ACK &&
+                toa->MGMT_flags == T_SUCCESS)
+        {
+            PyErr_SetString(PyExc_RuntimeError, "ERROR_T_OPTMGMT_ACK");
+            goto error;
+        }
+
+        databuf.maxlen = mibhdr->len;
+        databuf.len = 0;
+        databuf.buf = (char *)malloc((int)mibhdr->len);
+        if (!databuf.buf) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        flags = 0;
+        getcode = getmsg(sd, (struct strbuf *)0, &databuf, &flags);
+        if (getcode < 0) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+
+        // TCPv4
+        if (mibhdr->level == MIB2_TCP && mibhdr->name == MIB2_TCP_13) {
+            tp = (mib2_tcpConnEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_tcpConnEntry_t);
+            for (i = 0; i < num_ent; i++, tp++) {
+                processed_pid = tp->tcpConnCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // construct local/remote addresses
+                inet_ntop(AF_INET, &tp->tcpConnLocalAddress, lip, sizeof(lip));
+                inet_ntop(AF_INET, &tp->tcpConnRemAddress, rip, sizeof(rip));
+                lport = tp->tcpConnLocalPort;
+                rport = tp->tcpConnRemPort;
+
+                // construct Python tuple/list
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                if (rport != 0) {
+                    py_raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    py_raddr = Py_BuildValue("()");
+                }
+                if (!py_raddr)
+                    goto error;
+                state = tp->tcpConnEntryInfo.ce_state;
+
+                // add item
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET, SOCK_STREAM,
+                                         py_laddr, py_raddr, state,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#if defined(AF_INET6)
+        // TCPv6
+        else if (mibhdr->level == MIB2_TCP6 && mibhdr->name == MIB2_TCP6_CONN)
+        {
+            tp6 = (mib2_tcp6ConnEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_tcp6ConnEntry_t);
+
+            for (i = 0; i < num_ent; i++, tp6++) {
+                processed_pid = tp6->tcp6ConnCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // construct local/remote addresses
+                inet_ntop(AF_INET6, &tp6->tcp6ConnLocalAddress, lip, sizeof(lip));
+                inet_ntop(AF_INET6, &tp6->tcp6ConnRemAddress, rip, sizeof(rip));
+                lport = tp6->tcp6ConnLocalPort;
+                rport = tp6->tcp6ConnRemPort;
+
+                // construct Python tuple/list
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                if (rport != 0) {
+                    py_raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    py_raddr = Py_BuildValue("()");
+                }
+                if (!py_raddr)
+                    goto error;
+                state = tp6->tcp6ConnEntryInfo.ce_state;
+
+                // add item
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET6, SOCK_STREAM,
+                                         py_laddr, py_raddr, state, processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#endif
+        // UDPv4
+        else if (mibhdr->level == MIB2_UDP || mibhdr->level == MIB2_UDP_ENTRY) {
+            ude = (mib2_udpEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_udpEntry_t);
+            for (i = 0; i < num_ent; i++, ude++) {
+                processed_pid = ude->udpCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // XXX Very ugly hack! It seems we get here only the first
+                // time we bump into a UDPv4 socket.  PID is a very high
+                // number (clearly impossible) and the address does not
+                // belong to any valid interface.  Not sure what else
+                // to do other than skipping.
+                if (processed_pid > 131072)
+                    continue;
+                inet_ntop(AF_INET, &ude->udpLocalAddress, lip, sizeof(lip));
+                lport = ude->udpLocalPort;
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                py_raddr = Py_BuildValue("()");
+                if (!py_raddr)
+                    goto error;
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET, SOCK_DGRAM,
+                                         py_laddr, py_raddr, PSUTIL_CONN_NONE,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#if defined(AF_INET6)
+        // UDPv6
+        else if (mibhdr->level == MIB2_UDP6 || mibhdr->level == MIB2_UDP6_ENTRY) {
+            ude6 = (mib2_udp6Entry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_udp6Entry_t);
+            for (i = 0; i < num_ent; i++, ude6++) {
+                processed_pid = ude6->udp6CreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                inet_ntop(AF_INET6, &ude6->udp6LocalAddress, lip, sizeof(lip));
+                lport = ude6->udp6LocalPort;
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                py_raddr = Py_BuildValue("()");
+                if (!py_raddr)
+                    goto error;
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET6, SOCK_DGRAM,
+                                         py_laddr, py_raddr, PSUTIL_CONN_NONE,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#endif
+        free(databuf.buf);
+    }
+
+    close(sd);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_XDECREF(py_laddr);
+    Py_XDECREF(py_raddr);
+    Py_DECREF(py_retlist);
+    // TODO : free databuf
+    if (sd != -1)
+        close(sd);
+    return NULL;
+}
+
+
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    float boot_time = 0.0;
+    struct utmpx *ut;
+
+    while (NULL != (ut = getutxent())) {
+        if (ut->ut_type == BOOT_TIME) {
+            boot_time = (float)ut->ut_tv.tv_sec;
+            break;
+        }
+    }
+    endutent();
+    if (boot_time != 0.0) {
+        return Py_BuildValue("f", boot_time);
+    }
+    else {
+        PyErr_SetString(PyExc_RuntimeError, "can't determine boot time");
+        return NULL;
+    }
+}
+
+
+/*
+ * Return the number of physical CPU cores on the system.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    int ncpus = 0;
+
+    kc = kstat_open();
+    if (kc == NULL)
+        goto error;
+    ksp = kstat_lookup(kc, "cpu_info", -1, NULL);
+    if (ksp == NULL)
+        goto error;
+
+    for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
+        if (strcmp(ksp->ks_module, "cpu_info") != 0)
+            continue;
+        if (kstat_read(kc, ksp, NULL) == -1)
+            goto error;
+        ncpus += 1;
+    }
+
+    kstat_close(kc);
+    if (ncpus > 0)
+        return Py_BuildValue("i", ncpus);
+    else
+        goto error;
+
+error:
+    // mimic os.cpu_count()
+    if (kc != NULL)
+        kstat_close(kc);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- process-related functions
+    {"proc_basic_info", psutil_proc_basic_info, METH_VARARGS,
+     "Return process ppid, rss, vms, ctime, nice, nthreads, status and tty"},
+    {"proc_name_and_args", psutil_proc_name_and_args, METH_VARARGS,
+     "Return process name and args."},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return process user and system CPU times."},
+    {"proc_cred", psutil_proc_cred, METH_VARARGS,
+     "Return process uids/gids."},
+    {"query_process_thread", psutil_proc_query_thread, METH_VARARGS,
+     "Return info about a process thread"},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return process memory mappings"},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+
+    // --- system-related functions
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return information about system swap memory."},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return disk partitions."},
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-CPU times."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for disk I/O statistics."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for network I/O statistics."},
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return system boot time in seconds since the EPOCH."},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return the number of physical CPUs on the system."},
+    {"net_connections", psutil_net_connections, METH_VARARGS,
+     "Return TCP and UDP system-wide open connections."},
+
+    {NULL, NULL, 0, NULL}
+};
+
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_sunos_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_sunos_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_sunos",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_sunos_traverse,
+    psutil_sunos_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_sunos(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_sunos(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_sunos", PsutilMethods);
+#endif
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SONPROC", SONPROC);
+    PyModule_AddIntConstant(module, "SWAIT", SWAIT);
+
+    PyModule_AddIntConstant(module, "PRNODEV", PRNODEV);  // for process tty
+
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RCVD", TCPS_SYN_RCVD);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    // sunos specific
+    PyModule_AddIntConstant(module, "TCPS_IDLE", TCPS_IDLE);
+    // sunos specific
+    PyModule_AddIntConstant(module, "TCPS_BOUND", TCPS_BOUND);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}

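The Solaris code above leans on the same kstat pattern throughout (swap, per-CPU times, disk and network counters): open the kstat chain, walk it, kstat_read() the entries of interest, then close it. A compilable sketch of just that skeleton (not part of the commit; Solaris only, link with -lkstat):

/* kstat_walk.c -- counts "cpu_info" entries by walking the kstat chain */
#include <stdio.h>
#include <string.h>
#include <kstat.h>

int main(void)
{
    kstat_ctl_t *kc = kstat_open();      // snapshot of the kernel stats chain
    kstat_t *ksp;
    int ncpus = 0;

    if (kc == NULL) {
        perror("kstat_open");
        return 1;
    }
    for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
        if (strcmp(ksp->ks_module, "cpu_info") != 0)
            continue;                    // not a per-CPU info entry
        if (kstat_read(kc, ksp, NULL) == -1)
            continue;                    // skip entries we cannot read
        ncpus += 1;
    }
    printf("cpu_info kstat entries: %d\n", ncpus);
    kstat_close(kc);
    return 0;
}
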
+ 27 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h

@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// processes
+static PyObject* psutil_proc_basic_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cred(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name_and_args(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_query_thread(PyObject* self, PyObject* args);
+
+// system
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_net_connections(PyObject* self, PyObject* args);

+ 3241 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c

@@ -0,0 +1,3241 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Windows platform-specific module methods for _psutil_windows
+ */
+
+// Fixes clash between winsock2.h and windows.h
+#define WIN32_LEAN_AND_MEAN
+
+#include <Python.h>
+#include <windows.h>
+#include <Psapi.h>
+#include <time.h>
+#include <lm.h>
+#include <WinIoCtl.h>
+#include <tchar.h>
+#include <tlhelp32.h>
+#include <winsock2.h>
+#include <iphlpapi.h>
+#include <wtsapi32.h>
+
+// Link with Iphlpapi.lib
+#pragma comment(lib, "IPHLPAPI.lib")
+
+#include "_psutil_windows.h"
+#include "_psutil_common.h"
+#include "arch/windows/security.h"
+#include "arch/windows/process_info.h"
+#include "arch/windows/process_handles.h"
+#include "arch/windows/ntextapi.h"
+
+#ifdef __MINGW32__
+#include "arch/windows/glpi.h"
+#endif
+
+/*
+ * Return a Python float representing the system uptime expressed in seconds
+ * since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    double  uptime;
+    time_t pt;
+    FILETIME fileTime;
+    long long ll;
+
+    GetSystemTimeAsFileTime(&fileTime);
+
+    /*
+    HUGE thanks to:
+    http://johnstewien.spaces.live.com/blog/cns!E6885DB5CEBABBC8!831.entry
+
+    This function converts the FILETIME structure to the 32 bit
+    Unix time structure.
+    The time_t is a 32-bit value for the number of seconds since
+    January 1, 1970. A FILETIME is a 64-bit value for the number of
+    100-nanosecond periods since January 1, 1601. Convert by
+    subtracting the number of 100-nanosecond periods between 01-01-1601
+    and 01-01-1970 from the FILETIME, then dividing by 1e+7 to get to the same
+    base granularity.
+    */
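+    // worked example: 1601-01-01 to 1970-01-01 is 11,644,473,600 seconds;
+    // at 10,000,000 100-ns ticks per second that is the
+    // 116444736000000000 constant subtracted below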
+    ll = (((LONGLONG)(fileTime.dwHighDateTime)) << 32) \
+        + fileTime.dwLowDateTime;
+    pt = (time_t)((ll - 116444736000000000ull) / 10000000ull);
+
+    // XXX - By using GetTickCount() time will wrap around to zero if the
+    // system is run continuously for 49.7 days.
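+    // (GetTickCount64(), available from Windows Vista on, does not wrap)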
+    uptime = GetTickCount() / 1000.00f;
+    return Py_BuildValue("d", (double)pt - uptime);
+}
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+static PyObject *
+psutil_pid_exists(PyObject *self, PyObject *args)
+{
+    long pid;
+    int status;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    status = psutil_pid_is_running(pid);
+    if (-1 == status) {
+        return NULL; // exception raised in psutil_pid_is_running()
+    }
+    return PyBool_FromLong(status);
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    DWORD *proclist = NULL;
+    DWORD numberOfReturnedPIDs;
+    DWORD i;
+    PyObject *pid = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    proclist = psutil_get_pids(&numberOfReturnedPIDs);
+    if (NULL == proclist) {
+        goto error;
+    }
+
+    for (i = 0; i < numberOfReturnedPIDs; i++) {
+        pid = Py_BuildValue("I", proclist[i]);
+        if (!pid)
+            goto error;
+        if (PyList_Append(retlist, pid))
+            goto error;
+        Py_DECREF(pid);
+    }
+
+    // free C array allocated for PIDs
+    free(proclist);
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (proclist != NULL)
+        free(proclist);
+    return NULL;
+}
+
+
+/*
+ * Kill a process given its PID.
+ */
+static PyObject *
+psutil_proc_kill(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    long pid;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (pid == 0) {
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // see http://code.google.com/p/psutil/issues/detail?id=24
+            NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+
+    // kill the process
+    if (! TerminateProcess(hProcess, 0)) {
+        PyErr_SetFromWindowsErr(0);
+        CloseHandle(hProcess);
+        return NULL;
+    }
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Wait for process to terminate and return its exit code.
+ */
+static PyObject *
+psutil_proc_wait(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    DWORD ExitCode;
+    DWORD retVal;
+    long pid;
+    long timeout;
+
+    if (! PyArg_ParseTuple(args, "ll", &pid, &timeout)) {
+        return NULL;
+    }
+    if (pid == 0) {
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
+                           FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // no such process; we do not want to raise NSP but
+            // return None instead.
+            Py_INCREF(Py_None);
+            return Py_None;
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    // wait until the process has terminated
+    Py_BEGIN_ALLOW_THREADS
+    retVal = WaitForSingleObject(hProcess, timeout);
+    Py_END_ALLOW_THREADS
+
+    if (retVal == WAIT_FAILED) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(GetLastError());
+    }
+    if (retVal == WAIT_TIMEOUT) {
+        CloseHandle(hProcess);
+        return Py_BuildValue("l", WAIT_TIMEOUT);
+    }
+
+    // get the exit code; note: subprocess module (erroneously?) uses
+    // what is returned by WaitForSingleObject
+    if (GetExitCodeProcess(hProcess, &ExitCode) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(GetLastError());
+    }
+    CloseHandle(hProcess);
+#if PY_MAJOR_VERSION >= 3
+    return PyLong_FromLong((long) ExitCode);
+#else
+    return PyInt_FromLong((long) ExitCode);
+#endif
+}
+
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long        pid;
+    HANDLE      hProcess;
+    FILETIME    ftCreate, ftExit, ftKernel, ftUser;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (! GetProcessTimes(hProcess, &ftCreate, &ftExit, &ftKernel, &ftUser)) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            // usually means the process has died so we throw a NoSuchProcess
+            // here
+            return NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    CloseHandle(hProcess);
+
+    /*
+     * User and kernel times are represented as a FILETIME structure
+     * which contains a 64-bit value representing the number of
+     * 100-nanosecond intervals since January 1, 1601 (UTC):
+     * http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx
+     * To convert it into a float representing the seconds that the
+     * process has executed in user/kernel mode I borrowed the code
+     * below from Python's Modules/posixmodule.c
+     */
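+    // (429.4967296 == 2**32 * 1e-7: each unit of the high DWORD is
+    // 2**32 hundred-nanosecond ticks, i.e. 429.4967296 seconds)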
+    return Py_BuildValue(
+       "(dd)",
+       (double)(ftUser.dwHighDateTime * 429.4967296 + \
+                ftUser.dwLowDateTime * 1e-7),
+       (double)(ftKernel.dwHighDateTime * 429.4967296 + \
+                ftKernel.dwLowDateTime * 1e-7)
+   );
+}
+
+
+/*
+ * Alternative implementation of the one above which bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_cpu_times_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    double user, kernel;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    user = (double)process->UserTime.HighPart * 429.4967296 + \
+           (double)process->UserTime.LowPart * 1e-7;
+    kernel = (double)process->KernelTime.HighPart * 429.4967296 + \
+             (double)process->KernelTime.LowPart * 1e-7;
+    free(buffer);
+    return Py_BuildValue("(dd)", user, kernel);
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long        pid;
+    long long   unix_time;
+    DWORD       exitCode;
+    HANDLE      hProcess;
+    BOOL        ret;
+    FILETIME    ftCreate, ftExit, ftKernel, ftUser;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // special case for PIDs 0 and 4, return system boot time
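+    // (PID 0 is the System Idle Process, PID 4 the System process; both
+    // exist since boot)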
+    if (0 == pid || 4 == pid) {
+        return psutil_boot_time(NULL, NULL);
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (! GetProcessTimes(hProcess, &ftCreate, &ftExit, &ftKernel, &ftUser)) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            // usually means the process has died so we throw a
+            // NoSuchProcess here
+            return NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    // Make sure the process is not gone as OpenProcess alone seems to be
+    // unreliable in doing so (it seems a previous call to p.wait() makes
+    // it unreliable).
+    // This check is important as creation time is used to make sure the
+    // process is still running.
+    ret = GetExitCodeProcess(hProcess, &exitCode);
+    CloseHandle(hProcess);
+    if (ret != 0) {
+        if (exitCode != STILL_ACTIVE) {
+            return NoSuchProcess();
+        }
+    }
+    else {
+        // Ignore access denied as it means the process is still alive.
+        // For all other errors, we want an exception.
+        if (GetLastError() != ERROR_ACCESS_DENIED) {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    /*
+    Convert the FILETIME structure to a Unix time.
+    It's the best I could find by googling and borrowing code here and there.
+    The time returned has a precision of 1 second.
+    */
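+    // 116444736000000000 is the number of 100-ns intervals between the
+    // FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01);
+    // dividing by 10000000 converts 100-ns units to seconds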
+    unix_time = ((LONGLONG)ftCreate.dwHighDateTime) << 32;
+    unix_time += ftCreate.dwLowDateTime - 116444736000000000LL;
+    unix_time /= 10000000;
+    return Py_BuildValue("d", (double)unix_time);
+}
+
+
+/*
+ * Alternative implementation of the one above which bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_create_time_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    long long   unix_time;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    // special case for PIDs 0 and 4, return system boot time
+    if (0 == pid || 4 == pid) {
+        return psutil_boot_time(NULL, NULL);
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    /*
+    Convert the LARGE_INTEGER union to a Unix time.
+    It's the best I could find by googling and borrowing code here and there.
+    The time returned has a precision of 1 second.
+    */
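+    // same conversion as above: subtract the 1601 -> 1970 epoch delta
+    // (116444736000000000 100-ns units), then divide down to seconds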
+    unix_time = ((LONGLONG)process->CreateTime.HighPart) << 32;
+    unix_time += process->CreateTime.LowPart - 116444736000000000LL;
+    unix_time /= 10000000;
+    free(buffer);
+    return Py_BuildValue("d", (double)unix_time);
+}
+
+
+/*
+ * Return the number of logical CPUs.
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    SYSTEM_INFO system_info;
+    system_info.dwNumberOfProcessors = 0;
+
+    GetSystemInfo(&system_info);
+    if (system_info.dwNumberOfProcessors == 0) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("I", system_info.dwNumberOfProcessors);
+    }
+}
+
+
+typedef BOOL (WINAPI *LPFN_GLPI) (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION,
+                                  PDWORD);
+
+/*
+ * Return the number of physical CPU cores.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    LPFN_GLPI glpi;
+    DWORD rc;
+    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
+    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
+    DWORD length = 0;
+    DWORD offset = 0;
+    int ncpus = 0;
+
+    glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")),
+                                     "GetLogicalProcessorInformation");
+    if (glpi == NULL)
+        goto return_none;
+
+    while (1) {
+        rc = glpi(buffer, &length);
+        if (rc == FALSE) {
+            if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+                if (buffer)
+                    free(buffer);
+                buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(
+                    length);
+                if (NULL == buffer) {
+                    PyErr_NoMemory();
+                    return NULL;
+                }
+            }
+            else {
+                goto return_none;
+            }
+        }
+        else {
+            break;
+        }
+    }
+
+    ptr = buffer;
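+    // each RelationProcessorCore entry describes one physical core
+    // (all of its logical CPUs share that single entry)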
+    while (offset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= length) {
+        if (ptr->Relationship == RelationProcessorCore)
+            ncpus += 1;
+        offset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+        ptr++;
+    }
+
+    free(buffer);
+    if (ncpus == 0)
+        goto return_none;
+    else
+        return Py_BuildValue("i", ncpus);
+
+return_none:
+    // mimic os.cpu_count()
+    if (buffer != NULL)
+        free(buffer);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Return process cmdline as a Python list of cmdline arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args) {
+    long pid;
+    int pid_return;
+    PyObject *arglist;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if ((pid == 0) || (pid == 4)) {
+        return Py_BuildValue("[]");
+    }
+
+    pid_return = psutil_pid_is_running(pid);
+    if (pid_return == 0) {
+        return NoSuchProcess();
+    }
+    if (pid_return == -1) {
+        return NULL;
+    }
+
+    // XXX the assumption below probably needs to go away
+
+    // May fail any of several ReadProcessMemory calls etc. and
+    // not indicate a real problem so we ignore any errors and
+    // just live without commandline.
+    arglist = psutil_get_arg_list(pid);
+    if ( NULL == arglist ) {
+        // carry on anyway, clear any exceptions too
+        PyErr_Clear();
+        return Py_BuildValue("[]");
+    }
+
+    return arglist;
+}
+
+
+/*
+ * Return process executable path.
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args) {
+    long pid;
+    HANDLE hProcess;
+    wchar_t exe[MAX_PATH];
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid_waccess(pid, PROCESS_QUERY_INFORMATION);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (GetProcessImageFileNameW(hProcess, exe, MAX_PATH) == 0) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // see https://code.google.com/p/psutil/issues/detail?id=414
+            AccessDenied();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("u", exe);
+}
+
+
+/*
+ * Return process memory information as a Python tuple.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    DWORD pid;
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    PROCESS_MEMORY_COUNTERS_EX cnt;
+#else
+    PROCESS_MEMORY_COUNTERS cnt;
+#endif
+    SIZE_T private = 0;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+
+    if (! GetProcessMemoryInfo(hProcess, &cnt, sizeof(cnt)) ) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    private = cnt.PrivateUsage;
+#endif
+
+    CloseHandle(hProcess);
+
+    // PROCESS_MEMORY_COUNTERS values are defined as SIZE_T which on 64bits
+    // is an (unsigned long long) and on 32bits is an (unsigned int).
+    // "_WIN64" is defined if we're running a 64bit Python interpreter not
+    // exclusively if the *system* is 64bit.
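+    // Py_BuildValue codes: "k" == unsigned long, "K" == unsigned long long,
+    // "I" == unsigned int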
+#if defined(_WIN64)
+    return Py_BuildValue(
+        "(kKKKKKKKKK)",
+        cnt.PageFaultCount,  // unsigned long
+        (unsigned long long)cnt.PeakWorkingSetSize,
+        (unsigned long long)cnt.WorkingSetSize,
+        (unsigned long long)cnt.QuotaPeakPagedPoolUsage,
+        (unsigned long long)cnt.QuotaPagedPoolUsage,
+        (unsigned long long)cnt.QuotaPeakNonPagedPoolUsage,
+        (unsigned long long)cnt.QuotaNonPagedPoolUsage,
+        (unsigned long long)cnt.PagefileUsage,
+        (unsigned long long)cnt.PeakPagefileUsage,
+        (unsigned long long)private);
+#else
+    return Py_BuildValue(
+        "(kIIIIIIIII)",
+        cnt.PageFaultCount,    // unsigned long
+        (unsigned int)cnt.PeakWorkingSetSize,
+        (unsigned int)cnt.WorkingSetSize,
+        (unsigned int)cnt.QuotaPeakPagedPoolUsage,
+        (unsigned int)cnt.QuotaPagedPoolUsage,
+        (unsigned int)cnt.QuotaPeakNonPagedPoolUsage,
+        (unsigned int)cnt.QuotaNonPagedPoolUsage,
+        (unsigned int)cnt.PagefileUsage,
+        (unsigned int)cnt.PeakPagefileUsage,
+        (unsigned int)private);
+#endif
+}
+
+
+/*
+ * Alternative implementation of the one above which bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_memory_info_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    SIZE_T private;
+    unsigned long pfault_count;
+
+#if defined(_WIN64)
+    unsigned long long m1, m2, m3, m4, m5, m6, m7, m8;
+#else
+    unsigned int m1, m2, m3, m4, m5, m6, m7, m8;
+#endif
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    private = process->PrivatePageCount;
+#else
+    private = 0;
+#endif
+    pfault_count = process->PageFaultCount;
+
+    m1 = process->PeakWorkingSetSize;
+    m2 = process->WorkingSetSize;
+    m3 = process->QuotaPeakPagedPoolUsage;
+    m4 = process->QuotaPagedPoolUsage;
+    m5 = process->QuotaPeakNonPagedPoolUsage;
+    m6 = process->QuotaNonPagedPoolUsage;
+    m7 = process->PagefileUsage;
+    m8 = process->PeakPagefileUsage;
+
+    free(buffer);
+
+    // SYSTEM_PROCESS_INFORMATION values are defined as SIZE_T which on 64
+    // bits is an (unsigned long long) and on 32bits is an (unsigned int).
+    // "_WIN64" is defined if we're running a 64bit Python interpreter not
+    // exclusively if the *system* is 64bit.
+#if defined(_WIN64)
+    return Py_BuildValue("(kKKKKKKKKK)",
+#else
+    return Py_BuildValue("(kIIIIIIIII)",
+#endif
+        pfault_count, m1, m2, m3, m4, m5, m6, m7, m8, private);
+}
+
+
+/*
+ * Return system memory figures as a Python tuple:
+ * (total phys, avail phys, total pagefile, avail pagefile,
+ *  total virtual, avail virtual), all expressed in bytes.
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+    MEMORYSTATUSEX memInfo;
+    memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+
+    if (! GlobalMemoryStatusEx(&memInfo) ) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    return Py_BuildValue("(LLLLLL)",
+                         memInfo.ullTotalPhys,      // total
+                         memInfo.ullAvailPhys,      // avail
+                         memInfo.ullTotalPageFile,  // total page file
+                         memInfo.ullAvailPageFile,  // avail page file
+                         memInfo.ullTotalVirtual,   // total virtual
+                         memInfo.ullAvailVirtual);  // avail virtual
+}
+
+
+#define LO_T ((float)1e-7)
+#define HI_T (LO_T*4294967296.0)
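+// LO_T converts a single 100-ns FILETIME tick to seconds; HI_T is the
+// value of one unit of the high DWORD (2**32 ticks) expressed in seconds.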
+
+
+/*
+ * Retrieves system CPU timing information as a (user, system, idle)
+ * tuple. On a multiprocessor system, the values returned are the
+ * sum of the designated times across all processors.
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    float idle, kernel, user, system;
+    FILETIME idle_time, kernel_time, user_time;
+
+    if (!GetSystemTimes(&idle_time, &kernel_time, &user_time)) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    idle = (float)((HI_T * idle_time.dwHighDateTime) + \
+                   (LO_T * idle_time.dwLowDateTime));
+    user = (float)((HI_T * user_time.dwHighDateTime) + \
+                   (LO_T * user_time.dwLowDateTime));
+    kernel = (float)((HI_T * kernel_time.dwHighDateTime) + \
+                     (LO_T * kernel_time.dwLowDateTime));
+
+    // Kernel time includes idle time.
+    // We return only busy kernel time by subtracting idle time
+    // from kernel time.
+    system = (kernel - idle);
+    return Py_BuildValue("(fff)", user, system, idle);
+}
+
+
+/*
+ * Same as above but for all system CPUs.
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    float idle, kernel, user;
+    typedef DWORD (_stdcall * NTQSI_PROC) (int, PVOID, ULONG, PULONG);
+    NTQSI_PROC NtQuerySystemInformation;
+    HINSTANCE hNtDll;
+    SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION *sppi = NULL;
+    SYSTEM_INFO si;
+    UINT i;
+    PyObject *arg = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL)
+        return NULL;
+
+    // dynamic linking is mandatory to use NtQuerySystemInformation
+    hNtDll = LoadLibrary(TEXT("ntdll.dll"));
+    if (hNtDll != NULL) {
+        // gets NtQuerySystemInformation address
+        NtQuerySystemInformation = (NTQSI_PROC)GetProcAddress(
+                                       hNtDll, "NtQuerySystemInformation");
+
+        if (NtQuerySystemInformation != NULL)
+        {
+            // retrieves the number of processors
+            GetSystemInfo(&si);
+
+            // allocates an array of SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION
+            // structures, one per processor
+            sppi = (SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION *) \
+                   malloc(si.dwNumberOfProcessors * \
+                          sizeof(SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION));
+            if (sppi != NULL)
+            {
+                // gets CPU time information
+                if (0 == NtQuerySystemInformation(
+                            SystemProcessorPerformanceInformation,
+                            sppi,
+                            si.dwNumberOfProcessors * sizeof
+                            (SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION),
+                            NULL)
+                   )
+                {
+                    // computes system global times summing each
+                    // processor value
+                    idle = user = kernel = 0;
+                    for (i = 0; i < si.dwNumberOfProcessors; i++) {
+                        arg = NULL;
+                        user = (float)((HI_T * sppi[i].UserTime.HighPart) +
+                                       (LO_T * sppi[i].UserTime.LowPart));
+                        idle = (float)((HI_T * sppi[i].IdleTime.HighPart) +
+                                       (LO_T * sppi[i].IdleTime.LowPart));
+                        kernel = (float)((HI_T * sppi[i].KernelTime.HighPart) +
+                                         (LO_T * sppi[i].KernelTime.LowPart));
+                        // kernel time includes idle time on Windows;
+                        // we return only busy kernel time by subtracting
+                        // idle time from kernel time
+                        arg = Py_BuildValue("(ddd)",
+                                            user,
+                                            kernel - idle,
+                                            idle);
+                        if (!arg)
+                            goto error;
+                        if (PyList_Append(retlist, arg))
+                            goto error;
+                        Py_DECREF(arg);
+                    }
+                    free(sppi);
+                    FreeLibrary(hNtDll);
+                    return retlist;
+
+                }  // END NtQuerySystemInformation
+            }  // END malloc SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION
+        }  // END GetProcAddress
+    }  // END LoadLibrary
+    goto error;
+
+error:
+    Py_XDECREF(arg);
+    Py_DECREF(retlist);
+    if (sppi) {
+        free(sppi);
+    }
+    if (hNtDll) {
+        FreeLibrary(hNtDll);
+    }
+    PyErr_SetFromWindowsErr(0);
+    return NULL;
+}
+
+
+/*
+ * Return process current working directory as a Python string.
+ */
+
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE processHandle = NULL;
+    PVOID pebAddress;
+    PVOID rtlUserProcParamsAddress;
+    UNICODE_STRING currentDirectory;
+    WCHAR *currentDirectoryContent = NULL;
+    PyObject *returnPyObj = NULL;
+    PyObject *cwd_from_wchar = NULL;
+    PyObject *cwd = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid(pid);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    pebAddress = psutil_get_peb_address(processHandle);
+
+    // get the address of ProcessParameters
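+    // (offset 32 == 0x20 into the PEB for 64-bit processes, 0x10 for 32-bit)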
+#ifdef _WIN64
+    if (!ReadProcessMemory(processHandle, (PCHAR)pebAddress + 32,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#else
+    if (!ReadProcessMemory(processHandle, (PCHAR)pebAddress + 0x10,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#endif
+    {
+        CloseHandle(processHandle);
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            return AccessDenied();
+        }
+        else {
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    // Read the currentDirectory UNICODE_STRING structure.
+    // 0x24 refers to "CurrentDirectoryPath" of RTL_USER_PROCESS_PARAMETERS
+    // structure, see:
+    // http://wj32.wordpress.com/2009/01/24/
+    //     howto-get-the-command-line-of-processes/
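+    // (56 == 0x38 is the corresponding offset in the 64-bit layout)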
+#ifdef _WIN64
+    if (!ReadProcessMemory(processHandle, (PCHAR)rtlUserProcParamsAddress + 56,
+                           &currentDirectory, sizeof(currentDirectory), NULL))
+#else
+    if (!ReadProcessMemory(processHandle,
+                           (PCHAR)rtlUserProcParamsAddress + 0x24,
+                           &currentDirectory, sizeof(currentDirectory), NULL))
+#endif
+    {
+        CloseHandle(processHandle);
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            return AccessDenied();
+        }
+        else {
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    // allocate memory to hold cwd
+    currentDirectoryContent =
+        (WCHAR *)malloc(currentDirectory.Length + sizeof(WCHAR));
+    if (currentDirectoryContent == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    // read cwd
+    if (!ReadProcessMemory(processHandle, currentDirectory.Buffer,
+                           currentDirectoryContent, currentDirectory.Length,
+                           NULL))
+    {
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            AccessDenied();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        goto error;
+    }
+
+    // null-terminate the string to prevent wcslen from returning an
+    // incorrect length; the index is in characters, but
+    // currentDirectory.Length is in bytes
+    currentDirectoryContent[(currentDirectory.Length / sizeof(WCHAR))] = '\0';
+
+    // convert wchar array to a Python unicode string, and then to UTF8
+    cwd_from_wchar = PyUnicode_FromWideChar(currentDirectoryContent,
+                                            wcslen(currentDirectoryContent));
+    if (cwd_from_wchar == NULL)
+        goto error;
+
+#if PY_MAJOR_VERSION >= 3
+    cwd = PyUnicode_FromObject(cwd_from_wchar);
+#else
+    cwd = PyUnicode_AsUTF8String(cwd_from_wchar);
+#endif
+    if (cwd == NULL)
+        goto error;
+
+    // decrement the reference count on our temp unicode str to avoid
+    // mem leak
+    returnPyObj = Py_BuildValue("N", cwd);
+    if (!returnPyObj)
+        goto error;
+
+    Py_DECREF(cwd_from_wchar);
+
+    CloseHandle(processHandle);
+    free(currentDirectoryContent);
+    return returnPyObj;
+
+error:
+    Py_XDECREF(cwd_from_wchar);
+    Py_XDECREF(cwd);
+    Py_XDECREF(returnPyObj);
+    if (currentDirectoryContent != NULL)
+        free(currentDirectoryContent);
+    if (processHandle != NULL)
+        CloseHandle(processHandle);
+    return NULL;
+}
+
+
+/*
+ * Suspend or resume a process.
+ */
+int
+psutil_proc_suspend_or_resume(DWORD pid, int suspend)
+{
+    // a huge thanks to http://www.codeproject.com/KB/threads/pausep.aspx
+    HANDLE hThreadSnap = NULL;
+    THREADENTRY32  te32 = {0};
+
+    if (pid == 0) {
+        AccessDenied();
+        return FALSE;
+    }
+
+    hThreadSnap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+    if (hThreadSnap == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        return FALSE;
+    }
+
+    // Fill in the size of the structure before using it
+    te32.dwSize = sizeof(THREADENTRY32);
+
+    if (! Thread32First(hThreadSnap, &te32)) {
+        PyErr_SetFromWindowsErr(0);
+        CloseHandle(hThreadSnap);
+        return FALSE;
+    }
+
+    // Walk the thread snapshot to find all threads of the process.
+    // If a thread belongs to the process, suspend or resume it.
+    do
+    {
+        if (te32.th32OwnerProcessID == pid)
+        {
+            HANDLE hThread = OpenThread(THREAD_SUSPEND_RESUME, FALSE,
+                                        te32.th32ThreadID);
+            if (hThread == NULL) {
+                PyErr_SetFromWindowsErr(0);
+                CloseHandle(hThread);
+                CloseHandle(hThreadSnap);
+                return FALSE;
+            }
+            if (suspend == 1)
+            {
+                if (SuspendThread(hThread) == (DWORD) - 1) {
+                    PyErr_SetFromWindowsErr(0);
+                    CloseHandle(hThread);
+                    CloseHandle(hThreadSnap);
+                    return FALSE;
+                }
+            }
+            else
+            {
+                if (ResumeThread(hThread) == (DWORD) - 1) {
+                    PyErr_SetFromWindowsErr(0);
+                    CloseHandle(hThread);
+                    CloseHandle(hThreadSnap);
+                    return FALSE;
+                }
+            }
+            CloseHandle(hThread);
+        }
+    } while (Thread32Next(hThreadSnap, &te32));
+
+    CloseHandle(hThreadSnap);
+    return TRUE;
+}
+
+
+static PyObject *
+psutil_proc_suspend(PyObject *self, PyObject *args)
+{
+    long pid;
+    int suspend = 1;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_suspend_or_resume(pid, suspend)) {
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+static PyObject *
+psutil_proc_resume(PyObject *self, PyObject *args)
+{
+    long pid;
+    int suspend = 0;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_suspend_or_resume(pid, suspend)) {
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    int num;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    num = (int)process->NumberOfThreads;
+    free(buffer);
+    return Py_BuildValue("i", num);
+}
+
+
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    HANDLE hThread = NULL;
+    THREADENTRY32 te32 = {0};
+    long pid;
+    int pid_return;
+    int rc;
+    FILETIME ftDummy, ftKernel, ftUser;
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+    HANDLE hThreadSnap = NULL;
+
+    if (retList == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    if (pid == 0) {
+        // raise AccessDenied instead of returning 0 as procexp is able to
+        // retrieve useful information somehow
+        AccessDenied();
+        goto error;
+    }
+
+    pid_return = psutil_pid_is_running(pid);
+    if (pid_return == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+    if (pid_return == -1) {
+        goto error;
+    }
+
+    hThreadSnap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+    if (hThreadSnap == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Fill in the size of the structure before using it
+    te32.dwSize = sizeof(THREADENTRY32);
+
+    if (! Thread32First(hThreadSnap, &te32)) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Walk the thread snapshot to find all threads of the process.
+    // If a thread belongs to the process, collect its CPU times.
+    do
+    {
+        if (te32.th32OwnerProcessID == pid)
+        {
+            pyTuple = NULL;
+            hThread = NULL;
+            hThread = OpenThread(THREAD_QUERY_INFORMATION,
+                                 FALSE, te32.th32ThreadID);
+            if (hThread == NULL) {
+                // thread has disappeared on us
+                continue;
+            }
+
+            rc = GetThreadTimes(hThread, &ftDummy, &ftDummy, &ftKernel,
+                                &ftUser);
+            if (rc == 0) {
+                PyErr_SetFromWindowsErr(0);
+                goto error;
+            }
+
+            /*
+             * User and kernel times are represented as a FILETIME structure
+             * which contains a 64-bit value representing the number of
+             * 100-nanosecond intervals since January 1, 1601 (UTC):
+             * http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx
+             * To convert it into a float representing the seconds that the
+             * process has executed in user/kernel mode I borrowed the code
+             * below from Python's Modules/posixmodule.c
+             */
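+            // as above, 429.4967296 == 2**32 * 1e-7 (seconds per unit of
+            // the high DWORD)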
+            pyTuple = Py_BuildValue(
+                "kdd",
+                te32.th32ThreadID,
+                (double)(ftUser.dwHighDateTime * 429.4967296 + \
+                         ftUser.dwLowDateTime * 1e-7),
+                (double)(ftKernel.dwHighDateTime * 429.4967296 + \
+                         ftKernel.dwLowDateTime * 1e-7));
+            if (!pyTuple)
+                goto error;
+            if (PyList_Append(retList, pyTuple))
+                goto error;
+            Py_DECREF(pyTuple);
+
+            CloseHandle(hThread);
+        }
+    } while (Thread32Next(hThreadSnap, &te32));
+
+    CloseHandle(hThreadSnap);
+    return retList;
+
+error:
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (hThread != NULL)
+        CloseHandle(hThread);
+    if (hThreadSnap != NULL) {
+        CloseHandle(hThreadSnap);
+    }
+    return NULL;
+}
+
+
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long       pid;
+    HANDLE     processHandle;
+    DWORD      access = PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION;
+    PyObject  *filesList;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid_waccess(pid, access);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    filesList = psutil_get_open_files(pid, processHandle);
+    CloseHandle(processHandle);
+    if (filesList == NULL) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+    return filesList;
+}
+
+
+/*
+ Accept a drive path in native format like "\Device\HarddiskVolume1\"
+ and return the corresponding drive letter (e.g. "C:").
+ If no match is found return an empty string.
+*/
+static PyObject *
+psutil_win32_QueryDosDevice(PyObject *self, PyObject *args)
+{
+    LPCTSTR   lpDevicePath;
+    TCHAR d = TEXT('A');
+    TCHAR     szBuff[5];
+
+    if (!PyArg_ParseTuple(args, "s", &lpDevicePath)) {
+        return NULL;
+    }
+
+    while (d <= TEXT('Z'))
+    {
+        TCHAR szDeviceName[3] = {d, TEXT(':'), TEXT('\0')};
+        TCHAR szTarget[512] = {0};
+        if (QueryDosDevice(szDeviceName, szTarget, 511) != 0) {
+            if (_tcscmp(lpDevicePath, szTarget) == 0) {
+                _stprintf(szBuff, TEXT("%c:"), d);
+                return Py_BuildValue("s", szBuff);
+            }
+        }
+        d++;
+    }
+    return Py_BuildValue("s", "");
+}
+
+
+/*
+ * Return process username as a "DOMAIN\\USERNAME" string.
+ */
+static PyObject *
+psutil_proc_username(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE processHandle;
+    HANDLE tokenHandle;
+    PTOKEN_USER user;
+    ULONG bufferSize;
+    PTSTR name;
+    ULONG nameSize;
+    PTSTR domainName;
+    ULONG domainNameSize;
+    SID_NAME_USE nameUse;
+    PTSTR fullName;
+    PyObject *returnObject;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid_waccess(
+        pid, PROCESS_QUERY_INFORMATION);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    if (!OpenProcessToken(processHandle, TOKEN_QUERY, &tokenHandle)) {
+        CloseHandle(processHandle);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(processHandle);
+
+    // Get the user SID.
+
+    bufferSize = 0x100;
+    user = malloc(bufferSize);
+    if (user == NULL) {
+        return PyErr_NoMemory();
+    }
+
+    if (!GetTokenInformation(tokenHandle, TokenUser, user, bufferSize,
+                             &bufferSize))
+    {
+        free(user);
+        user = malloc(bufferSize);
+        if (user == NULL) {
+            CloseHandle(tokenHandle);
+            return PyErr_NoMemory();
+        }
+        if (!GetTokenInformation(tokenHandle, TokenUser, user, bufferSize,
+                                 &bufferSize))
+        {
+            free(user);
+            CloseHandle(tokenHandle);
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    CloseHandle(tokenHandle);
+
+    // resolve the SID to a name
+    nameSize = 0x100;
+    domainNameSize = 0x100;
+
+    name = malloc(nameSize * sizeof(TCHAR));
+    if (name == NULL)
+        return PyErr_NoMemory();
+    domainName = malloc(domainNameSize * sizeof(TCHAR));
+    if (domainName == NULL)
+        return PyErr_NoMemory();
+
+    if (!LookupAccountSid(NULL, user->User.Sid, name, &nameSize, domainName,
+                          &domainNameSize, &nameUse))
+    {
+        free(name);
+        free(domainName);
+        name = malloc(nameSize * sizeof(TCHAR));
+        if (name == NULL)
+            return PyErr_NoMemory();
+        domainName = malloc(domainNameSize * sizeof(TCHAR));
+        if (domainName == NULL)
+            return PyErr_NoMemory();
+        if (!LookupAccountSid(NULL, user->User.Sid, name, &nameSize,
+                              domainName, &domainNameSize, &nameUse))
+        {
+            free(name);
+            free(domainName);
+            free(user);
+
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    nameSize = _tcslen(name);
+    domainNameSize = _tcslen(domainName);
+
+    // build the full username string
+    fullName = malloc((domainNameSize + 1 + nameSize + 1) * sizeof(TCHAR));
+    if (fullName == NULL) {
+        free(name);
+        free(domainName);
+        free(user);
+        return PyErr_NoMemory();
+    }
+    memcpy(fullName, domainName, domainNameSize);
+    fullName[domainNameSize] = '\\';
+    memcpy(&fullName[domainNameSize + 1], name, nameSize);
+    fullName[domainNameSize + 1 + nameSize] = '\0';
+
+    returnObject = Py_BuildValue("s", fullName);
+
+    free(fullName);
+    free(name);
+    free(domainName);
+    free(user);
+
+    return returnObject;
+}
+
+
+// --- network connections mingw32 support
+
+#ifndef _IPRTRMIB_H
+typedef struct _MIB_TCP6ROW_OWNER_PID {
+    UCHAR ucLocalAddr[16];
+    DWORD dwLocalScopeId;
+    DWORD dwLocalPort;
+    UCHAR ucRemoteAddr[16];
+    DWORD dwRemoteScopeId;
+    DWORD dwRemotePort;
+    DWORD dwState;
+    DWORD dwOwningPid;
+} MIB_TCP6ROW_OWNER_PID, *PMIB_TCP6ROW_OWNER_PID;
+
+typedef struct _MIB_TCP6TABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_TCP6ROW_OWNER_PID table[ANY_SIZE];
+} MIB_TCP6TABLE_OWNER_PID, *PMIB_TCP6TABLE_OWNER_PID;
+#endif
+
+#ifndef __IPHLPAPI_H__
+typedef struct in6_addr {
+    union {
+        UCHAR Byte[16];
+        USHORT Word[8];
+    } u;
+} IN6_ADDR, *PIN6_ADDR, FAR *LPIN6_ADDR;
+
+typedef enum _UDP_TABLE_CLASS {
+    UDP_TABLE_BASIC,
+    UDP_TABLE_OWNER_PID,
+    UDP_TABLE_OWNER_MODULE
+} UDP_TABLE_CLASS, *PUDP_TABLE_CLASS;
+
+typedef struct _MIB_UDPROW_OWNER_PID {
+    DWORD dwLocalAddr;
+    DWORD dwLocalPort;
+    DWORD dwOwningPid;
+} MIB_UDPROW_OWNER_PID, *PMIB_UDPROW_OWNER_PID;
+
+typedef struct _MIB_UDPTABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_UDPROW_OWNER_PID table[ANY_SIZE];
+} MIB_UDPTABLE_OWNER_PID, *PMIB_UDPTABLE_OWNER_PID;
+#endif
+
+typedef struct _MIB_UDP6ROW_OWNER_PID {
+    UCHAR ucLocalAddr[16];
+    DWORD dwLocalScopeId;
+    DWORD dwLocalPort;
+    DWORD dwOwningPid;
+} MIB_UDP6ROW_OWNER_PID, *PMIB_UDP6ROW_OWNER_PID;
+
+typedef struct _MIB_UDP6TABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_UDP6ROW_OWNER_PID table[ANY_SIZE];
+} MIB_UDP6TABLE_OWNER_PID, *PMIB_UDP6TABLE_OWNER_PID;
+
+
+#define BYTESWAP_USHORT(x) ((((USHORT)(x) << 8) | ((USHORT)(x) >> 8)) & 0xffff)
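+// ports in the MIB_*_OWNER_PID tables are stored in network (big-endian)
+// byte order; this swaps them back to host byte order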
+
+#ifndef AF_INET6
+#define AF_INET6 23
+#endif
+
+#define _psutil_conn_decref_objs() \
+    Py_DECREF(_AF_INET); \
+    Py_DECREF(_AF_INET6);\
+    Py_DECREF(_SOCK_STREAM);\
+    Py_DECREF(_SOCK_DGRAM);
+
+// a sentinel value for connections that have no meaningful status (e.g. UDP)
+static int PSUTIL_CONN_NONE = 128;
+
+
+/*
+ * Return a list of network connections opened by a process
+ */
+static PyObject *
+psutil_net_connections(PyObject *self, PyObject *args)
+{
+    static long null_address[4] = { 0, 0, 0, 0 };
+
+    unsigned long pid;
+    PyObject *connectionsList;
+    PyObject *connectionTuple = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    PyObject *_AF_INET = PyLong_FromLong((long)AF_INET);
+    PyObject *_AF_INET6 = PyLong_FromLong((long)AF_INET6);
+    PyObject *_SOCK_STREAM = PyLong_FromLong((long)SOCK_STREAM);
+    PyObject *_SOCK_DGRAM = PyLong_FromLong((long)SOCK_DGRAM);
+
+    typedef PSTR (NTAPI * _RtlIpv4AddressToStringA)(struct in_addr *, PSTR);
+    _RtlIpv4AddressToStringA rtlIpv4AddressToStringA;
+    typedef PSTR (NTAPI * _RtlIpv6AddressToStringA)(struct in6_addr *, PSTR);
+    _RtlIpv6AddressToStringA rtlIpv6AddressToStringA;
+    typedef DWORD (WINAPI * _GetExtendedTcpTable)(PVOID, PDWORD, BOOL, ULONG,
+                                                  TCP_TABLE_CLASS, ULONG);
+    _GetExtendedTcpTable getExtendedTcpTable;
+    typedef DWORD (WINAPI * _GetExtendedUdpTable)(PVOID, PDWORD, BOOL, ULONG,
+                                                  UDP_TABLE_CLASS, ULONG);
+    _GetExtendedUdpTable getExtendedUdpTable;
+    PVOID table = NULL;
+    DWORD tableSize;
+    PMIB_TCPTABLE_OWNER_PID tcp4Table;
+    PMIB_UDPTABLE_OWNER_PID udp4Table;
+    PMIB_TCP6TABLE_OWNER_PID tcp6Table;
+    PMIB_UDP6TABLE_OWNER_PID udp6Table;
+    ULONG i;
+    CHAR addressBufferLocal[65];
+    PyObject *addressTupleLocal = NULL;
+    CHAR addressBufferRemote[65];
+    PyObject *addressTupleRemote = NULL;
+
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        _psutil_conn_decref_objs();
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        return NULL;
+    }
+
+    if (pid != -1) {
+        if (psutil_pid_is_running(pid) == 0) {
+            _psutil_conn_decref_objs();
+            return NoSuchProcess();
+        }
+    }
+
+    // Import some functions.
+    {
+        HMODULE ntdll;
+        HMODULE iphlpapi;
+
+        ntdll = LoadLibrary(TEXT("ntdll.dll"));
+        rtlIpv4AddressToStringA = (_RtlIpv4AddressToStringA)GetProcAddress(
+                                   ntdll, "RtlIpv4AddressToStringA");
+        rtlIpv6AddressToStringA = (_RtlIpv6AddressToStringA)GetProcAddress(
+                                   ntdll, "RtlIpv6AddressToStringA");
+        /* TODO: Check these two function pointers */
+
+        iphlpapi = LoadLibrary(TEXT("iphlpapi.dll"));
+        getExtendedTcpTable = (_GetExtendedTcpTable)GetProcAddress(iphlpapi,
+                              "GetExtendedTcpTable");
+        getExtendedUdpTable = (_GetExtendedUdpTable)GetProcAddress(iphlpapi,
+                              "GetExtendedUdpTable");
+        FreeLibrary(ntdll);
+        FreeLibrary(iphlpapi);
+    }
+
+    if ((getExtendedTcpTable == NULL) || (getExtendedUdpTable == NULL)) {
+        PyErr_SetString(PyExc_NotImplementedError,
+                        "feature not supported on this Windows version");
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    connectionsList = PyList_New(0);
+    if (connectionsList == NULL) {
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    // TCP IPv4
+
+    if ((PySequence_Contains(af_filter, _AF_INET) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_STREAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedTcpTable(NULL, &tableSize, FALSE, AF_INET,
+                            TCP_TABLE_OWNER_PID_ALL, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedTcpTable(table, &tableSize, FALSE, AF_INET,
+                                TCP_TABLE_OWNER_PID_ALL, 0) == 0)
+        {
+            tcp4Table = table;
+
+            for (i = 0; i < tcp4Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (tcp4Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (tcp4Table->table[i].dwLocalAddr != 0 ||
+                        tcp4Table->table[i].dwLocalPort != 0)
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = tcp4Table->table[i].dwLocalAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(tcp4Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                // On Windows <= XP, remote addr is filled even if socket
+                // is in LISTEN mode in which case we just ignore it.
+                if ((tcp4Table->table[i].dwRemoteAddr != 0 ||
+                        tcp4Table->table[i].dwRemotePort != 0) &&
+                        (tcp4Table->table[i].dwState != MIB_TCP_STATE_LISTEN))
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = tcp4Table->table[i].dwRemoteAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferRemote);
+                    addressTupleRemote = Py_BuildValue(
+                        "(si)",
+                        addressBufferRemote,
+                        BYTESWAP_USHORT(tcp4Table->table[i].dwRemotePort));
+                }
+                else
+                {
+                    addressTupleRemote = PyTuple_New(0);
+                }
+
+                if (addressTupleRemote == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET,
+                    SOCK_STREAM,
+                    addressTupleLocal,
+                    addressTupleRemote,
+                    tcp4Table->table[i].dwState,
+                    tcp4Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // TCP IPv6
+
+    if ((PySequence_Contains(af_filter, _AF_INET6) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_STREAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedTcpTable(NULL, &tableSize, FALSE, AF_INET6,
+                            TCP_TABLE_OWNER_PID_ALL, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedTcpTable(table, &tableSize, FALSE, AF_INET6,
+                                TCP_TABLE_OWNER_PID_ALL, 0) == 0)
+        {
+            tcp6Table = table;
+
+            for (i = 0; i < tcp6Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (tcp6Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (memcmp(tcp6Table->table[i].ucLocalAddr, null_address, 16)
+                        != 0 || tcp6Table->table[i].dwLocalPort != 0)
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, tcp6Table->table[i].ucLocalAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(tcp6Table->table[i].dwLocalPort));
+                }
+                else
+                {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                // On Windows <= XP, remote addr is filled even if socket
+                // is in LISTEN mode in which case we just ignore it.
+                if ((memcmp(tcp6Table->table[i].ucRemoteAddr, null_address, 16)
+                        != 0 ||
+                        tcp6Table->table[i].dwRemotePort != 0) &&
+                        (tcp6Table->table[i].dwState != MIB_TCP_STATE_LISTEN))
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, tcp6Table->table[i].ucRemoteAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferRemote);
+                    addressTupleRemote = Py_BuildValue(
+                        "(si)",
+                        addressBufferRemote,
+                        BYTESWAP_USHORT(tcp6Table->table[i].dwRemotePort));
+                }
+                else
+                {
+                    addressTupleRemote = PyTuple_New(0);
+                }
+
+                if (addressTupleRemote == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET6,
+                    SOCK_STREAM,
+                    addressTupleLocal,
+                    addressTupleRemote,
+                    tcp6Table->table[i].dwState,
+                    tcp6Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // UDP IPv4
+
+    if ((PySequence_Contains(af_filter, _AF_INET) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_DGRAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedUdpTable(NULL, &tableSize, FALSE, AF_INET,
+                            UDP_TABLE_OWNER_PID, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedUdpTable(table, &tableSize, FALSE, AF_INET,
+                                UDP_TABLE_OWNER_PID, 0) == 0)
+        {
+            udp4Table = table;
+
+            for (i = 0; i < udp4Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (udp4Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (udp4Table->table[i].dwLocalAddr != 0 ||
+                    udp4Table->table[i].dwLocalPort != 0)
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = udp4Table->table[i].dwLocalAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(udp4Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET,
+                    SOCK_DGRAM,
+                    addressTupleLocal,
+                    PyTuple_New(0),
+                    PSUTIL_CONN_NONE,
+                    udp4Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // UDP IPv6
+
+    if ((PySequence_Contains(af_filter, _AF_INET6) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_DGRAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedUdpTable(NULL, &tableSize, FALSE,
+                            AF_INET6, UDP_TABLE_OWNER_PID, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedUdpTable(table, &tableSize, FALSE, AF_INET6,
+                                UDP_TABLE_OWNER_PID, 0) == 0)
+        {
+            udp6Table = table;
+
+            for (i = 0; i < udp6Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (udp6Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (memcmp(udp6Table->table[i].ucLocalAddr, null_address, 16)
+                        != 0 || udp6Table->table[i].dwLocalPort != 0)
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, udp6Table->table[i].ucLocalAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(udp6Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET6,
+                    SOCK_DGRAM,
+                    addressTupleLocal,
+                    PyTuple_New(0),
+                    PSUTIL_CONN_NONE,
+                    udp6Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    _psutil_conn_decref_objs();
+    return connectionsList;
+
+error:
+    _psutil_conn_decref_objs();
+    Py_XDECREF(connectionTuple);
+    Py_XDECREF(addressTupleLocal);
+    Py_XDECREF(addressTupleRemote);
+    Py_DECREF(connectionsList);
+    if (table != NULL)
+        free(table);
+    return NULL;
+}
+
+
+/*
+ * Get process priority as a Python integer.
+ */
+static PyObject *
+psutil_proc_priority_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    DWORD priority;
+    HANDLE hProcess;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    priority = GetPriorityClass(hProcess);
+    CloseHandle(hProcess);
+    if (priority == 0) {
+        PyErr_SetFromWindowsErr(0);
+        return NULL;
+    }
+    return Py_BuildValue("i", priority);
+}
+
+
+/*
+ * Set process priority.
+ */
+static PyObject *
+psutil_proc_priority_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
+    int retval;
+    HANDLE hProcess;
+    DWORD dwDesiredAccess = \
+        PROCESS_QUERY_INFORMATION | PROCESS_SET_INFORMATION;
+    if (! PyArg_ParseTuple(args, "li", &pid, &priority)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    retval = SetPriorityClass(hProcess, priority);
+    CloseHandle(hProcess);
+    if (retval == 0) {
+        PyErr_SetFromWindowsErr(0);
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+#if (_WIN32_WINNT >= 0x0600)  // Windows Vista
+/*
+ * Get process IO priority as a Python integer.
+ */
+static PyObject *
+psutil_proc_io_priority_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE hProcess;
+    ULONG IoPriority;
+
+    _NtQueryInformationProcess NtQueryInformationProcess =
+        (_NtQueryInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtQueryInformationProcess");
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    NtQueryInformationProcess(
+        hProcess,
+        ProcessIoPriority,
+        &IoPriority,
+        sizeof(ULONG),
+        NULL
+    );
+    CloseHandle(hProcess);
+    return Py_BuildValue("i", IoPriority);
+}
+
+
+/*
+ * Set process IO priority.
+ */
+static PyObject *
+psutil_proc_io_priority_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int prio;
+    HANDLE hProcess;
+
+    _NtSetInformationProcess NtSetInformationProcess =
+        (_NtSetInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtSetInformationProcess");
+
+    if (NtSetInformationProcess == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "couldn't get NtSetInformationProcess");
+        return NULL;
+    }
+
+    if (! PyArg_ParseTuple(args, "li", &pid, &prio)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid_waccess(pid, PROCESS_ALL_ACCESS);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    NtSetInformationProcess(
+        hProcess,
+        ProcessIoPriority,
+        (PVOID)&prio,
+        sizeof(prio)
+    );
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+#endif
+
+
+/*
+ * Return a Python tuple referencing process I/O counters.
+ */
+static PyObject *
+psutil_proc_io_counters(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    IO_COUNTERS IoCounters;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (! GetProcessIoCounters(hProcess, &IoCounters)) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("(KKKK)",
+                         IoCounters.ReadOperationCount,
+                         IoCounters.WriteOperationCount,
+                         IoCounters.ReadTransferCount,
+                         IoCounters.WriteTransferCount);
+}
+
+
+/*
+ * Alternative implementation of the one above which bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_io_counters_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    LONGLONG rcount, wcount, rbytes, wbytes;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    rcount = process->ReadOperationCount.QuadPart;
+    wcount = process->WriteOperationCount.QuadPart;
+    rbytes = process->ReadTransferCount.QuadPart;
+    wbytes = process->WriteTransferCount.QuadPart;
+    free(buffer);
+    return Py_BuildValue("KKKK", rcount, wcount, rbytes, wbytes);
+}
+
+
+/*
+ * Return process CPU affinity as a bitmask
+ */
+static PyObject *
+psutil_proc_cpu_affinity_get(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    PDWORD_PTR proc_mask;
+    PDWORD_PTR system_mask;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+    if (GetProcessAffinityMask(hProcess, &proc_mask, &system_mask) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(hProcess);
+#ifdef _WIN64
+    return Py_BuildValue("K", (unsigned long long)proc_mask);
+#else
+    return Py_BuildValue("k", (unsigned long)proc_mask);
+#endif
+}
+
+
+/*
+ * Set process CPU affinity
+ */
+static PyObject *
+psutil_proc_cpu_affinity_set(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    DWORD dwDesiredAccess = \
+        PROCESS_QUERY_INFORMATION | PROCESS_SET_INFORMATION;
+    DWORD_PTR mask;
+
+#ifdef _WIN64
+    if (! PyArg_ParseTuple(args, "lK", &pid, &mask))
+#else
+    if (! PyArg_ParseTuple(args, "lk", &pid, &mask))
+#endif
+    {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (SetProcessAffinityMask(hProcess, mask) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Return True if all of the process threads are in a waiting or
+ * suspended status.
+ */
+static PyObject *
+psutil_proc_is_suspended(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    ULONG i;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    for (i = 0; i < process->NumberOfThreads; i++) {
+        if (process->Threads[i].ThreadState != Waiting ||
+                process->Threads[i].WaitReason != Suspended)
+        {
+            free(buffer);
+            Py_RETURN_FALSE;
+        }
+    }
+    free(buffer);
+    Py_RETURN_TRUE;
+}
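For reference, the loop above returns False as soon as it finds a thread that is not suspended, so the function reports True only when every thread is waiting with wait reason Suspended. A minimal Python sketch of the same check (the thread objects and field names are illustrative only, not part of psutil):

    def is_suspended(threads):
        # True only when every thread is waiting with reason "Suspended",
        # mirroring the early-return-False loop in psutil_proc_is_suspended().
        return all(t.state == "Waiting" and t.wait_reason == "Suspended"
                   for t in threads)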
+
+
+/*
+ * Return path's disk total and free as a Python tuple.
+ */
+static PyObject *
+psutil_disk_usage(PyObject *self, PyObject *args)
+{
+    BOOL retval;
+    ULARGE_INTEGER _, total, free;
+    char *path;
+
+    if (PyArg_ParseTuple(args, "u", &path)) {
+        Py_BEGIN_ALLOW_THREADS
+        retval = GetDiskFreeSpaceExW((LPCWSTR)path, &_, &total, &free);
+        Py_END_ALLOW_THREADS
+        goto return_;
+    }
+
+    // on Python 2 we also want to accept plain strings other
+    // than Unicode
+#if PY_MAJOR_VERSION <= 2
+    PyErr_Clear();  // drop the argument parsing error
+    if (PyArg_ParseTuple(args, "s", &path)) {
+        Py_BEGIN_ALLOW_THREADS
+        retval = GetDiskFreeSpaceEx(path, &_, &total, &free);
+        Py_END_ALLOW_THREADS
+        goto return_;
+    }
+#endif
+
+    return NULL;
+
+return_:
+    if (retval == 0)
+        return PyErr_SetFromWindowsErr(0);
+    else
+        return Py_BuildValue("(LL)", total.QuadPart, free.QuadPart);
+}
+
+
+/*
+ * Return a Python dict of tuples with per-interface network I/O information
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    int attempts = 0;
+    int i;
+    int outBufLen = 15000;
+    char ifname[MAX_PATH];
+    DWORD dwRetVal = 0;
+    MIB_IFROW *pIfRow = NULL;
+    ULONG flags = 0;
+    ULONG family = AF_UNSPEC;
+    PIP_ADAPTER_ADDRESSES pAddresses = NULL;
+    PIP_ADAPTER_ADDRESSES pCurrAddresses = NULL;
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_nic_info = NULL;
+    PyObject *py_nic_name = NULL;
+
+    if (py_retdict == NULL) {
+        return NULL;
+    }
+    do {
+        pAddresses = (IP_ADAPTER_ADDRESSES *) malloc(outBufLen);
+        if (pAddresses == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        dwRetVal = GetAdaptersAddresses(family, flags, NULL, pAddresses,
+                                        &outBufLen);
+        if (dwRetVal == ERROR_BUFFER_OVERFLOW) {
+            free(pAddresses);
+            pAddresses = NULL;
+        }
+        else {
+            break;
+        }
+
+        attempts++;
+    } while ((dwRetVal == ERROR_BUFFER_OVERFLOW) && (attempts < 3));
+
+    if (dwRetVal != NO_ERROR) {
+        PyErr_SetString(PyExc_RuntimeError, "GetAdaptersAddresses() failed.");
+        goto error;
+    }
+
+    pCurrAddresses = pAddresses;
+    while (pCurrAddresses) {
+        py_nic_name = NULL;
+        py_nic_info = NULL;
+        pIfRow = (MIB_IFROW *) malloc(sizeof(MIB_IFROW));
+
+        if (pIfRow == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        pIfRow->dwIndex = pCurrAddresses->IfIndex;
+        dwRetVal = GetIfEntry(pIfRow);
+        if (dwRetVal != NO_ERROR) {
+            PyErr_SetString(PyExc_RuntimeError, "GetIfEntry() failed.");
+            goto error;
+        }
+
+        py_nic_info = Py_BuildValue("(kkkkkkkk)",
+                                    pIfRow->dwOutOctets,
+                                    pIfRow->dwInOctets,
+                                    pIfRow->dwOutUcastPkts,
+                                    pIfRow->dwInUcastPkts,
+                                    pIfRow->dwInErrors,
+                                    pIfRow->dwOutErrors,
+                                    pIfRow->dwInDiscards,
+                                    pIfRow->dwOutDiscards);
+        if (!py_nic_info)
+            goto error;
+
+        sprintf(ifname, "%wS", pCurrAddresses->FriendlyName);
+
+#if PY_MAJOR_VERSION >= 3
+        // XXX - Dirty hack to avoid encoding errors on Python 3, see:
+        // https://code.google.com/p/psutil/issues/detail?id=446#c9
+        for (i = 0; i < MAX_PATH; i++) {
+            if (*(ifname+i) < 0 || *(ifname+i) > 256) {
+                // replace the non unicode character
+                *(ifname+i) = '?';
+            }
+            else if (*(ifname+i) == '\0') {
+                break;
+            }
+        }
+#endif
+        py_nic_name = Py_BuildValue("s", ifname);
+        if (py_nic_name == NULL)
+            goto error;
+        if (PyDict_SetItem(py_retdict, py_nic_name, py_nic_info))
+            goto error;
+        Py_XDECREF(py_nic_name);
+        Py_XDECREF(py_nic_info);
+
+        free(pIfRow);
+        pCurrAddresses = pCurrAddresses->Next;
+    }
+
+    free(pAddresses);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_nic_name);
+    Py_XDECREF(py_nic_info);
+    Py_DECREF(py_retdict);
+    if (pAddresses != NULL)
+        free(pAddresses);
+    if (pIfRow != NULL)
+        free(pIfRow);
+    return NULL;
+}
+
+// fix for mingw32, see
+// https://code.google.com/p/psutil/issues/detail?id=351#c2
+typedef struct _DISK_PERFORMANCE_WIN_2008 {
+    LARGE_INTEGER BytesRead;
+    LARGE_INTEGER BytesWritten;
+    LARGE_INTEGER ReadTime;
+    LARGE_INTEGER WriteTime;
+    LARGE_INTEGER IdleTime;
+    DWORD         ReadCount;
+    DWORD         WriteCount;
+    DWORD         QueueDepth;
+    DWORD         SplitCount;
+    LARGE_INTEGER QueryTime;
+    DWORD         StorageDeviceNumber;
+    WCHAR         StorageManagerName[8];
+} DISK_PERFORMANCE_WIN_2008;
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    DISK_PERFORMANCE_WIN_2008 diskPerformance;
+    DWORD dwSize;
+    HANDLE hDevice = NULL;
+    char szDevice[MAX_PATH];
+    char szDeviceDisplay[MAX_PATH];
+    int devNum;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+    if (py_retdict == NULL) {
+        return NULL;
+    }
+
+    // Apparently there's no way to figure out how many times we have
+    // to iterate in order to find valid drives.
+    // Let's assume 32, which is higher than 26, the number of letters
+    // in the alphabet (from A:\ to Z:\).
+    for (devNum = 0; devNum <= 32; ++devNum) {
+        py_disk_info = NULL;
+        sprintf(szDevice, "\\\\.\\PhysicalDrive%d", devNum);
+        hDevice = CreateFile(szDevice, 0, FILE_SHARE_READ | FILE_SHARE_WRITE,
+                             NULL, OPEN_EXISTING, 0, NULL);
+
+        if (hDevice == INVALID_HANDLE_VALUE) {
+            continue;
+        }
+        if (DeviceIoControl(hDevice, IOCTL_DISK_PERFORMANCE, NULL, 0,
+                            &diskPerformance, sizeof(diskPerformance),
+                            &dwSize, NULL))
+        {
+            sprintf(szDeviceDisplay, "PhysicalDrive%d", devNum);
+            py_disk_info = Py_BuildValue(
+                "(IILLLL)",
+                diskPerformance.ReadCount,
+                diskPerformance.WriteCount,
+                diskPerformance.BytesRead,
+                diskPerformance.BytesWritten,
+                (diskPerformance.ReadTime.QuadPart * 10) / 1000,
+                (diskPerformance.WriteTime.QuadPart * 10) / 1000);
+            if (!py_disk_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, szDeviceDisplay,
+                                     py_disk_info))
+            {
+                goto error;
+            }
+            Py_XDECREF(py_disk_info);
+        }
+        else {
+            // XXX we might get here with ERROR_INSUFFICIENT_BUFFER when
+            // compiling with mingw32; not sure what to do.
+            // return PyErr_SetFromWindowsErr(0);
+            ;;
+        }
+
+        CloseHandle(hDevice);
+    }
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (hDevice != NULL)
+        CloseHandle(hDevice);
+    return NULL;
+}
+
+
+static char *psutil_get_drive_type(int type)
+{
+    switch (type) {
+    case DRIVE_FIXED:
+        return "fixed";
+    case DRIVE_CDROM:
+        return "cdrom";
+    case DRIVE_REMOVABLE:
+        return "removable";
+    case DRIVE_UNKNOWN:
+        return "unknown";
+    case DRIVE_NO_ROOT_DIR:
+        return "unmounted";
+    case DRIVE_REMOTE:
+        return "remote";
+    case DRIVE_RAMDISK:
+        return "ramdisk";
+    default:
+        return "?";
+    }
+}
+
+
+#ifndef _ARRAYSIZE
+#define _ARRAYSIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+/*
+ * Return disk partitions as a list of tuples such as
+ * (drive_letter, drive_letter, fstype, opts)
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    DWORD num_bytes;
+    char drive_strings[255];
+    char *drive_letter = drive_strings;
+    int all;
+    int type;
+    int ret;
+    char opts[20];
+    LPTSTR fs_type[MAX_PATH + 1] = { 0 };
+    DWORD pflags = 0;
+    PyObject *py_all;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+
+    // avoid showing a message box in case something goes wrong
+    // see http://code.google.com/p/psutil/issues/detail?id=264
+    SetErrorMode(SEM_FAILCRITICALERRORS);
+
+    if (! PyArg_ParseTuple(args, "O", &py_all)) {
+        goto error;
+    }
+    all = PyObject_IsTrue(py_all);
+
+    Py_BEGIN_ALLOW_THREADS
+    num_bytes = GetLogicalDriveStrings(254, drive_letter);
+    Py_END_ALLOW_THREADS
+
+    if (num_bytes == 0) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    while (*drive_letter != 0) {
+        py_tuple = NULL;
+        opts[0] = 0;
+        fs_type[0] = 0;
+
+        Py_BEGIN_ALLOW_THREADS
+        type = GetDriveType(drive_letter);
+        Py_END_ALLOW_THREADS
+
+        // by default we only show hard drives and cd-roms
+        if (all == 0) {
+            if ((type == DRIVE_UNKNOWN) ||
+                    (type == DRIVE_NO_ROOT_DIR) ||
+                    (type == DRIVE_REMOTE) ||
+                    (type == DRIVE_RAMDISK)) {
+                goto next;
+            }
+            // floppy disk: skip it by default as it introduces a
+            // considerable slowdown.
+            if ((type == DRIVE_REMOVABLE) &&
+                    (strcmp(drive_letter, "A:\\")  == 0)) {
+                goto next;
+            }
+        }
+
+        ret = GetVolumeInformation(
+            (LPCTSTR)drive_letter, NULL, _ARRAYSIZE(drive_letter),
+            NULL, NULL, &pflags, (LPTSTR)fs_type, _ARRAYSIZE(fs_type));
+        if (ret == 0) {
+            // We might get here in case of a floppy drive, in
+            // which case the error is (21, "device not ready").
+            // Let's pretend it didn't happen as we already have
+            // the drive name and type ('removable').
+            strcat(opts, "");
+            SetLastError(0);
+        }
+        else {
+            if (pflags & FILE_READ_ONLY_VOLUME) {
+                strcat(opts, "ro");
+            }
+            else {
+                strcat(opts, "rw");
+            }
+            if (pflags & FILE_VOLUME_IS_COMPRESSED) {
+                strcat(opts, ",compressed");
+            }
+        }
+
+        if (strlen(opts) > 0) {
+            strcat(opts, ",");
+        }
+        strcat(opts, psutil_get_drive_type(type));
+
+        py_tuple = Py_BuildValue(
+            "(ssss)",
+            drive_letter,
+            drive_letter,
+            fs_type,  // either FAT, FAT32, NTFS, HPFS, CDFS, UDF or NWFS
+            opts);
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+        goto next;
+
+next:
+        drive_letter = strchr(drive_letter, 0) + 1;
+    }
+
+    SetErrorMode(0);
+    return py_retlist;
+
+error:
+    SetErrorMode(0);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
+
+#ifdef UNICODE
+#define WTSOpenServer WTSOpenServerW
+#else
+#define WTSOpenServer WTSOpenServerA
+#endif
+
+
+/*
+ * Return a Python list of currently connected users as
+ * (username, address, login time) tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    HANDLE hServer = NULL;
+    LPTSTR buffer_user = NULL;
+    LPTSTR buffer_addr = NULL;
+    PWTS_SESSION_INFO sessions = NULL;
+    DWORD count;
+    DWORD i;
+    DWORD sessionId;
+    DWORD bytes;
+    PWTS_CLIENT_ADDRESS address;
+    char address_str[50];
+    long long unix_time;
+
+    PWINSTATIONQUERYINFORMATIONW WinStationQueryInformationW;
+    WINSTATION_INFO station_info;
+    HINSTANCE hInstWinSta = NULL;
+    ULONG returnLen;
+
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+    PyObject *py_address = NULL;
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+
+    hInstWinSta = LoadLibraryA("winsta.dll");
+    WinStationQueryInformationW = (PWINSTATIONQUERYINFORMATIONW) \
+        GetProcAddress(hInstWinSta, "WinStationQueryInformationW");
+
+    hServer = WTSOpenServer('\0');
+    if (hServer == NULL) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    if (WTSEnumerateSessions(hServer, 0, 1, &sessions, &count) == 0) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    for (i = 0; i < count; i++) {
+        py_address = NULL;
+        py_tuple = NULL;
+        sessionId = sessions[i].SessionId;
+        if (buffer_user != NULL) {
+            WTSFreeMemory(buffer_user);
+        }
+        if (buffer_addr != NULL) {
+            WTSFreeMemory(buffer_addr);
+        }
+
+        buffer_user = NULL;
+        buffer_addr = NULL;
+
+        // username
+        bytes = 0;
+        if (WTSQuerySessionInformation(hServer, sessionId, WTSUserName,
+                                       &buffer_user, &bytes) == 0) {
+            PyErr_SetFromWindowsErr(0);
+            goto error;
+        }
+        if (bytes == 1) {
+            continue;
+        }
+
+        // address
+        bytes = 0;
+        if (WTSQuerySessionInformation(hServer, sessionId, WTSClientAddress,
+                                       &buffer_addr, &bytes) == 0) {
+            PyErr_SetFromWindowsErr(0);
+            goto error;
+        }
+
+        address = (PWTS_CLIENT_ADDRESS)buffer_addr;
+        if (address->AddressFamily == 0) {  // AF_INET
+            sprintf(address_str,
+                    "%u.%u.%u.%u",
+                    address->Address[0],
+                    address->Address[1],
+                    address->Address[2],
+                    address->Address[3]);
+            py_address = Py_BuildValue("s", address_str);
+            if (!py_address)
+                goto error;
+        }
+        else {
+            py_address = Py_None;
+        }
+
+        // login time
+        if (!WinStationQueryInformationW(hServer,
+                                         sessionId,
+                                         WinStationInformation,
+                                         &station_info,
+                                         sizeof(station_info),
+                                         &returnLen))
+        {
+            goto error;
+        }
+
+        unix_time = ((LONGLONG)station_info.ConnectTime.dwHighDateTime) << 32;
+        unix_time += \
+            station_info.ConnectTime.dwLowDateTime - 116444736000000000LL;
+        unix_time /= 10000000;
+
+        py_tuple = Py_BuildValue("sOd", buffer_user, py_address,
+                                 (double)unix_time);
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_XDECREF(py_address);
+        Py_XDECREF(py_tuple);
+    }
+
+    WTSCloseServer(hServer);
+    WTSFreeMemory(sessions);
+    WTSFreeMemory(buffer_user);
+    WTSFreeMemory(buffer_addr);
+    FreeLibrary(hInstWinSta);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_XDECREF(py_address);
+    Py_DECREF(py_retlist);
+
+    if (hInstWinSta != NULL) {
+        FreeLibrary(hInstWinSta);
+    }
+    if (hServer != NULL) {
+        WTSCloseServer(hServer);
+    }
+    if (sessions != NULL) {
+        WTSFreeMemory(sessions);
+    }
+    if (buffer_user != NULL) {
+        WTSFreeMemory(buffer_user);
+    }
+    if (buffer_addr != NULL) {
+        WTSFreeMemory(buffer_addr);
+    }
+    return NULL;
+}
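For reference, the login-time math above converts a Windows FILETIME (100-nanosecond intervals since 1601-01-01 UTC) into a Unix timestamp; 116444736000000000 is the 1601-to-1970 offset expressed in 100-ns units. A quick Python check of the same formula (standalone sketch, not psutil code):

    EPOCH_DELTA_100NS = 11644473600 * 10**7  # seconds between 1601 and 1970, in 100-ns units

    def filetime_to_unix(high, low):
        # Combine the two 32-bit halves, shift the epoch, scale to seconds.
        return ((high << 32) + low - EPOCH_DELTA_100NS) / 10**7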
+
+
+/*
+ * Return the number of handles opened by process.
+ */
+static PyObject *
+psutil_proc_num_handles(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    DWORD handleCount;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (! GetProcessHandleCount(hProcess, &handleCount)) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("k", handleCount);
+}
+
+
+/*
+ * Alternative implementation of the function above; it bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_num_handles_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    ULONG count;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    count = process->HandleCount;
+    free(buffer);
+    return Py_BuildValue("k", count);
+}
+
+
+/*
+ * Return the number of context switches executed by process.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    ULONG i;
+    ULONG total = 0;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    for (i = 0; i < process->NumberOfThreads; i++) {
+        total += process->Threads[i].ContextSwitches;
+    }
+    free(buffer);
+    return Py_BuildValue("ki", total, 0);
+}
+
+
+static char *get_region_protection_string(ULONG protection)
+{
+    switch (protection & 0xff) {
+    case PAGE_NOACCESS:
+        return "";
+    case PAGE_READONLY:
+        return "r";
+    case PAGE_READWRITE:
+        return "rw";
+    case PAGE_WRITECOPY:
+        return "wc";
+    case PAGE_EXECUTE:
+        return "x";
+    case PAGE_EXECUTE_READ:
+        return "xr";
+    case PAGE_EXECUTE_READWRITE:
+        return "xrw";
+    case PAGE_EXECUTE_WRITECOPY:
+        return "xwc";
+    default:
+        return "?";
+    }
+}
+
+
+/*
+ * Return a list of process's memory mappings.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess = NULL;
+    MEMORY_BASIC_INFORMATION basicInfo;
+    PVOID baseAddress;
+    PVOID previousAllocationBase;
+    CHAR mappedFileName[MAX_PATH];
+    SYSTEM_INFO system_info;
+    LPVOID maxAddr;
+    PyObject *py_list = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_list == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        goto error;
+    }
+
+    GetSystemInfo(&system_info);
+    maxAddr = system_info.lpMaximumApplicationAddress;
+    baseAddress = NULL;
+    previousAllocationBase = NULL;
+
+    while (VirtualQueryEx(hProcess, baseAddress, &basicInfo,
+                          sizeof(MEMORY_BASIC_INFORMATION)))
+    {
+        py_tuple = NULL;
+        if (baseAddress > maxAddr) {
+            break;
+        }
+        if (GetMappedFileNameA(hProcess, baseAddress, mappedFileName,
+                               sizeof(mappedFileName)))
+        {
+            py_tuple = Py_BuildValue(
+                "(kssI)",
+                (unsigned long)baseAddress,
+                get_region_protection_string(basicInfo.Protect),
+                mappedFileName,
+                basicInfo.RegionSize);
+            if (!py_tuple)
+                goto error;
+            if (PyList_Append(py_list, py_tuple))
+                goto error;
+            Py_DECREF(py_tuple);
+        }
+        previousAllocationBase = basicInfo.AllocationBase;
+        baseAddress = (PCHAR)baseAddress + basicInfo.RegionSize;
+    }
+
+    CloseHandle(hProcess);
+    return py_list;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_list);
+    if (hProcess != NULL)
+        CloseHandle(hProcess);
+    return NULL;
+}
+
+
+/*
+ * Return a {pid:ppid, ...} dict for all running processes.
+ */
+static PyObject *
+psutil_ppid_map(PyObject *self, PyObject *args)
+{
+    PyObject *pid = NULL;
+    PyObject *ppid = NULL;
+    PyObject *py_retdict = PyDict_New();
+    HANDLE handle = NULL;
+    PROCESSENTRY32 pe = {0};
+    pe.dwSize = sizeof(PROCESSENTRY32);
+
+    if (py_retdict == NULL)
+        return NULL;
+    handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+    if (handle == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        Py_DECREF(py_retdict);
+        return NULL;
+    }
+
+    if (Process32First(handle, &pe)) {
+        do {
+            pid = Py_BuildValue("I", pe.th32ProcessID);
+            if (pid == NULL)
+                goto error;
+            ppid = Py_BuildValue("I", pe.th32ParentProcessID);
+            if (ppid == NULL)
+                goto error;
+            if (PyDict_SetItem(py_retdict, pid, ppid))
+                goto error;
+            Py_DECREF(pid);
+            Py_DECREF(ppid);
+        } while (Process32Next(handle, &pe));
+    }
+
+    CloseHandle(handle);
+    return py_retdict;
+
+error:
+    Py_XDECREF(pid);
+    Py_XDECREF(ppid);
+    Py_DECREF(py_retdict);
+    CloseHandle(handle);
+    return NULL;
+}
+
+
+// ------------------------ Python init ---------------------------
+
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return path of the process executable"},
+    {"proc_kill", psutil_proc_kill, METH_VARARGS,
+     "Kill the process identified by the given PID"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return a tuple of process memory information"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory"},
+    {"proc_suspend", psutil_proc_suspend, METH_VARARGS,
+     "Suspend a process"},
+    {"proc_resume", psutil_proc_resume, METH_VARARGS,
+     "Resume a process"},
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process"},
+    {"proc_username", psutil_proc_username, METH_VARARGS,
+     "Return the username of a process"},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return the network connections of a process"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads information as a list of tuple"},
+    {"proc_wait", psutil_proc_wait, METH_VARARGS,
+     "Wait for process to terminate and return its exit code."},
+    {"proc_priority_get", psutil_proc_priority_get, METH_VARARGS,
+     "Return process priority."},
+    {"proc_priority_set", psutil_proc_priority_set, METH_VARARGS,
+     "Set process priority."},
+#if (_WIN32_WINNT >= 0x0600)  // Windows Vista
+    {"proc_io_priority_get", psutil_proc_io_priority_get, METH_VARARGS,
+     "Return process IO priority."},
+    {"proc_io_priority_set", psutil_proc_io_priority_set, METH_VARARGS,
+     "Set process IO priority."},
+#endif
+    {"proc_cpu_affinity_get", psutil_proc_cpu_affinity_get, METH_VARARGS,
+     "Return process CPU affinity as a bitmask."},
+    {"proc_cpu_affinity_set", psutil_proc_cpu_affinity_set, METH_VARARGS,
+     "Set process CPU affinity."},
+    {"proc_io_counters", psutil_proc_io_counters, METH_VARARGS,
+     "Get process I/O counters."},
+    {"proc_is_suspended", psutil_proc_is_suspended, METH_VARARGS,
+     "Return True if one of the process threads is in a suspended state"},
+    {"proc_num_handles", psutil_proc_num_handles, METH_VARARGS,
+     "Return the number of handles opened by process."},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process."},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return a list of process's memory mappings"},
+
+    // --- alternative pinfo interface
+    {"proc_cpu_times_2", psutil_proc_cpu_times_2, METH_VARARGS,
+     "Alternative implementation"},
+    {"proc_create_time_2", psutil_proc_create_time_2, METH_VARARGS,
+     "Alternative implementation"},
+    {"proc_num_handles_2", psutil_proc_num_handles_2, METH_VARARGS,
+     "Alternative implementation"},
+    {"proc_io_counters_2", psutil_proc_io_counters_2, METH_VARARGS,
+     "Alternative implementation"},
+    {"proc_memory_info_2", psutil_proc_memory_info_2, METH_VARARGS,
+     "Alternative implementation"},
+
+    // --- system-related functions
+    {"pids", psutil_pids, METH_VARARGS,
+     "Returns a list of PIDs currently running on the system"},
+    {"ppid_map", psutil_ppid_map, METH_VARARGS,
+     "Return a {pid:ppid, ...} dict for all running processes"},
+    {"pid_exists", psutil_pid_exists, METH_VARARGS,
+     "Determine if the process exists in the current process list."},
+    {"cpu_count_logical", psutil_cpu_count_logical, METH_VARARGS,
+     "Returns the number of logical CPUs on the system"},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Returns the number of physical CPUs on the system"},
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return the system boot time expressed in seconds since the epoch."},
+    {"virtual_mem", psutil_virtual_mem, METH_VARARGS,
+     "Return the total amount of physical memory, in bytes"},
+    {"cpu_times", psutil_cpu_times, METH_VARARGS,
+     "Return system cpu times as a list"},
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-cpu times as a list of tuples"},
+    {"disk_usage", psutil_disk_usage, METH_VARARGS,
+     "Return path's disk total and free as a Python tuple."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return dict of tuples of networks I/O information."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return dict of tuples of disks I/O information."},
+    {"users", psutil_users, METH_VARARGS,
+     "Return a list of currently connected users."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return disk partitions."},
+    {"net_connections", psutil_net_connections, METH_VARARGS,
+     "Return system-wide connections"},
+
+
+    // --- windows API bindings
+    {"win32_QueryDosDevice", psutil_win32_QueryDosDevice, METH_VARARGS,
+     "QueryDosDevice binding"},
+
+    {NULL, NULL, 0, NULL}
+};
+
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+static struct module_state _state;
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int psutil_windows_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int psutil_windows_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_windows",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_windows_traverse,
+    psutil_windows_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_windows(void)
+
+#else
+#define INITERROR return
+void init_psutil_windows(void)
+#endif
+{
+    struct module_state *st = NULL;
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_windows", PsutilMethods);
+#endif
+
+    if (module == NULL) {
+        INITERROR;
+    }
+
+    st = GETSTATE(module);
+    st->error = PyErr_NewException("_psutil_windows.Error", NULL, NULL);
+    if (st->error == NULL) {
+        Py_DECREF(module);
+        INITERROR;
+    }
+
+    // process status constants
+    // http://msdn.microsoft.com/en-us/library/ms683211(v=vs.85).aspx
+    PyModule_AddIntConstant(
+        module, "ABOVE_NORMAL_PRIORITY_CLASS", ABOVE_NORMAL_PRIORITY_CLASS);
+    PyModule_AddIntConstant(
+        module, "BELOW_NORMAL_PRIORITY_CLASS", BELOW_NORMAL_PRIORITY_CLASS);
+    PyModule_AddIntConstant(
+        module, "HIGH_PRIORITY_CLASS", HIGH_PRIORITY_CLASS);
+    PyModule_AddIntConstant(
+        module, "IDLE_PRIORITY_CLASS", IDLE_PRIORITY_CLASS);
+    PyModule_AddIntConstant(
+        module, "NORMAL_PRIORITY_CLASS", NORMAL_PRIORITY_CLASS);
+    PyModule_AddIntConstant(
+        module, "REALTIME_PRIORITY_CLASS", REALTIME_PRIORITY_CLASS);
+    // connection status constants
+    // http://msdn.microsoft.com/en-us/library/cc669305.aspx
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_CLOSED", MIB_TCP_STATE_CLOSED);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_CLOSING", MIB_TCP_STATE_CLOSING);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_CLOSE_WAIT", MIB_TCP_STATE_CLOSE_WAIT);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_LISTEN", MIB_TCP_STATE_LISTEN);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_ESTAB", MIB_TCP_STATE_ESTAB);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_SYN_SENT", MIB_TCP_STATE_SYN_SENT);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_SYN_RCVD", MIB_TCP_STATE_SYN_RCVD);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_FIN_WAIT1", MIB_TCP_STATE_FIN_WAIT1);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_FIN_WAIT2", MIB_TCP_STATE_FIN_WAIT2);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_LAST_ACK", MIB_TCP_STATE_LAST_ACK);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_TIME_WAIT", MIB_TCP_STATE_TIME_WAIT);
+    PyModule_AddIntConstant(
+        module, "MIB_TCP_STATE_DELETE_TCB", MIB_TCP_STATE_DELETE_TCB);
+    PyModule_AddIntConstant(
+        module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+    // ...for internal use in _pswindows.py
+    PyModule_AddIntConstant(
+        module, "INFINITE", INFINITE);
+    PyModule_AddIntConstant(
+        module, "ERROR_ACCESS_DENIED", ERROR_ACCESS_DENIED);
+
+    // set SeDebug for the current process
+    psutil_set_se_debug();
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
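The compiled module above registers both a primary call and a *_2 fallback for several per-process functions; the pure-Python layer in _pswindows.py (added later in this commit) retries with the fallback when the primary call is denied. A minimal sketch of that calling pattern, assuming the extension is importable as _psutil_windows (illustrative only):

    import errno
    import _psutil_windows as cext

    def proc_io_counters(pid):
        # Retry with the NtQuerySystemInformation-based variant when the
        # plain GetProcessIoCounters() path raises an access error.
        try:
            return cext.proc_io_counters(pid)
        except OSError as err:
            if err.errno in (errno.EPERM, errno.EACCES, cext.ERROR_ACCESS_DENIED):
                return cext.proc_io_counters_2(pid)
            raise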

+ 70 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h

@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+// --- per-process functions
+
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_is_suspended(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_kill(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_handles(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_handles_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_priority_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_priority_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_resume(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_suspend(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_username(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_wait(PyObject* self, PyObject* args);
+
+#if (PSUTIL_WINVER >= 0x0600)  // Windows Vista
+static PyObject* psutil_proc_io_priority_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_priority_set(PyObject* self, PyObject* args);
+#endif
+
+// --- system-related functions
+
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_usage(PyObject* self, PyObject* args);
+static PyObject* psutil_net_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_pid_exists(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_ppid_map(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);
+
+// --- windows API bindings
+
+static PyObject* psutil_win32_QueryDosDevice(PyObject* self, PyObject* args);
+
+// --- internal
+
+int psutil_proc_suspend_or_resume(DWORD pid, int suspend);

+ 485 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py

@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Windows platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import PY3, xrange, wraps, lru_cache, namedtuple
+import _psutil_windows as cext
+
+# process priority constants, re-exported through __init__.py:
+# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
+__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
+                  "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
+                  "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
+                  #
+                  "CONN_DELETE_TCB",
+                  ]
+
+# --- module level constants (get pushed up to the psutil module)
+
+CONN_DELETE_TCB = "DELETE_TCB"
+WAIT_TIMEOUT = 0x00000102  # 258 in decimal
+ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
+                               cext.ERROR_ACCESS_DENIED])
+
+TCP_STATUSES = {
+    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
+    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
+    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
+    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
+    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
+    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
+    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple(
+    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
+                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
+                'pagefile', 'peak_pagefile', 'private'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+@lru_cache(maxsize=512)
+def _win32_QueryDosDevice(s):
+    return cext.win32_QueryDosDevice(s)
+
+
+def _convert_raw_path(s):
+    # convert paths using native DOS format like:
+    # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
+    # into: "C:\Windows\systemew\file.txt"
+    if PY3 and not isinstance(s, str):
+        s = s.decode('utf8')
+    rawdrive = '\\'.join(s.split('\\')[:3])
+    driveletter = _win32_QueryDosDevice(rawdrive)
+    return os.path.join(driveletter, s[len(rawdrive):])
+
+
+# --- public functions
+
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    mem = cext.virtual_mem()
+    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
+    #
+    total = totphys
+    avail = availphys
+    free = availphys
+    used = total - avail
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    mem = cext.virtual_mem()
+    total = mem[2]
+    free = mem[3]
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, 0, 0)
+
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        total, free = cext.disk_usage(path)
+    except WindowsError:
+        if not os.path.exists(path):
+            msg = "No such file or directory: '%s'" % path
+            raise OSError(errno.ENOENT, msg)
+        raise
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sdiskusage(total, used, free, percent)
+
+
+def disk_partitions(all):
+    """Return disk partitions."""
+    rawlist = cext.disk_partitions(all)
+    return [_common.sdiskpart(*x) for x in rawlist]
+
+
+def cpu_times():
+    """Return system CPU times as a named tuple."""
+    user, system, idle = cext.cpu_times()
+    return scputimes(user, system, idle)
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples."""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, system, idle = cpu_t
+        item = scputimes(user, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    """
+    if kind not in conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+        ret.append(nt)
+    return ret
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, hostname, tstamp = item
+        nt = _common.suser(user, None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+pids = cext.pids
+pid_exists = cext.pid_exists
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+ppid_map = cext.ppid_map  # not meant to be public
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and WindowsError
+    exceptions into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        """Return process name, which on Windows is always the final
+        part of the executable.
+        """
+        # This is how PIDs 0 and 4 are always represented in taskmgr
+        # and process-hacker.
+        if self.pid == 0:
+            return "System Idle Process"
+        elif self.pid == 4:
+            return "System"
+        else:
+            return os.path.basename(self.exe())
+
+    @wrap_exceptions
+    def exe(self):
+        # Note: os.path.exists(path) may return False even if the file
+        # is there, see:
+        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
+        return _convert_raw_path(cext.proc_exe(self.pid))
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    def ppid(self):
+        try:
+            return ppid_map()[self.pid]
+        except KeyError:
+            raise NoSuchProcess(self.pid, self._name)
+
+    def _get_raw_meminfo(self):
+        try:
+            return cext.proc_memory_info(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_memory_info_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def memory_info(self):
+        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage
+        # fields of PROCESS_MEMORY_COUNTERS struct:
+        # http://msdn.microsoft.com/en-us/library/windows/desktop/
+        #     ms684877(v=vs.85).aspx
+        t = self._get_raw_meminfo()
+        return _common.pmem(t[2], t[7])
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*self._get_raw_meminfo())
+
+    def memory_maps(self):
+        try:
+            raw = cext.proc_memory_maps(self.pid)
+        except OSError:
+            # XXX - can't use wrap_exceptions decorator as we're
+            # returning a generator; probably needs refactoring.
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+        else:
+            for addr, perm, path, rss in raw:
+                path = _convert_raw_path(path)
+                addr = hex(addr)
+                yield (addr, perm, path, rss)
+
+    @wrap_exceptions
+    def kill(self):
+        return cext.proc_kill(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        if timeout is None:
+            timeout = cext.INFINITE
+        else:
+            # WaitForSingleObject() expects time in milliseconds
+            timeout = int(timeout * 1000)
+        ret = cext.proc_wait(self.pid, timeout)
+        if ret == WAIT_TIMEOUT:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise RuntimeError("timeout expired")
+            raise TimeoutExpired(timeout, self.pid, self._name)
+        return ret
+
+    @wrap_exceptions
+    def username(self):
+        if self.pid in (0, 4):
+            return 'NT AUTHORITY\\SYSTEM'
+        return cext.proc_username(self.pid)
+
+    @wrap_exceptions
+    def create_time(self):
+        # special case for kernel process PIDs; return system boot time
+        if self.pid in (0, 4):
+            return boot_time()
+        try:
+            return cext.proc_create_time(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_create_time_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def cpu_times(self):
+        try:
+            ret = cext.proc_cpu_times(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_cpu_times_2(self.pid)
+            else:
+                raise
+        return _common.pcputimes(*ret)
+
+    @wrap_exceptions
+    def suspend(self):
+        return cext.proc_suspend(self.pid)
+
+    @wrap_exceptions
+    def resume(self):
+        return cext.proc_resume(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        if self.pid in (0, 4):
+            raise AccessDenied(self.pid, self._name)
+        # return a normalized pathname since the native C function appends
+        # "\\" at the and of the path
+        path = cext.proc_cwd(self.pid)
+        return os.path.normpath(path)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid in (0, 4):
+            return []
+        retlist = []
+        # Filenames come in native format like:
+        # "\Device\HarddiskVolume1\Windows\system32\file.txt"
+        # Convert the first part in the corresponding drive letter
+        # (e.g. "C:\") by using Windows's QueryDosDevice()
+        raw_file_names = cext.proc_open_files(self.pid)
+        for file in raw_file_names:
+            file = _convert_raw_path(file)
+            if isfile_strict(file) and file not in retlist:
+                ntuple = _common.popenfile(file, -1)
+                retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        return net_connections(kind, _pid=self.pid)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return cext.proc_priority_get(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return cext.proc_priority_set(self.pid, value)
+
+    # available on Windows >= Vista
+    if hasattr(cext, "proc_io_priority_get"):
+        @wrap_exceptions
+        def ionice_get(self):
+            return cext.proc_io_priority_get(self.pid)
+
+        @wrap_exceptions
+        def ionice_set(self, value, _):
+            if _:
+                raise TypeError("set_proc_ionice() on Windows takes only "
+                                "1 argument (2 given)")
+            if value not in (2, 1, 0):
+                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
+                                 "(very low); got %r" % value)
+            return cext.proc_io_priority_set(self.pid, value)
+
+    @wrap_exceptions
+    def io_counters(self):
+        try:
+            ret = cext.proc_io_counters(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_io_counters_2(self.pid)
+            else:
+                raise
+        return _common.pio(*ret)
+
+    @wrap_exceptions
+    def status(self):
+        suspended = cext.proc_is_suspended(self.pid)
+        if suspended:
+            return _common.STATUS_STOPPED
+        else:
+            return _common.STATUS_RUNNING
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, value):
+        def to_bitmask(l):
+            if not l:
+                raise ValueError("invalid argument %r" % l)
+            out = 0
+            for b in l:
+                out |= 2 ** b
+            return out
+
+        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
+        # is returned for an invalid CPU but this seems not to be true,
+        # therefore we check CPU validity beforehand.
+        allcpus = list(range(len(per_cpu_times())))
+        for cpu in value:
+            if cpu not in allcpus:
+                raise ValueError("invalid CPU %r" % cpu)
+
+        bitmask = to_bitmask(value)
+        cext.proc_cpu_affinity_set(self.pid, bitmask)
+
+    @wrap_exceptions
+    def num_handles(self):
+        try:
+            return cext.proc_num_handles(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_num_handles_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        tupl = cext.proc_num_ctx_switches(self.pid)
+        return _common.pctxsw(*tupl)
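The cpu_affinity_get()/cpu_affinity_set() methods above round-trip between a list of CPU indices and the bitmask expected by SetProcessAffinityMask(). A small worked example of that encoding (standalone sketch, equivalent to the helpers in the code above):

    def to_bitmask(cpus):
        mask = 0
        for cpu in cpus:
            mask |= 1 << cpu
        return mask

    def from_bitmask(mask):
        return [i for i in range(64) if (1 << i) & mask]

    assert to_bitmask([0, 2]) == 5      # 0b101
    assert from_bitmask(5) == [0, 2]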

+ 285 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c

@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information.
+ * Used by _psutil_bsd module methods.
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <signal.h>
+
+#include "process_info.h"
+
+
+/*
+ * Returns a list of all BSD processes on the system.  This routine
+ * allocates the list and puts it in *procList and a count of the
+ * number of entries in *procCount.  You are responsible for freeing
+ * this list (use "free" from System framework).
+ * On success, the function returns 0.
+ * On error, the function returns a BSD errno value.
+ */
+int
+psutil_get_proc_list(struct kinfo_proc **procList, size_t *procCount)
+{
+    int err;
+    struct kinfo_proc *result;
+    int done;
+    static const int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC, 0 };
+    // Declaring name as const requires us to cast it when passing it to
+    // sysctl because the prototype doesn't include the const modifier.
+    size_t              length;
+
+    assert( procList != NULL);
+    assert(*procList == NULL);
+    assert(procCount != NULL);
+
+    *procCount = 0;
+
+    /*
+     * We start by calling sysctl with result == NULL and length == 0.
+     * That will succeed, and set length to the appropriate length.
+     * We then allocate a buffer of that size and call sysctl again
+     * with that buffer.  If that succeeds, we're done.  If that fails
+     * with ENOMEM, we have to throw away our buffer and loop.  Note
+     * that the loop causes us to call sysctl with NULL again; this
+     * is necessary because the ENOMEM failure case sets length to
+     * the amount of data returned, not the amount of data that
+     * could have been returned.
+     */
+    result = NULL;
+    done = 0;
+    do {
+        assert(result == NULL);
+        // Call sysctl with a NULL buffer.
+        length = 0;
+        err = sysctl((int *)name, (sizeof(name) / sizeof(*name)) - 1,
+                     NULL, &length, NULL, 0);
+        if (err == -1)
+            err = errno;
+
+        // Allocate an appropriately sized buffer based on the results
+        // from the previous call.
+        if (err == 0) {
+            result = malloc(length);
+            if (result == NULL)
+                err = ENOMEM;
+        }
+
+        // Call sysctl again with the new buffer.  If we get an ENOMEM
+        // error, toss away our buffer and start again.
+        if (err == 0) {
+            err = sysctl((int *) name, (sizeof(name) / sizeof(*name)) - 1,
+                         result, &length, NULL, 0);
+            if (err == -1)
+                err = errno;
+            if (err == 0) {
+                done = 1;
+            }
+            else if (err == ENOMEM) {
+                assert(result != NULL);
+                free(result);
+                result = NULL;
+                err = 0;
+            }
+        }
+    } while (err == 0 && ! done);
+
+    // Clean up and establish post conditions.
+    if (err != 0 && result != NULL) {
+        free(result);
+        result = NULL;
+    }
+
+    *procList = result;
+    *procCount = length / sizeof(struct kinfo_proc);
+
+    assert((err == 0) == (*procList != NULL));
+    return err;
+}
+
+
+char
+*psutil_get_cmd_path(long pid, size_t *pathsize)
+{
+    int mib[4];
+    char *path;
+    size_t size = 0;
+
+    /*
+     * Make a sysctl() call to get the raw argument space of the process.
+     */
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PATHNAME;
+    mib[3] = pid;
+
+    // call with a null buffer first to determine if we need a buffer
+    if (sysctl(mib, 4, NULL, &size, NULL, 0) == -1) {
+        return NULL;
+    }
+
+    path = malloc(size);
+    if (path == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    *pathsize = size;
+    if (sysctl(mib, 4, path, &size, NULL, 0) == -1) {
+        free(path);
+        return NULL;       // Insufficient privileges
+    }
+
+    return path;
+}
+
+
+/*
+ * XXX no longer used; it probably makes sense to remove it.
+ * Borrowed from psi Python System Information project
+ *
+ * Get command arguments and environment variables.
+ *
+ * Based on code from ps.
+ *
+ * Returns:
+ *      0 for success;
+ *      -1 for failure (Exception raised);
+ *      1 for insufficient privileges.
+ */
+char
+*psutil_get_cmd_args(long pid, size_t *argsize)
+{
+    int mib[4], argmax;
+    size_t size = sizeof(argmax);
+    char *procargs = NULL;
+
+    // Get the maximum process arguments size.
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_ARGMAX;
+
+    size = sizeof(argmax);
+    if (sysctl(mib, 2, &argmax, &size, NULL, 0) == -1)
+        return NULL;
+
+    // Allocate space for the arguments.
+    procargs = (char *)malloc(argmax);
+    if (procargs == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    /*
+     * Make a sysctl() call to get the raw argument space of the process.
+     */
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_ARGS;
+    mib[3] = pid;
+
+    size = argmax;
+    if (sysctl(mib, 4, procargs, &size, NULL, 0) == -1) {
+        free(procargs);
+        return NULL;       // Insufficient privileges
+    }
+
+    // return string and set the length of arguments
+    *argsize = size;
+    return procargs;
+}
+
+
+// returns the command line as a python list object
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    char *argstr = NULL;
+    int pos = 0;
+    size_t argsize = 0;
+    PyObject *retlist = Py_BuildValue("[]");
+    PyObject *item = NULL;
+
+    if (pid < 0) {
+        return retlist;
+    }
+
+    argstr = psutil_get_cmd_args(pid, &argsize);
+    if (argstr == NULL) {
+        goto error;
+    }
+
+    // args are returned as a flattened string with \0 separators between
+    // arguments; add each string to the list, then step forward to the next
+    // separator
+    if (argsize > 0) {
+        while (pos < argsize) {
+            item = Py_BuildValue("s", &argstr[pos]);
+            if (!item)
+                goto error;
+            if (PyList_Append(retlist, item))
+                goto error;
+            Py_DECREF(item);
+            pos = pos + strlen(&argstr[pos]) + 1;
+        }
+    }
+
+    free(argstr);
+    return retlist;
+
+error:
+    Py_XDECREF(item);
+    Py_DECREF(retlist);
+    if (argstr != NULL)
+        free(argstr);
+    return NULL;
+}
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+int
+psutil_pid_exists(long pid)
+{
+    int kill_ret;
+    if (pid < 0) {
+        return 0;
+    }
+
+    // if kill returns success or permission denied we know it's a valid PID
+    kill_ret = kill(pid , 0);
+    if ((0 == kill_ret) || (EPERM == errno)) {
+        return 1;
+    }
+
+    // otherwise return 0 for PID not found
+    return 0;
+}
+
+
+/*
+ * Set exception to AccessDenied if pid exists else NoSuchProcess.
+ */
+int
+psutil_raise_ad_or_nsp(long pid) {
+    if (psutil_pid_exists(pid) == 0) {
+        NoSuchProcess();
+    }
+    else {
+        AccessDenied();
+    }
+    return 0;
+}

+ 15 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h

@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+typedef struct kinfo_proc kinfo_proc;
+
+char *psutil_get_cmd_args(long pid, size_t *argsize);
+char *psutil_get_cmd_path(long pid, size_t *pathsize);
+int psutil_get_proc_list(struct kinfo_proc **procList, size_t *procCount);
+int psutil_pid_exists(long pid);
+PyObject* psutil_get_arg_list(long pid);

+ 293 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c

@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information.
+ * Used by _psutil_osx module methods.
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>  // for INT_MAX
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <sys/sysctl.h>
+#include <libproc.h>
+
+#include "process_info.h"
+#include "../../_psutil_common.h"
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+int
+psutil_pid_exists(long pid)
+{
+    int kill_ret;
+
+    // save some time if it's an invalid PID
+    if (pid < 0) {
+        return 0;
+    }
+
+    // if kill returns success or permission denied we know it's a valid PID
+    kill_ret = kill(pid , 0);
+    if ( (0 == kill_ret) || (EPERM == errno) ) {
+        return 1;
+    }
+
+    // otherwise return 0 for PID not found
+    return 0;
+}
+
+
+/*
+ * Returns a list of all BSD processes on the system.  This routine
+ * allocates the list and puts it in *procList and a count of the
+ * number of entries in *procCount.  You are responsible for freeing
+ * this list (use "free" from System framework).
+ * On success, the function returns 0.
+ * On error, the function returns a BSD errno value.
+ */
+int
+psutil_get_proc_list(kinfo_proc **procList, size_t *procCount)
+{
+    // Declaring mib as const requires use of a cast since the
+    // sysctl prototype doesn't include the const modifier.
+    static const int mib3[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
+    size_t           size, size2;
+    void            *ptr;
+    int              err, lim = 8;  // some limit
+
+    assert( procList != NULL);
+    assert(*procList == NULL);
+    assert(procCount != NULL);
+
+    *procCount = 0;
+
+    /*
+     * We start by calling sysctl with ptr == NULL and size == 0.
+     * That will succeed, and set size to the appropriate length.
+     * We then allocate a buffer of at least that size and call
+     * sysctl with that buffer.  If that succeeds, we're done.
+     * If that call fails with ENOMEM, we throw the buffer away
+     * and try again.
+     * Note that the loop calls sysctl with NULL again.  This is
+     * necessary because the ENOMEM failure case sets size to
+     * the amount of data returned, not the amount of data that
+     * could have been returned.
+     */
+    while (lim-- > 0) {
+        size = 0;
+        if (sysctl((int *)mib3, 3, NULL, &size, NULL, 0) == -1) {
+            return errno;
+        }
+
+        size2 = size + (size >> 3);  // add some
+        if (size2 > size) {
+            ptr = malloc(size2);
+            if (ptr == NULL) {
+                ptr = malloc(size);
+            } else {
+                size = size2;
+            }
+        }
+        else {
+            ptr = malloc(size);
+        }
+        if (ptr == NULL) {
+            return ENOMEM;
+        }
+
+        if (sysctl((int *)mib3, 3, ptr, &size, NULL, 0) == -1) {
+            err = errno;
+            free(ptr);
+            if (err != ENOMEM) {
+                return err;
+            }
+
+        } else {
+            *procList = (kinfo_proc *)ptr;
+            *procCount = size / sizeof(kinfo_proc);
+            return 0;
+        }
+    }
+    return ENOMEM;
+}
+
+
+// Read the maximum argument size for processes
+int
+psutil_get_argmax()
+{
+    int argmax;
+    int mib[] = { CTL_KERN, KERN_ARGMAX };
+    size_t size = sizeof(argmax);
+
+    if (sysctl(mib, 2, &argmax, &size, NULL, 0) == 0) {
+        return argmax;
+    }
+    return 0;
+}
+
+
+// return process args as a python list
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    int mib[3];
+    int nargs;
+    int len;
+    char *procargs = NULL;
+    char *arg_ptr;
+    char *arg_end;
+    char *curr_arg;
+    size_t argmax;
+    PyObject *arg = NULL;
+    PyObject *arglist = NULL;
+
+    // special case for PID 0 (kernel_task) where cmdline cannot be fetched
+    if (pid == 0) {
+        return Py_BuildValue("[]");
+    }
+
+    // read argmax and allocate memory for argument space.
+    argmax = psutil_get_argmax();
+    if (! argmax) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    procargs = (char *)malloc(argmax);
+    if (NULL == procargs) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    // read argument space
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROCARGS2;
+    mib[2] = pid;
+    if (sysctl(mib, 3, procargs, &argmax, NULL, 0) < 0) {
+        if (EINVAL == errno) {
+            // EINVAL == access denied OR nonexistent PID
+            if ( psutil_pid_exists(pid) ) {
+                AccessDenied();
+            } else {
+                NoSuchProcess();
+            }
+        }
+        goto error;
+    }
+
+    arg_end = &procargs[argmax];
+    // copy the number of arguments to nargs
+    memcpy(&nargs, procargs, sizeof(nargs));
+
+    arg_ptr = procargs + sizeof(nargs);
+    len = strlen(arg_ptr);
+    arg_ptr += len + 1;
+
+    if (arg_ptr == arg_end) {
+        free(procargs);
+        return Py_BuildValue("[]");
+    }
+
+    // skip ahead to the first argument
+    for (; arg_ptr < arg_end; arg_ptr++) {
+        if (*arg_ptr != '\0') {
+            break;
+        }
+    }
+
+    // iterate through arguments
+    curr_arg = arg_ptr;
+    arglist = Py_BuildValue("[]");
+    if (!arglist)
+        goto error;
+    while (arg_ptr < arg_end && nargs > 0) {
+        if (*arg_ptr++ == '\0') {
+            arg = Py_BuildValue("s", curr_arg);
+            if (!arg)
+                goto error;
+            if (PyList_Append(arglist, arg))
+                goto error;
+            Py_DECREF(arg);
+            // iterate to next arg and decrement # of args
+            curr_arg = arg_ptr;
+            nargs--;
+        }
+    }
+
+    free(procargs);
+    return arglist;
+
+error:
+    Py_XDECREF(arg);
+    Py_XDECREF(arglist);
+    if (procargs != NULL)
+        free(procargs);
+    return NULL;
+}
+
+
+int
+psutil_get_kinfo_proc(pid_t pid, struct kinfo_proc *kp)
+{
+    int mib[4];
+    size_t len;
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID;
+    mib[3] = pid;
+
+    // fetch the info with sysctl()
+    len = sizeof(struct kinfo_proc);
+
+    // now read the data from sysctl
+    if (sysctl(mib, 4, kp, &len, NULL, 0) == -1) {
+        // raise an exception and throw errno as the error
+        PyErr_SetFromErrno(PyExc_OSError);
+        return -1;
+    }
+
+    // sysctl succeeded but len is zero: this happens when the process has gone away
+    if (len == 0) {
+        NoSuchProcess();
+        return -1;
+    }
+    return 0;
+}
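
A hedged sketch of how this helper is typically consumed inside a module method; the field names (kp_eproc.e_ppid, kp_proc.p_comm) follow the OS X <sys/sysctl.h> layout of struct kinfo_proc, and the example function name is made up for illustration.

    #include <Python.h>
    #include <sys/sysctl.h>
    #include "process_info.h"

    /* sketch: parent pid and short command name for a pid */
    static PyObject *
    example_ppid_and_name(long pid)
    {
        struct kinfo_proc kp;

        if (psutil_get_kinfo_proc(pid, &kp) == -1)
            return NULL;    /* Python exception already set by the helper */
        return Py_BuildValue("(ls)", (long)kp.kp_eproc.e_ppid,
                             kp.kp_proc.p_comm);
    }
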
+
+
+/*
+ * A thin wrapper around proc_pidinfo()
+ */
+int
+psutil_proc_pidinfo(long pid, int flavor, void *pti, int size)
+{
+    int ret = proc_pidinfo((int)pid, flavor, 0, pti, size);
+    if (ret == 0) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+            return 0;
+        }
+        else {
+            AccessDenied();
+            return 0;
+        }
+    }
+    else if (ret != size) {
+        AccessDenied();
+        return 0;
+    }
+    else {
+        return 1;
+    }
+}
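
The wrapper is normally paired with one of the libproc "flavors"; a hedged sketch using PROC_PIDTASKINFO and struct proc_taskinfo from <libproc.h> (the function name is illustrative only, not part of the committed sources).

    #include <Python.h>
    #include <libproc.h>
    #include "process_info.h"

    /* sketch: thread count and resident memory for a pid */
    static PyObject *
    example_taskinfo(long pid)
    {
        struct proc_taskinfo pti;

        if (psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti)) == 0)
            return NULL;    /* AccessDenied / NoSuchProcess already raised */
        return Py_BuildValue("(lK)", (long)pti.pti_threadnum,
                             (unsigned long long)pti.pti_resident_size);
    }
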

+ 16 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h

@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+typedef struct kinfo_proc kinfo_proc;
+
+int psutil_get_argmax(void);
+int psutil_get_kinfo_proc(pid_t pid, struct kinfo_proc *kp);
+int psutil_get_proc_list(kinfo_proc **procList, size_t *procCount);
+int psutil_pid_exists(long pid);
+int psutil_proc_pidinfo(long pid, int flavor, void *pti, int size);
+PyObject* psutil_get_arg_list(long pid);

+ 41 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h

@@ -0,0 +1,41 @@
+// mingw headers are missing this
+
+typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP {
+    RelationProcessorCore,
+    RelationNumaNode,
+    RelationCache,
+    RelationProcessorPackage,
+    RelationGroup,
+    RelationAll=0xffff
+} LOGICAL_PROCESSOR_RELATIONSHIP;
+
+typedef enum _PROCESSOR_CACHE_TYPE {
+    CacheUnified,CacheInstruction,CacheData,CacheTrace
+} PROCESSOR_CACHE_TYPE;
+
+typedef struct _CACHE_DESCRIPTOR {
+    BYTE Level;
+    BYTE Associativity;
+    WORD LineSize;
+    DWORD Size;
+    PROCESSOR_CACHE_TYPE Type;
+} CACHE_DESCRIPTOR,*PCACHE_DESCRIPTOR;
+
+typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
+    ULONG_PTR ProcessorMask;
+    LOGICAL_PROCESSOR_RELATIONSHIP Relationship;
+    union {
+        struct {
+            BYTE Flags;
+        } ProcessorCore;
+        struct {
+            DWORD NodeNumber;
+        } NumaNode;
+        CACHE_DESCRIPTOR Cache;
+        ULONGLONG Reserved[2];
+    };
+} SYSTEM_LOGICAL_PROCESSOR_INFORMATION,*PSYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+WINBASEAPI WINBOOL WINAPI
+GetLogicalProcessorInformation(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION Buffer,
+                               PDWORD ReturnedLength);
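
These declarations only exist so the module can call GetLogicalProcessorInformation when built with mingw; on an SDK that already ships them, <windows.h> alone is enough. A hedged sketch of the usual two-call pattern, counting physical cores, for illustration:

    #include <windows.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        DWORD length = 0, ncores = 0;
        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buf, ptr;

        /* first call only reports the required buffer size */
        GetLogicalProcessorInformation(NULL, &length);
        if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
            return 1;
        buf = malloc(length);
        if (buf == NULL || !GetLogicalProcessorInformation(buf, &length)) {
            free(buf);
            return 1;
        }
        for (ptr = buf; (BYTE *)ptr < (BYTE *)buf + length; ptr++) {
            if (ptr->Relationship == RelationProcessorCore)
                ncores++;
        }
        printf("physical cores: %lu\n", (unsigned long)ncores);
        free(buf);
        return 0;
    }
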

+ 287 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h

@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+typedef enum _KTHREAD_STATE {
+    Initialized,
+    Ready,
+    Running,
+    Standby,
+    Terminated,
+    Waiting,
+    Transition,
+    DeferredReady,
+    GateWait,
+    MaximumThreadState
+} KTHREAD_STATE, *PKTHREAD_STATE;
+
+typedef enum _KWAIT_REASON {
+    Executive = 0,
+    FreePage = 1,
+    PageIn = 2,
+    PoolAllocation = 3,
+    DelayExecution = 4,
+    Suspended = 5,
+    UserRequest = 6,
+    WrExecutive = 7,
+    WrFreePage = 8,
+    WrPageIn = 9,
+    WrPoolAllocation = 10,
+    WrDelayExecution = 11,
+    WrSuspended = 12,
+    WrUserRequest = 13,
+    WrEventPair = 14,
+    WrQueue = 15,
+    WrLpcReceive = 16,
+    WrLpcReply = 17,
+    WrVirtualMemory = 18,
+    WrPageOut = 19,
+    WrRendezvous = 20,
+    Spare2 = 21,
+    Spare3 = 22,
+    Spare4 = 23,
+    Spare5 = 24,
+    WrCalloutStack = 25,
+    WrKernel = 26,
+    WrResource = 27,
+    WrPushLock = 28,
+    WrMutex = 29,
+    WrQuantumEnd = 30,
+    WrDispatchInt = 31,
+    WrPreempted = 32,
+    WrYieldExecution = 33,
+    WrFastMutex = 34,
+    WrGuardedMutex = 35,
+    WrRundown = 36,
+    MaximumWaitReason = 37
+} KWAIT_REASON, *PKWAIT_REASON;
+
+typedef struct _CLIENT_ID {
+    HANDLE UniqueProcess;
+    HANDLE UniqueThread;
+} CLIENT_ID, *PCLIENT_ID;
+
+
+typedef struct _UNICODE_STRING {
+    USHORT Length;
+    USHORT MaximumLength;
+    PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+typedef struct _SYSTEM_TIMEOFDAY_INFORMATION {
+    LARGE_INTEGER BootTime;
+    LARGE_INTEGER CurrentTime;
+    LARGE_INTEGER TimeZoneBias;
+    ULONG TimeZoneId;
+    ULONG Reserved;
+    ULONGLONG BootTimeBias;
+    ULONGLONG SleepTimeBias;
+} SYSTEM_TIMEOFDAY_INFORMATION, *PSYSTEM_TIMEOFDAY_INFORMATION;
+
+typedef struct _SYSTEM_THREAD_INFORMATION {
+    LARGE_INTEGER KernelTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER CreateTime;
+    ULONG WaitTime;
+    PVOID StartAddress;
+    CLIENT_ID ClientId;
+    LONG Priority;
+    LONG BasePriority;
+    ULONG ContextSwitches;
+    ULONG ThreadState;
+    KWAIT_REASON WaitReason;
+} SYSTEM_THREAD_INFORMATION, *PSYSTEM_THREAD_INFORMATION;
+
+typedef struct _TEB *PTEB;
+
+// private
+typedef struct _SYSTEM_EXTENDED_THREAD_INFORMATION {
+    SYSTEM_THREAD_INFORMATION ThreadInfo;
+    PVOID StackBase;
+    PVOID StackLimit;
+    PVOID Win32StartAddress;
+    PTEB TebBase;
+    ULONG_PTR Reserved2;
+    ULONG_PTR Reserved3;
+    ULONG_PTR Reserved4;
+} SYSTEM_EXTENDED_THREAD_INFORMATION, *PSYSTEM_EXTENDED_THREAD_INFORMATION;
+
+typedef struct _SYSTEM_PROCESS_INFORMATION {
+    ULONG NextEntryOffset;
+    ULONG NumberOfThreads;
+    LARGE_INTEGER SpareLi1;
+    LARGE_INTEGER SpareLi2;
+    LARGE_INTEGER SpareLi3;
+    LARGE_INTEGER CreateTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER KernelTime;
+    UNICODE_STRING ImageName;
+    LONG BasePriority;
+    HANDLE UniqueProcessId;
+    HANDLE InheritedFromUniqueProcessId;
+    ULONG HandleCount;
+    ULONG SessionId;
+    ULONG_PTR PageDirectoryBase;
+    SIZE_T PeakVirtualSize;
+    SIZE_T VirtualSize;
+    DWORD PageFaultCount;
+    SIZE_T PeakWorkingSetSize;
+    SIZE_T WorkingSetSize;
+    SIZE_T QuotaPeakPagedPoolUsage;
+    SIZE_T QuotaPagedPoolUsage;
+    SIZE_T QuotaPeakNonPagedPoolUsage;
+    SIZE_T QuotaNonPagedPoolUsage;
+    SIZE_T PagefileUsage;
+    SIZE_T PeakPagefileUsage;
+    SIZE_T PrivatePageCount;
+    LARGE_INTEGER ReadOperationCount;
+    LARGE_INTEGER WriteOperationCount;
+    LARGE_INTEGER OtherOperationCount;
+    LARGE_INTEGER ReadTransferCount;
+    LARGE_INTEGER WriteTransferCount;
+    LARGE_INTEGER OtherTransferCount;
+    SYSTEM_THREAD_INFORMATION Threads[1];
+} SYSTEM_PROCESS_INFORMATION, *PSYSTEM_PROCESS_INFORMATION;
+
+
+// structures and enums from winternl.h (not available under mingw)
+typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION {
+    LARGE_INTEGER IdleTime;
+    LARGE_INTEGER KernelTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER Reserved1[2];
+    ULONG Reserved2;
+} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION,
+    *PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION;
+
+
+typedef enum _SYSTEM_INFORMATION_CLASS {
+    SystemBasicInformation = 0,
+    SystemPerformanceInformation = 2,
+    SystemTimeOfDayInformation = 3,
+    SystemProcessInformation = 5,
+    SystemProcessorPerformanceInformation = 8,
+    SystemInterruptInformation = 23,
+    SystemExceptionInformation = 33,
+    SystemRegistryQuotaInformation = 37,
+    SystemLookasideInformation = 45
+} SYSTEM_INFORMATION_CLASS;
+
+
+// ================================================
+// psutil.users() support
+// ================================================
+
+typedef struct _WINSTATION_INFO {
+    BYTE Reserved1[72];
+    ULONG SessionId;
+    BYTE Reserved2[4];
+    FILETIME ConnectTime;
+    FILETIME DisconnectTime;
+    FILETIME LastInputTime;
+    FILETIME LoginTime;
+    BYTE Reserved3[1096];
+    FILETIME CurrentTime;
+} WINSTATION_INFO, *PWINSTATION_INFO;
+
+typedef enum _WINSTATIONINFOCLASS {
+     WinStationInformation = 8
+} WINSTATIONINFOCLASS;
+
+typedef BOOLEAN (WINAPI * PWINSTATIONQUERYINFORMATIONW)
+                 (HANDLE,ULONG,WINSTATIONINFOCLASS,PVOID,ULONG,PULONG);
+
+typedef struct _WINSTATIONINFORMATIONW {
+    BYTE Reserved2[70];
+    ULONG LogonId;
+    BYTE Reserved3[1140];
+} WINSTATIONINFORMATIONW, *PWINSTATIONINFORMATIONW;
+
+// mingw support:
+// http://www.koders.com/c/fid7C02CAE627C526914CDEB427405B51DF393A5EFA.aspx
+#ifndef _INC_WTSAPI
+typedef struct _WTS_CLIENT_ADDRESS {
+    DWORD AddressFamily;  // AF_INET, AF_IPX, AF_NETBIOS, AF_UNSPEC
+    BYTE  Address[20];    // client network address
+} WTS_CLIENT_ADDRESS, * PWTS_CLIENT_ADDRESS;
+
+HANDLE WINAPI WTSOpenServerA(IN LPSTR pServerName);
+
+VOID WINAPI WTSCloseServer(IN HANDLE hServer);
+#endif
+
+
+/*
+ * NtQueryInformationProcess code taken from
+ * http://wj32.wordpress.com/2009/01/24/howto-get-the-command-line-of-processes/
+ * typedefs needed to compile against ntdll functions not exposed in the API
+ */
+typedef LONG NTSTATUS;
+
+typedef NTSTATUS (NTAPI *_NtQueryInformationProcess)(
+    HANDLE ProcessHandle,
+    DWORD ProcessInformationClass,
+    PVOID ProcessInformation,
+    DWORD ProcessInformationLength,
+    PDWORD ReturnLength
+);
+
+typedef NTSTATUS (NTAPI *_NtSetInformationProcess)(
+    HANDLE ProcessHandle,
+    DWORD ProcessInformationClass,
+    PVOID ProcessInformation,
+    DWORD ProcessInformationLength
+);
+
+typedef struct _PROCESS_BASIC_INFORMATION {
+    PVOID Reserved1;
+    PVOID PebBaseAddress;
+    PVOID Reserved2[2];
+    ULONG_PTR UniqueProcessId;
+    PVOID Reserved3;
+} PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION;
+
+typedef enum _PROCESSINFOCLASS {
+    ProcessBasicInformation,
+    ProcessQuotaLimits,
+    ProcessIoCounters,
+    ProcessVmCounters,
+    ProcessTimes,
+    ProcessBasePriority,
+    ProcessRaisePriority,
+    ProcessDebugPort,
+    ProcessExceptionPort,
+    ProcessAccessToken,
+    ProcessLdtInformation,
+    ProcessLdtSize,
+    ProcessDefaultHardErrorMode,
+    ProcessIoPortHandlers,
+    ProcessPooledUsageAndLimits,
+    ProcessWorkingSetWatch,
+    ProcessUserModeIOPL,
+    ProcessEnableAlignmentFaultFixup,
+    ProcessPriorityClass,
+    ProcessWx86Information,
+    ProcessHandleCount,
+    ProcessAffinityMask,
+    ProcessPriorityBoost,
+    ProcessDeviceMap,
+    ProcessSessionInformation,
+    ProcessForegroundInformation,
+    ProcessWow64Information,
+    /* added after XP+ */
+    ProcessImageFileName,
+    ProcessLUIDDeviceMapsEnabled,
+    ProcessBreakOnTermination,
+    ProcessDebugObjectHandle,
+    ProcessDebugFlags,
+    ProcessHandleTracing,
+    ProcessIoPriority,
+    ProcessExecuteFlags,
+    ProcessResourceManagement,
+    ProcessCookie,
+    ProcessImageInformation,
+    MaxProcessInfoClass
+} PROCESSINFOCLASS;
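
Most of these typedefs are consumed through run-time lookup of undocumented ntdll entry points. A hedged sketch of that pattern with SystemTimeOfDayInformation (the NTQSI_PROC typedef and function name are assumptions made for the example; BootTime is a 100-nanosecond count since 1601-01-01):

    #include <windows.h>
    #include "ntextapi.h"

    typedef NTSTATUS (__stdcall *NTQSI_PROC)(int, PVOID, ULONG, PULONG);

    /* sketch: read the system boot time through NtQuerySystemInformation */
    static int
    example_boot_time(ULONGLONG *boot_100ns)
    {
        SYSTEM_TIMEOFDAY_INFORMATION sti;
        NTQSI_PROC query = (NTQSI_PROC)GetProcAddress(
            GetModuleHandleA("ntdll.dll"), "NtQuerySystemInformation");

        if (query == NULL)
            return -1;
        if (query(SystemTimeOfDayInformation, &sti, sizeof(sti), NULL) != 0)
            return -1;
        *boot_100ns = (ULONGLONG)sti.BootTime.QuadPart;
        return 0;
    }
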

+ 336 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c

@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ */
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+
+#include <Python.h>
+#include <windows.h>
+#include <stdio.h>
+#include "process_handles.h"
+
+#ifndef NT_SUCCESS
+#define NT_SUCCESS(x) ((x) >= 0)
+#endif
+#define STATUS_INFO_LENGTH_MISMATCH 0xc0000004
+
+#define SystemHandleInformation 16
+#define ObjectBasicInformation 0
+#define ObjectNameInformation 1
+#define ObjectTypeInformation 2
+
+
+typedef LONG NTSTATUS;
+
+typedef struct _UNICODE_STRING {
+    USHORT Length;
+    USHORT MaximumLength;
+    PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+typedef NTSTATUS (NTAPI *_NtQuerySystemInformation)(
+    ULONG SystemInformationClass,
+    PVOID SystemInformation,
+    ULONG SystemInformationLength,
+    PULONG ReturnLength
+);
+
+typedef NTSTATUS (NTAPI *_NtDuplicateObject)(
+    HANDLE SourceProcessHandle,
+    HANDLE SourceHandle,
+    HANDLE TargetProcessHandle,
+    PHANDLE TargetHandle,
+    ACCESS_MASK DesiredAccess,
+    ULONG Attributes,
+    ULONG Options
+);
+
+typedef NTSTATUS (NTAPI *_NtQueryObject)(
+    HANDLE ObjectHandle,
+    ULONG ObjectInformationClass,
+    PVOID ObjectInformation,
+    ULONG ObjectInformationLength,
+    PULONG ReturnLength
+);
+
+typedef struct _SYSTEM_HANDLE {
+    ULONG ProcessId;
+    BYTE ObjectTypeNumber;
+    BYTE Flags;
+    USHORT Handle;
+    PVOID Object;
+    ACCESS_MASK GrantedAccess;
+} SYSTEM_HANDLE, *PSYSTEM_HANDLE;
+
+typedef struct _SYSTEM_HANDLE_INFORMATION {
+    ULONG HandleCount;
+    SYSTEM_HANDLE Handles[1];
+} SYSTEM_HANDLE_INFORMATION, *PSYSTEM_HANDLE_INFORMATION;
+
+typedef enum _POOL_TYPE {
+    NonPagedPool,
+    PagedPool,
+    NonPagedPoolMustSucceed,
+    DontUseThisType,
+    NonPagedPoolCacheAligned,
+    PagedPoolCacheAligned,
+    NonPagedPoolCacheAlignedMustS
+} POOL_TYPE, *PPOOL_TYPE;
+
+typedef struct _OBJECT_TYPE_INFORMATION {
+    UNICODE_STRING Name;
+    ULONG TotalNumberOfObjects;
+    ULONG TotalNumberOfHandles;
+    ULONG TotalPagedPoolUsage;
+    ULONG TotalNonPagedPoolUsage;
+    ULONG TotalNamePoolUsage;
+    ULONG TotalHandleTableUsage;
+    ULONG HighWaterNumberOfObjects;
+    ULONG HighWaterNumberOfHandles;
+    ULONG HighWaterPagedPoolUsage;
+    ULONG HighWaterNonPagedPoolUsage;
+    ULONG HighWaterNamePoolUsage;
+    ULONG HighWaterHandleTableUsage;
+    ULONG InvalidAttributes;
+    GENERIC_MAPPING GenericMapping;
+    ULONG ValidAccess;
+    BOOLEAN SecurityRequired;
+    BOOLEAN MaintainHandleCount;
+    USHORT MaintainTypeList;
+    POOL_TYPE PoolType;
+    ULONG PagedPoolUsage;
+    ULONG NonPagedPoolUsage;
+} OBJECT_TYPE_INFORMATION, *POBJECT_TYPE_INFORMATION;
+
+
+PVOID
+GetLibraryProcAddress(PSTR LibraryName, PSTR ProcName)
+{
+    return GetProcAddress(GetModuleHandleA(LibraryName), ProcName);
+}
+
+
+PyObject *
+psutil_get_open_files(long pid, HANDLE processHandle)
+{
+    _NtQuerySystemInformation NtQuerySystemInformation =
+        GetLibraryProcAddress("ntdll.dll", "NtQuerySystemInformation");
+    _NtQueryObject NtQueryObject =
+        GetLibraryProcAddress("ntdll.dll", "NtQueryObject");
+
+    NTSTATUS                    status;
+    PSYSTEM_HANDLE_INFORMATION  handleInfo;
+    ULONG                       handleInfoSize = 0x10000;
+    ULONG                       i;
+    ULONG                       fileNameLength;
+    PyObject                    *filesList = Py_BuildValue("[]");
+    PyObject                    *arg = NULL;
+    PyObject                    *fileFromWchar = NULL;
+
+    if (filesList == NULL)
+        return NULL;
+
+    handleInfo = (PSYSTEM_HANDLE_INFORMATION)malloc(handleInfoSize);
+    if (handleInfo == NULL) {
+        Py_DECREF(filesList);
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    // NtQuerySystemInformation won't give us the correct buffer size,
+    // so we guess by doubling the buffer size.
+    while ((status = NtQuerySystemInformation(
+                         SystemHandleInformation,
+                         handleInfo,
+                         handleInfoSize,
+                         NULL
+                     )) == STATUS_INFO_LENGTH_MISMATCH)
+    {
+        handleInfo = (PSYSTEM_HANDLE_INFORMATION) \
+            realloc(handleInfo, handleInfoSize *= 2);
+    }
+
+    // NtQuerySystemInformation stopped giving us STATUS_INFO_LENGTH_MISMATCH
+    if (!NT_SUCCESS(status)) {
+        Py_DECREF(filesList);
+        free(handleInfo);
+        return NULL;
+    }
+
+    for (i = 0; i < handleInfo->HandleCount; i++) {
+        SYSTEM_HANDLE            handle = handleInfo->Handles[i];
+        HANDLE                   dupHandle = NULL;
+        HANDLE                   mapHandle = NULL;
+        POBJECT_TYPE_INFORMATION objectTypeInfo = NULL;
+        PVOID                    objectNameInfo;
+        UNICODE_STRING           objectName;
+        ULONG                    returnLength;
+        DWORD                    error = 0;
+        fileFromWchar = NULL;
+        arg = NULL;
+
+        // Check if this handle belongs to the PID the user specified.
+        if (handle.ProcessId != pid)
+            continue;
+
+        // Skip handles with the following access codes as the next call
+        // to NtDuplicateObject() or NtQueryObject() might hang forever.
+        if ((handle.GrantedAccess == 0x0012019f)
+                || (handle.GrantedAccess == 0x001a019f)
+                || (handle.GrantedAccess == 0x00120189)
+                || (handle.GrantedAccess == 0x00100000)) {
+            continue;
+        }
+
+        if (!DuplicateHandle(processHandle,
+                             handle.Handle,
+                             GetCurrentProcess(),
+                             &dupHandle,
+                             0,
+                             TRUE,
+                             DUPLICATE_SAME_ACCESS))
+         {
+             //printf("[%#x] Error: %d \n", handle.Handle, GetLastError());
+             continue;
+         }
+
+
+        mapHandle = CreateFileMapping(dupHandle,
+                                      NULL,
+                                      PAGE_READONLY,
+                                      0,
+                                      0,
+                                      NULL);
+        if (mapHandle == NULL)
+            error = GetLastError();
+        if (mapHandle == NULL &&
+           (error == ERROR_INVALID_HANDLE ||
+            error == ERROR_BAD_EXE_FORMAT)) {
+            CloseHandle(dupHandle);
+            //printf("CreateFileMapping Error: %d\n", error);
+            continue;
+        }
+        CloseHandle(mapHandle);
+
+        // Query the object type.
+        objectTypeInfo = (POBJECT_TYPE_INFORMATION)malloc(0x1000);
+        if (!NT_SUCCESS(NtQueryObject(
+                            dupHandle,
+                            ObjectTypeInformation,
+                            objectTypeInfo,
+                            0x1000,
+                            NULL
+                        )))
+        {
+            free(objectTypeInfo);
+            CloseHandle(dupHandle);
+            continue;
+        }
+
+        objectNameInfo = malloc(0x1000);
+        if (!NT_SUCCESS(NtQueryObject(
+                            dupHandle,
+                            ObjectNameInformation,
+                            objectNameInfo,
+                            0x1000,
+                            &returnLength
+                        )))
+        {
+            // Reallocate the buffer and try again.
+            objectNameInfo = realloc(objectNameInfo, returnLength);
+            if (!NT_SUCCESS(NtQueryObject(
+                                dupHandle,
+                                ObjectNameInformation,
+                                objectNameInfo,
+                                returnLength,
+                                NULL
+                            )))
+            {
+                // We have the type name, so just display that.
+                /*
+                printf(
+                    "[%#x] %.*S: (could not get name)\n",
+                    handle.Handle,
+                    objectTypeInfo->Name.Length / 2,
+                    objectTypeInfo->Name.Buffer
+                    );
+                */
+                free(objectTypeInfo);
+                free(objectNameInfo);
+                CloseHandle(dupHandle);
+                continue;
+
+            }
+        }
+
+        // Cast our buffer into an UNICODE_STRING.
+        objectName = *(PUNICODE_STRING)objectNameInfo;
+
+        // Print the information!
+        if (objectName.Length)
+        {
+            // The object has a name.  Make sure it is a file otherwise
+            // ignore it
+            fileNameLength = objectName.Length / 2;
+            if (wcscmp(objectTypeInfo->Name.Buffer, L"File") == 0) {
+                // printf("%.*S\n", objectName.Length / 2, objectName.Buffer);
+                fileFromWchar = PyUnicode_FromWideChar(objectName.Buffer,
+                                                       fileNameLength);
+                if (fileFromWchar == NULL)
+                    goto error_py_fun;
+#if PY_MAJOR_VERSION >= 3
+                arg = Py_BuildValue("N",
+                                    PyUnicode_AsUTF8String(fileFromWchar));
+#else
+                arg = Py_BuildValue("N",
+                                    PyUnicode_FromObject(fileFromWchar));
+#endif
+                if (!arg)
+                    goto error_py_fun;
+                Py_XDECREF(fileFromWchar);
+                fileFromWchar = NULL;
+                if (PyList_Append(filesList, arg))
+                    goto error_py_fun;
+                Py_XDECREF(arg);
+            }
+            /*
+            printf(
+                "[%#x] %.*S: %.*S\n",
+                handle.Handle,
+                objectTypeInfo->Name.Length / 2,
+                objectTypeInfo->Name.Buffer,
+                objectName.Length / 2,
+                objectName.Buffer
+                );
+            */
+        }
+        else
+        {
+            // Print something else.
+            /*
+            printf(
+                "[%#x] %.*S: (unnamed)\n",
+                handle.Handle,
+                objectTypeInfo->Name.Length / 2,
+                objectTypeInfo->Name.Buffer
+                );
+            */
+            ;;
+        }
+        free(objectTypeInfo);
+        free(objectNameInfo);
+        CloseHandle(dupHandle);
+    }
+    free(handleInfo);
+    CloseHandle(processHandle);
+    return filesList;
+
+error_py_fun:
+    Py_XDECREF(arg);
+    Py_XDECREF(fileFromWchar);
+    Py_DECREF(filesList);
+    return NULL;
+}
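
A hedged usage sketch: the handle comes from psutil_handle_from_pid() in process_info.c, and psutil_get_open_files() closes it itself on the success path, so the caller does not CloseHandle() it again (the wrapper function name is illustrative only).

    #include <Python.h>
    #include <windows.h>
    #include "process_info.h"
    #include "process_handles.h"

    /* sketch: list of files opened by a pid */
    static PyObject *
    example_open_files(long pid)
    {
        HANDLE hProcess = psutil_handle_from_pid((DWORD)pid);

        if (hProcess == NULL)
            return NULL;    /* NoSuchProcess / AccessDenied already raised */
        return psutil_get_open_files(pid, hProcess);
    }
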

+ 10 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h

@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+PyObject* psutil_get_open_files(long pid, HANDLE processHandle);

+ 443 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c

@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information. Used by
+ * _psutil_windows module methods.
+ */
+
+#include <Python.h>
+#include <windows.h>
+#include <Psapi.h>
+#include <tlhelp32.h>
+
+#include "security.h"
+#include "process_info.h"
+#include "ntextapi.h"
+#include "../../_psutil_common.h"
+
+
+/*
+ * A wrapper around OpenProcess setting NSP exception if process
+ * no longer exists.
+ * "pid" is the process pid, "dwDesiredAccess" is the first argument
+ * expected by OpenProcess.
+ * Return a process handle or NULL.
+ */
+HANDLE
+psutil_handle_from_pid_waccess(DWORD pid, DWORD dwDesiredAccess)
+{
+    HANDLE hProcess;
+    DWORD processExitCode = 0;
+
+    if (pid == 0) {
+        // otherwise we'd get NoSuchProcess
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(dwDesiredAccess, FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+
+    // make sure the process is running
+    GetExitCodeProcess(hProcess, &processExitCode);
+    if (processExitCode == 0) {
+        NoSuchProcess();
+        CloseHandle(hProcess);
+        return NULL;
+    }
+    return hProcess;
+}
+
+
+/*
+ * Same as psutil_handle_from_pid_waccess but implicitly uses
+ * PROCESS_QUERY_INFORMATION | PROCESS_VM_READ as dwDesiredAccess
+ * parameter for OpenProcess.
+ */
+HANDLE
+psutil_handle_from_pid(DWORD pid) {
+    DWORD dwDesiredAccess = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
+    return psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+}
+
+
+// fetch the PEB base address from NtQueryInformationProcess()
+PVOID
+psutil_get_peb_address(HANDLE ProcessHandle)
+{
+    _NtQueryInformationProcess NtQueryInformationProcess =
+        (_NtQueryInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtQueryInformationProcess");
+    PROCESS_BASIC_INFORMATION pbi;
+
+    NtQueryInformationProcess(ProcessHandle, 0, &pbi, sizeof(pbi), NULL);
+    return pbi.PebBaseAddress;
+}
+
+
+DWORD *
+psutil_get_pids(DWORD *numberOfReturnedPIDs) {
+    // Win32 SDK says the only way to know if our process array
+    // wasn't large enough is to check the returned size and make
+    // sure that it doesn't match the size of the array.
+    // If it does we allocate a larger array and try again
+
+    // Stores the actual array
+    DWORD *procArray = NULL;
+    DWORD procArrayByteSz;
+    int procArraySz = 0;
+
+    // Stores the byte size of the returned array from enumprocesses
+    DWORD enumReturnSz = 0;
+
+    do {
+        procArraySz += 1024;
+        free(procArray);
+        procArrayByteSz = procArraySz * sizeof(DWORD);
+        procArray = malloc(procArrayByteSz);
+        if (procArray == NULL) {
+            PyErr_NoMemory();
+            return NULL;
+        }
+        if (! EnumProcesses(procArray, procArrayByteSz, &enumReturnSz)) {
+            free(procArray);
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    } while (enumReturnSz == procArraySz * sizeof(DWORD));
+
+    // The number of elements is the returned size / size of each element
+    *numberOfReturnedPIDs = enumReturnSz / sizeof(DWORD);
+
+    return procArray;
+}
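
An illustrative consumer of the EnumProcesses snapshot above (the function name is made up; the caller owns and frees the returned array):

    #include <Python.h>
    #include <windows.h>
    #include "process_info.h"

    /* sketch: build a Python list of pids from the snapshot */
    static PyObject *
    example_pid_list(void)
    {
        DWORD i, npids = 0;
        DWORD *pids = psutil_get_pids(&npids);
        PyObject *list, *item;

        if (pids == NULL)
            return NULL;    /* exception already set by the helper */
        list = PyList_New(0);
        if (list == NULL) {
            free(pids);
            return NULL;
        }
        for (i = 0; i < npids; i++) {
            item = PyLong_FromUnsignedLong(pids[i]);
            if (item == NULL || PyList_Append(list, item) != 0) {
                Py_XDECREF(item);
                Py_DECREF(list);
                free(pids);
                return NULL;
            }
            Py_DECREF(item);
        }
        free(pids);         /* caller owns the snapshot */
        return list;
    }
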
+
+
+int
+psutil_pid_is_running(DWORD pid)
+{
+    HANDLE hProcess;
+    DWORD exitCode;
+
+    // Special case for PID 0 System Idle Process
+    if (pid == 0) {
+        return 1;
+    }
+
+    if (pid < 0) {
+        return 0;
+    }
+
+    hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
+                           FALSE, pid);
+    if (NULL == hProcess) {
+        // invalid parameter is no such process
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            CloseHandle(hProcess);
+            return 0;
+        }
+
+        // access denied obviously means there's a process to deny access to...
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            CloseHandle(hProcess);
+            return 1;
+        }
+
+        CloseHandle(hProcess);
+        PyErr_SetFromWindowsErr(0);
+        return -1;
+    }
+
+    if (GetExitCodeProcess(hProcess, &exitCode)) {
+        CloseHandle(hProcess);
+        return (exitCode == STILL_ACTIVE);
+    }
+
+    // access denied means there's a process there so we'll assume
+    // it's running
+    if (GetLastError() == ERROR_ACCESS_DENIED) {
+        CloseHandle(hProcess);
+        return 1;
+    }
+
+    PyErr_SetFromWindowsErr(0);
+    CloseHandle(hProcess);
+    return -1;
+}
+
+
+int
+psutil_pid_in_proclist(DWORD pid)
+{
+    DWORD *proclist = NULL;
+    DWORD numberOfReturnedPIDs;
+    DWORD i;
+
+    proclist = psutil_get_pids(&numberOfReturnedPIDs);
+    if (NULL == proclist) {
+        return -1;
+    }
+
+    for (i = 0; i < numberOfReturnedPIDs; i++) {
+        if (pid == proclist[i]) {
+            free(proclist);
+            return 1;
+        }
+    }
+
+    free(proclist);
+    return 0;
+}
+
+
+// Check the exit code from a process handle; also returns FALSE on error
+// XXX - not used anymore
+int
+handlep_is_running(HANDLE hProcess)
+{
+    DWORD dwCode;
+    if (NULL == hProcess) {
+        return 0;
+    }
+    if (GetExitCodeProcess(hProcess, &dwCode)) {
+        if (dwCode == STILL_ACTIVE) {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+
+/*
+ * returns a Python list representing the arguments for the process
+ * with given pid or NULL on error.
+ */
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    int nArgs, i;
+    LPWSTR *szArglist = NULL;
+    HANDLE hProcess = NULL;
+    PVOID pebAddress;
+    PVOID rtlUserProcParamsAddress;
+    UNICODE_STRING commandLine;
+    WCHAR *commandLineContents = NULL;
+    PyObject *arg = NULL;
+    PyObject *arg_from_wchar = NULL;
+    PyObject *argList = NULL;
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    pebAddress = psutil_get_peb_address(hProcess);
+
+    // get the address of ProcessParameters
+#ifdef _WIN64
+    if (!ReadProcessMemory(hProcess, (PCHAR)pebAddress + 32,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#else
+    if (!ReadProcessMemory(hProcess, (PCHAR)pebAddress + 0x10,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#endif
+    {
+        ////printf("Could not read the address of ProcessParameters!\n");
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // read the CommandLine UNICODE_STRING structure
+#ifdef _WIN64
+    if (!ReadProcessMemory(hProcess, (PCHAR)rtlUserProcParamsAddress + 112,
+                           &commandLine, sizeof(commandLine), NULL))
+#else
+    if (!ReadProcessMemory(hProcess, (PCHAR)rtlUserProcParamsAddress + 0x40,
+                           &commandLine, sizeof(commandLine), NULL))
+#endif
+    {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+
+    // allocate memory to hold the command line
+    commandLineContents = (WCHAR *)malloc(commandLine.Length + sizeof(WCHAR));
+    if (commandLineContents == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    // read the command line
+    if (!ReadProcessMemory(hProcess, commandLine.Buffer,
+                           commandLineContents, commandLine.Length, NULL))
+    {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Null-terminate the string to prevent wcslen from returning
+    // incorrect length; the length specifier is in characters, but
+    // commandLine.Length is in bytes.
+    commandLineContents[(commandLine.Length / sizeof(WCHAR))] = '\0';
+
+    // attempt to parse the command line using the Win32 API; fall back
+    // to the raw cmdline string otherwise
+    szArglist = CommandLineToArgvW(commandLineContents, &nArgs);
+    if (NULL == szArglist) {
+        // failed to parse arglist
+        // encode as a UTF8 Python string object from WCHAR string
+        arg_from_wchar = PyUnicode_FromWideChar(commandLineContents,
+                                                commandLine.Length / 2);
+        if (arg_from_wchar == NULL)
+            goto error;
+#if PY_MAJOR_VERSION >= 3
+        argList = Py_BuildValue("N", PyUnicode_AsUTF8String(arg_from_wchar));
+#else
+        argList = Py_BuildValue("N", PyUnicode_FromObject(arg_from_wchar));
+#endif
+        if (!argList)
+            goto error;
+    }
+    else {
+        // arglist parsed as array of UNICODE_STRING, so convert each to
+        // Python string object and add to arg list
+        argList = Py_BuildValue("[]");
+        if (argList == NULL)
+            goto error;
+        for (i = 0; i < nArgs; i++) {
+            arg_from_wchar = NULL;
+            arg = NULL;
+            arg_from_wchar = PyUnicode_FromWideChar(szArglist[i],
+                                                    wcslen(szArglist[i]));
+            if (arg_from_wchar == NULL)
+                goto error;
+#if PY_MAJOR_VERSION >= 3
+            arg = PyUnicode_FromObject(arg_from_wchar);
+#else
+            arg = PyUnicode_AsUTF8String(arg_from_wchar);
+#endif
+            if (arg == NULL)
+                goto error;
+            Py_XDECREF(arg_from_wchar);
+            if (PyList_Append(argList, arg))
+                goto error;
+            Py_XDECREF(arg);
+        }
+    }
+
+    if (szArglist != NULL)
+        LocalFree(szArglist);
+    free(commandLineContents);
+    CloseHandle(hProcess);
+    return argList;
+
+error:
+    Py_XDECREF(arg);
+    Py_XDECREF(arg_from_wchar);
+    Py_XDECREF(argList);
+    if (hProcess != NULL)
+        CloseHandle(hProcess);
+    if (commandLineContents != NULL)
+        free(commandLineContents);
+    if (szArglist != NULL)
+        LocalFree(szArglist);
+    return NULL;
+}
+
+
+#define PH_FIRST_PROCESS(Processes) ((PSYSTEM_PROCESS_INFORMATION)(Processes))
+#define PH_NEXT_PROCESS(Process) ( \
+   ((PSYSTEM_PROCESS_INFORMATION)(Process))->NextEntryOffset ? \
+   (PSYSTEM_PROCESS_INFORMATION)((PCHAR)(Process) + \
+        ((PSYSTEM_PROCESS_INFORMATION)(Process))->NextEntryOffset) : \
+   NULL)
+
+const int STATUS_INFO_LENGTH_MISMATCH = 0xC0000004;
+const int STATUS_BUFFER_TOO_SMALL = 0xC0000023L;
+
+/*
+ * Given a process PID and a PSYSTEM_PROCESS_INFORMATION structure
+ * fills the structure with process information.
+ * On success return 1, else 0 with Python exception already set.
+ */
+int
+psutil_get_proc_info(DWORD pid, PSYSTEM_PROCESS_INFORMATION *retProcess,
+                 PVOID *retBuffer)
+{
+    static ULONG initialBufferSize = 0x4000;
+    NTSTATUS status;
+    PVOID buffer;
+    ULONG bufferSize;
+    PSYSTEM_PROCESS_INFORMATION process;
+
+    // get NtQuerySystemInformation
+    typedef DWORD (_stdcall * NTQSI_PROC) (int, PVOID, ULONG, PULONG);
+    NTQSI_PROC NtQuerySystemInformation;
+    HINSTANCE hNtDll;
+    hNtDll = LoadLibrary(TEXT("ntdll.dll"));
+    NtQuerySystemInformation = (NTQSI_PROC)GetProcAddress(
+        hNtDll, "NtQuerySystemInformation");
+
+    bufferSize = initialBufferSize;
+    buffer = malloc(bufferSize);
+    if (buffer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    while (TRUE) {
+        status = NtQuerySystemInformation(SystemProcessInformation, buffer,
+                                          bufferSize, &bufferSize);
+
+        if (status == STATUS_BUFFER_TOO_SMALL ||
+                status == STATUS_INFO_LENGTH_MISMATCH)
+        {
+            free(buffer);
+            buffer = malloc(bufferSize);
+            if (buffer == NULL) {
+                PyErr_NoMemory();
+                goto error;
+            }
+        }
+        else {
+            break;
+        }
+    }
+
+    if (status != 0) {
+        PyErr_Format(PyExc_RuntimeError, "NtQuerySystemInformation() failed");
+        goto error;
+    }
+
+    if (bufferSize <= 0x20000) {
+        initialBufferSize = bufferSize;
+    }
+
+    process = PH_FIRST_PROCESS(buffer);
+    do {
+        if (process->UniqueProcessId == (HANDLE)pid) {
+            *retProcess = process;
+            *retBuffer = buffer;
+            return 1;
+        }
+    } while ( (process = PH_NEXT_PROCESS(process)) );
+
+    NoSuchProcess();
+    goto error;
+
+error:
+    FreeLibrary(hNtDll);
+    if (buffer != NULL)
+        free(buffer);
+    return 0;
+}
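
An illustrative consumer of psutil_get_proc_info(): retProcess points into retBuffer, so only the buffer is freed. The prototype is repeated locally because it is not part of the process_info.h shown above; the function name is made up for the example.

    #include <Python.h>
    #include <windows.h>
    #include "ntextapi.h"

    int psutil_get_proc_info(DWORD pid, PSYSTEM_PROCESS_INFORMATION *retProcess,
                             PVOID *retBuffer);

    /* sketch: thread and handle counts for a pid from the process snapshot */
    static PyObject *
    example_counts(DWORD pid)
    {
        PSYSTEM_PROCESS_INFORMATION process;
        PVOID buffer;
        PyObject *ret;

        if (! psutil_get_proc_info(pid, &process, &buffer))
            return NULL;    /* exception already set */
        ret = Py_BuildValue("(kk)",
                            (unsigned long)process->NumberOfThreads,
                            (unsigned long)process->HandleCount);
        free(buffer);       /* 'process' points into this buffer */
        return ret;
    }
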

+ 17 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h

@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+DWORD* psutil_get_pids(DWORD *numberOfReturnedPIDs);
+HANDLE psutil_handle_from_pid(DWORD pid);
+HANDLE psutil_handle_from_pid_waccess(DWORD pid, DWORD dwDesiredAccess);
+int psutil_handlep_is_running(HANDLE hProcess);
+int psutil_pid_in_proclist(DWORD pid);
+int psutil_pid_is_running(DWORD pid);
+PVOID psutil_get_peb_address(HANDLE ProcessHandle);
+PyObject* psutil_get_arg_list(long pid);

+ 238 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c

@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Security related functions for Windows platform (Set privileges such as
+ * SeDebug), as well as security helper functions.
+ */
+
+#include <windows.h>
+#include <Python.h>
+
+
+/*
+ * Convert a process handle to a process token handle.
+ */
+HANDLE
+psutil_token_from_handle(HANDLE hProcess) {
+    HANDLE hToken = NULL;
+
+    if (! OpenProcessToken(hProcess, TOKEN_QUERY, &hToken)) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+    return hToken;
+}
+
+
+/*
+ * http://www.ddj.com/windows/184405986
+ *
+ * There's a way to determine whether we're running under the Local System
+ * account. However (you guessed it), we have to call more Win32 functions to
+ * determine this. Backing up through the code listing, we need to make another
+ * call to GetTokenInformation, but instead of passing through the TOKEN_USER
+ * constant, we pass through the TOKEN_PRIVILEGES constant. This value returns
+ * an array of privileges that the account has in the environment. Iterating
+ * through the array, we call the function LookupPrivilegeName looking for the
+ * string "SeTcbPrivilege". If the function returns this string, then this
+ * account has Local System privileges.
+ */
+int
+psutil_has_system_privilege(HANDLE hProcess) {
+    DWORD i;
+    DWORD dwSize = 0;
+    DWORD dwRetval = 0;
+    TCHAR privName[256];
+    DWORD dwNameSize = 256;
+    // PTOKEN_PRIVILEGES tp = NULL;
+    BYTE *pBuffer = NULL;
+    TOKEN_PRIVILEGES *tp = NULL;
+    HANDLE hToken = psutil_token_from_handle(hProcess);
+
+    if (NULL == hToken) {
+        return -1;
+    }
+
+    // call GetTokenInformation first to get the buffer size
+    if (! GetTokenInformation(hToken, TokenPrivileges, NULL, 0, &dwSize)) {
+        dwRetval = GetLastError();
+        // if it failed for a reason other than the buffer, bail out
+        if (dwRetval != ERROR_INSUFFICIENT_BUFFER ) {
+            PyErr_SetFromWindowsErr(dwRetval);
+            return 0;
+        }
+    }
+
+    // allocate buffer and call GetTokenInformation again
+    // tp = (PTOKEN_PRIVILEGES) GlobalAlloc(GPTR, dwSize);
+    pBuffer = (BYTE *) malloc(dwSize);
+    if (pBuffer == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+
+    if (! GetTokenInformation(hToken, TokenPrivileges, pBuffer,
+                              dwSize, &dwSize))
+    {
+        PyErr_SetFromWindowsErr(0);
+        free(pBuffer);
+        return -1;
+    }
+
+    // convert the BYTE buffer to a TOKEN_PRIVILEGES struct pointer
+    tp = (TOKEN_PRIVILEGES *)pBuffer;
+
+    // check all the privileges looking for SeTcbPrivilege
+    for (i = 0; i < tp->PrivilegeCount; i++) {
+        // reset the buffer contents and the buffer size
+        strcpy(privName, "");
+        dwNameSize = sizeof(privName) / sizeof(TCHAR);
+        if (! LookupPrivilegeName(NULL,
+                                  &tp->Privileges[i].Luid,
+                                  (LPTSTR)privName,
+                                  &dwNameSize))
+        {
+            PyErr_SetFromWindowsErr(0);
+            free(pBuffer);
+            return -1;
+        }
+
+        // if we find the SeTcbPrivilege then it's a LocalSystem process
+        if (! lstrcmpi(privName, TEXT("SeTcbPrivilege"))) {
+            free(pBuffer);
+            return 1;
+        }
+    }
+
+    free(pBuffer);
+    return 0;
+}
+
+
+BOOL
+psutil_set_privilege(HANDLE hToken, LPCTSTR Privilege, BOOL bEnablePrivilege)
+{
+    TOKEN_PRIVILEGES tp;
+    LUID luid;
+    TOKEN_PRIVILEGES tpPrevious;
+    DWORD cbPrevious = sizeof(TOKEN_PRIVILEGES);
+
+    if (!LookupPrivilegeValue( NULL, Privilege, &luid )) return FALSE;
+
+    // first pass.  get current privilege setting
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Luid = luid;
+    tp.Privileges[0].Attributes = 0;
+
+    AdjustTokenPrivileges(
+        hToken,
+        FALSE,
+        &tp,
+        sizeof(TOKEN_PRIVILEGES),
+        &tpPrevious,
+        &cbPrevious
+    );
+
+    if (GetLastError() != ERROR_SUCCESS) return FALSE;
+
+    // second pass. set privilege based on previous setting
+    tpPrevious.PrivilegeCount = 1;
+    tpPrevious.Privileges[0].Luid = luid;
+
+    if (bEnablePrivilege) {
+        tpPrevious.Privileges[0].Attributes |= (SE_PRIVILEGE_ENABLED);
+    }
+
+    else {
+        tpPrevious.Privileges[0].Attributes ^=
+            (SE_PRIVILEGE_ENABLED & tpPrevious.Privileges[0].Attributes);
+    }
+
+    AdjustTokenPrivileges(
+        hToken,
+        FALSE,
+        &tpPrevious,
+        cbPrevious,
+        NULL,
+        NULL
+    );
+
+    if (GetLastError() != ERROR_SUCCESS) return FALSE;
+
+    return TRUE;
+}
+
+
+int
+psutil_set_se_debug()
+{
+    HANDLE hToken;
+    if (! OpenThreadToken(GetCurrentThread(),
+                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                          FALSE,
+                          &hToken)
+       ) {
+        if (GetLastError() == ERROR_NO_TOKEN) {
+            if (!ImpersonateSelf(SecurityImpersonation)) {
+                CloseHandle(hToken);
+                return 0;
+            }
+            if (!OpenThreadToken(GetCurrentThread(),
+                                 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                                 FALSE,
+                                 &hToken)
+               ) {
+                RevertToSelf();
+                CloseHandle(hToken);
+                return 0;
+            }
+        }
+    }
+
+    // enable SeDebugPrivilege (open any process)
+    if (! psutil_set_privilege(hToken, SE_DEBUG_NAME, TRUE)) {
+        RevertToSelf();
+        CloseHandle(hToken);
+        return 0;
+    }
+
+    RevertToSelf();
+    CloseHandle(hToken);
+    return 1;
+}
+
+
+int
+psutil_unset_se_debug()
+{
+    HANDLE hToken;
+    if (! OpenThreadToken(GetCurrentThread(),
+                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                          FALSE,
+                          &hToken)
+       ) {
+        if (GetLastError() == ERROR_NO_TOKEN) {
+            if (! ImpersonateSelf(SecurityImpersonation)) {
+                return 0;
+            }
+
+            if (!OpenThreadToken(GetCurrentThread(),
+                                 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                                 FALSE,
+                                 &hToken)
+               )
+            {
+                return 0;
+            }
+        }
+    }
+
+    // now disable SeDebug
+    if (! psutil_set_privilege(hToken, SE_DEBUG_NAME, FALSE)) {
+        return 0;
+    }
+
+    CloseHandle(hToken);
+    return 1;
+}
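
A hedged sketch of the intended call pattern: take SeDebugPrivilege around the OpenProcess() call that needs it, then drop it again (the wrapper function is illustrative only, not part of the committed sources).

    #include <windows.h>
    #include "security.h"

    /* sketch: temporarily hold SeDebugPrivilege to open another process */
    static HANDLE
    example_open_with_sedebug(DWORD pid, DWORD access)
    {
        HANDLE hProcess;

        psutil_set_se_debug();      /* best effort; returns 0 on failure */
        hProcess = OpenProcess(access, FALSE, pid);
        psutil_unset_se_debug();
        return hProcess;            /* NULL if OpenProcess() failed */
    }
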

+ 17 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h

@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Security related functions for Windows platform (Set privileges such as
+ * SeDebug), as well as security helper functions.
+ */
+
+#include <windows.h>
+
+BOOL psutil_set_privilege(HANDLE hToken, LPCTSTR Privilege, BOOL bEnablePrivilege);
+HANDLE psutil_token_from_handle(HANDLE hProcess);
+int psutil_has_system_privilege(HANDLE hProcess);
+int psutil_set_se_debug();
+int psutil_unset_se_debug();
+

+ 198 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py

@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network)
+in Python.
+"""
+
+import os
+import sys
+try:
+    from setuptools import setup, Extension
+except ImportError:
+    from distutils.core import setup, Extension
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def get_version():
+    INIT = os.path.join(HERE, 'psutil/__init__.py')
+    f = open(INIT, 'r')
+    try:
+        for line in f:
+            if line.startswith('__version__'):
+                ret = eval(line.strip().split(' = ')[1])
+                assert ret.count('.') == 2, ret
+                for num in ret.split('.'):
+                    assert num.isdigit(), ret
+                return ret
+        else:
+            raise ValueError("couldn't find version string")
+    finally:
+        f.close()
+
+
+def get_description():
+    README = os.path.join(HERE, 'README')
+    f = open(README, 'r')
+    try:
+        return f.read()
+    finally:
+        f.close()
+
+
+# POSIX
+if os.name == 'posix':
+    posix_extension = Extension(
+        '_psutil_posix',
+        sources=['psutil/_psutil_posix.c'],
+    )
+# Windows
+if sys.platform.startswith("win32"):
+
+    def get_winver():
+        maj, min = sys.getwindowsversion()[0:2]
+        return '0x0%s' % ((maj * 100) + min)
+
+    extensions = [Extension(
+        '_psutil_windows',
+        sources=[
+            'psutil/_psutil_windows.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/windows/process_info.c',
+            'psutil/arch/windows/process_handles.c',
+            'psutil/arch/windows/security.c',
+        ],
+        define_macros=[
+            # be nice to mingw, see:
+            # http://www.mingw.org/wiki/Use_more_recent_defined_functions
+            ('_WIN32_WINNT', get_winver()),
+            ('_AVAIL_WINVER_', get_winver()),
+            # see: https://code.google.com/p/psutil/issues/detail?id=348
+            ('PSAPI_VERSION', 1),
+        ],
+        libraries=[
+            "psapi", "kernel32", "advapi32", "shell32", "netapi32", "iphlpapi",
+            "wtsapi32",
+        ],
+        # extra_compile_args=["/Z7"],
+        # extra_link_args=["/DEBUG"]
+    )]
+# OS X
+elif sys.platform.startswith("darwin"):
+    extensions = [Extension(
+        '_psutil_osx',
+        sources=[
+            'psutil/_psutil_osx.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/osx/process_info.c'
+        ],
+        extra_link_args=[
+            '-framework', 'CoreFoundation', '-framework', 'IOKit'
+        ],
+    ),
+        posix_extension,
+    ]
+# FreeBSD
+elif sys.platform.startswith("freebsd"):
+    extensions = [Extension(
+        '_psutil_bsd',
+        sources=[
+            'psutil/_psutil_bsd.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/bsd/process_info.c'
+        ],
+        libraries=["devstat"]),
+        posix_extension,
+    ]
+# Linux
+elif sys.platform.startswith("linux"):
+    extensions = [Extension(
+        '_psutil_linux',
+        sources=['psutil/_psutil_linux.c']),
+        posix_extension,
+    ]
+# Solaris
+elif sys.platform.lower().startswith('sunos'):
+    extensions = [Extension(
+        '_psutil_sunos',
+        sources=['psutil/_psutil_sunos.c'],
+        libraries=['kstat', 'nsl'],),
+        posix_extension,
+    ]
+else:
+    sys.exit('platform %s is not supported' % sys.platform)
+
+
+def main():
+    setup_args = dict(
+        name='psutil',
+        version=get_version(),
+        description=__doc__,
+        long_description=get_description(),
+        keywords=[
+            'ps', 'top', 'kill', 'free', 'lsof', 'netstat', 'nice',
+            'tty', 'ionice', 'uptime', 'taskmgr', 'process', 'df',
+            'iotop', 'iostat', 'ifconfig', 'taskset', 'who', 'pidof',
+            'pmap', 'smem', 'monitoring', 'ulimit', 'prlimit',
+        ],
+        author='Giampaolo Rodola',
+        author_email='g.rodola <at> gmail <dot> com',
+        url='http://code.google.com/p/psutil/',
+        platforms='Platform Independent',
+        license='BSD',
+        packages=['psutil'],
+        # see: python setup.py register --list-classifiers
+        classifiers=[
+            'Development Status :: 5 - Production/Stable',
+            'Environment :: Console',
+            'Environment :: Win32 (MS Windows)',
+            'Intended Audience :: Developers',
+            'Intended Audience :: Information Technology',
+            'Intended Audience :: System Administrators',
+            'License :: OSI Approved :: BSD License',
+            'Operating System :: MacOS :: MacOS X',
+            'Operating System :: Microsoft :: Windows :: Windows NT/2000',
+            'Operating System :: Microsoft',
+            'Operating System :: OS Independent',
+            'Operating System :: POSIX :: BSD :: FreeBSD',
+            'Operating System :: POSIX :: Linux',
+            'Operating System :: POSIX :: SunOS/Solaris',
+            'Operating System :: POSIX',
+            'Programming Language :: C',
+            'Programming Language :: Python :: 2',
+            'Programming Language :: Python :: 2.4',
+            'Programming Language :: Python :: 2.5',
+            'Programming Language :: Python :: 2.6',
+            'Programming Language :: Python :: 2.7',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.0',
+            'Programming Language :: Python :: 3.1',
+            'Programming Language :: Python :: 3.2',
+            'Programming Language :: Python :: 3.3',
+            'Programming Language :: Python :: 3.4',
+            'Programming Language :: Python :: Implementation :: CPython',
+            'Programming Language :: Python :: Implementation :: PyPy',
+            'Programming Language :: Python',
+            'Topic :: Software Development :: Libraries :: Python Modules',
+            'Topic :: Software Development :: Libraries',
+            'Topic :: System :: Benchmark',
+            'Topic :: System :: Hardware',
+            'Topic :: System :: Monitoring',
+            'Topic :: System :: Networking :: Monitoring',
+            'Topic :: System :: Networking',
+            'Topic :: System :: Systems Administration',
+            'Topic :: Utilities',
+        ],
+    )
+    if extensions is not None:
+        setup_args["ext_modules"] = extensions
+    setup(**setup_args)
+
+if __name__ == '__main__':
+    main()
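
The _WIN32_WINNT macro above is derived from the running interpreter's Windows version: get_winver() packs the major and minor numbers into a hex-style string, so Windows 7 (6.1) becomes '0x0601'. A minimal sketch of the same arithmetic (winver_sketch and the sample version numbers are illustrative, not part of the commit):

def winver_sketch(maj, minor):
    # Same formula as get_winver() above: major*100 + minor, rendered
    # in the form expected for the _WIN32_WINNT macro value.
    return '0x0%s' % ((maj * 100) + minor)

print(winver_sketch(6, 1))  # Windows 7  -> 0x0601
print(winver_sketch(5, 1))  # Windows XP -> 0x0501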

+ 67 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestApplicationMetricMap.py

@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import json
+import logging
+from unittest import TestCase
+from application_metric_map import ApplicationMetricMap
+
+logger = logging.getLogger()
+
+class TestApplicationMetricMap(TestCase):
+  
+  def testApplicationMetricMap(self):
+    application_metric_map = ApplicationMetricMap("host1", "10.10.10.10")
+    
+    application_id = application_metric_map.format_app_id("A","1")
+    timestamp = int(round(1415390657.3806491 * 1000))
+    
+    metrics = {}
+    metrics.update({"b" : 'bv'})
+    
+    application_metric_map.put_metric(application_id, metrics, timestamp)
+    application_metric_map.put_metric(application_id, metrics, timestamp + 1)
+    application_metric_map.put_metric(application_id, metrics, timestamp + 2)
+    application_metric_map.put_metric(application_id, metrics, timestamp + 3)
+    
+    p = json.loads(application_metric_map.flatten(application_id))
+    self.assertEqual(len(p['metrics']), 1)
+    self.assertEqual(p['metrics'][0]['metricname'], "b")
+#     self.assertEqual(p['metrics'][0]['appid'], application_id)
+    self.assertEqual(p['metrics'][0]['hostname'], "host1")
+    self.assertEqual(len(p['metrics'][0]['metrics']), 4)
+    self.assertEqual(p['metrics'][0]['metrics'][str(timestamp)], 'bv')
+    
+    self.assertEqual(application_metric_map.get_start_time(application_id, "b"), timestamp)
+    
+    metrics = {}
+    metrics.update({"b" : 'bv'})
+    metrics.update({"a" : 'av'})
+    application_metric_map.put_metric(application_id, metrics, timestamp)
+    p = json.loads(application_metric_map.flatten(application_id))
+    self.assertEqual(len(p['metrics']), 2)
+    self.assertTrue((p['metrics'][0]['metricname'] == 'a' and p['metrics'][1]['metricname'] == 'b') or 
+                    (p['metrics'][1]['metricname'] == 'a' and p['metrics'][0]['metricname'] == 'b'))
+    
+    
+  def testEmptyMapReturnNone(self):
+    application_metric_map = ApplicationMetricMap("host","10.10.10.10")
+    self.assertTrue(application_metric_map.flatten() == None)
+    
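
For reference, the flattened payload asserted above has roughly the following shape; field names are taken from the assertions here and in TestEmitter further down, while the concrete values are illustrative only:

# Illustrative only; mirrors the structure checked by the test above.
expected_shape = {
    "metrics": [
        {
            "metricname": "b",           # one entry per metric name
            "hostname": "host1",
            "starttime": 1415390657381,  # first timestamp recorded for the metric
            "metrics": {                 # str(timestamp) -> value
                "1415390657381": "bv",
                "1415390657382": "bv",
            },
        }
    ]
}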

+ 78 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import logging
+from unittest import TestCase
+
+from application_metric_map import ApplicationMetricMap
+from config_reader import Configuration
+from emitter import Emitter
+
+from mock.mock import patch, MagicMock
+
+import json
+import urllib2
+
+logger = logging.getLogger()
+
+class TestEmitter(TestCase):
+  
+  @patch("urllib2.urlopen")
+  def testJavaHomeAvailableCheck(self, url_open_mock):
+    url_open_mock.return_value = MagicMock()
+    url_open_mock.return_value.getcode.return_value = 200
+    self.assertEqual(urllib2.urlopen(None, None).getcode(), 200)
+    url_open_mock.reset_mock()
+    
+    config = Configuration()
+    application_metric_map = ApplicationMetricMap("host","10.10.10.10")
+    application_metric_map.clear()
+    application_metric_map.put_metric("APP1", {"metric1":1}, 1)
+    emitter = Emitter(config, application_metric_map)
+    emitter.submit_metrics()
+    
+    self.assertEqual(url_open_mock.call_count, 1)
+    self.assertUrlData(url_open_mock)
+    
+    
+  @patch("urllib2.urlopen")
+  def testRetryFetch(self, url_open_mock):
+    
+    config = Configuration()
+    application_metric_map = ApplicationMetricMap("host","10.10.10.10")
+    application_metric_map.clear()
+    application_metric_map.put_metric("APP1", {"metric1":1}, 1)
+    emitter = Emitter(config, application_metric_map)
+    emitter.RETRY_SLEEP_INTERVAL = .001
+    emitter.submit_metrics()
+    
+    self.assertEqual(url_open_mock.call_count, 3)
+    self.assertUrlData(url_open_mock)
+    
+
+  def assertUrlData(self, url_open_mock):
+    self.assertEqual(len(url_open_mock.call_args), 2)
+    data = url_open_mock.call_args[0][0].data
+    self.assertTrue(data is not None)
+    
+    metrics = json.loads(data)
+    self.assertEqual(len(metrics['metrics']), 1)
+    self.assertEqual(metrics['metrics'][0]['metricname'],'metric1')
+    self.assertEqual(metrics['metrics'][0]['starttime'],1)
+    pass

+ 97 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py

@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+from host_info import HostInfo
+
+from unittest import TestCase
+from mock.mock import patch
+
+logger = logging.getLogger()
+
+class TestHostInfo(TestCase):
+  
+  @patch("os.getloadavg")
+  @patch("psutil.cpu_times")
+  def testCpuTimes(self, cp_mock, avg_mock):
+    
+    cp = cp_mock.return_value
+    cp.user = "user"
+    cp.system = "system"
+    cp.idle = "idle"
+    cp.nice = "nice"
+    cp.iowait = "iowait"
+    cp.irq = "irq"
+    cp.softirq = "softirq"
+    avg_mock.return_value  = [13, 13, 13]
+    
+    hostinfo = HostInfo()
+    
+    cpu = hostinfo.get_cpu_times()
+    
+    self.assertEqual(cpu['cpu_user'], 'user')
+    self.assertEqual(cpu['cpu_system'], 'system')
+    self.assertEqual(cpu['cpu_idle'], 'idle')
+    self.assertEqual(cpu['cpu_nice'], 'nice')
+    self.assertEqual(cpu['cpu_wio'], 'iowait')
+    self.assertEqual(cpu['cpu_intr'], 'irq')
+    self.assertEqual(cpu['cpu_sintr'], 'softirq')
+    self.assertEqual(cpu['load_one'], 13)
+    self.assertEqual(cpu['load_five'], 13)
+    self.assertEqual(cpu['load_fifteen'], 13)
+    
+  @patch("psutil.disk_usage")
+  @patch("psutil.disk_partitions")
+  @patch("psutil.swap_memory")
+  @patch("psutil.virtual_memory")
+  def testMemInfo(self, vm_mock, sw_mock, dm_mock, du_mock):
+    
+    vm = vm_mock.return_value
+    vm.free = "free"
+    vm.shared = "shared"
+    vm.buffers = "buffers"
+    vm.cached = "cached"
+    
+    sw = sw_mock.return_value
+    sw.free = "free"
+    
+    hostinfo = HostInfo()
+    
+    cpu = hostinfo.get_mem_info()
+    
+    self.assertEqual(cpu['mem_free'], 'free')
+    self.assertEqual(cpu['mem_shared'], 'shared')
+    self.assertEqual(cpu['mem_buffered'], 'buffers')
+    self.assertEqual(cpu['mem_cached'], 'cached')
+    self.assertEqual(cpu['swap_free'], 'free')
+
+  @patch("psutil.disk_usage")
+  @patch("psutil.disk_partitions")
+  def testCombinedDiskUsage(self, dp_mock, du_mock):
+    
+    dp_mock.__iter__.return_value = ['a', 'b', 'c']
+    
+    hostinfo = HostInfo()
+    
+    cdu = hostinfo.get_combined_disk_usage()
+    self.assertEqual(cdu['disk_total'], "0.00")
+    self.assertEqual(cdu['disk_used'], "0.00")
+    self.assertEqual(cdu['disk_free'], "0.00")
+    self.assertEqual(cdu['disk_percent'], "0.00")
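
The assertions in testCpuTimes imply a direct mapping from psutil fields (plus os.getloadavg()) to the emitted metric names. A minimal sketch of that mapping, assuming the real implementation lives in core/host_info.py; the getattr guards are added here because iowait/irq/softirq are Linux-specific fields:

import os
import psutil

def cpu_times_sketch():
    # Field mapping implied by the assertions in testCpuTimes above.
    cp = psutil.cpu_times()
    load_one, load_five, load_fifteen = os.getloadavg()
    return {
        'cpu_user': cp.user,
        'cpu_system': cp.system,
        'cpu_idle': cp.idle,
        'cpu_nice': cp.nice,
        'cpu_wio': getattr(cp, 'iowait', 0),    # Linux-only fields
        'cpu_intr': getattr(cp, 'irq', 0),
        'cpu_sintr': getattr(cp, 'softirq', 0),
        'load_one': load_one,
        'load_five': load_five,
        'load_fifteen': load_fifteen,
    }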

+ 49 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestMetricCollector.py

@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import logging
+from unittest import TestCase
+
+from application_metric_map import ApplicationMetricMap
+from metric_collector import MetricsCollector
+from event_definition import HostMetricCollectEvent
+from mock.mock import patch
+from host_info import HostInfo
+
+logger = logging.getLogger()
+
+class TestMetricCollector(TestCase):
+  
+  @patch("os.getloadavg")
+  @patch.object(HostInfo, "get_cpu_times")
+  @patch.object(ApplicationMetricMap, "__init__")
+  def testCollectEvent(self, amm_mock, host_info_mock, avg_mock):
+    amm_mock.return_value = None
+    host_info_mock.return_value = {'metric_name' : 'metric_value'}
+    avg_mock.return_value.__getitem__.return_value = 13
+
+    metric_collector = MetricsCollector(None, amm_mock)
+    
+    group_config = {'collect_every' : 1, 'metrics' : 'cpu'}
+    
+    e = HostMetricCollectEvent(group_config, 'cpu')
+    
+    metric_collector.process_event(e)
+    
+    self.assertEqual(amm_mock.put_metric.call_count, 1)

+ 133 - 0
ambari-metrics/ambari-metrics-host-monitoring/src/test/python/unitTests.py

@@ -0,0 +1,133 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import unittest
+import os
+import sys
+from random import shuffle
+import fnmatch
+
+#excluded directories with non-test stuff from stack and service scanning,
+#also we can add a service or stack to skip here
+STACK_EXCLUDE = ["utils"]
+SERVICE_EXCLUDE = ["configs"]
+
+TEST_MASK = '[Tt]est*.py'
+CUSTOM_TEST_MASK = '_[Tt]est*.py'
+def get_parent_path(base, directory_name):
+  """
+  Returns absolute path for directory_name, if directory_name present in base.
+  For example, base=/home/user/test2, directory_name=user - will return /home/user
+  """
+  done = False
+  while not done:
+    base = os.path.dirname(base)
+    if base == "/":
+      return None
+    done = True if os.path.split(base)[-1] == directory_name else False
+  return base
+
+def get_test_files(path, mask = None, recursive=True):
+  """
+  Returns test files for path recursively
+  """
+  current = []
+  directory_items = os.listdir(path)
+
+  for item in directory_items:
+    add_to_pythonpath = False
+    if os.path.isfile(path + "/" + item):
+      if fnmatch.fnmatch(item, mask):
+        add_to_pythonpath = True
+        current.append(item)
+    elif os.path.isdir(path + "/" + item):
+      if recursive:
+        current.extend(get_test_files(path + "/" + item, mask = mask))
+    if add_to_pythonpath:
+      sys.path.append(path)
+  return current
+
+
+def main():
+  custom_tests = False
+  if len(sys.argv) > 1:
+    if sys.argv[1] == "true":
+      custom_tests = True
+  pwd = os.path.abspath(os.path.dirname(__file__))
+
+  project_folder = get_parent_path(pwd,'ambari-metrics-host-monitoring')
+  ambari_common_folder = os.path.join(project_folder,"../../ambari-common")
+  sys.path.append(ambari_common_folder + "/src/main/python")
+  sys.path.append(ambari_common_folder + "/src/main/python/ambari_jinja2")
+  sys.path.append(ambari_common_folder + "/src/test/python")
+  sys.path.append(project_folder + "/src/test/python")
+  sys.path.append(project_folder + "/src/main/python")
+  sys.path.append(project_folder + "/src/main/python/core")
+  sys.path.append(project_folder + "/src/main/resources/scripts")
+  sys.path.append(project_folder + "/src/main/resources/custom_actions")
+  sys.path.append(project_folder + "/target/psutil_build")
+
+  has_failures = False
+  test_runs = 0
+  test_failures = []
+  test_errors = []
+  #run ambari-metrics-host-monitoring tests
+  sys.stderr.write("Running tests\n")
+  if custom_tests:
+    test_mask = CUSTOM_TEST_MASK
+  else:
+    test_mask = TEST_MASK
+
+  tests = get_test_files(pwd, mask=test_mask, recursive=True)
+  shuffle(tests)
+  modules = [os.path.basename(s)[:-3] for s in tests]
+  suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
+    modules]
+  testSuite = unittest.TestSuite(suites)
+  textRunner = unittest.TextTestRunner(verbosity=2).run(testSuite)
+  test_runs += textRunner.testsRun
+  test_errors.extend([(str(item[0]),str(item[1]),"ERROR") for item in textRunner.errors])
+  test_failures.extend([(str(item[0]),str(item[1]),"FAIL") for item in textRunner.failures])
+  tests_status = textRunner.wasSuccessful() and not has_failures
+
+  if not tests_status:
+    sys.stderr.write("----------------------------------------------------------------------\n")
+    sys.stderr.write("Failed tests:\n")
+  for failed_tests in [test_errors,test_failures]:
+    for err in failed_tests:
+      sys.stderr.write("{0}: {1}\n".format(err[2],err[0]))
+      sys.stderr.write("----------------------------------------------------------------------\n")
+      sys.stderr.write("{0}\n".format(err[1]))
+  sys.stderr.write("----------------------------------------------------------------------\n")
+  sys.stderr.write("Total run:{0}\n".format(test_runs))
+  sys.stderr.write("Total errors:{0}\n".format(len(test_errors)))
+  sys.stderr.write("Total failures:{0}\n".format(len(test_failures)))
+
+  if tests_status:
+    sys.stderr.write("OK\n")
+    exit_code = 0
+  else:
+    sys.stderr.write("ERROR\n")
+    exit_code = 1
+  return exit_code
+
+
+if __name__ == "__main__":
+  sys.exit(main())
+
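
To make the two discovery masks concrete: TEST_MASK picks up regular test modules, while CUSTOM_TEST_MASK (used when the script is invoked with a "true" argument) only matches files with a leading underscore. A small sketch with hypothetical file names:

import fnmatch

candidates = ['TestEmitter.py', 'testHostInfo.py', '_TestSlow.py', 'helpers.py']

print([f for f in candidates if fnmatch.fnmatch(f, '[Tt]est*.py')])
# -> ['TestEmitter.py', 'testHostInfo.py']
print([f for f in candidates if fnmatch.fnmatch(f, '_[Tt]est*.py')])
# -> ['_TestSlow.py']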

+ 269 - 0
ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector

@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#JAVA_HOME=/usr/jdk64/jdk1.7.0_45
+PIDFILE=/var/run/ambari-metrics-collector/ambari-metrics-collector.pid
+OUTFILE=/var/log/ambari-metrics-collector/ambari-metrics-collector.out
+
+HBASE_ZK_PID=/var/run/ams-hbase/hbase-hbase-zookeeper.pid
+HBASE_MASTER_PID=/var/run/ams-hbase/hbase-hbase-master.pid
+HBASE_RS_PID=/var/run/ams-hbase/hbase-hbase-regionserver.pid
+
+HBASE_DIR=/usr/lib/ams-hbase
+
+DAEMON_NAME=timelineserver
+
+COLLECTOR_CONF_DIR=/etc/ambari-metrics-collector/conf
+HBASE_CONF_DIR=/etc/ams-hbase/conf
+
+METRIC_COLLECTOR=ambari-metrics-collector
+
+STOP_TIMEOUT=5
+
+function hbase_daemon
+{
+    local daemon=$1
+    local cmd=$2
+    local pid
+
+    case "${daemon}" in
+      "master")
+        pid=${HBASE_MASTER_PID}
+      ;;
+      "zookeeper")
+        pid=${HBASE_ZK_PID}
+      ;;
+      "regionserver")
+        pid=${HBASE_RS_PID}
+      ;;
+    esac
+
+    daemon_status "${pid}"
+    if [[ $? == 0  ]]; then
+        echo "${daemon} is running as process $(cat "${pid}"). Continuing"
+      else
+        # stale pid file, so just remove it and continue on
+        rm -f "${pid}" >/dev/null 2>&1
+    fi
+
+    ${HBASE_DIR}/bin/hbase-daemon.sh --config ${HBASE_CONF_DIR} ${cmd} ${daemon}
+
+}
+
+function write_pidfile
+{
+    local pidfile="$1"
+    echo $! > "${pidfile}" 2>/dev/null
+    if [[ $? -gt 0 ]]; then
+      echo "ERROR:  Cannot write pid ${pidfile}."
+      exit 1;
+    fi
+}
+
+function hadoop_java_setup
+{
+  # Bail if we did not detect it
+  if [[ -z "${JAVA_HOME}" ]]; then
+    echo "ERROR: JAVA_HOME is not set and could not be found."
+    exit 1
+  fi
+
+  if [[ ! -d "${JAVA_HOME}" ]]; then
+    echo "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
+    exit 1
+  fi
+
+  JAVA="${JAVA_HOME}/bin/java"
+
+  if [[ ! -x "$JAVA" ]]; then
+    echo "ERROR: $JAVA is not executable."
+    exit 1
+  fi
+  # shellcheck disable=SC2034
+  JAVA_HEAP_MAX=-Xmx1g
+  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+
+  # check envvars which might override default args
+  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+    # shellcheck disable=SC2034
+    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+  fi
+}
+
+function daemon_status()
+{
+  #
+  # LSB 4.1.0 compatible status command (1)
+  #
+  # 0 = program is running
+  # 1 = dead, but still a pid (2)
+  # 2 = (not used by us)
+  # 3 = not running
+  #
+  # 1 - this is not an endorsement of the LSB
+  #
+  # 2 - technically, the specification says /var/run/pid, so
+  #     we should never return this value, but we're giving
+  #     them the benefit of a doubt and returning 1 even if
+  #     our pid is not in /var/run.
+  #
+
+  local pidfile="$1"
+  shift
+
+  local pid
+
+  if [[ -f "${pidfile}" ]]; then
+    pid=$(cat "${pidfile}")
+    if ps -p "${pid}" > /dev/null 2>&1; then
+      return 0
+    fi
+    return 1
+  fi
+  return 3
+}
+
+while [[ -z "${_ams_configs_done}" ]]; do
+  case $1 in
+    --config)
+      shift
+      confdir=$1
+      shift
+      if [[ -d "${confdir}" ]]; then
+        COLLECTOR_CONF_DIR="${confdir}"
+      elif [[ -z "${confdir}" ]]; then
+        echo "ERROR: No parameter provided for --config "
+        exit 1
+      else
+        echo "ERROR: Cannot find configuration directory \"${confdir}\""
+        exit 1
+      fi
+    ;;
+    *)
+      _ams_configs_done=true
+    ;;
+  esac
+done
+
+#execute ams-env.sh
+if [[ -f "${COLLECTOR_CONF_DIR}/ams-env.sh" ]]; then
+  . "${COLLECTOR_CONF_DIR}/ams-env.sh"
+else
+  echo "ERROR: Cannot execute ${COLLECTOR_CONF_DIR}/ams-env.sh." 2>&1
+  exit 1
+fi
+
+#TODO manage 3 hbase daemons for start/stop/status
+case "$1" in
+
+	start)
+		hadoop_java_setup
+
+		#hbase_daemon "zookeeper" "start"
+
+		hbase_daemon "master" "start"
+		#hbase_daemon "regionserver" "start"
+
+    sleep 30
+
+		CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+		# YARN_OPTS="${YARN_OPTS} ${YARN_TIMELINESERVER_OPTS}"
+		# if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
+		#   JAVA_HEAP_MAX="-Xmx${YARN_TIMELINESERVER_HEAPSIZE}m"
+		# fi
+		
+		# check if this is needed?
+		# export PHOENIX_JAR_PATH=/usr/lib/ambari-metrics/timelineservice/phoenix-client.jar
+		# export HBASE_CONF_DIR=${HBASE_DIR}/conf
+
+    daemon_status "${PIDFILE}"
+    if [[ $? == 0  ]]; then
+        echo "AMS is running as process $(cat "${PIDFILE}"). Exiting"
+        exit 1
+    else
+        # stale pid file, so just remove it and continue on
+        rm -f "${PIDFILE}" >/dev/null 2>&1
+    fi
+
+    nohup "${JAVA}" "-cp" "/usr/lib/ambari-metrics-collector/*:${COLLECTOR_CONF_DIR}" "-Djava.net.preferIPv4Stack=true" "-Dproc_${DAEMON_NAME}" "${CLASS}" "$@" > $OUTFILE 2>&1 &
+    PID=$!
+    write_pidfile "${PIDFILE}"
+    sleep 2
+
+    echo "Verifying ${METRIC_COLLECTOR} process status..."
+    if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+      if [ -s ${OUTFILE} ]; then
+        echo "ERROR: ${METRIC_COLLECTOR} start failed. For more details, see ${OUTFILE}:"
+        echo "===================="
+        tail -n 10 ${OUTFILE}
+        echo "===================="
+      else
+        echo "ERROR: ${METRIC_COLLECTOR} start failed"
+        rm -f ${PIDFILE}
+      fi
+      echo "Collector out at: ${OUTFILE}"
+      exit 1
+    fi
+
+    echo "Collector successfully started."
+
+  ;;
+	stop)
+	    pidfile=${PIDFILE}
+
+	    if [[ -f "${pidfile}" ]]; then
+          pid=$(cat "$pidfile")
+
+          kill "${pid}" >/dev/null 2>&1
+          sleep "${STOP_TIMEOUT}"
+
+          if kill -0 "${pid}" > /dev/null 2>&1; then
+            echo "WARNING: ${METRIC_COLLECTOR} did not stop gracefully after ${STOP_TIMEOUT} seconds: Trying to kill with kill -9"
+            kill -9 "${pid}" >/dev/null 2>&1
+          fi
+
+          if ps -p "${pid}" > /dev/null 2>&1; then
+            echo "ERROR: Unable to kill ${pid}"
+          else
+            rm -f "${pidfile}" >/dev/null 2>&1
+          fi
+      fi
+
+      #stop hbase daemons
+      #hbase_daemon "zookeeper" "stop"
+      hbase_daemon "master" "stop"
+      #hbase_daemon "regionserver" "stop"
+
+
+    ;;
+	status)
+	    daemon_status "${PIDFILE}"
+	    if [[ $? == 0  ]]; then
+            echo "AMS is running as process $(cat "${PIDFILE}")."
+        else
+            echo "AMS is not running."
+        fi
+        #print embedded hbase daemons statuses?
+    ;;
+	restart)
+	;;
+
+esac
+
+
+
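
For reference, a minimal Python sketch of the LSB-style check performed by daemon_status() above (0 = running, 1 = stale pid file, 3 = not running); psutil.pid_exists is used here purely for illustration, the shell script itself relies on ps:

import os
import psutil  # bundled with the host-monitoring package

def daemon_status(pidfile):
    # 0 = running, 1 = pid file exists but process is gone, 3 = not running
    if not os.path.isfile(pidfile):
        return 3
    with open(pidfile) as f:
        pid = int(f.read().strip())
    return 0 if psutil.pid_exists(pid) else 1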

+ 16 - 0
ambari-metrics/ambari-metrics-timelineservice/conf/unix/ams-env.sh

@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set environment variables here.

+ 25 - 0
ambari-metrics/ambari-metrics-timelineservice/conf/unix/ams-site.xml

@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+
+  <!-- Site specific AMS configuration properties -->
+
+</configuration>

+ 31 - 0
ambari-metrics/ambari-metrics-timelineservice/conf/unix/log4j.properties

@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=/var/log/ambari-metrics-collector/ambari-metrics-collector.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n
+
+

+ 593 - 0
ambari-metrics/ambari-metrics-timelineservice/pom.xml

@@ -0,0 +1,593 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>0.1.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>ambari-metrics-timelineservice</artifactId>
+  <version>0.1.0-SNAPSHOT</version>
+  <name>ambari-metrics-timelineservice</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <!--<yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>-->
+    <protobuf.version>2.5.0</protobuf.version>
+    <hadoop.version>2.4.0</hadoop.version>
+  </properties>
+
+  <repositories>
+    <repository>
+      <id>phoenix-core-tests</id>
+      <name>Phoenix Unit tests</name>
+      <url>file://${project.basedir}/src/test/resources/lib</url>
+    </repository>
+  </repositories>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+              <includeScope>compile</includeScope>
+              <excludeScope>test</excludeScope>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <executions>
+          <execution>
+            <configuration>
+              <descriptors>
+                <descriptor>src/main/assemblies/ats.xml</descriptor>
+              </descriptors>
+              <tarLongFileMode>gnu</tarLongFileMode>
+            </configuration>
+            <id>build-tarball</id>
+            <phase>none</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>com.github.goldin</groupId>
+        <artifactId>copy-maven-plugin</artifactId>
+        <version>0.2.5</version>
+        <executions>
+          <execution>
+            <id>create-archive</id>
+            <phase>package</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <resources>
+                <resource>
+                  <targetPath>${project.build.directory}/embedded</targetPath>
+                  <file>${hbase.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <!-- unbinds rpm creation from maven lifecycle -->
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <name>ambari-metrics-collector</name>
+          <copyright>2012, Apache Software Foundation</copyright>
+          <group>Development</group>
+          <description>Maven Recipe: RPM Package.</description>
+          <autoRequires>false</autoRequires>
+          <requires>
+            <require>${python.ver}</require>
+          </requires>
+
+          <defaultFilemode>644</defaultFilemode>
+          <defaultDirmode>755</defaultDirmode>
+          <defaultUsername>root</defaultUsername>
+          <defaultGroupname>root</defaultGroupname>
+
+          <mappings>
+            <mapping>
+              <!--jars-->
+              <directory>/usr/lib/ambari-metrics-collector/</directory>
+              <sources>
+                <source>
+                  <location>target/lib</location>
+                </source>
+                <source>
+                  <location>${project.build.directory}/${project.artifactId}-${project.version}.jar</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <!--embedded applications-->
+              <directory>/usr/lib/ams-hbase/</directory>
+              <sources>
+                <source>
+                  <location>target/embedded/${hbase.folder}</location>
+                  <excludes>
+                    <exclude>bin/**</exclude>
+                    <exclude>bin/*</exclude>
+                  </excludes>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/usr/lib/ams-hbase/bin</directory>
+              <filemode>755</filemode>
+              <sources>
+                <source>
+                  <location>target/embedded/${hbase.folder}/bin</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/usr/lib/ams-hbase/lib/</directory>
+              <sources>
+                <source>
+                  <location>target/lib</location>
+                  <includes>
+                    <include>phoenix*.jar</include>
+                    <include>antlr*.jar</include>
+                  </includes>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/usr/sbin</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <directoryIncluded>false</directoryIncluded>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-metrics-collector</location>
+                  <filter>false</filter>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/etc/ambari-metrics-collector/conf</directory>
+              <configuration>true</configuration>
+              <sources>
+                <source>
+                  <location>conf/unix/ams-env.sh</location>
+                </source>
+                <source>
+                  <location>conf/unix/ams-site.xml</location>
+                </source>
+                <source>
+                  <location>conf/unix/log4j.properties</location>
+                </source>
+                <source>
+                  <location>target/embedded/${hbase.folder}/conf/hbase-site.xml</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/etc/ams-hbase/conf</directory>
+              <configuration>true</configuration>
+              <sources>
+                <source>
+                  <location>target/embedded/${hbase.folder}/conf</location>
+                  <includes>
+                    <include>*.*</include>
+                  </includes>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/run/ams-hbase</directory>
+            </mapping>
+            <mapping>
+              <directory>/var/run/ambari-metrics-collector</directory>
+            </mapping>
+            <mapping>
+              <directory>/var/log/ambari-metrics-collector</directory>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/ambari-metrics-collector</directory>
+            </mapping>
+          </mappings>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <redirectTestOutputToFile>true</redirectTestOutputToFile>
+          <forkMode>always</forkMode>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-core</artifactId>
+      <version>4.2.0.2.2.0.0-2041</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+      <version>2.5</version>
+    </dependency>
+
+    <dependency>
+      <artifactId>ambari-metrics-hadoop-sink</artifactId>
+      <groupId>org.apache.ambari</groupId>
+      <version>0.1.0-SNAPSHOT</version>
+    </dependency>
+
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <version>2.5</version>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1-jetty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <version>1.8.5</version>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+      <version>3.0</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>${protobuf.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+      <version>4.10</version>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+      <version>3.0</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <version>1.11</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-json</artifactId>
+      <version>1.11</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-guice</artifactId>
+      <version>1.11</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <version>1.11</version>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.xml.bind</groupId>
+      <artifactId>jaxb-api</artifactId>
+      <version>2.2.2</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jettison</groupId>
+      <artifactId>jettison</artifactId>
+      <version>1.1</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <version>1.11</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-client</artifactId>
+      <version>1.11</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>14.0.1</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <version>1.1.1</version>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+      <version>1.11</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <version>1.9.9</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.2</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>1.7.2</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <version>1.9.13</version>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.fusesource.leveldbjni</groupId>
+      <artifactId>leveldbjni-all</artifactId>
+      <version>1.8</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <version>1.7.0</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <version>3.2</version>
+      <scope>test</scope>
+    </dependency>
+    <!-- for unit tests only -->
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-core-tests</artifactId>
+      <version>4.2.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-it</artifactId>
+      <version>0.98.4-hadoop2</version>
+      <scope>test</scope>
+      <classifier>tests</classifier>
+    </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-testing-util</artifactId>
+        <version>0.98.4-hadoop2</version>
+        <scope>test</scope>
+        <optional>true</optional>
+        <exclusions>
+          <exclusion>
+            <groupId>org.jruby</groupId>
+            <artifactId>jruby-complete</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-module-junit4</artifactId>
+      <version>1.4.9</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-api-mockito</artifactId>
+      <version>1.4.9</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-api-easymock</artifactId>
+      <version>1.4.9</version>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <profiles>
+    <profile>
+      <id>sim</id>
+      <build>
+
+        <plugins>
+          <plugin>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <configuration>
+              <descriptors>
+                <descriptor>src/main/assemblies/simulator.xml</descriptor>
+              </descriptors>
+              <tarLongFileMode>gnu</tarLongFileMode>
+            </configuration>
+            <executions>
+              <execution>
+                <id>build-tarball</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <artifactId>maven-jar-plugin</artifactId>
+            <version>2.3.1</version>
+            <!-- The configuration of the plugin -->
+            <configuration>
+              <!-- Configuration of the archiver -->
+              <finalName>${pom.artifactId}-simulator-${pom.version}</finalName>
+              <archive>
+                <!-- Manifest specific configuration -->
+                <manifest>
+                  <!-- Classpath is added to the manifest of the created jar file. -->
+                  <addClasspath>true</addClasspath>
+                  <!--
+                      Configures the classpath prefix. This configuration option is
+                      used to specify that all needed libraries are found under lib/
+                      directory.
+                  -->
+                  <classpathPrefix></classpathPrefix>
+                  <!-- Specifies the main class of the application -->
+                  <mainClass>
+                    org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.MetricsLoadSimulator
+                  </mainClass>
+                </manifest>
+              </archive>
+            </configuration>
+          </plugin>
+        </plugins>
+
+      </build>
+
+    </profile>
+  </profiles>
+</project>

Some files were not shown because too many files changed in this diff.