
HADOOP-7190. Add metrics v1 back for backwards compatibility. (omalley)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-203@1081840 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley, 14 years ago
Commit 41321f5599
40 changed files with 4806 additions and 69 deletions
  1. + 2 - 0    CHANGES.txt
  2. + 47 - 0   src/core/org/apache/hadoop/classification/InterfaceAudience.java
  3. + 48 - 0   src/core/org/apache/hadoop/classification/InterfaceStability.java
  4. + 11 - 69  src/core/org/apache/hadoop/log/EventCounter.java
  5. + 101 - 0  src/core/org/apache/hadoop/log/metrics/EventCounter.java
  6. + 213 - 0  src/core/org/apache/hadoop/metrics/ContextFactory.java
  7. + 124 - 0  src/core/org/apache/hadoop/metrics/MetricsContext.java
  8. + 49 - 0   src/core/org/apache/hadoop/metrics/MetricsException.java
  9. + 253 - 0  src/core/org/apache/hadoop/metrics/MetricsRecord.java
  10. + 174 - 0 src/core/org/apache/hadoop/metrics/MetricsServlet.java
  11. + 106 - 0 src/core/org/apache/hadoop/metrics/MetricsUtil.java
  12. + 40 - 0  src/core/org/apache/hadoop/metrics/Updater.java
  13. + 154 - 0 src/core/org/apache/hadoop/metrics/file/FileContext.java
  14. + 43 - 0  src/core/org/apache/hadoop/metrics/file/package.html
  15. + 240 - 0 src/core/org/apache/hadoop/metrics/ganglia/GangliaContext.java
  16. + 144 - 0 src/core/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
  17. + 74 - 0  src/core/org/apache/hadoop/metrics/ganglia/package.html
  18. + 35 - 0  src/core/org/apache/hadoop/metrics/jvm/EventCounter.java
  19. + 201 - 0 src/core/org/apache/hadoop/metrics/jvm/JvmMetrics.java
  20. + 160 - 0 src/core/org/apache/hadoop/metrics/package.html
  21. + 483 - 0 src/core/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
  22. + 205 - 0 src/core/org/apache/hadoop/metrics/spi/CompositeContext.java
  23. + 59 - 0  src/core/org/apache/hadoop/metrics/spi/MetricValue.java
  24. + 283 - 0 src/core/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
  25. + 58 - 0  src/core/org/apache/hadoop/metrics/spi/NoEmitMetricsContext.java
  26. + 70 - 0  src/core/org/apache/hadoop/metrics/spi/NullContext.java
  27. + 79 - 0  src/core/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java
  28. + 96 - 0  src/core/org/apache/hadoop/metrics/spi/OutputRecord.java
  29. + 72 - 0  src/core/org/apache/hadoop/metrics/spi/Util.java
  30. + 38 - 0  src/core/org/apache/hadoop/metrics/spi/package.html
  31. + 92 - 0  src/core/org/apache/hadoop/metrics/util/MBeanUtil.java
  32. + 51 - 0  src/core/org/apache/hadoop/metrics/util/MetricsBase.java
  33. + 229 - 0 src/core/org/apache/hadoop/metrics/util/MetricsDynamicMBeanBase.java
  34. + 108 - 0 src/core/org/apache/hadoop/metrics/util/MetricsIntValue.java
  35. + 92 - 0  src/core/org/apache/hadoop/metrics/util/MetricsLongValue.java
  36. + 90 - 0  src/core/org/apache/hadoop/metrics/util/MetricsRegistry.java
  37. + 132 - 0 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java
  38. + 128 - 0 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java
  39. + 200 - 0 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java
  40. + 22 - 0  src/core/org/apache/hadoop/metrics/util/package-info.java

+ 2 - 0
CHANGES.txt

@@ -2,6 +2,8 @@ Hadoop Change Log
 
 Release 0.20.203.0 - unreleased
 
+    HADOOP-7190. Add metrics v1 back for backwards compatibility. (omalley)
+
     MAPREDUCE-2360. Remove stripping of scheme, authority from submit dir in 
     support of viewfs. (cdouglas)
     

+ 47 - 0
src/core/org/apache/hadoop/classification/InterfaceAudience.java

@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.classification;
+
+import java.lang.annotation.Documented;
+
+/**
+ * Annotation to inform users of a package, class or method's intended audience.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class InterfaceAudience {
+  /**
+   * Intended for use by any project or application.
+   */
+  @Documented public @interface Public {};
+  
+  /**
+   * Intended only for the project(s) specified in the annotation.
+   * For example, "Common", "HDFS", "MapReduce", "ZooKeeper", "HBase".
+   */
+  @Documented public @interface LimitedPrivate {
+    String[] value();
+  };
+  
+  /**
+   * Intended for use only within Hadoop itself.
+   */
+  @Documented public @interface Private {};
+
+  private InterfaceAudience() {} // Audience can't exist on its own
+}

+ 48 - 0
src/core/org/apache/hadoop/classification/InterfaceStability.java

@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.classification;
+
+import java.lang.annotation.Documented;
+
+/**
+ * Annotation to inform users of how much to rely on a particular package,
+ * class or method not changing over time.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class InterfaceStability {
+  /**
+   * Can evolve while retaining compatibility across minor release boundaries;
+   * can break compatibility only at major releases (i.e. at m.0).
+   */
+  @Documented
+  public @interface Stable {};
+  
+  /**
+   * Evolving, but can break compatibility at minor release (i.e. m.x)
+   */
+  @Documented
+  public @interface Evolving {};
+  
+  /**
+   * No guarantee is provided as to reliability or stability across any
+   * level of release granularity.
+   */
+  @Documented
+  public @interface Unstable {};
+}

+ 11 - 69
src/core/org/apache/hadoop/log/EventCounter.java

@@ -17,76 +17,18 @@
  */
 package org.apache.hadoop.log;
 
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.spi.LoggingEvent;
-
 /**
  * A log4J Appender that simply counts logging events in three levels:
- * fatal, error and warn.
+ * fatal, error and warn. The class name is used in log4j.properties
+ * @deprecated use {@link org.apache.hadoop.log.metrics.EventCounter} instead
  */
-public class EventCounter extends AppenderSkeleton {
-        
-    private static final int FATAL = 0;
-    private static final int ERROR = 1;
-    private static final int WARN  = 2;
-    private static final int INFO  = 3;
-    
-    private static class EventCounts {
-        private final long[] counts = { 0, 0, 0, 0 };
-    
-        private synchronized void incr(int i) { 
-            ++counts[i]; 
-        }
-        
-        private synchronized long get(int i) { 
-            return counts[i]; 
-        }
-    }
-    private static EventCounts counts = new EventCounts();
-    
-    public static long getFatal() { 
-        return counts.get(FATAL); 
-    }
-    
-    public static long getError() { 
-        return counts.get(ERROR); 
-    }
-    
-    public static long getWarn() { 
-        return counts.get(WARN);  
-    }
-    
-    public static long getInfo() {
-        return counts.get(INFO);
-    }
-    
-    public void append(LoggingEvent event) {
-        Level level = event.getLevel();
-        if (level == Level.INFO) {
-            counts.incr(INFO);
-        }
-        else if (level == Level.WARN) {
-            counts.incr(WARN);
-        }
-        else if (level == Level.ERROR) {
-            counts.incr(ERROR);
-        }
-        else if (level == Level.FATAL) {
-            counts.incr(FATAL);
-        }
-
-    }
-    
-    // Strange: these two methods are abstract in AppenderSkeleton, but not
-    // included in the javadoc (log4j 1.2.13).
-    
-    public void close() {
-    }
-    public boolean requiresLayout() {
-        return false;
-    }
-    
-    
-    
+@Deprecated
+public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {
+  static {
+    // The logging system is not started yet.
+    System.err.println("WARNING: "+ EventCounter.class.getName() +
+        " is deprecated. Please use "+
+        org.apache.hadoop.log.metrics.EventCounter.class.getName() +
+        " in all the log4j.properties files.");
+  }
 }
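
The shim above only prints a warning at class-load time; to silence it, log4j configurations should point at the relocated class. A hypothetical log4j.properties fragment (the appender name "EventCounter" and the rest of the rootLogger line are illustrative, not taken from this patch):

    # illustrative fragment: reference the relocated appender class
    log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
    log4j.rootLogger=INFO,console,EventCounter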

+ 101 - 0
src/core/org/apache/hadoop/log/metrics/EventCounter.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.log.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * A log4J Appender that simply counts logging events in three levels:
+ * fatal, error and warn. The class name is used in log4j.properties
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class EventCounter extends AppenderSkeleton {
+
+  private static final int FATAL = 0;
+  private static final int ERROR = 1;
+  private static final int WARN = 2;
+  private static final int INFO = 3;
+
+  private static class EventCounts {
+
+    private final long[] counts = {0, 0, 0, 0};
+
+    private synchronized void incr(int i) {
+      ++counts[i];
+    }
+
+    private synchronized long get(int i) {
+      return counts[i];
+    }
+  }
+
+  private static EventCounts counts = new EventCounts();
+
+  @InterfaceAudience.Private
+  public static long getFatal() {
+    return counts.get(FATAL);
+  }
+
+  @InterfaceAudience.Private
+  public static long getError() {
+    return counts.get(ERROR);
+  }
+
+  @InterfaceAudience.Private
+  public static long getWarn() {
+    return counts.get(WARN);
+  }
+
+  @InterfaceAudience.Private
+  public static long getInfo() {
+    return counts.get(INFO);
+  }
+
+  @Override
+  public void append(LoggingEvent event) {
+    Level level = event.getLevel();
+    if (level == Level.INFO) {
+      counts.incr(INFO);
+    }
+    else if (level == Level.WARN) {
+      counts.incr(WARN);
+    }
+    else if (level == Level.ERROR) {
+      counts.incr(ERROR);
+    }
+    else if (level == Level.FATAL) {
+      counts.incr(FATAL);
+    }
+
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+}
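
A minimal sketch (not part of the patch) of the appender in action. In real deployments the class is configured through log4j.properties, and the @InterfaceAudience.Private getters are meant for internal consumers such as JvmMetrics, which is also added in this commit:

    import org.apache.hadoop.log.metrics.EventCounter;
    import org.apache.log4j.Logger;

    public class EventCounterSketch {
      public static void main(String[] args) {
        Logger root = Logger.getRootLogger();
        root.addAppender(new EventCounter());   // normally wired via log4j.properties
        root.warn("disk nearly full");          // illustrative log event
        System.out.println("warnings so far: " + EventCounter.getWarn());
      }
    }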

+ 213 - 0
src/core/org/apache/hadoop/metrics/ContextFactory.java

@@ -0,0 +1,213 @@
+/*
+ * ContextFactory.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.NullContext;
+
+/**
+ * Factory class for creating MetricsContext objects.  To obtain an instance
+ * of this class, use the static <code>getFactory()</code> method.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class ContextFactory {
+    
+  private static final String PROPERTIES_FILE = 
+    "/hadoop-metrics.properties";
+  private static final String CONTEXT_CLASS_SUFFIX =
+    ".class";
+  private static final String DEFAULT_CONTEXT_CLASSNAME =
+    "org.apache.hadoop.metrics.spi.NullContext";
+    
+  private static ContextFactory theFactory = null;
+    
+  private Map<String,Object> attributeMap = new HashMap<String,Object>();
+  private Map<String,MetricsContext> contextMap = 
+    new HashMap<String,MetricsContext>();
+    
+  // Used only when contexts, or the ContextFactory itself, cannot be
+  // created.
+  private static Map<String,MetricsContext> nullContextMap = 
+    new HashMap<String,MetricsContext>();
+    
+  /** Creates a new instance of ContextFactory */
+  protected ContextFactory() {
+  }
+    
+  /**
+   * Returns the value of the named attribute, or null if there is no 
+   * attribute of that name.
+   *
+   * @param attributeName the attribute name
+   * @return the attribute value
+   */
+  public Object getAttribute(String attributeName) {
+    return attributeMap.get(attributeName);
+  }
+    
+  /**
+   * Returns the names of all the factory's attributes.
+   * 
+   * @return the attribute names
+   */
+  public String[] getAttributeNames() {
+    String[] result = new String[attributeMap.size()];
+    int i = 0;
+    // for (String attributeName : attributeMap.keySet()) {
+    Iterator it = attributeMap.keySet().iterator();
+    while (it.hasNext()) {
+      result[i++] = (String) it.next();
+    }
+    return result;
+  }
+    
+  /**
+   * Sets the named factory attribute to the specified value, creating it
+   * if it did not already exist.  If the value is null, this is the same as
+   * calling removeAttribute.
+   *
+   * @param attributeName the attribute name
+   * @param value the new attribute value
+   */
+  public void setAttribute(String attributeName, Object value) {
+    attributeMap.put(attributeName, value);
+  }
+
+  /**
+   * Removes the named attribute if it exists.
+   *
+   * @param attributeName the attribute name
+   */
+  public void removeAttribute(String attributeName) {
+    attributeMap.remove(attributeName);
+  }
+    
+  /**
+   * Returns the named MetricsContext instance, constructing it if necessary 
+   * using the factory's current configuration attributes. <p/>
+   * 
+   * When constructing the instance, if the factory property 
+   * <code><i>contextName</i>.class</code> exists, 
+   * its value is taken to be the name of the class to instantiate.  Otherwise,
+   * the default is to create an instance of 
+   * <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a 
+   * dummy "no-op" context which will cause all metric data to be discarded.
+   * 
+   * @param contextName the name of the context
+   * @return the named MetricsContext
+   */
+  public synchronized MetricsContext getContext(String refName, String contextName)
+      throws IOException, ClassNotFoundException,
+             InstantiationException, IllegalAccessException {
+    MetricsContext metricsContext = contextMap.get(refName);
+    if (metricsContext == null) {
+      String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX;
+      String className = (String) getAttribute(classNameAttribute);
+      if (className == null) {
+        className = DEFAULT_CONTEXT_CLASSNAME;
+      }
+      Class contextClass = Class.forName(className);
+      metricsContext = (MetricsContext) contextClass.newInstance();
+      metricsContext.init(contextName, this);
+      contextMap.put(contextName, metricsContext);
+    }
+    return metricsContext;
+  }
+
+  public synchronized MetricsContext getContext(String contextName)
+    throws IOException, ClassNotFoundException, InstantiationException,
+           IllegalAccessException {
+    return getContext(contextName, contextName);
+  }
+  
+  /** 
+   * Returns all MetricsContexts built by this factory.
+   */
+  public synchronized Collection<MetricsContext> getAllContexts() {
+    // Make a copy to avoid race conditions with creating new contexts.
+    return new ArrayList<MetricsContext>(contextMap.values());
+  }
+    
+  /**
+   * Returns a "null" context - one which does nothing.
+   */
+  public static synchronized MetricsContext getNullContext(String contextName) {
+    MetricsContext nullContext = nullContextMap.get(contextName);
+    if (nullContext == null) {
+      nullContext = new NullContext();
+      nullContextMap.put(contextName, nullContext);
+    }
+    return nullContext;
+  }
+    
+  /**
+   * Returns the singleton ContextFactory instance, constructing it if 
+   * necessary. <p/>
+   * 
+   * When the instance is constructed, this method checks if the file 
+   * <code>hadoop-metrics.properties</code> exists on the class path.  If it 
+   * exists, it must be in the format defined by java.util.Properties, and all 
+   * the properties in the file are set as attributes on the newly created
+   * ContextFactory instance.
+   *
+   * @return the singleton ContextFactory instance
+   */
+  public static synchronized ContextFactory getFactory() throws IOException {
+    if (theFactory == null) {
+      theFactory = new ContextFactory();
+      theFactory.setAttributes();
+    }
+    return theFactory;
+  }
+    
+  private void setAttributes() throws IOException {
+    InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE);
+    if (is != null) {
+      try {
+        Properties properties = new Properties();
+        properties.load(is);
+        //for (Object propertyNameObj : properties.keySet()) {
+        Iterator it = properties.keySet().iterator();
+        while (it.hasNext()) {
+          String propertyName = (String) it.next();
+          String propertyValue = properties.getProperty(propertyName);
+          setAttribute(propertyName, propertyValue);
+        }
+      } finally {
+        is.close();
+      }
+    }
+  }
+    
+}
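
A minimal sketch (assumptions, not part of the patch) of how the factory resolves a context. Attributes normally come from hadoop-metrics.properties, but they can also be set programmatically; without a "<contextName>.class" attribute the factory falls back to NullContext. NoEmitMetricsContext is one of the SPI classes added later in this commit, and the context name "example" is illustrative:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;

    public class ContextFactorySketch {
      public static void main(String[] args) throws Exception {
        ContextFactory factory = ContextFactory.getFactory();
        // Equivalent to an "example.class=..." line in hadoop-metrics.properties.
        factory.setAttribute("example.class",
            "org.apache.hadoop.metrics.spi.NoEmitMetricsContext");
        MetricsContext context = factory.getContext("example");
        System.out.println("resolved to " + context.getClass().getName());
      }
    }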

+ 124 - 0
src/core/org/apache/hadoop/metrics/MetricsContext.java

@@ -0,0 +1,124 @@
+/*
+ * MetricsContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * The main interface to the metrics package. 
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MetricsContext {
+    
+  /**
+   * Default period in seconds at which data is sent to the metrics system.
+   */
+  public static final int DEFAULT_PERIOD = 5;
+
+  /**
+   * Initialize this context.
+   * @param contextName The given name for this context
+   * @param factory The creator of this context
+   */
+  public void init(String contextName, ContextFactory factory);
+
+  /**
+   * Returns the context name.
+   *
+   * @return the context name
+   */
+  public abstract String getContextName();
+    
+  /**
+   * Starts or restarts monitoring, the emitting of metrics records as they are 
+   * updated. 
+   */
+  public abstract void startMonitoring()
+    throws IOException;
+
+  /**
+   * Stops monitoring.  This does not free any data that the implementation
+   * may have buffered for sending at the next timer event. It
+   * is OK to call <code>startMonitoring()</code> again after calling 
+   * this.
+   * @see #close()
+   */
+  public abstract void stopMonitoring();
+    
+  /**
+   * Returns true if monitoring is currently in progress.
+   */
+  public abstract boolean isMonitoring();
+    
+  /**
+   * Stops monitoring and also frees any buffered data, returning this 
+   * object to its initial state.  
+   */
+  public abstract void close();
+    
+  /**
+   * Creates a new MetricsRecord instance with the given <code>recordName</code>.
+   * Throws an exception if the metrics implementation is configured with a fixed
+   * set of record names and <code>recordName</code> is not in that set.
+   *
+   * @param recordName the name of the record
+   * @throws MetricsException if recordName conflicts with configuration data
+   */
+  public abstract MetricsRecord createRecord(String recordName);
+    
+  /**
+   * Registers a callback to be called at regular time intervals, as 
+   * determined by the implementation-class specific configuration.
+   *
+   * @param updater object to be run periodically; it should update
+   * some metrics records and then return
+   */
+  public abstract void registerUpdater(Updater updater);
+
+  /**
+   * Removes a callback, if it exists.
+   * 
+   * @param updater object to be removed from the callback list
+   */
+  public abstract void unregisterUpdater(Updater updater);
+  
+  /**
+   * Returns the timer period.
+   */
+  public abstract int getPeriod();
+  
+  /**
+   * Retrieves all the records managed by this MetricsContext.
+   * Useful for monitoring systems that are polling-based.
+   * 
+   * @return A non-null map from all record names to the records managed.
+   */
+   Map<String, Collection<OutputRecord>> getAllRecords();
+}
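
A minimal sketch (assumptions, not part of the patch) of the lifecycle this interface defines: create a record, register an Updater callback, and start monitoring so records are emitted every getPeriod() seconds. Which implementation is used depends on the factory configuration; record and metric names here are illustrative:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.Updater;

    public class UpdaterSketch {
      public static void main(String[] args) throws Exception {
        MetricsContext context = ContextFactory.getFactory().getContext("example");
        final MetricsRecord record = context.createRecord("exampleStats");
        context.registerUpdater(new Updater() {
          public void doUpdates(MetricsContext unused) {
            record.incrMetric("heartbeats", 1);   // invoked once per timer period
            record.update();
          }
        });
        context.startMonitoring();
      }
    }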

+ 49 - 0
src/core/org/apache/hadoop/metrics/MetricsException.java

@@ -0,0 +1,49 @@
+/*
+ * MetricsException.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * General-purpose, unchecked metrics exception.
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.MetricsException}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class MetricsException extends RuntimeException {
+    
+  private static final long serialVersionUID = -1643257498540498497L;
+
+  /** Creates a new instance of MetricsException */
+  public MetricsException() {
+  }
+    
+  /** Creates a new instance of MetricsException 
+   *
+   * @param message an error message
+   */
+  public MetricsException(String message) {
+    super(message);
+  }
+    
+}

+ 253 - 0
src/core/org/apache/hadoop/metrics/MetricsRecord.java

@@ -0,0 +1,253 @@
+/*
+ * MetricsRecord.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A named and optionally tagged set of records to be sent to the metrics
+ * system. <p/>
+ *
+ * A record name identifies the kind of data to be reported. For example, a
+ * program reporting statistics relating to the disks on a computer might use
+ * a record name "diskStats".<p/>
+ *
+ * A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ * continue the example, the "diskStats" record might use a tag named
+ * "diskName" to identify a particular disk.  Sometimes it is useful to have
+ * more than one tag, so there might also be a "diskType" with value "ide" or
+ * "scsi" or whatever.<p/>
+ *
+ * A record also has zero or more <i>metrics</i>.  These are the named
+ * values that are to be reported to the metrics system.  In the "diskStats"
+ * example, possible metric names would be "diskPercentFull", "diskPercentBusy", 
+ * "kbReadPerSecond", etc.<p/>
+ * 
+ * The general procedure for using a MetricsRecord is to fill in its tag and
+ * metric values, and then call <code>update()</code> to pass the record to the
+ * client library.
+ * Metric data is not immediately sent to the metrics system
+ * each time that <code>update()</code> is called. 
+ * An internal table is maintained, identified by the record name. This
+ * table has columns 
+ * corresponding to the tag and the metric names, and rows 
+ * corresponding to each unique set of tag values. An update
+ * either modifies an existing row in the table, or adds a new row with a set of
+ * tag values that are different from all the other rows.  Note that if there
+ * are no tags, then there can be at most one row in the table. <p/>
+ * 
+ * Once a row is added to the table, its data will be sent to the metrics system 
+ * on every timer period, whether or not it has been updated since the previous
+ * timer period.  If this is inappropriate, for example if metrics were being
+ * reported by some transient object in an application, the <code>remove()</code>
+ * method can be used to remove the row and thus stop the data from being
+ * sent.<p/>
+ *
+ * Note that the <code>update()</code> method is atomic.  This means that it is
+ * safe for different threads to be updating the same metric.  More precisely,
+ * it is OK for different threads to call <code>update()</code> on MetricsRecord instances 
+ * with the same set of tag names and tag values.  Different threads should 
+ * <b>not</b> use the same MetricsRecord instance at the same time.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MetricsRecord {
+    
+  /**
+   * Returns the record name. 
+   *
+   * @return the record name
+   */
+  public abstract String getRecordName();
+    
+  /**
+   * Sets the named tag to the specified value.  The tagValue may be null, 
+   * which is treated the same as an empty String.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public abstract void setTag(String tagName, String tagValue);
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public abstract void setTag(String tagName, int tagValue);
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public abstract void setTag(String tagName, long tagValue);
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public abstract void setTag(String tagName, short tagValue);
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public abstract void setTag(String tagName, byte tagValue);
+    
+  /**
+   * Removes any tag of the specified name.
+   *
+   * @param tagName name of a tag
+   */
+  public abstract void removeTag(String tagName);
+  
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void setMetric(String metricName, int metricValue);
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void setMetric(String metricName, long metricValue);
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void setMetric(String metricName, short metricValue);
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void setMetric(String metricName, byte metricValue);
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void setMetric(String metricName, float metricValue);
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void incrMetric(String metricName, int metricValue);
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void incrMetric(String metricName, long metricValue);
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void incrMetric(String metricName, short metricValue);
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void incrMetric(String metricName, byte metricValue);
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public abstract void incrMetric(String metricName, float metricValue);
+    
+  /**
+   * Updates the table of buffered data which is to be sent periodically.
+   * If the tag values match an existing row, that row is updated; 
+   * otherwise, a new row is added.
+   */
+  public abstract void update();
+    
+  /**
+   * Removes, from the buffered data table, all rows having tags 
+   * that equal the tags that have been set on this record. For example,
+   * if there are no tags on this record, all rows for this record name
+   * would be removed.  Or, if there is a single tag on this record, then
+   * just rows containing a tag with the same name and value would be removed.
+   */
+  public abstract void remove();
+    
+}
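
A minimal sketch (not part of the patch) following the "diskStats" example in the javadoc above. It uses MetricsUtil, added later in this commit, which also tags the record with the host name; the tag and metric values are illustrative:

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;

    public class DiskStatsSketch {
      public static void main(String[] args) {
        MetricsContext context = MetricsUtil.getContext("example");
        MetricsRecord diskStats = MetricsUtil.createRecord(context, "diskStats");
        diskStats.setTag("diskName", "sda1");         // selects the row in the internal table
        diskStats.setMetric("diskPercentFull", 42);
        diskStats.incrMetric("kbReadPerSecond", 128);
        diskStats.update();                           // buffers the row; emitted on the next period
      }
    }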

+ 174 - 0
src/core/org/apache/hadoop/metrics/MetricsServlet.java

@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
+import org.mortbay.util.ajax.JSON;
+import org.mortbay.util.ajax.JSON.Output;
+
+/**
+ * A servlet to print out metrics data.  By default, the servlet returns a 
+ * textual representation (no promises are made for parseability), and
+ * users can use "?format=json" for parseable output.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MetricsServlet extends HttpServlet {
+  
+  /**
+   * A helper class to hold a TagMap and MetricMap.
+   */
+  static class TagsMetricsPair implements JSON.Convertible {
+    final TagMap tagMap;
+    final MetricMap metricMap;
+    
+    public TagsMetricsPair(TagMap tagMap, MetricMap metricMap) {
+      this.tagMap = tagMap;
+      this.metricMap = metricMap;
+    }
+
+    @SuppressWarnings("unchecked")
+    public void fromJSON(Map map) {
+      throw new UnsupportedOperationException();
+    }
+
+    /** Converts to JSON by providing an array. */
+    public void toJSON(Output out) {
+      out.add(new Object[] { tagMap, metricMap });
+    }
+  }
+  
+  /**
+   * Collects all metric data, and returns a map:
+   *   contextName -> recordName -> [ (tag->tagValue), (metric->metricValue) ].
+   * The values are either String or Number.  The final value is implemented
+   * as a list of TagsMetricsPair.
+   */
+   Map<String, Map<String, List<TagsMetricsPair>>> makeMap(
+       Collection<MetricsContext> contexts) throws IOException {
+    Map<String, Map<String, List<TagsMetricsPair>>> map = 
+      new TreeMap<String, Map<String, List<TagsMetricsPair>>>();
+
+    for (MetricsContext context : contexts) {
+      Map<String, List<TagsMetricsPair>> records = 
+        new TreeMap<String, List<TagsMetricsPair>>();
+      map.put(context.getContextName(), records);
+    
+      for (Map.Entry<String, Collection<OutputRecord>> r : 
+          context.getAllRecords().entrySet()) {
+        List<TagsMetricsPair> metricsAndTags = 
+          new ArrayList<TagsMetricsPair>();
+        records.put(r.getKey(), metricsAndTags);
+        for (OutputRecord outputRecord : r.getValue()) {
+          TagMap tagMap = outputRecord.getTagsCopy();
+          MetricMap metricMap = outputRecord.getMetricsCopy();
+          metricsAndTags.add(new TagsMetricsPair(tagMap, metricMap));
+        }
+      }
+    }
+    return map;
+  }
+  
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws ServletException, IOException {
+
+    // Do the authorization
+    if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
+        response)) {
+      return;
+    }
+
+    PrintWriter out = new PrintWriter(response.getOutputStream());
+    String format = request.getParameter("format");
+    Collection<MetricsContext> allContexts = 
+      ContextFactory.getFactory().getAllContexts();
+    if ("json".equals(format)) {
+      // Uses Jetty's built-in JSON support to convert the map into JSON.
+      out.print(new JSON().toJSON(makeMap(allContexts)));
+    } else {
+      printMap(out, makeMap(allContexts));
+    }
+    out.close();
+  }
+  
+  /**
+   * Prints metrics data in a multi-line text form.
+   */
+  void printMap(PrintWriter out, Map<String, Map<String, List<TagsMetricsPair>>> map) {
+    for (Map.Entry<String, Map<String, List<TagsMetricsPair>>> context : map.entrySet()) {
+      out.println(context.getKey());
+      for (Map.Entry<String, List<TagsMetricsPair>> record : context.getValue().entrySet()) {
+        indent(out, 1);
+        out.println(record.getKey());
+        for (TagsMetricsPair pair : record.getValue()) {
+          indent(out, 2);
+          // Prints tag values in the form "{key=value,key=value}:"
+          out.print("{");
+          boolean first = true;
+          for (Map.Entry<String, Object> tagValue : pair.tagMap.entrySet()) {
+            if (first) {
+              first = false;
+            } else {
+              out.print(",");
+            }
+            out.print(tagValue.getKey());
+            out.print("=");
+            out.print(tagValue.getValue().toString());
+          }
+          out.println("}:");
+          
+          // Now print metric values, one per line
+          for (Map.Entry<String, Number> metricValue : 
+              pair.metricMap.entrySet()) {
+            indent(out, 3);
+            out.print(metricValue.getKey());
+            out.print("=");
+            out.println(metricValue.getValue().toString());
+          }
+        }
+      }
+    }    
+  }
+  
+  private void indent(PrintWriter out, int indent) {
+    for (int i = 0; i < indent; ++i) {
+      out.append("  ");
+    }
+  }
+}

+ 106 - 0
src/core/org/apache/hadoop/metrics/MetricsUtil.java

@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Utility class to simplify creation and reporting of hadoop metrics.
+ *
+ * For examples of usage, see NameNodeMetrics.
+ * @see org.apache.hadoop.metrics.MetricsRecord
+ * @see org.apache.hadoop.metrics.MetricsContext
+ * @see org.apache.hadoop.metrics.ContextFactory
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class MetricsUtil {
+    
+  public static final Log LOG =
+    LogFactory.getLog(MetricsUtil.class);
+
+  /**
+   * Don't allow creation of a new instance of Metrics
+   */
+  private MetricsUtil() {}
+    
+  public static MetricsContext getContext(String contextName) {
+    return getContext(contextName, contextName);
+  }
+
+  /**
+   * Utility method to return the named context.
+   * If the desired context cannot be created for any reason, the exception
+   * is logged, and a null context is returned.
+   */
+  public static MetricsContext getContext(String refName, String contextName) {
+    MetricsContext metricsContext;
+    try {
+      metricsContext =
+        ContextFactory.getFactory().getContext(refName, contextName);
+      if (!metricsContext.isMonitoring()) {
+        metricsContext.startMonitoring();
+      }
+    } catch (Exception ex) {
+      LOG.error("Unable to create metrics context " + contextName, ex);
+      metricsContext = ContextFactory.getNullContext(contextName);
+    }
+    return metricsContext;
+  }
+
+  /**
+   * Utility method to create and return new metrics record instance within the
+   * given context. This record is tagged with the host name.
+   *
+   * @param context the context
+   * @param recordName name of the record
+   * @return newly created metrics record
+   */
+  public static MetricsRecord createRecord(MetricsContext context, 
+                                           String recordName) 
+  {
+    MetricsRecord metricsRecord = context.createRecord(recordName);
+    metricsRecord.setTag("hostName", getHostName());
+    return metricsRecord;        
+  }
+    
+  /**
+   * Returns the host name.  If the host name is unobtainable, logs the
+   * exception and returns "unknown".
+   */
+  private static String getHostName() {
+    String hostName = null;
+    try {
+      hostName = InetAddress.getLocalHost().getHostName();
+    } 
+    catch (UnknownHostException ex) {
+      LOG.info("Unable to obtain hostName", ex);
+      hostName = "unknown";
+    }
+    return hostName;
+  }
+
+}

+ 40 - 0
src/core/org/apache/hadoop/metrics/Updater.java

@@ -0,0 +1,40 @@
+/*
+ * Updater.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Call-back interface.  See <code>MetricsContext.registerUpdater()</code>.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public interface Updater {
+    
+  /**
+   * Timer-based call-back from the metric library. 
+   */
+  public abstract void doUpdates(MetricsContext context);
+
+}

+ 154 - 0
src/core/org/apache/hadoop/metrics/file/FileContext.java

@@ -0,0 +1,154 @@
+/*
+ * FileContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.file;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * Metrics context for writing metrics to a file.<p/>
+ *
+ * This class is configured by setting ContextFactory attributes which in turn
+ * are usually configured through a properties file.  All the attributes are
+ * prefixed by the contextName. For example, the properties file might contain:
+ * <pre>
+ * myContextName.fileName=/tmp/metrics.log
+ * myContextName.period=5
+ * </pre>
+ * @deprecated use {@link org.apache.hadoop.metrics2.sink.FileSink} instead.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@Deprecated
+public class FileContext extends AbstractMetricsContext {
+    
+  /* Configuration attribute names */
+  @InterfaceAudience.Private
+  protected static final String FILE_NAME_PROPERTY = "fileName";
+  @InterfaceAudience.Private
+  protected static final String PERIOD_PROPERTY = "period";
+    
+  private File file = null;              // file for metrics to be written to
+  private PrintWriter writer = null;
+    
+  /** Creates a new instance of FileContext */
+  @InterfaceAudience.Private
+  public FileContext() {}
+    
+  @InterfaceAudience.Private
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+        
+    String fileName = getAttribute(FILE_NAME_PROPERTY);
+    if (fileName != null) {
+      file = new File(fileName);
+    }
+        
+    parseAndSetPeriod(PERIOD_PROPERTY);
+  }
+
+  /**
+   * Returns the configured file name, or null.
+   */
+  @InterfaceAudience.Private
+  public String getFileName() {
+    if (file == null) {
+      return null;
+    } else {
+      return file.getName();
+    }
+  }
+    
+  /**
+   * Starts or restarts monitoring, by opening in append-mode, the
+   * file specified by the <code>fileName</code> attribute,
+   * if specified. Otherwise the data will be written to standard
+   * output.
+   */
+  @InterfaceAudience.Private
+  public void startMonitoring()
+    throws IOException 
+  {
+    if (file == null) {
+      writer = new PrintWriter(new BufferedOutputStream(System.out));
+    } else {
+      writer = new PrintWriter(new FileWriter(file, true));
+    }
+    super.startMonitoring();
+  }
+    
+  /**
+   * Stops monitoring, closing the file.
+   * @see #close()
+   */
+  @InterfaceAudience.Private
+  public void stopMonitoring() {
+    super.stopMonitoring();
+        
+    if (writer != null) {
+      writer.close();
+      writer = null;
+    }
+  }
+    
+  /**
+   * Emits a metrics record to a file.
+   */
+  @InterfaceAudience.Private
+  public void emitRecord(String contextName, String recordName, OutputRecord outRec) {
+    writer.print(contextName);
+    writer.print(".");
+    writer.print(recordName);
+    String separator = ": ";
+    for (String tagName : outRec.getTagNames()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(tagName);
+      writer.print("=");
+      writer.print(outRec.getTag(tagName));
+    }
+    for (String metricName : outRec.getMetricNames()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(metricName);
+      writer.print("=");
+      writer.print(outRec.getMetric(metricName));
+    }
+    writer.println();
+  }
+    
+  /**
+   * Flushes the output writer, forcing updates to disk.
+   */
+  @InterfaceAudience.Private
+  public void flush() {
+    writer.flush();
+  }
+}
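
A hypothetical hadoop-metrics.properties fragment wiring the "dfs" context to this class; fileName and period are the attributes documented in the javadoc above, and the ".class" suffix follows the ContextFactory convention. The context name and file path are illustrative:

    dfs.class=org.apache.hadoop.metrics.file.FileContext
    dfs.fileName=/tmp/dfsmetrics.log
    dfs.period=10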

+ 43 - 0
src/core/org/apache/hadoop/metrics/file/package.html

@@ -0,0 +1,43 @@
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<body>
+Implementation of the metrics package that writes the metrics to a file.
+Programmers should not normally need to use this package directly. Instead
+they should use org.apache.hadoop.metrics.
+
+<p/>
+These are the implementation specific factory attributes 
+(See ContextFactory.getFactory()):
+
+<dl>
+    <dt><i>contextName</i>.fileName</dt>
+    <dd>The path of the file to which metrics in context <i>contextName</i>
+    are to be appended.  If this attribute is not specified, the metrics
+    are written to standard output by default.</dd>
+    
+    <dt><i>contextName</i>.period</dt>
+    <dd>The period in seconds on which the metric data is written to the
+    file.</dd>
+    
+</dl>
+
+
+</body>
+</html>

+ 240 - 0
src/core/org/apache/hadoop/metrics/ganglia/GangliaContext.java

@@ -0,0 +1,240 @@
+/*
+ * GangliaContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.ganglia;
+
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.SocketAddress;
+import java.net.SocketException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+import org.apache.hadoop.metrics.spi.Util;
+
+/**
+ * Context for sending metrics to Ganglia.
+ * 
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class GangliaContext extends AbstractMetricsContext {
+    
+  private static final String PERIOD_PROPERTY = "period";
+  private static final String SERVERS_PROPERTY = "servers";
+  private static final String UNITS_PROPERTY = "units";
+  private static final String SLOPE_PROPERTY = "slope";
+  private static final String TMAX_PROPERTY = "tmax";
+  private static final String DMAX_PROPERTY = "dmax";
+    
+  private static final String DEFAULT_UNITS = "";
+  private static final String DEFAULT_SLOPE = "both";
+  private static final int DEFAULT_TMAX = 60;
+  private static final int DEFAULT_DMAX = 0;
+  private static final int DEFAULT_PORT = 8649;
+  private static final int BUFFER_SIZE = 1500;       // as per libgmond.c
+
+  private final Log LOG = LogFactory.getLog(this.getClass());    
+
+  private static final Map<Class,String> typeTable = new HashMap<Class,String>(5);
+    
+  static {
+    typeTable.put(String.class, "string");
+    typeTable.put(Byte.class, "int8");
+    typeTable.put(Short.class, "int16");
+    typeTable.put(Integer.class, "int32");
+    typeTable.put(Long.class, "float"); // Ganglia 3.0 has no 64-bit integer type
+    typeTable.put(Float.class, "float");
+  }
+    
+  protected byte[] buffer = new byte[BUFFER_SIZE];
+  protected int offset;
+    
+  protected List<? extends SocketAddress> metricsServers;
+  private Map<String,String> unitsTable;
+  private Map<String,String> slopeTable;
+  private Map<String,String> tmaxTable;
+  private Map<String,String> dmaxTable;
+    
+  protected DatagramSocket datagramSocket;
+    
+  /** Creates a new instance of GangliaContext */
+  @InterfaceAudience.Private
+  public GangliaContext() {
+  }
+    
+  @InterfaceAudience.Private
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+    parseAndSetPeriod(PERIOD_PROPERTY);
+        
+    metricsServers = 
+      Util.parse(getAttribute(SERVERS_PROPERTY), DEFAULT_PORT); 
+        
+    unitsTable = getAttributeTable(UNITS_PROPERTY);
+    slopeTable = getAttributeTable(SLOPE_PROPERTY);
+    tmaxTable  = getAttributeTable(TMAX_PROPERTY);
+    dmaxTable  = getAttributeTable(DMAX_PROPERTY);
+        
+    try {
+      datagramSocket = new DatagramSocket();
+    }
+    catch (SocketException se) {
+      se.printStackTrace();
+    }
+  }
+
+  @InterfaceAudience.Private
+  public void emitRecord(String contextName, String recordName,
+    OutputRecord outRec) 
+  throws IOException {
+    // Setup so that the records have the proper leader names so they are
+    // unambiguous at the ganglia level, and this prevents a lot of rework
+    StringBuilder sb = new StringBuilder();
+    sb.append(contextName);
+    sb.append('.');
+    sb.append(recordName);
+    sb.append('.');
+    int sbBaseLen = sb.length();
+
+    // emit each metric in turn
+    for (String metricName : outRec.getMetricNames()) {
+      Object metric = outRec.getMetric(metricName);
+      String type = typeTable.get(metric.getClass());
+      if (type != null) {
+        sb.append(metricName);
+        emitMetric(sb.toString(), type, metric.toString());
+        sb.setLength(sbBaseLen);
+      } else {
+        LOG.warn("Unknown metrics type: " + metric.getClass());
+      }
+    }
+  }
+    
+  protected void emitMetric(String name, String type,  String value) 
+  throws IOException {
+    String units = getUnits(name);
+    int slope = getSlope(name);
+    int tmax = getTmax(name);
+    int dmax = getDmax(name);
+        
+    offset = 0;
+    xdr_int(0);             // metric_user_defined
+    xdr_string(type);
+    xdr_string(name);
+    xdr_string(value);
+    xdr_string(units);
+    xdr_int(slope);
+    xdr_int(tmax);
+    xdr_int(dmax);
+        
+    for (SocketAddress socketAddress : metricsServers) {
+      DatagramPacket packet = 
+        new DatagramPacket(buffer, offset, socketAddress);
+      datagramSocket.send(packet);
+    }
+  }
+    
+  protected String getUnits(String metricName) {
+    String result = unitsTable.get(metricName);
+    if (result == null) {
+      result = DEFAULT_UNITS;
+    }
+    return result;
+  }
+    
+  protected int getSlope(String metricName) {
+    String slopeString = slopeTable.get(metricName);
+    if (slopeString == null) {
+      slopeString = DEFAULT_SLOPE; 
+    }
+    return ("zero".equals(slopeString) ? 0 : 3); // see gmetric.c
+  }
+    
+  protected int getTmax(String metricName) {
+    if (tmaxTable == null) {
+      return DEFAULT_TMAX;
+    }
+    String tmaxString = tmaxTable.get(metricName);
+    if (tmaxString == null) {
+      return DEFAULT_TMAX;
+    }
+    else {
+      return Integer.parseInt(tmaxString);
+    }
+  }
+    
+  protected int getDmax(String metricName) {
+    String dmaxString = dmaxTable.get(metricName);
+    if (dmaxString == null) {
+      return DEFAULT_DMAX;
+    }
+    else {
+      return Integer.parseInt(dmaxString);
+    }
+  }
+    
+  /**
+   * Puts a string into the buffer by first writing the size of the string
+   * as an int, followed by the bytes of the string, padded if necessary to
+   * a multiple of 4.
+   */
+  protected void xdr_string(String s) {
+    byte[] bytes = s.getBytes();
+    int len = bytes.length;
+    xdr_int(len);
+    System.arraycopy(bytes, 0, buffer, offset, len);
+    offset += len;
+    pad();
+  }
+
+  /**
+   * Pads the buffer with zero bytes up to the nearest multiple of 4.
+   */
+  private void pad() {
+    int newOffset = ((offset + 3) / 4) * 4;
+    while (offset < newOffset) {
+      buffer[offset++] = 0;
+    }
+  }
+        
+  /**
+   * Puts an integer into the buffer as 4 bytes, big-endian.
+   */
+  protected void xdr_int(int i) {
+    buffer[offset++] = (byte)((i >> 24) & 0xff);
+    buffer[offset++] = (byte)((i >> 16) & 0xff);
+    buffer[offset++] = (byte)((i >> 8) & 0xff);
+    buffer[offset++] = (byte)(i & 0xff);
+  }
+}
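For readers unfamiliar with the wire format, here is a small, self-contained Java sketch
(not part of this patch) of the Ganglia 3.0 message that emitMetric() above assembles.
The destination host, metric name, and values are illustrative only; the field order and
XDR encoding mirror the methods in the class.

<pre>
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;

// Sketch only: length-prefixed, 4-byte-aligned XDR strings and big-endian ints,
// sent as a single UDP datagram, as GangliaContext.emitMetric() does.
public class GmetricSketch {
  private static final byte[] buf = new byte[1500];
  private static int off = 0;

  private static void xdrInt(int i) {
    buf[off++] = (byte) (i >> 24); buf[off++] = (byte) (i >> 16);
    buf[off++] = (byte) (i >> 8);  buf[off++] = (byte) i;
  }

  private static void xdrString(String s) {
    byte[] b = s.getBytes();
    xdrInt(b.length);
    System.arraycopy(b, 0, buf, off, b.length);
    off += b.length;
    while (off % 4 != 0) buf[off++] = 0;   // pad to a 4-byte boundary
  }

  public static void main(String[] args) throws Exception {
    xdrInt(0);                         // metric_user_defined
    xdrString("int32");                // type
    xdrString("jvm.metrics.gcCount");  // contextName.recordName.metricName
    xdrString("42");                   // value is always sent as a string
    xdrString("");                     // units
    xdrInt(3);                         // slope "both"
    xdrInt(60);                        // tmax
    xdrInt(0);                         // dmax
    try (DatagramSocket sock = new DatagramSocket()) {
      sock.send(new DatagramPacket(buf, off,
          new InetSocketAddress("gmond.example.com", 8649)));
    }
  }
}
</pre>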

+ 144 - 0
src/core/org/apache/hadoop/metrics/ganglia/GangliaContext31.java

@@ -0,0 +1,144 @@
+/*
+ * GangliaContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.ganglia;
+
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.SocketAddress;
+import java.net.UnknownHostException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.net.DNS;
+
+/**
+ * Context for sending metrics to Ganglia version 3.1.x.
+ * 
+ * 3.1.1 has a slightly different wire protocol compared to 3.0.x.
+ */
+public class GangliaContext31 extends GangliaContext {
+
+  String hostName = "UNKNOWN.example.com";
+
+  private static final Log LOG = 
+    LogFactory.getLog("org.apache.hadoop.util.GangliaContext31");
+
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+
+    LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");
+
+    // Take the hostname from the DNS class.
+
+    Configuration conf = new Configuration();
+
+    if (conf.get("slave.host.name") != null) {
+      hostName = conf.get("slave.host.name");
+    } else {
+      try {
+        hostName = DNS.getDefaultHost(
+          conf.get("dfs.datanode.dns.interface","default"),
+          conf.get("dfs.datanode.dns.nameserver","default"));
+      } catch (UnknownHostException uhe) {
+        LOG.error(uhe);
+        hostName = "UNKNOWN.example.com";
+      }
+    }
+  }
+
+  protected void emitMetric(String name, String type,  String value) 
+    throws IOException
+  {
+    if (name == null) {
+      LOG.warn("Metric was emitted with no name.");
+      return;
+    } else if (value == null) {
+      LOG.warn("Metric name " + name +" was emitted with a null value.");
+      return;
+    } else if (type == null) {
+      LOG.warn("Metric name " + name + ", value " + value + " has no type.");
+      return;
+    }
+
+    LOG.debug("Emitting metric " + name + ", type " + type + ", value " + 
+      value + " from hostname" + hostName);
+
+    String units = getUnits(name);
+    if (units == null) {
+      LOG.warn("Metric name " + name + ", value " + value
+        + " had 'null' units");
+      units = "";
+    }
+    int slope = getSlope(name);
+    int tmax = getTmax(name);
+    int dmax = getDmax(name);
+    offset = 0;
+    String groupName = name.substring(0,name.lastIndexOf("."));
+
+    // The following XDR recipe was done through a careful reading of
+    // gm_protocol.x in Ganglia 3.1 and carefully examining the output of
+    // the gmetric utility with strace.
+
+    // First we send out a metadata message
+    xdr_int(128);         // metric_id = metadata_msg
+    xdr_string(hostName); // hostname
+    xdr_string(name);     // metric name
+    xdr_int(0);           // spoof = False
+    xdr_string(type);     // metric type
+    xdr_string(name);     // metric name
+    xdr_string(units);    // units
+    xdr_int(slope);       // slope
+    xdr_int(tmax);        // tmax, the maximum time between metrics
+    xdr_int(dmax);        // dmax, the lifetime of the metric in seconds (0 = never expire)
+
+    xdr_int(1);             /*Num of the entries in extra_value field for 
+                              Ganglia 3.1.x*/
+    xdr_string("GROUP");    /*Group attribute*/
+    xdr_string(groupName);  /*Group value*/
+
+    for (SocketAddress socketAddress : metricsServers) {
+      DatagramPacket packet =
+        new DatagramPacket(buffer, offset, socketAddress);
+      datagramSocket.send(packet);
+    }
+
+    // Now we send out a message with the actual value.
+    // Technically, we only need to send out the metadata message once for
+    // each metric, but I don't want to have to record which metrics we did and
+    // did not send.
+    offset = 0;
+    xdr_int(133);         // we are sending a string value
+    xdr_string(hostName); // hostName
+    xdr_string(name);     // metric name
+    xdr_int(0);           // spoof = False
+    xdr_string("%s");     // format field
+    xdr_string(value);    // metric value
+        
+    for (SocketAddress socketAddress : metricsServers) {
+      DatagramPacket packet = 
+        new DatagramPacket(buffer, offset, socketAddress);
+      datagramSocket.send(packet);
+    }
+  }
+
+}

+ 74 - 0
src/core/org/apache/hadoop/metrics/ganglia/package.html

@@ -0,0 +1,74 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<body>
+
+Implementation of the metrics package that sends metric data to 
+<a href="http://ganglia.sourceforge.net/">Ganglia</a>.
+Programmers should not normally need to use this package directly. Instead
+they should use org.apache.hadoop.metrics.
+
+<p/>
+These are the implementation specific factory attributes 
+(See ContextFactory.getFactory()):
+
+<dl>
+    <dt><i>contextName</i>.servers</dt>
+    <dd>Space and/or comma separated sequence of servers to which UDP
+    messages should be sent.</dd>
+    
+    <dt><i>contextName</i>.period</dt>
+    <dd>The period in seconds at which the metric data is sent to the
+    server(s).</dd>
+    
+    <dt><i>contextName</i>.units.<i>recordName</i>.<i>metricName</i></dt>
+    <dd>The units for the specified metric in the specified record.</dd>
+    
+    <dt><i>contextName</i>.slope.<i>recordName</i>.<i>metricName</i></dt>
+    <dd>The slope for the specified metric in the specified record.</dd>
+    
+    <dt><i>contextName</i>.tmax.<i>recordName</i>.<i>metricName</i></dt>
+    <dd>The tmax for the specified metric in the specified record.</dd>
+    
+    <dt><i>contextName</i>.dmax.<i>recordName</i>.<i>metricName</i></dt>
+    <dd>The dmax for the specified metric in the specified record.</dd>
+    
+</dl>
+
+
+</body>
+</html>
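As a configuration sketch (not part of this patch), the attributes above could be wired up
in hadoop-metrics.properties roughly as follows. The context name "myContext", the record
and metric names, and the hosts are illustrative only.

<pre>
myContext.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# or, for the Ganglia 3.1.x wire format:
# myContext.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
myContext.period=10
myContext.servers=gmond-a.example.com:8649, gmond-b.example.com
# optional per-metric tuning, keyed by recordName.metricName
myContext.units.diskStats.diskBusy=percent
myContext.slope.diskStats.diskBusy=both
myContext.tmax.diskStats.diskBusy=60
myContext.dmax.diskStats.diskBusy=0
</pre>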

+ 35 - 0
src/core/org/apache/hadoop/metrics/jvm/EventCounter.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.jvm;
+
+/**
+ * A log4j Appender that simply counts logging events in four levels:
+ * fatal, error, warn and info.
+ * @deprecated use {@link org.apache.hadoop.log.metrics.EventCounter} instead
+ */
+@Deprecated
+public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {
+
+  static {
+    // The logging system is not started yet.
+    System.err.println("WARNING: "+ EventCounter.class.getName() +
+        " is deprecated. Please use "+
+        org.apache.hadoop.log.metrics.EventCounter.class.getName() +
+        " in all the log4j.properties files.");
+  }
+}
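The change the warning above asks for amounts to a one-line edit per log4j.properties
file; the appender name "EventCounter" below is illustrative.

<pre>
# before (deprecated):
# log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
# after:
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
</pre>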

+ 201 - 0
src/core/org/apache/hadoop/metrics/jvm/JvmMetrics.java

@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.jvm;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.lang.management.MemoryUsage;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+
+import static java.lang.Thread.State.*;
+import java.lang.management.GarbageCollectorMXBean;
+import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ * Any application can obtain the singleton through <code>init()</code> in order
+ * to emit Java VM metrics.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class JvmMetrics implements Updater {
+    
+    private static final float M = 1024*1024;
+    private static JvmMetrics theInstance = null;
+    private static Log log = LogFactory.getLog(JvmMetrics.class);
+    
+    private MetricsRecord metrics;
+    
+    // garbage collection counters
+    private long gcCount = 0;
+    private long gcTimeMillis = 0;
+    
+    // logging event counters
+    private long fatalCount = 0;
+    private long errorCount = 0;
+    private long warnCount  = 0;
+    private long infoCount  = 0;
+    
+    public synchronized static JvmMetrics init(String processName, String sessionId) {
+      return init(processName, sessionId, "metrics");
+    }
+    
+    public synchronized static JvmMetrics init(String processName, String sessionId,
+      String recordName) {
+        if (theInstance != null) {
+            log.info("Cannot initialize JVM Metrics with processName=" + 
+                     processName + ", sessionId=" + sessionId + 
+                     " - already initialized");
+        }
+        else {
+            log.info("Initializing JVM Metrics with processName=" 
+                    + processName + ", sessionId=" + sessionId);
+            theInstance = new JvmMetrics(processName, sessionId, recordName);
+        }
+        return theInstance;
+    }
+    
+    /** Creates a new instance of JvmMetrics */
+    private JvmMetrics(String processName, String sessionId,
+      String recordName) {
+        MetricsContext context = MetricsUtil.getContext("jvm");
+        metrics = MetricsUtil.createRecord(context, recordName);
+        metrics.setTag("processName", processName);
+        metrics.setTag("sessionId", sessionId);
+        context.registerUpdater(this);
+    }
+    
+    /**
+     * This will be called periodically (with the period being configuration
+     * dependent).
+     */
+    public void doUpdates(MetricsContext context) {
+        doMemoryUpdates();
+        doGarbageCollectionUpdates();
+        doThreadUpdates();
+        doEventCountUpdates();
+        metrics.update();
+    }
+    
+    private void doMemoryUpdates() {
+        MemoryMXBean memoryMXBean =
+               ManagementFactory.getMemoryMXBean();
+        MemoryUsage memNonHeap =
+                memoryMXBean.getNonHeapMemoryUsage();
+        MemoryUsage memHeap =
+                memoryMXBean.getHeapMemoryUsage();
+        Runtime runtime = Runtime.getRuntime();
+
+        metrics.setMetric("memNonHeapUsedM", memNonHeap.getUsed()/M);
+        metrics.setMetric("memNonHeapCommittedM", memNonHeap.getCommitted()/M);
+        metrics.setMetric("memHeapUsedM", memHeap.getUsed()/M);
+        metrics.setMetric("memHeapCommittedM", memHeap.getCommitted()/M);
+        metrics.setMetric("maxMemoryM", runtime.maxMemory()/M);
+    }
+    
+    private void doGarbageCollectionUpdates() {
+        List<GarbageCollectorMXBean> gcBeans =
+                ManagementFactory.getGarbageCollectorMXBeans();
+        long count = 0;
+        long timeMillis = 0;
+        for (GarbageCollectorMXBean gcBean : gcBeans) {
+            count += gcBean.getCollectionCount();
+            timeMillis += gcBean.getCollectionTime();
+        }
+        metrics.incrMetric("gcCount", (int)(count - gcCount));
+        metrics.incrMetric("gcTimeMillis", (int)(timeMillis - gcTimeMillis));
+        
+        gcCount = count;
+        gcTimeMillis = timeMillis;
+    }
+    
+    private void doThreadUpdates() {
+        ThreadMXBean threadMXBean =
+                ManagementFactory.getThreadMXBean();
+        long threadIds[] = 
+                threadMXBean.getAllThreadIds();
+        ThreadInfo[] threadInfos =
+                threadMXBean.getThreadInfo(threadIds, 0);
+        
+        int threadsNew = 0;
+        int threadsRunnable = 0;
+        int threadsBlocked = 0;
+        int threadsWaiting = 0;
+        int threadsTimedWaiting = 0;
+        int threadsTerminated = 0;
+        
+        for (ThreadInfo threadInfo : threadInfos) {
+            // threadInfo is null if the thread is not alive or doesn't exist
+            if (threadInfo == null) continue;
+            Thread.State state = threadInfo.getThreadState();
+            if (state == NEW) {
+                threadsNew++;
+            } 
+            else if (state == RUNNABLE) {
+                threadsRunnable++;
+            }
+            else if (state == BLOCKED) {
+                threadsBlocked++;
+            }
+            else if (state == WAITING) {
+                threadsWaiting++;
+            } 
+            else if (state == TIMED_WAITING) {
+                threadsTimedWaiting++;
+            }
+            else if (state == TERMINATED) {
+                threadsTerminated++;
+            }
+        }
+        metrics.setMetric("threadsNew", threadsNew);
+        metrics.setMetric("threadsRunnable", threadsRunnable);
+        metrics.setMetric("threadsBlocked", threadsBlocked);
+        metrics.setMetric("threadsWaiting", threadsWaiting);
+        metrics.setMetric("threadsTimedWaiting", threadsTimedWaiting);
+        metrics.setMetric("threadsTerminated", threadsTerminated);
+    }
+    
+    private void doEventCountUpdates() {
+        long newFatal = EventCounter.getFatal();
+        long newError = EventCounter.getError();
+        long newWarn  = EventCounter.getWarn();
+        long newInfo  = EventCounter.getInfo();
+        
+        metrics.incrMetric("logFatal", (int)(newFatal - fatalCount));
+        metrics.incrMetric("logError", (int)(newError - errorCount));
+        metrics.incrMetric("logWarn",  (int)(newWarn - warnCount));
+        metrics.incrMetric("logInfo",  (int)(newInfo - infoCount));
+        
+        fatalCount = newFatal;
+        errorCount = newError;
+        warnCount  = newWarn;
+        infoCount  = newInfo;
+    }
+}
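As a usage sketch (not part of this patch): a daemon enables these metrics by calling
init() once. The process name and session id below are illustrative.

<pre>
// at daemon start-up; the "jvm" context is taken from hadoop-metrics.properties
JvmMetrics.init("MyDaemon", "session-0001");
// doUpdates() is then driven by the context's timer each period, refreshing
// the memory, garbage-collection, thread and logging-event metrics.
</pre>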

+ 160 - 0
src/core/org/apache/hadoop/metrics/package.html

@@ -0,0 +1,160 @@
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+  <head>
+    <title>org.apache.hadoop.metrics</title>
+  </head>
+<body>
+  <p><em>Note, this package is deprecated in favor of
+    <code>org.apache.hadoop.metrics2</code> usage.</em></p>
+  <p>This package defines an API for reporting performance metric information.</p>
+  <p>The API is abstract so that it can be implemented on top of
+a variety of metrics client libraries.  The choice of 
+client library is a configuration option, and different 
+modules within the same application can use
+different metrics implementation libraries.</p>
+<p/>
+Sub-packages:
+<dl>
+    <dt><code>org.apache.hadoop.metrics.spi</code></dt>
+    <dd>The abstract Service Provider Interface package. Those wishing to
+    integrate the metrics API with a particular metrics client library should 
+    extend this package.</dd>
+    
+    <dt><code>org.apache.hadoop.metrics.file</code></dt>
+    <dd>An implementation package which writes the metric data to 
+    a file, or sends it to the standard output stream.</dd>
+ 
+    <dt> <code>org.apache.hadoop.metrics.ganglia</code></dt>
+    <dd>An implementation package which sends metric data to 
+    <a href="http://ganglia.sourceforge.net/">Ganglia</a>.</dd>
+</dl>
+
+<h3>Introduction to the Metrics API</h3>
+
+Here is a simple example of how to use this package to report a single
+metric value:
+<pre>
+    private ContextFactory contextFactory = ContextFactory.getFactory();
+    
+    void reportMyMetric(float myMetric) {
+        MetricsContext myContext = contextFactory.getContext("myContext");
+        MetricsRecord myRecord = myContext.getRecord("myRecord");
+        myRecord.setMetric("myMetric", myMetric);
+        myRecord.update();
+    }
+</pre>
+  
+In this example there are three names:
+<dl>
+  <dt><i>myContext</i></dt>
+  <dd>The context name will typically identify either the application or a
+  module within an application or library.</dd>
+  
+  <dt><i>myRecord</i></dt>
+  <dd>The record name generally identifies some entity for which a set of
+  metrics are to be reported.  For example, you could have a record named 
+  "cacheStats" for reporting a number of statistics relating to the usage of
+  some cache in your application.</dd>
+  
+  <dt><i>myMetric</i></dt>
+  <dd>This identifies a particular metric.  For example, you might have metrics
+  named "cache_hits" and "cache_misses".
+  </dd>
+</dl>
+
+<h3>Tags</h3>
+
+In some cases it is useful to have multiple records with the same name. For 
+example, suppose that you want to report statistics about each disk on a computer. 
+In this case, the record name would be something like "diskStats", but you also
+need to identify the disk, which is done by adding a <i>tag</i> to the record.
+The code could look something like this:
+<pre>
+    private MetricsRecord diskStats =
+            contextFactory.getContext("myContext").getRecord("diskStats");
+            
+    void reportDiskMetrics(String diskName, float diskBusy, float diskUsed) {
+        diskStats.setTag("diskName", diskName);
+        diskStats.setMetric("diskBusy", diskBusy);
+        diskStats.setMetric("diskUsed", diskUsed);
+        diskStats.update();
+    }
+</pre>
+
+<h3>Buffering and Callbacks</h3>
+
+Data is not sent immediately to the metrics system when 
+<code>MetricsRecord.update()</code> is called. Instead it is stored in an
+internal table, and the contents of the table are sent periodically.
+This can be important for two reasons:
+<ol>
+    <li>It means that a programmer is free to put calls to this API in an 
+    inner loop, since updates can be very frequent without slowing down
+    the application significantly.</li>
+    <li>Some implementations can gain efficiency by combining many metrics 
+    into a single UDP message.</li>
+</ol>
+
+The API provides a timer-based callback via the 
+<code>registerUpdater()</code> method.  The benefit of this
+versus using <code>java.util.Timer</code> is that the callbacks will be done 
+immediately before sending the data, making the data as current as possible.
+
+<h3>Configuration</h3>
+
+It is possible to programmatically examine and modify configuration data
+before creating a context, like this:
+<pre>
+    ContextFactory factory = ContextFactory.getFactory();
+    ... examine and/or modify factory attributes ...
+    MetricsContext context = factory.getContext("myContext");
+</pre>
+The factory attributes can be examined and modified using the following
+<code>ContextFactory</code> methods:
+<ul>
+    <li><code>Object getAttribute(String attributeName)</code></li>
+    <li><code>String[] getAttributeNames()</code></li>
+    <li><code>void setAttribute(String name, Object value)</code></li>
+    <li><code>void removeAttribute(String attributeName)</code></li>
+</ul>
+
+<p/>
+<code>ContextFactory.getFactory()</code> initializes the factory attributes by
+reading the properties file <code>hadoop-metrics.properties</code> if it exists 
+on the class path.
+
+<p/>
+A factory attribute named:
+<pre>
+<i>contextName</i>.class
+</pre>
+should have as its value the fully qualified name of the class to be 
+instantiated by a call to the <code>ContextFactory</code> method
+<code>getContext(<i>contextName</i>)</code>.  If this factory attribute is not 
+specified, the default is to instantiate 
+<code>org.apache.hadoop.metrics.file.FileContext</code>.
+
+<p/>
+Other factory attributes are specific to a particular implementation of this 
+API and are documented elsewhere.  For example, configuration attributes for
+the file and Ganglia implementations can be found in the javadoc for 
+their respective packages.
+</body>
+</html>
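As an illustration of the <code>registerUpdater()</code> callback described under
"Buffering and Callbacks" above, here is a minimal Updater sketch (not part of this
patch); all class, context, record and metric names are illustrative.

<pre>
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

// Hypothetical updater: pushes cache counters just before each periodic emission.
public class CacheStatsUpdater implements Updater {
  private final MetricsRecord cacheStats;
  private int hitsSinceLastUpdate;   // incremented by the cache between emissions

  public CacheStatsUpdater() {
    MetricsContext context = MetricsUtil.getContext("myContext");
    cacheStats = MetricsUtil.createRecord(context, "cacheStats");
    context.registerUpdater(this);
  }

  public void cacheHit() {
    hitsSinceLastUpdate++;
  }

  public void doUpdates(MetricsContext unused) {
    cacheStats.incrMetric("cache_hits", hitsSinceLastUpdate);
    hitsSinceLastUpdate = 0;
    cacheStats.update();
  }
}
</pre>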

+ 483 - 0
src/core/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java

@@ -0,0 +1,483 @@
+/*
+ * AbstractMetricsContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.Updater;
+
+/**
+ * The main class of the Service Provider Interface.  This class should be
+ * extended in order to integrate the Metrics API with a specific metrics
+ * client library. <p/>
+ *
+ * This class implements the internal table of metric data, and the timer
+ * on which data is to be sent to the metrics system.  Subclasses must
+ * override the abstract <code>emitRecord</code> method in order to transmit
+ * the data. <p/>
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class AbstractMetricsContext implements MetricsContext {
+    
+  private int period = MetricsContext.DEFAULT_PERIOD;
+  private Timer timer = null;
+    
+  private Set<Updater> updaters = new HashSet<Updater>(1);
+  private volatile boolean isMonitoring = false;
+    
+  private ContextFactory factory = null;
+  private String contextName = null;
+    
+  @InterfaceAudience.Private
+  public static class TagMap extends TreeMap<String,Object> {
+    private static final long serialVersionUID = 3546309335061952993L;
+    TagMap() {
+      super();
+    }
+    TagMap(TagMap orig) {
+      super(orig);
+    }
+    /**
+     * Returns true if this tagmap contains every tag in other.
+     */
+    public boolean containsAll(TagMap other) {
+      for (Map.Entry<String,Object> entry : other.entrySet()) {
+        Object value = get(entry.getKey());
+        if (value == null || !value.equals(entry.getValue())) {
+          // either key does not exist here, or the value is different
+          return false;
+        }
+      }
+      return true;
+    }
+  }
+  
+  @InterfaceAudience.Private
+  public static class MetricMap extends TreeMap<String,Number> {
+    private static final long serialVersionUID = -7495051861141631609L;
+    MetricMap() {
+      super();
+    }
+    MetricMap(MetricMap orig) {
+      super(orig);
+    }
+  }
+            
+  static class RecordMap extends HashMap<TagMap,MetricMap> {
+    private static final long serialVersionUID = 259835619700264611L;
+  }
+    
+  private Map<String,RecordMap> bufferedData = new HashMap<String,RecordMap>();
+    
+
+  /**
+   * Creates a new instance of AbstractMetricsContext
+   */
+  protected AbstractMetricsContext() {
+  }
+    
+  /**
+   * Initializes the context.
+   */
+  public void init(String contextName, ContextFactory factory) 
+  {
+    this.contextName = contextName;
+    this.factory = factory;
+  }
+    
+  /**
+   * Convenience method for subclasses to access factory attributes.
+   */
+  protected String getAttribute(String attributeName) {
+    String factoryAttribute = contextName + "." + attributeName;
+    return (String) factory.getAttribute(factoryAttribute);  
+  }
+    
+  /**
+   * Returns an attribute-value map derived from the factory attributes
+   * by finding all factory attributes that begin with 
+   * <i>contextName</i>.<i>tableName</i>.  The returned map consists of
+   * those attributes with the contextName and tableName stripped off.
+   */
+  protected Map<String,String> getAttributeTable(String tableName) {
+    String prefix = contextName + "." + tableName + ".";
+    Map<String,String> result = new HashMap<String,String>();
+    for (String attributeName : factory.getAttributeNames()) {
+      if (attributeName.startsWith(prefix)) {
+        String name = attributeName.substring(prefix.length());
+        String value = (String) factory.getAttribute(attributeName);
+        result.put(name, value);
+      }
+    }
+    return result;
+  }
+    
+  /**
+   * Returns the context name.
+   */
+  public String getContextName() {
+    return contextName;
+  }
+    
+  /**
+   * Returns the factory by which this context was created.
+   */
+  public ContextFactory getContextFactory() {
+    return factory;
+  }
+    
+  /**
+   * Starts or restarts monitoring, the emitting of metrics records.
+   */
+  public synchronized void startMonitoring()
+    throws IOException {
+    if (!isMonitoring) {
+      startTimer();
+      isMonitoring = true;
+    }
+  }
+    
+  /**
+   * Stops monitoring.  This does not free buffered data. 
+   * @see #close()
+   */
+  public synchronized void stopMonitoring() {
+    if (isMonitoring) {
+      stopTimer();
+      isMonitoring = false;
+    }
+  }
+    
+  /**
+   * Returns true if monitoring is currently in progress.
+   */
+  public boolean isMonitoring() {
+    return isMonitoring;
+  }
+    
+  /**
+   * Stops monitoring and frees buffered data, returning this
+   * object to its initial state.  
+   */
+  public synchronized void close() {
+    stopMonitoring();
+    clearUpdaters();
+  } 
+    
+  /**
+   * Creates a new MetricsRecord instance with the given <code>recordName</code>.
+   * Throws an exception if the metrics implementation is configured with a fixed
+   * set of record names and <code>recordName</code> is not in that set.
+   * 
+   * @param recordName the name of the record
+   * @throws MetricsException if recordName conflicts with configuration data
+   */
+  public final synchronized MetricsRecord createRecord(String recordName) {
+    if (bufferedData.get(recordName) == null) {
+      bufferedData.put(recordName, new RecordMap());
+    }
+    return newRecord(recordName);
+  }
+    
+  /**
+   * Subclasses should override this if they subclass MetricsRecordImpl.
+   * @param recordName the name of the record
+   * @return newly created instance of MetricsRecordImpl or subclass
+   */
+  protected MetricsRecord newRecord(String recordName) {
+    return new MetricsRecordImpl(recordName, this);
+  }
+    
+  /**
+   * Registers a callback to be called at time intervals determined by
+   * the configuration.
+   *
+   * @param updater object to be run periodically; it should update
+   * some metrics records 
+   */
+  public synchronized void registerUpdater(final Updater updater) {
+    if (!updaters.contains(updater)) {
+      updaters.add(updater);
+    }
+  }
+    
+  /**
+   * Removes a callback, if it exists.
+   *
+   * @param updater object to be removed from the callback list
+   */
+  public synchronized void unregisterUpdater(Updater updater) {
+    updaters.remove(updater);
+  }
+    
+  private synchronized void clearUpdaters() {
+    updaters.clear();
+  }
+    
+  /**
+   * Starts timer if it is not already started
+   */
+  private synchronized void startTimer() {
+    if (timer == null) {
+      timer = new Timer("Timer thread for monitoring " + getContextName(), 
+                        true);
+      TimerTask task = new TimerTask() {
+          public void run() {
+            try {
+              timerEvent();
+            }
+            catch (IOException ioe) {
+              ioe.printStackTrace();
+            }
+          }
+        };
+      long millis = period * 1000;
+      timer.scheduleAtFixedRate(task, millis, millis);
+    }
+  }
+    
+  /**
+   * Stops timer if it is running
+   */
+  private synchronized void stopTimer() {
+    if (timer != null) {
+      timer.cancel();
+      timer = null;
+    }
+  }
+    
+  /**
+   * Timer callback.
+   */
+  private void timerEvent() throws IOException {
+    if (isMonitoring) {
+      Collection<Updater> myUpdaters;
+      synchronized (this) {
+        myUpdaters = new ArrayList<Updater>(updaters);
+      }     
+      // Run all the registered updates without holding a lock
+      // on this context
+      for (Updater updater : myUpdaters) {
+        try {
+          updater.doUpdates(this);
+        }
+        catch (Throwable throwable) {
+          throwable.printStackTrace();
+        }
+      }
+      emitRecords();
+    }
+  }
+    
+  /**
+   *  Emits the records.
+   */
+  private synchronized void emitRecords() throws IOException {
+    for (String recordName : bufferedData.keySet()) {
+      RecordMap recordMap = bufferedData.get(recordName);
+      synchronized (recordMap) {
+        Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet ();
+        for (Entry<TagMap, MetricMap> entry : entrySet) {
+          OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
+          emitRecord(contextName, recordName, outRec);
+        }
+      }
+    }
+    flush();
+  }
+  
+  /**
+   * Retrieves all the records managed by this MetricsContext.
+   * Useful for monitoring systems that are polling-based.
+   * @return A non-null collection of all monitoring records.
+   */
+  public synchronized Map<String, Collection<OutputRecord>> getAllRecords() {
+    Map<String, Collection<OutputRecord>> out = new TreeMap<String, Collection<OutputRecord>>();
+    for (String recordName : bufferedData.keySet()) {
+      RecordMap recordMap = bufferedData.get(recordName);
+      synchronized (recordMap) {
+        List<OutputRecord> records = new ArrayList<OutputRecord>();
+        Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet();
+        for (Entry<TagMap, MetricMap> entry : entrySet) {
+          OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
+          records.add(outRec);
+        }
+        out.put(recordName, records);
+      }
+    }
+    return out;
+  }
+
+  /**
+   * Sends a record to the metrics system.
+   */
+  protected abstract void emitRecord(String contextName, String recordName, 
+                                     OutputRecord outRec) throws IOException;
+    
+  /**
+   * Called each period after all records have been emitted. The default
+   * implementation does nothing; subclasses may override it in order to
+   * perform some kind of flush.
+   */
+  protected void flush() throws IOException {
+  }
+    
+  /**
+   * Called by MetricsRecordImpl.update().  Creates or updates a row in
+   * the internal table of metric data.
+   */
+  protected void update(MetricsRecordImpl record) {
+    String recordName = record.getRecordName();
+    TagMap tagTable = record.getTagTable();
+    Map<String,MetricValue> metricUpdates = record.getMetricTable();
+        
+    RecordMap recordMap = getRecordMap(recordName);
+    synchronized (recordMap) {
+      MetricMap metricMap = recordMap.get(tagTable);
+      if (metricMap == null) {
+        metricMap = new MetricMap();
+        TagMap tagMap = new TagMap(tagTable); // clone tags
+        recordMap.put(tagMap, metricMap);
+      }
+
+      Set<Entry<String, MetricValue>> entrySet = metricUpdates.entrySet();
+      for (Entry<String, MetricValue> entry : entrySet) {
+        String metricName = entry.getKey ();
+        MetricValue updateValue = entry.getValue ();
+        Number updateNumber = updateValue.getNumber();
+        Number currentNumber = metricMap.get(metricName);
+        if (currentNumber == null || updateValue.isAbsolute()) {
+          metricMap.put(metricName, updateNumber);
+        }
+        else {
+          Number newNumber = sum(updateNumber, currentNumber);
+          metricMap.put(metricName, newNumber);
+        }
+      }
+    }
+  }
+    
+  private synchronized RecordMap getRecordMap(String recordName) {
+    return bufferedData.get(recordName);
+  }
+    
+  /**
+   * Adds two numbers, coercing the second to the type of the first.
+   *
+   */
+  private Number sum(Number a, Number b) {
+    if (a instanceof Integer) {
+      return Integer.valueOf(a.intValue() + b.intValue());
+    }
+    else if (a instanceof Float) {
+      return new Float(a.floatValue() + b.floatValue());
+    }
+    else if (a instanceof Short) {
+      return Short.valueOf((short)(a.shortValue() + b.shortValue()));
+    }
+    else if (a instanceof Byte) {
+      return Byte.valueOf((byte)(a.byteValue() + b.byteValue()));
+    }
+    else if (a instanceof Long) {
+      return Long.valueOf((a.longValue() + b.longValue()));
+    }
+    else {
+      // should never happen
+      throw new MetricsException("Invalid number type");
+    }
+            
+  }
+    
+  /**
+   * Called by MetricsRecordImpl.remove().  Removes all matching rows in
+   * the internal table of metric data.  A row matches if it has the same
+   * tag names and values as record, but it may also have additional
+   * tags.
+   */    
+  protected void remove(MetricsRecordImpl record) {
+    String recordName = record.getRecordName();
+    TagMap tagTable = record.getTagTable();
+        
+    RecordMap recordMap = getRecordMap(recordName);
+    synchronized (recordMap) {
+      Iterator<TagMap> it = recordMap.keySet().iterator();
+      while (it.hasNext()) {
+        TagMap rowTags = it.next();
+        if (rowTags.containsAll(tagTable)) {
+          it.remove();
+        }
+      }
+    }
+  }
+    
+  /**
+   * Returns the timer period.
+   */
+  public int getPeriod() {
+    return period;
+  }
+    
+  /**
+   * Sets the timer period
+   */
+  protected void setPeriod(int period) {
+    this.period = period;
+  }
+  
+  /**
+   * If a period is set in the attribute passed in, override
+   * the default with it.
+   */
+  protected void parseAndSetPeriod(String attributeName) {
+    String periodStr = getAttribute(attributeName);
+    if (periodStr != null) {
+      int period = 0;
+      try {
+        period = Integer.parseInt(periodStr);
+      } catch (NumberFormatException nfe) {
+      }
+      if (period <= 0) {
+        throw new MetricsException("Invalid period: " + periodStr);
+      }
+      setPeriod(period);
+    }
+  }
+}
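As an SPI sketch (not part of this patch), the smallest useful subclass overrides
emitRecord() and, optionally, honours the standard period attribute. The class name and
the print-based output are illustrative; a real integration would hand the data to its
metrics client library instead.

<pre>
import java.io.IOException;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
import org.apache.hadoop.metrics.spi.OutputRecord;

// Hypothetical context that just prints each emitted record.
public class LoggingContext extends AbstractMetricsContext {

  public void init(String contextName, ContextFactory factory) {
    super.init(contextName, factory);
    parseAndSetPeriod("period");   // honour contextName.period if configured
  }

  @Override
  protected void emitRecord(String contextName, String recordName,
                            OutputRecord outRec) throws IOException {
    StringBuilder sb = new StringBuilder(contextName).append('.').append(recordName);
    for (String tag : outRec.getTagNames()) {
      sb.append(' ').append(tag).append('=').append(outRec.getTag(tag));
    }
    for (String metric : outRec.getMetricNames()) {
      sb.append(' ').append(metric).append('=').append(outRec.getMetric(metric));
    }
    System.out.println(sb);
  }
}
</pre>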

+ 205 - 0
src/core/org/apache/hadoop/metrics/spi/CompositeContext.java

@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.spi;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+
+/**
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CompositeContext extends AbstractMetricsContext {
+
+  private static final Log LOG = LogFactory.getLog(CompositeContext.class);
+  private static final String ARITY_LABEL = "arity";
+  private static final String SUB_FMT = "%s.sub%d";
+  private final ArrayList<MetricsContext> subctxt =
+    new ArrayList<MetricsContext>();
+
+  @InterfaceAudience.Private
+  public CompositeContext() {
+  }
+
+  @InterfaceAudience.Private
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+    int nKids;
+    try {
+      String sKids = getAttribute(ARITY_LABEL);
+      nKids = Integer.valueOf(sKids);
+    } catch (Exception e) {
+      LOG.error("Unable to initialize composite metric " + contextName +
+                ": could not init arity", e);
+      return;
+    }
+    for (int i = 0; i < nKids; ++i) {
+      MetricsContext ctxt = MetricsUtil.getContext(
+          String.format(SUB_FMT, contextName, i), contextName);
+      if (null != ctxt) {
+        subctxt.add(ctxt);
+      }
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public MetricsRecord newRecord(String recordName) {
+    return (MetricsRecord) Proxy.newProxyInstance(
+        MetricsRecord.class.getClassLoader(),
+        new Class[] { MetricsRecord.class },
+        new MetricsRecordDelegator(recordName, subctxt));
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  protected void emitRecord(String contextName, String recordName,
+      OutputRecord outRec) throws IOException {
+    for (MetricsContext ctxt : subctxt) {
+      try {
+        ((AbstractMetricsContext)ctxt).emitRecord(
+          contextName, recordName, outRec);
+        if (contextName == null || recordName == null || outRec == null) {
+          throw new IOException(contextName + ":" + recordName + ":" + outRec);
+        }
+      } catch (IOException e) {
+        LOG.warn("emitRecord failed: " + ctxt.getContextName(), e);
+      }
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  protected void flush() throws IOException {
+    for (MetricsContext ctxt : subctxt) {
+      try {
+        ((AbstractMetricsContext)ctxt).flush();
+      } catch (IOException e) {
+        LOG.warn("flush failed: " + ctxt.getContextName(), e);
+      }
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void startMonitoring() throws IOException {
+    for (MetricsContext ctxt : subctxt) {
+      try {
+        ctxt.startMonitoring();
+      } catch (IOException e) {
+        LOG.warn("startMonitoring failed: " + ctxt.getContextName(), e);
+      }
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void stopMonitoring() {
+    for (MetricsContext ctxt : subctxt) {
+      ctxt.stopMonitoring();
+    }
+  }
+
+  /**
+   * Return true if all subcontexts are monitoring.
+   */
+  @InterfaceAudience.Private
+  @Override
+  public boolean isMonitoring() {
+    boolean ret = true;
+    for (MetricsContext ctxt : subctxt) {
+      ret &= ctxt.isMonitoring();
+    }
+    return ret;
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void close() {
+    for (MetricsContext ctxt : subctxt) {
+      ctxt.close();
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void registerUpdater(Updater updater) {
+    for (MetricsContext ctxt : subctxt) {
+      ctxt.registerUpdater(updater);
+    }
+  }
+
+  @InterfaceAudience.Private
+  @Override
+  public void unregisterUpdater(Updater updater) {
+    for (MetricsContext ctxt : subctxt) {
+      ctxt.unregisterUpdater(updater);
+    }
+  }
+
+  private static class MetricsRecordDelegator implements InvocationHandler {
+    private static final Method m_getRecordName = initMethod();
+    private static Method initMethod() {
+      try {
+        return MetricsRecord.class.getMethod("getRecordName", new Class[0]);
+      } catch (Exception e) {
+        throw new RuntimeException("Internal error", e);
+      }
+    }
+
+    private final String recordName;
+    private final ArrayList<MetricsRecord> subrecs;
+
+    MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
+      this.recordName = recordName;
+      this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
+      for (MetricsContext ctxt : ctxts) {
+        subrecs.add(ctxt.createRecord(recordName));
+      }
+    }
+
+    public Object invoke(Object p, Method m, Object[] args) throws Throwable {
+      if (m_getRecordName.equals(m)) {
+        return recordName;
+      }
+      assert Void.TYPE.equals(m.getReturnType());
+      for (MetricsRecord rec : subrecs) {
+        m.invoke(rec, args);
+      }
+      return null;
+    }
+  }
+
+}
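As a configuration sketch (not part of this patch): init() above reads an "arity"
attribute and then looks up sub-contexts named <i>contextName</i>.sub0,
<i>contextName</i>.sub1, and so on. For a hypothetical context named "myContext" this
could look like the following; the sub-context classes and count are illustrative.

<pre>
myContext.class=org.apache.hadoop.metrics.spi.CompositeContext
myContext.arity=2
myContext.sub0.class=org.apache.hadoop.metrics.file.FileContext
myContext.sub1.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# the sub-contexts read their remaining attributes (period, fileName,
# servers, ...) as documented for those implementations
</pre>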

+ 59 - 0
src/core/org/apache/hadoop/metrics/spi/MetricValue.java

@@ -0,0 +1,59 @@
+/*
+ * MetricValue.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A Number that is either an absolute or an incremental amount.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MetricValue {
+    
+  public static final boolean ABSOLUTE = false;
+  public static final boolean INCREMENT = true;
+    
+  private boolean isIncrement;
+  private Number number;
+    
+  /** Creates a new instance of MetricValue */
+  public MetricValue(Number number, boolean isIncrement) {
+    this.number = number;
+    this.isIncrement = isIncrement;
+  }
+
+  public boolean isIncrement() {
+    return isIncrement;
+  }
+    
+  public boolean isAbsolute() {
+    return !isIncrement;
+  }
+
+  public Number getNumber() {
+    return number;
+  }
+    
+}

+ 283 - 0
src/core/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java

@@ -0,0 +1,283 @@
+/*
+ * MetricsRecordImpl.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
+
+/**
+ * An implementation of MetricsRecord.  Keeps a back-pointer to the context
+ * from which it was created, and delegates back to it on <code>update</code>
+ * and <code>remove()</code>.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MetricsRecordImpl implements MetricsRecord {
+    
+  private TagMap tagTable = new TagMap();
+  private Map<String,MetricValue> metricTable = new LinkedHashMap<String,MetricValue>();
+    
+  private String recordName;
+  private AbstractMetricsContext context;
+    
+    
+  /** Creates a new instance of FileRecord */
+  protected MetricsRecordImpl(String recordName, AbstractMetricsContext context)
+  {
+    this.recordName = recordName;
+    this.context = context;
+  }
+    
+  /**
+   * Returns the record name. 
+   *
+   * @return the record name
+   */
+  public String getRecordName() {
+    return recordName;
+  }
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public void setTag(String tagName, String tagValue) {
+    if (tagValue == null) {
+      tagValue = "";
+    }
+    tagTable.put(tagName, tagValue);
+  }
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public void setTag(String tagName, int tagValue) {
+    tagTable.put(tagName, Integer.valueOf(tagValue));
+  }
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public void setTag(String tagName, long tagValue) {
+    tagTable.put(tagName, Long.valueOf(tagValue));
+  }
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public void setTag(String tagName, short tagValue) {
+    tagTable.put(tagName, Short.valueOf(tagValue));
+  }
+    
+  /**
+   * Sets the named tag to the specified value.
+   *
+   * @param tagName name of the tag
+   * @param tagValue new value of the tag
+   * @throws MetricsException if the tagName conflicts with the configuration
+   */
+  public void setTag(String tagName, byte tagValue) {
+    tagTable.put(tagName, Byte.valueOf(tagValue));
+  }
+    
+  /**
+   * Removes any tag of the specified name.
+   */
+  public void removeTag(String tagName) {
+    tagTable.remove(tagName);
+  }
+  
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void setMetric(String metricName, int metricValue) {
+    setAbsolute(metricName, Integer.valueOf(metricValue));
+  }
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void setMetric(String metricName, long metricValue) {
+    setAbsolute(metricName, Long.valueOf(metricValue));
+  }
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void setMetric(String metricName, short metricValue) {
+    setAbsolute(metricName, Short.valueOf(metricValue));
+  }
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void setMetric(String metricName, byte metricValue) {
+    setAbsolute(metricName, Byte.valueOf(metricValue));
+  }
+    
+  /**
+   * Sets the named metric to the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue new value of the metric
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void setMetric(String metricName, float metricValue) {
+    setAbsolute(metricName, new Float(metricValue));
+  }
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void incrMetric(String metricName, int metricValue) {
+    setIncrement(metricName, Integer.valueOf(metricValue));
+  }
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void incrMetric(String metricName, long metricValue) {
+    setIncrement(metricName, Long.valueOf(metricValue));
+  }
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void incrMetric(String metricName, short metricValue) {
+    setIncrement(metricName, Short.valueOf(metricValue));
+  }
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void incrMetric(String metricName, byte metricValue) {
+    setIncrement(metricName, Byte.valueOf(metricValue));
+  }
+    
+  /**
+   * Increments the named metric by the specified value.
+   *
+   * @param metricName name of the metric
+   * @param metricValue incremental value
+   * @throws MetricsException if the metricName or the type of the metricValue 
+   * conflicts with the configuration
+   */
+  public void incrMetric(String metricName, float metricValue) {
+    setIncrement(metricName, new Float(metricValue));
+  }
+    
+  private void setAbsolute(String metricName, Number metricValue) {
+    metricTable.put(metricName, new MetricValue(metricValue, MetricValue.ABSOLUTE));
+  }
+    
+  private void setIncrement(String metricName, Number metricValue) {
+    metricTable.put(metricName, new MetricValue(metricValue, MetricValue.INCREMENT));
+  }
+    
+  /**
+   * Updates the table of buffered data which is to be sent periodically.
+   * If the tag values match an existing row, that row is updated; 
+   * otherwise, a new row is added.
+   */
+  public void update() {
+    context.update(this);
+  }
+    
+  /**
+   * Removes the row, if it exists, in the buffered data table having tags 
+   * that equal the tags that have been set on this record. 
+   */
+  public void remove() {
+    context.remove(this);
+  }
+
+  TagMap getTagTable() {
+    return tagTable;
+  }
+
+  Map<String, MetricValue> getMetricTable() {
+    return metricTable;
+  }
+}
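For orientation, a minimal sketch of how a record produced by this class is normally driven through the MetricsUtil facade added elsewhere in this patch; the context name ("mapred"), record name ("shuffleInput") and the tag/metric names are illustrative only.

    // Create a record from a named context, tag it, set/increment metrics,
    // and buffer it for the next emission cycle.
    MetricsContext context = MetricsUtil.getContext("mapred");
    MetricsRecord record = MetricsUtil.createRecord(context, "shuffleInput");
    record.setTag("sessionId", "job_0001");   // tags select the buffered row
    record.setMetric("bytesRead", 1024L);      // ABSOLUTE value
    record.incrMetric("numReads", 1);          // INCREMENT value
    record.update();                           // delegates back to the context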

+ 58 - 0
src/core/org/apache/hadoop/metrics/spi/NoEmitMetricsContext.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.spi;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsServlet;
+
+/** 
+ * A MetricsContext that does not emit data, but, unlike NullContextWithUpdateThread,
+ * does save it for retrieval with getAllRecords().
+ * 
+ * This is useful if you want to support {@link MetricsServlet}, but
+ * not emit metrics in any other way.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NoEmitMetricsContext extends AbstractMetricsContext {
+    
+    private static final String PERIOD_PROPERTY = "period";
+      
+    /** Creates a new instance of NoEmitMetricsContext */
+    @InterfaceAudience.Private
+    public NoEmitMetricsContext() {
+    }
+    
+    @InterfaceAudience.Private
+    public void init(String contextName, ContextFactory factory) {
+      super.init(contextName, factory);
+      parseAndSetPeriod(PERIOD_PROPERTY);
+    }
+     
+    /**
+     * Do-nothing version of emitRecord
+     */
+    @InterfaceAudience.Private
+    protected void emitRecord(String contextName, String recordName,
+                              OutputRecord outRec) {
+    }
+}
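As a sketch of how this context is typically selected (assuming the standard ContextFactory attribute convention, with "mapred" as an example context name), the equivalent of the usual hadoop-metrics.properties entries can also be set programmatically:

    // hadoop-metrics.properties equivalent:
    //   mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
    //   mapred.period=10
    ContextFactory factory = ContextFactory.getFactory();   // may throw IOException
    factory.setAttribute("mapred.class",
        "org.apache.hadoop.metrics.spi.NoEmitMetricsContext");
    factory.setAttribute("mapred.period", "10");
    MetricsContext ctx = MetricsUtil.getContext("mapred");
    ctx.startMonitoring();   // update thread runs; records stay available to getAllRecords()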

+ 70 - 0
src/core/org/apache/hadoop/metrics/spi/NullContext.java

@@ -0,0 +1,70 @@
+/*
+ * NullContext.java
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Null metrics context: a metrics context which does nothing.  Used as the
+ * default context, so that no performance data is emitted if no configuration
+ * data is found.
+ * 
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NullContext extends AbstractMetricsContext {
+    
+  /** Creates a new instance of NullContext */
+  @InterfaceAudience.Private
+  public NullContext() {
+  }
+    
+  /**
+   * Do-nothing version of startMonitoring
+   */
+  @InterfaceAudience.Private
+  public void startMonitoring() {
+  }
+    
+  /**
+   * Do-nothing version of emitRecord
+   */
+  @InterfaceAudience.Private
+  protected void emitRecord(String contextName, String recordName,
+                            OutputRecord outRec) 
+  {}
+    
+  /**
+   * Do-nothing version of update
+   */
+  @InterfaceAudience.Private
+  protected void update(MetricsRecordImpl record) {
+  }
+    
+  /**
+   * Do-nothing version of remove
+   */
+  @InterfaceAudience.Private
+  protected void remove(MetricsRecordImpl record) {
+  }
+}

+ 79 - 0
src/core/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java

@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsException;
+
+/**
+ * A null context which has an update thread that calls the registered
+ * updaters periodically once monitoring is started. This keeps the data
+ * sampled correctly.
+ * In all other respects, this is like the NULL context: no data is emitted.
+ * This is suitable for monitoring systems such as JMX, which read the
+ * metrics only when someone queries them.
+ * 
+ * The default implementations of startMonitoring and stopMonitoring in
+ * AbstractMetricsContext are good enough.
+ * 
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NullContextWithUpdateThread extends AbstractMetricsContext {
+  
+  private static final String PERIOD_PROPERTY = "period";
+    
+  /** Creates a new instance of NullContextWithUpdateThread */
+  @InterfaceAudience.Private
+  public NullContextWithUpdateThread() {
+  }
+  
+  @InterfaceAudience.Private
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+    parseAndSetPeriod(PERIOD_PROPERTY);
+  }
+   
+    
+  /**
+   * Do-nothing version of emitRecord
+   */
+  @InterfaceAudience.Private
+  protected void emitRecord(String contextName, String recordName,
+                            OutputRecord outRec) 
+  {}
+    
+  /**
+   * Do-nothing version of update
+   */
+  @InterfaceAudience.Private
+  protected void update(MetricsRecordImpl record) {
+  }
+    
+  /**
+   * Do-nothing version of remove
+   */
+  @InterfaceAudience.Private
+  protected void remove(MetricsRecordImpl record) {
+  }
+}

+ 96 - 0
src/core/org/apache/hadoop/metrics/spi/OutputRecord.java

@@ -0,0 +1,96 @@
+/*
+ * OutputRecord.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
+
+/**
+ * Represents a record of metric data to be sent to a metrics system.
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class OutputRecord {
+    
+  private TagMap tagMap;
+  private MetricMap metricMap;
+    
+  /** Creates a new instance of OutputRecord */
+  OutputRecord(TagMap tagMap, MetricMap metricMap) {
+    this.tagMap = tagMap;
+    this.metricMap = metricMap;
+  }
+    
+  /**
+   * Returns the set of tag names
+   */
+  public Set<String> getTagNames() {
+    return Collections.unmodifiableSet(tagMap.keySet());
+  }
+    
+  /**
+   * Returns a tag object, which can be a String, Integer, Short or Byte.
+   *
+   * @return the tag value, or null if there is no such tag
+   */
+  public Object getTag(String name) {
+    return tagMap.get(name);
+  }
+    
+  /**
+   * Returns the set of metric names.
+   */
+  public Set<String> getMetricNames() {
+    return Collections.unmodifiableSet(metricMap.keySet());
+  }
+    
+  /**
+   * Returns the metric object which can be a Float, Integer, Short or Byte.
+   */
+  public Number getMetric(String name) {
+    return metricMap.get(name);
+  }
+  
+
+  /**
+   * Returns a copy of this record's tags.
+   */
+  public TagMap getTagsCopy() {
+    return new TagMap(tagMap);
+  }
+  
+  /**
+   * Returns a copy of this record's metrics.
+   */
+  public MetricMap getMetricsCopy() {
+    return new MetricMap(metricMap);
+  }
+}
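A short sketch of how a consumer such as MetricsServlet might walk these records; `ctx` is assumed to be a MetricsContext obtained via MetricsUtil.getContext, and the printing is purely illustrative (uses java.util.Map and java.util.Collection).

    // getAllRecords() groups the buffered OutputRecords by record name.
    Map<String, Collection<OutputRecord>> all = ctx.getAllRecords();
    for (Map.Entry<String, Collection<OutputRecord>> entry : all.entrySet()) {
      for (OutputRecord rec : entry.getValue()) {
        for (String tag : rec.getTagNames()) {
          System.out.println(entry.getKey() + " tag " + tag + "=" + rec.getTag(tag));
        }
        for (String metric : rec.getMetricNames()) {
          System.out.println(entry.getKey() + " " + metric + "=" + rec.getMetric(metric));
        }
      }
    }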

+ 72 - 0
src/core/org/apache/hadoop/metrics/spi/Util.java

@@ -0,0 +1,72 @@
+/*
+ * Util.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.metrics.spi;
+
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Static utility methods
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Util {
+    
+  /**
+   * This class is not intended to be instantiated
+   */
+  private Util() {}
+    
+  /**
+   * Parses a space and/or comma separated sequence of server specifications
+   * of the form <i>hostname</i> or <i>hostname:port</i>.  If 
+   * the specs string is null, defaults to localhost:defaultPort.
+   * 
+   * @return a list of InetSocketAddress objects.
+   */
+  public static List<InetSocketAddress> parse(String specs, int defaultPort) {
+    List<InetSocketAddress> result = new ArrayList<InetSocketAddress>(1);
+    if (specs == null) {
+      result.add(new InetSocketAddress("localhost", defaultPort));
+    }
+    else {
+      String[] specStrings = specs.split("[ ,]+");
+      for (String specString : specStrings) {
+        int colon = specString.indexOf(':');
+        if (colon < 0 || colon == specString.length() - 1) {
+          result.add(new InetSocketAddress(specString, defaultPort));
+        } else {
+          String hostname = specString.substring(0, colon);
+          int port = Integer.parseInt(specString.substring(colon+1));
+          result.add(new InetSocketAddress(hostname, port));
+        }
+      }
+    }
+    return result;
+  }
+    
+}
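For illustration, two calls showing how the parser behaves; the host names are made up (uses java.util.List and java.net.InetSocketAddress).

    // "host1:8649, host2" with default port 8649 -> [host1:8649, host2:8649]
    List<InetSocketAddress> servers = Util.parse("host1:8649, host2", 8649);
    // A null spec falls back to localhost:defaultPort -> [localhost:8649]
    List<InetSocketAddress> fallback = Util.parse(null, 8649);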

+ 38 - 0
src/core/org/apache/hadoop/metrics/spi/package.html

@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+  <head>
+    <title>org.apache.hadoop.metrics.spi</title>
+  </head>
+  <body>
+    <p><em>Note, this package is deprecated in favor of
+      <code>org.apache.hadoop.metrics2</code> usage.</em></p>
+      <p>The Service Provider Interface for the Metrics API.  This package provides
+an interface allowing a variety of metrics reporting implementations to be
+plugged in to the Metrics API.  Examples of such implementations can be found 
+in the packages <code>org.apache.hadoop.metrics.file</code> and
+<code>org.apache.hadoop.metrics.ganglia</code>.</p>
+
+      <p>Plugging in an implementation involves writing a concrete subclass of
+<code>AbstractMetricsContext</code>.  The subclass should get its
+ configuration information using the <code>getAttribute(<i>attributeName</i>)</code>
+ method.</p>
+  </body>
+</html>
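A minimal sketch of the plug-in pattern described above, assuming a made-up LoggingContext that "emits" by printing; only the init/getAttribute, parseAndSetPeriod and emitRecord hooks shown here come from AbstractMetricsContext.

    public class LoggingContext extends AbstractMetricsContext {
      private String prefix;

      @Override
      public void init(String contextName, ContextFactory factory) {
        super.init(contextName, factory);
        prefix = getAttribute("prefix");   // reads "<contextName>.prefix"
        parseAndSetPeriod("period");       // reads "<contextName>.period"
      }

      @Override
      protected void emitRecord(String contextName, String recordName,
                                OutputRecord outRec) {
        for (String metric : outRec.getMetricNames()) {
          System.out.println(prefix + " " + recordName + "." + metric
              + "=" + outRec.getMetric(metric));
        }
      }
    }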

+ 92 - 0
src/core/org/apache/hadoop/metrics/util/MBeanUtil.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import java.lang.management.ManagementFactory;
+
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.InstanceAlreadyExistsException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+
+/**
+ * This util class provides a method to register an MBean using
+ * our standard naming convention as described in the doc
+ *  for {@link #registerMBean(String, String, Object)}
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.util.MBeans}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MBeanUtil {
+	
+  /**
+   * Register the MBean using our standard MBeanName format
+   * "hadoop:service=<serviceName>,name=<nameName>"
+   * Where the <serviceName> and <nameName> are the supplied parameters
+   *    
+   * @param serviceName
+   * @param nameName
+   * @param theMbean - the MBean to register
+   * @return the name used to register the MBean
+   */	
+  static public ObjectName registerMBean(final String serviceName,
+                                          final String nameName,
+                                          final Object theMbean) {
+    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName name = getMBeanName(serviceName, nameName);
+    try {
+      mbs.registerMBean(theMbean, name);
+      return name;
+    } catch (InstanceAlreadyExistsException ie) {
+      // Ignore if instance already exists 
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+    return null;
+  }
+  
+  static public void unregisterMBean(ObjectName mbeanName) {
+    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    if (mbeanName == null) 
+        return;
+    try {
+      mbs.unregisterMBean(mbeanName);
+    } catch (InstanceNotFoundException e ) {
+      // ignore
+    } catch (Exception e) {
+      e.printStackTrace();
+    } 
+  }
+  
+  static private ObjectName getMBeanName(final String serviceName,
+                                          final String nameName) {
+    ObjectName name = null;
+    try {
+      name = new ObjectName("hadoop:" +
+                  "service=" + serviceName + ",name=" + nameName);
+    } catch (MalformedObjectNameException e) {
+      e.printStackTrace();
+    }
+    return name;
+  }
+}
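A small usage sketch; "DataNode", "RpcActivity" and rpcActivityMBean are example names only, not part of this change.

    // Registers under the ObjectName "hadoop:service=DataNode,name=RpcActivity".
    ObjectName beanName =
        MBeanUtil.registerMBean("DataNode", "RpcActivity", rpcActivityMBean);
    // ... later, e.g. during shutdown:
    MBeanUtil.unregisterMBean(beanName);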

+ 51 - 0
src/core/org/apache/hadoop/metrics/util/MetricsBase.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+
+/**
+ * 
+ * This is the base class for all metrics.
+ *
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.Private
+public abstract class MetricsBase {
+  public static final String NO_DESCRIPTION = "NoDescription";
+  final private String name;
+  final private String description;
+  
+  protected MetricsBase(final String nam) {
+    name = nam;
+    description = NO_DESCRIPTION;
+  }
+  
+  protected MetricsBase(final String nam, final String desc) {
+    name = nam;
+    description = desc;
+  }
+  
+  public abstract void pushMetric(final MetricsRecord mr);
+  
+  public String getName() { return name; }
+  public String getDescription() { return description; }
+
+}

+ 229 - 0
src/core/org/apache/hadoop/metrics/util/MetricsDynamicMBeanBase.java

@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.AttributeNotFoundException;
+import javax.management.DynamicMBean;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanOperationInfo;
+import javax.management.ReflectionException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsUtil;
+
+
+
+/**
+ * This abstract base class facilitates creating dynamic mbeans automatically from
+ * metrics. 
+ * The metrics constructors register the metrics in a registry. 
+ * Different categories of metrics should be in different classes with their own
+ * registry (as in NameNodeMetrics and DataNodeMetrics).
+ * Then the MBean can be created passing the registry to the constructor.
+ * The MBean should then be registered under an MBean name (example):
+ *  MetricsHolder myMetrics = new MetricsHolder(); // has metrics and registry
+ *  MetricsTestMBean theMBean = new MetricsTestMBean(myMetrics.mregistry);
+ *  ObjectName mbeanName = MBeanUtil.registerMBean("ServiceFoo",
+ *                "TestStatistics", theMBean);
+ * 
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public abstract class MetricsDynamicMBeanBase implements DynamicMBean {
+  private final static String AVG_TIME = "AvgTime";
+  private final static String MIN_TIME = "MinTime";
+  private final static String MAX_TIME = "MaxTime";
+  private final static String NUM_OPS = "NumOps";
+  private final static String RESET_ALL_MIN_MAX_OP = "resetAllMinMax";
+  private MetricsRegistry metricsRegistry;
+  private MBeanInfo mbeanInfo;
+  private Map<String, MetricsBase> metricsRateAttributeMod;
+  private int numEntriesInRegistry = 0;
+  private String mbeanDescription;
+  
+  protected MetricsDynamicMBeanBase(final MetricsRegistry mr, final String aMBeanDescription) {
+    metricsRegistry = mr;
+    mbeanDescription = aMBeanDescription;
+    createMBeanInfo();
+  }
+  
+  private void updateMbeanInfoIfMetricsListChanged()  {
+    if (numEntriesInRegistry != metricsRegistry.size())
+      createMBeanInfo();
+  }
+  
+  private void createMBeanInfo() {
+    metricsRateAttributeMod = new HashMap<String, MetricsBase>();
+    boolean needsMinMaxResetOperation = false;
+    List<MBeanAttributeInfo> attributesInfo = new ArrayList<MBeanAttributeInfo>();
+    MBeanOperationInfo[] operationsInfo = null;
+    numEntriesInRegistry = metricsRegistry.size();
+    
+    for (MetricsBase o : metricsRegistry.getMetricsList()) {
+
+      if (MetricsTimeVaryingRate.class.isInstance(o)) {
+        // For each of the metrics there are 3 different attributes
+        attributesInfo.add(new MBeanAttributeInfo(o.getName() + NUM_OPS, "java.lang.Integer",
+            o.getDescription(), true, false, false));
+        attributesInfo.add(new MBeanAttributeInfo(o.getName() + AVG_TIME, "java.lang.Long",
+            o.getDescription(), true, false, false));
+        attributesInfo.add(new MBeanAttributeInfo(o.getName() + MIN_TIME, "java.lang.Long",
+            o.getDescription(), true, false, false));
+        attributesInfo.add(new MBeanAttributeInfo(o.getName() + MAX_TIME, "java.lang.Long",
+            o.getDescription(), true, false, false));
+        needsMinMaxResetOperation = true;  // the min and max can be reset.
+        
+        // Note the special attributes (AVG_TIME, MIN_TIME, ..) are derived from metrics.
+        // Rather than check for the suffix, we store them in a map.
+        metricsRateAttributeMod.put(o.getName() + NUM_OPS, o);
+        metricsRateAttributeMod.put(o.getName() + AVG_TIME, o);
+        metricsRateAttributeMod.put(o.getName() + MIN_TIME, o);
+        metricsRateAttributeMod.put(o.getName() + MAX_TIME, o);
+        
+      }  else if ( MetricsIntValue.class.isInstance(o) || MetricsTimeVaryingInt.class.isInstance(o) ) {
+        attributesInfo.add(new MBeanAttributeInfo(o.getName(), "java.lang.Integer",
+            o.getDescription(), true, false, false)); 
+      } else if ( MetricsLongValue.class.isInstance(o) || MetricsTimeVaryingLong.class.isInstance(o) ) {
+        attributesInfo.add(new MBeanAttributeInfo(o.getName(), "java.lang.Long",
+            o.getDescription(), true, false, false));     
+      } else {
+        MetricsUtil.LOG.error("unknown metrics type: " + o.getClass().getName());
+      }
+
+      if (needsMinMaxResetOperation) {
+        operationsInfo = new MBeanOperationInfo[] {
+            new MBeanOperationInfo(RESET_ALL_MIN_MAX_OP, "Reset (zero) All Min Max",
+                    null, "void", MBeanOperationInfo.ACTION) };
+      }
+    }
+    MBeanAttributeInfo[] attrArray = new MBeanAttributeInfo[attributesInfo.size()];
+    mbeanInfo =  new MBeanInfo(this.getClass().getName(), mbeanDescription, 
+        attributesInfo.toArray(attrArray), null, operationsInfo, null);
+  }
+  
+  @Override
+  public Object getAttribute(String attributeName) throws AttributeNotFoundException,
+      MBeanException, ReflectionException {
+    if (attributeName == null || attributeName.equals("")) 
+      throw new IllegalArgumentException();
+    
+    updateMbeanInfoIfMetricsListChanged();
+    
+    Object o = metricsRateAttributeMod.get(attributeName);
+    if (o == null) {
+      o = metricsRegistry.get(attributeName);
+    }
+    if (o == null)
+      throw new AttributeNotFoundException();
+    
+    if (o instanceof MetricsIntValue)
+      return ((MetricsIntValue) o).get();
+    else if (o instanceof MetricsLongValue)
+      return ((MetricsLongValue) o).get();
+    else if (o instanceof MetricsTimeVaryingInt)
+      return ((MetricsTimeVaryingInt) o).getPreviousIntervalValue();
+    else if (o instanceof MetricsTimeVaryingLong)
+      return ((MetricsTimeVaryingLong) o).getPreviousIntervalValue();
+    else if (o instanceof MetricsTimeVaryingRate) {
+      MetricsTimeVaryingRate or = (MetricsTimeVaryingRate) o;
+      if (attributeName.endsWith(NUM_OPS))
+        return or.getPreviousIntervalNumOps();
+      else if (attributeName.endsWith(AVG_TIME))
+        return or.getPreviousIntervalAverageTime();
+      else if (attributeName.endsWith(MIN_TIME))
+        return or.getMinTime();
+      else if (attributeName.endsWith(MAX_TIME))
+        return or.getMaxTime();
+      else {
+        MetricsUtil.LOG.error("Unexpected attrubute suffix");
+        throw new AttributeNotFoundException();
+      }
+    } else {
+        MetricsUtil.LOG.error("unknown metrics type: " + o.getClass().getName());
+        throw new AttributeNotFoundException();
+    }
+  }
+
+  @Override
+  public AttributeList getAttributes(String[] attributeNames) {
+    if (attributeNames == null || attributeNames.length == 0) 
+      throw new IllegalArgumentException();
+    
+    updateMbeanInfoIfMetricsListChanged();
+    
+    AttributeList result = new AttributeList(attributeNames.length);
+    for (String iAttributeName : attributeNames) {
+      try {
+        Object value = getAttribute(iAttributeName);
+        result.add(new Attribute(iAttributeName, value));
+      } catch (Exception e) {
+        continue;
+      } 
+    }
+    return result;
+  }
+
+  @Override
+  public MBeanInfo getMBeanInfo() {
+    return mbeanInfo;
+  }
+
+  @Override
+  public Object invoke(String actionName, Object[] parms, String[] signature)
+      throws MBeanException, ReflectionException {
+    
+    if (actionName == null || actionName.equals("")) 
+      throw new IllegalArgumentException();
+    
+    
+    // Right now we support only one fixed operation (if it applies)
+    if (!(actionName.equals(RESET_ALL_MIN_MAX_OP)) || 
+        mbeanInfo.getOperations().length != 1) {
+      throw new ReflectionException(new NoSuchMethodException(actionName));
+    }
+    for (MetricsBase m : metricsRegistry.getMetricsList())  {
+      if ( MetricsTimeVaryingRate.class.isInstance(m) ) {
+        MetricsTimeVaryingRate.class.cast(m).resetMinMax();
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public void setAttribute(Attribute attribute)
+      throws AttributeNotFoundException, InvalidAttributeValueException,
+      MBeanException, ReflectionException {
+    throw new ReflectionException(new NoSuchMethodException("set" + attribute));
+  }
+
+  @Override
+  public AttributeList setAttributes(AttributeList attributes) {
+    return null;
+  }
+}
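Concretizing the javadoc example above as a sketch: MyActivityMBean, the names passed to registerMBean, and the `registry` variable (a MetricsRegistry holding the metrics) are hypothetical.

    // A holding MBean that exposes whatever is in the supplied registry via JMX.
    class MyActivityMBean extends MetricsDynamicMBeanBase {
      MyActivityMBean(MetricsRegistry registry) {
        super(registry, "Example activity statistics");
      }
    }

    ObjectName mbeanName = MBeanUtil.registerMBean("ServiceFoo", "MyActivity",
        new MyActivityMBean(registry));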

+ 108 - 0
src/core/org/apache/hadoop/metrics/util/MetricsIntValue.java

@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * The MetricsIntValue class is for a metric that is not time-varying
+ * but changes only when it is set. 
+ * Each time its value is set, it is published only *once* at the next update
+ * call.
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.lib.MutableGaugeInt}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsIntValue extends MetricsBase {  
+
+  private static final Log LOG =
+    LogFactory.getLog("org.apache.hadoop.metrics.util");
+
+  private int value;
+  private boolean changed;
+  
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   */
+  public MetricsIntValue(final String nam, final MetricsRegistry registry, final String description) {
+    super(nam, description);
+    value = 0;
+    changed = false;
+    registry.add(nam, this);
+  }
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * A description of {@link #NO_DESCRIPTION} is used
+   */
+  public MetricsIntValue(final String nam, MetricsRegistry registry) {
+    this(nam, registry, NO_DESCRIPTION);
+  }
+  
+  
+  
+  /**
+   * Set the value
+   * @param newValue
+   */
+  public synchronized void set(final int newValue) {
+    value = newValue;
+    changed = true;
+  }
+  
+  /**
+   * Get value
+   * @return the value last set
+   */
+  public synchronized int get() { 
+    return value;
+  } 
+  
+
+  /**
+   * Push the metric to the mr.
+   * The metric is pushed only if it was updated since last push
+   * 
+   * Note this does NOT push to JMX
+   * (JMX gets the info via {@link #get()}).
+   *
+   * @param mr
+   */
+  public synchronized void pushMetric(final MetricsRecord mr) {
+    if (changed) {
+      try {
+        mr.setMetric(getName(), value);
+      } catch (Exception e) {
+        LOG.info("pushMetric failed for " + getName() + "\n" +
+            StringUtils.stringifyException(e));
+      }
+    }
+    changed = false;
+  }
+}
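A sketch of the usual holding-class pattern (registry, metricsRecord and the metric name are illustrative): the gauge is set whenever the underlying state changes and pushed once per interval from an Updater.

    MetricsRegistry registry = new MetricsRegistry();
    MetricsIntValue capacityUsedPct =
        new MetricsIntValue("capacityUsedPct", registry, "Percent of capacity in use");

    capacityUsedPct.set(42);   // published once at the next pushMetric()

    // inside the holding class's Updater.doUpdates(MetricsContext):
    //   capacityUsedPct.pushMetric(metricsRecord);
    //   metricsRecord.update();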

+ 92 - 0
src/core/org/apache/hadoop/metrics/util/MetricsLongValue.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+
+
+/**
+ * The MetricsLongValue class is for a metric that is not time-varying
+ * but changes only when it is set. 
+ * Each time its value is set, it is published only *once* at the next update
+ * call.
+ *
+ * @deprecated in favor of <code>org.apache.hadoop.metrics2</code> usage.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsLongValue extends MetricsBase{  
+  private long value;
+  private boolean changed;
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   */
+  public MetricsLongValue(final String nam, final MetricsRegistry registry, final String description) {
+    super(nam, description);
+    value = 0;
+    changed = false;
+    registry.add(nam, this);
+  }
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * A description of {@link #NO_DESCRIPTION} is used
+   */
+  public MetricsLongValue(final String nam, MetricsRegistry registry) {
+    this(nam, registry, NO_DESCRIPTION);
+  }
+  
+  /**
+   * Set the value
+   * @param newValue
+   */
+  public synchronized void set(final long newValue) {
+    value = newValue;
+    changed = true;
+  }
+  
+  /**
+   * Get value
+   * @return the value last set
+   */
+  public synchronized long get() { 
+    return value;
+  } 
+ 
+
+  /**
+   * Push the metric to the mr.
+   * The metric is pushed only if it was updated since last push
+   * 
+   * Note this does NOT push to JMX
+   * (JMX gets the info via {@link #get()}).
+   *
+   * @param mr
+   */
+  public synchronized void pushMetric(final MetricsRecord mr) {
+    if (changed) 
+      mr.setMetric(getName(), value);
+    changed = false;
+  }
+}

+ 90 - 0
src/core/org/apache/hadoop/metrics/util/MetricsRegistry.java

@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * 
+ * This is the registry for metrics.
+ * A related set of metrics should be declared in a holding class and registered
+ * in a registry for those metrics, which is also stored in the holding class.
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.lib.MetricsRegistry}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsRegistry {
+  private Map<String, MetricsBase> metricsList = new HashMap<String, MetricsBase>();
+
+  public MetricsRegistry() {
+  }
+  
+  /**
+   * 
+   * @return number of metrics in the registry
+   */
+  public int size() {
+    return metricsList.size();
+  }
+  
+  /**
+   * Add a new metrics to the registry
+   * @param metricsName - the name
+   * @param theMetricsObj - the metrics
+   * @throws IllegalArgumentException if a name is already registered
+   */
+  public synchronized void add(final String metricsName, final MetricsBase theMetricsObj) {
+    if (metricsList.containsKey(metricsName)) {
+      throw new IllegalArgumentException("Duplicate metricsName:" + metricsName);
+    }
+    metricsList.put(metricsName, theMetricsObj);
+  }
+
+  
+  /**
+   * 
+   * @param metricsName
+   * @return the metrics if there is one registered by the supplied name.
+   *         Returns null if none is registered
+   */
+  public synchronized MetricsBase get(final String metricsName) {
+    return metricsList.get(metricsName);
+  }
+  
+  
+  /**
+   * 
+   * @return the list of metrics names
+   */
+  public synchronized Collection<String> getKeyList() {
+    return metricsList.keySet();
+  }
+  
+  /**
+   * 
+   * @return the list of metrics
+   */
+  public synchronized Collection<MetricsBase> getMetricsList() {
+    return metricsList.values();
+  }
+}

+ 132 - 0
src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java

@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * The MetricsTimeVaryingInt class is for a metric that naturally
+ * varies over time (e.g. number of files created). The metric is accumulated
+ * over an interval (set in the metrics config file); the metric is
+ * published at the end of each interval and then 
+ * reset to zero. Hence the counter holds the value for the current interval. 
+ * 
+ * Note if one wants a time associated with the metric then use
+ * @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.lib.MutableCounterInt}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsTimeVaryingInt extends MetricsBase {
+
+  private static final Log LOG =
+    LogFactory.getLog("org.apache.hadoop.metrics.util");
+  
+  private int currentValue;
+  private int previousIntervalValue;
+  
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * @param description - the description
+   */
+  public MetricsTimeVaryingInt(final String nam,
+                               final MetricsRegistry registry,
+                               final String description) {
+    super(nam, description);
+    currentValue = 0;
+    previousIntervalValue = 0;
+    registry.add(nam, this);
+  }
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * A description of {@link #NO_DESCRIPTION} is used
+   */
+  public MetricsTimeVaryingInt(final String nam, final MetricsRegistry registry) {
+    this(nam, registry, NO_DESCRIPTION);
+  }
+  
+
+  
+  /**
+   * Inc metrics by incr value
+   * @param incr - number of operations
+   */
+  public synchronized void inc(final int incr) {
+    currentValue += incr;
+  }
+  
+  /**
+   * Inc metrics by one
+   */
+  public synchronized void inc() {
+    currentValue++;
+  }
+
+  private synchronized void intervalHeartBeat() {
+     previousIntervalValue = currentValue;
+     currentValue = 0;
+  }
+  
+  /**
+   * Push the delta  metrics to the mr.
+   * The delta is since the last push/interval.
+   * 
+   * Note this does NOT push to JMX
+   * (JMX gets the info via {@link #previousIntervalValue})
+   *
+   * @param mr
+   */
+  public synchronized void pushMetric(final MetricsRecord mr) {
+    intervalHeartBeat();
+    try {
+      mr.incrMetric(getName(), getPreviousIntervalValue());
+    } catch (Exception e) {
+      LOG.info("pushMetric failed for " + getName() + "\n" +
+          StringUtils.stringifyException(e));
+    }
+  }
+  
+  
+  /**
+   * The Value at the Previous interval
+   * @return prev interval value
+   */
+  public synchronized int getPreviousIntervalValue() { 
+    return previousIntervalValue;
+  }
+  
+  /**
+   * The Value at the current interval
+   * @return current interval value
+   */
+  public synchronized int getCurrentIntervalValue() { 
+    return currentValue;
+  } 
+}
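A sketch of how such a counter is typically driven (registry and metricsRecord as in the earlier examples; the names are illustrative):

    MetricsTimeVaryingInt filesCreated =
        new MetricsTimeVaryingInt("filesCreated", registry, "Files created");

    filesCreated.inc();    // one operation
    filesCreated.inc(5);   // five more in the same interval

    // at the interval heartbeat, from Updater.doUpdates():
    //   filesCreated.pushMetric(metricsRecord);   // emits the delta (6), then resets to 0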

+ 128 - 0
src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java

@@ -0,0 +1,128 @@
+package org.apache.hadoop.metrics.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * The MetricsTimeVaryingLong class is for a metric that naturally
+ * varies over time (e.g. number of files created). The metric is accumulated
+ * over an interval (set in the metrics config file); the metric is
+ * published at the end of each interval and then 
+ * reset to zero. Hence the counter holds the value for the current interval. 
+ * 
+ * Note if one wants a time associated with the metric then use
+ * @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.lib.MutableCounterLong}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsTimeVaryingLong extends MetricsBase{
+
+  private static final Log LOG =
+    LogFactory.getLog("org.apache.hadoop.metrics.util");
+ 
+  private long currentValue;
+  private long previousIntervalValue;
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   */
+  public MetricsTimeVaryingLong(final String nam, MetricsRegistry registry, final String description) {
+    super(nam, description);
+    currentValue = 0;
+    previousIntervalValue = 0;
+    registry.add(nam, this);
+  }
+  
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * A description of {@link #NO_DESCRIPTION} is used
+   */
+  public MetricsTimeVaryingLong(final String nam, MetricsRegistry registry) {
+    this(nam, registry, NO_DESCRIPTION);
+  }
+  
+  /**
+   * Inc metrics by incr value
+   * @param incr - number of operations
+   */
+  public synchronized void inc(final long incr) {
+    currentValue += incr;
+  }
+  
+  /**
+   * Inc metrics by one
+   */
+  public synchronized void inc() {
+    currentValue++;
+  }
+
+  private synchronized void intervalHeartBeat() {
+     previousIntervalValue = currentValue;
+     currentValue = 0;
+  }
+  
+  /**
+   * Push the delta  metrics to the mr.
+   * The delta is since the last push/interval.
+   * 
+   * Note this does NOT push to JMX
+   * (JMX gets the info via {@link #previousIntervalValue})
+   *
+   * @param mr
+   */
+  public synchronized void pushMetric(final MetricsRecord mr) {
+    intervalHeartBeat();
+    try {
+      mr.incrMetric(getName(), getPreviousIntervalValue());
+    } catch (Exception e) {
+      LOG.info("pushMetric failed for " + getName() + "\n" +
+          StringUtils.stringifyException(e));
+    }
+  }
+  
+  
+  /**
+   * The Value at the Previous interval
+   * @return prev interval value
+   */
+  public synchronized long getPreviousIntervalValue() { 
+    return previousIntervalValue;
+  } 
+  
+  /**
+   * The Value at the current interval
+   * @return current interval value
+   */
+  public synchronized long getCurrentIntervalValue() { 
+    return currentValue;
+  } 
+}

+ 200 - 0
src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java

@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * The MetricsTimeVaryingRate class is for a rate based metric that
+ * naturally varies over time (e.g. time taken to create a file).
+ * The rate is averaged at each interval heart beat (the interval
+ * is set in the metrics config file).
+ * This class also keeps track of the min and max rates along with 
+ * a method to reset the min-max.
+ *
+ * @deprecated in favor of {@link org.apache.hadoop.metrics2.lib.MutableRate}.
+ */
+@Deprecated
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+public class MetricsTimeVaryingRate extends MetricsBase {
+
+  private static final Log LOG =
+    LogFactory.getLog("org.apache.hadoop.metrics.util");
+
+  static class Metrics {
+    int numOperations = 0;
+    long time = 0;  // total time or average time
+
+    void set(final Metrics resetTo) {
+      numOperations = resetTo.numOperations;
+      time = resetTo.time;
+    }
+    
+    void reset() {
+      numOperations = 0;
+      time = 0;
+    }
+  }
+  
+  static class MinMax {
+    long minTime = -1;
+    long maxTime = 0;
+    
+    void set(final MinMax newVal) {
+      minTime = newVal.minTime;
+      maxTime = newVal.maxTime;
+    }
+    
+    void reset() {
+      minTime = -1;
+      maxTime = 0;
+    }
+    void update(final long time) { // update min max
+      minTime = (minTime == -1) ? time : Math.min(minTime, time);
+      minTime = Math.min(minTime, time);
+      maxTime = Math.max(maxTime, time);
+    }
+  }
+  private Metrics currentData;
+  private Metrics previousIntervalData;
+  private MinMax minMax;
+  
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   */
+  public MetricsTimeVaryingRate(final String nam, final MetricsRegistry registry, final String description) {
+    super(nam, description);
+    currentData = new Metrics();
+    previousIntervalData = new Metrics();
+    minMax = new MinMax();
+    registry.add(nam, this);
+  }
+  
+  /**
+   * Constructor - create a new metric
+   * @param nam the name of the metrics to be used to publish the metric
+   * @param registry - where the metrics object will be registered
+   * A description of {@link #NO_DESCRIPTION} is used
+   */
+  public MetricsTimeVaryingRate(final String nam, MetricsRegistry registry) {
+    this(nam, registry, NO_DESCRIPTION);
+
+  }
+  
+  
+  /**
+   * Increment the metrics for numOps operations
+   * @param numOps - number of operations
+   * @param time - time for numOps operations
+   */
+  public synchronized void inc(final int numOps, final long time) {
+    currentData.numOperations += numOps;
+    currentData.time += time;
+    long timePerOps = time/numOps;
+    minMax.update(timePerOps);
+  }
+  
+  /**
+   * Increment the metrics for one operation
+   * @param time for one operation
+   */
+  public synchronized void inc(final long time) {
+    currentData.numOperations++;
+    currentData.time += time;
+    minMax.update(time);
+  }
+  
+  
+
+  private synchronized void intervalHeartBeat() {
+     previousIntervalData.numOperations = currentData.numOperations;
+     previousIntervalData.time = (currentData.numOperations == 0) ?
+                             0 : currentData.time / currentData.numOperations;
+     currentData.reset();
+  }
+  
+  /**
+   * Push the delta  metrics to the mr.
+   * The delta is since the last push/interval.
+   * 
+   * Note this does NOT push to JMX
+   * (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+   * {@link #getPreviousIntervalNumOps()})
+   *
+   * @param mr
+   */
+  public synchronized void pushMetric(final MetricsRecord mr) {
+    intervalHeartBeat();
+    try {
+      mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps());
+      mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime());
+    } catch (Exception e) {
+      LOG.info("pushMetric failed for " + getName() + "\n" +
+          StringUtils.stringifyException(e));
+    }
+  }
+  
+  /**
+   * The number of operations in the previous interval
+   * @return - ops in prev interval
+   */
+  public synchronized int getPreviousIntervalNumOps() { 
+    return previousIntervalData.numOperations;
+  }
+  
+  /**
+   * The average time of an operation in the previous interval
+   * @return - the average time.
+   */
+  public synchronized long getPreviousIntervalAverageTime() {
+    return previousIntervalData.time;
+  } 
+  
+  /**
+   * The min time for a single operation since the last reset
+   *  {@link #resetMinMax()}
+   * @return min time for an operation
+   */
+  public synchronized long getMinTime() {
+    return  minMax.minTime;
+  }
+  
+  /**
+   * The max time for a single operation since the last reset
+   *  {@link #resetMinMax()}
+   * @return max time for an operation
+   */
+  public synchronized long getMaxTime() {
+    return minMax.maxTime;
+  }
+  
+  /**
+   * Reset the min max values
+   */
+  public synchronized void resetMinMax() {
+    minMax.reset();
+  }
+}
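A sketch of timing an operation with this class (createFileTime, registry and the surrounding names are illustrative):

    MetricsTimeVaryingRate createFileTime =
        new MetricsTimeVaryingRate("createFile", registry, "Time to create a file");

    long start = System.currentTimeMillis();
    // ... perform the operation being measured ...
    createFileTime.inc(System.currentTimeMillis() - start);

    // after pushMetric(): getPreviousIntervalNumOps(), getPreviousIntervalAverageTime(),
    // plus getMinTime()/getMaxTime() until resetMinMax() is called.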

+ 22 - 0
src/core/org/apache/hadoop/metrics/util/package-info.java

@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+package org.apache.hadoop.metrics.util;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;