@@ -128,6 +128,7 @@ import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
@@ -251,6 +252,9 @@ public class DataNode extends Configured
   public DataBlockScanner blockScanner = null;
   public Daemon blockScannerThread = null;
 
+  /** Activated plug-ins. */
+  private List<ServicePlugin> plugins;
+
   private static final Random R = new Random();
 
   public static final String DATA_DIR_KEY = "dfs.data.dir";
@@ -538,6 +542,16 @@ public class DataNode extends Configured
     dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());
 
     LOG.info("dnRegistration = " + dnRegistration);
+
+    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
+    for (ServicePlugin p: plugins) {
+      try {
+        p.start(this);
+        LOG.info("Started plug-in " + p);
+      } catch (Throwable t) {
+        LOG.warn("ServicePlugin " + p + " could not be started", t);
+      }
+    }
   }
 
   private ObjectName mxBean = null;
@@ -800,6 +814,17 @@ public class DataNode extends Configured
    * Otherwise, deadlock might occur.
    */
   public void shutdown() {
+    if (plugins != null) {
+      for (ServicePlugin p : plugins) {
+        try {
+          p.stop();
+          LOG.info("Stopped plug-in " + p);
+        } catch (Throwable t) {
+          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
+        }
+      }
+    }
+
     this.unRegisterMXBean();
     if (infoServer != null) {
       try {
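
For illustration, a minimal plug-in compatible with the lifecycle shown above might look like the sketch below. The class and package names are hypothetical; the only contract assumed is what the patch itself exercises: a no-argument constructor (so conf.getInstances can instantiate it reflectively), start(Object) receiving the DataNode instance, and stop() being invoked during shutdown. A close() method is included without @Override on the assumption that ServicePlugin may extend Closeable.

package org.example.hdfs;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.ServicePlugin;

/** Hypothetical plug-in that only logs DataNode lifecycle events. */
public class LoggingDataNodePlugin implements ServicePlugin {
  private static final Log LOG =
      LogFactory.getLog(LoggingDataNodePlugin.class);

  private Object datanode;

  @Override
  public void start(Object service) {
    // The DataNode passes itself in via p.start(this); keep the reference.
    this.datanode = service;
    LOG.info("LoggingDataNodePlugin started for " + service);
  }

  @Override
  public void stop() {
    LOG.info("LoggingDataNodePlugin stopped for " + datanode);
    datanode = null;
  }

  // No @Override: only relevant if ServicePlugin also declares close().
  public void close() {
    stop();
  }
}

To activate such a plug-in, dfs.datanode.plugins would list its fully qualified class name (comma-separated if there are several) and the jar would need to be on the DataNode classpath; the loops added in the patch then call start()/stop() on every configured instance, with the Throwable catch isolating a misbehaving plug-in from the rest of the DataNode startup and shutdown.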