
HDFS-5717. Save FSImage header in protobuf. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5698@1556697 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 11 years ago
parent
commit
128eca8d81

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt

@@ -0,0 +1,3 @@
+HDFS-5698 subtasks
+
+    HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9) 

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -458,6 +458,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <includes>
                   <include>ClientDatanodeProtocol.proto</include>
                   <include>DatanodeProtocol.proto</include>
+                  <include>fsimage.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java

@@ -109,7 +109,8 @@ public class LayoutVersion {
         + "enable rebuilding retry cache in case of HA failover"),
         + "enable rebuilding retry cache in case of HA failover"),
     CACHING(-48, "Support for cache pools and path-based caching"),
     CACHING(-48, "Support for cache pools and path-based caching"),
     ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
     ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
-        + " Use distinct StorageUuid per storage directory.");
+        + " Use distinct StorageUuid per storage directory."),
+    PROTOBUF_FORMAT(-50, "Use protobuf to serialize FSImage");
 
 
     
     
     final int lv;
     final int lv;
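A note on the version check: HDFS layout versions are negative and decrease as features are added, so LayoutVersion.supports() simply tests whether a given image version is at or past the version that introduced a feature. A minimal sketch of the new flag in use (the wrapper class here is illustrative, not part of the patch):

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

public class LayoutVersionSketch {
  public static void main(String[] args) {
    // An image written at layout version -50 (or more negative) carries
    // the protobuf format; -49 and older layouts do not.
    System.out.println(LayoutVersion.supports(Feature.PROTOBUF_FORMAT, -50)); // true
    System.out.println(LayoutVersion.supports(Feature.PROTOBUF_FORMAT, -49)); // false
  }
}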

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -824,8 +824,7 @@ public class FSImage implements Closeable {
    */
   private void loadFSImage(File curFile, MD5Hash expectedMd5,
       FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
-    FSImageFormat.Loader loader = new FSImageFormat.Loader(
-        conf, target);
+    FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, target);
     loader.load(curFile);
     target.setBlockPoolId(this.getBlockPoolID());
 
@@ -854,7 +853,7 @@ public class FSImage implements Closeable {
     File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
     File dstFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid);
     
-    FSImageFormat.Saver saver = new FSImageFormat.Saver(context);
+    FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     saver.save(newFile, compression);
     

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java

@@ -57,6 +57,10 @@ class FSImageCompression {
     imageCodec = codec;
   }
 
+  public CompressionCodec getImageCodec() {
+    return imageCodec;
+  }
+
   /**
    * Create a "noop" compression - i.e. uncompressed
    */
@@ -89,7 +93,7 @@ class FSImageCompression {
    * Create a compression instance using the codec specified by
    * <code>codecClassName</code>
    */
-  private static FSImageCompression createCompression(Configuration conf,
+  static FSImageCompression createCompression(Configuration conf,
                                                       String codecClassName)
     throws IOException {
 

+ 70 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -64,9 +64,12 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Co
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Contains inner classes for reading or writing the on-disk format for
  * FSImages.
@@ -172,16 +175,75 @@ import org.apache.hadoop.io.Text;
 @InterfaceStability.Evolving
 public class FSImageFormat {
   private static final Log LOG = FSImage.LOG;
-  
+
   // Static-only class
   private FSImageFormat() {}
-  
+
+  interface AbstractLoader {
+    MD5Hash getLoadedImageMd5();
+    long getLoadedImageTxId();
+  }
+
+  static class LoaderDelegator implements AbstractLoader {
+    private AbstractLoader impl;
+    private final Configuration conf;
+    private final FSNamesystem fsn;
+
+    LoaderDelegator(Configuration conf, FSNamesystem fsn) {
+      this.conf = conf;
+      this.fsn = fsn;
+    }
+
+    @Override
+    public MD5Hash getLoadedImageMd5() {
+      return impl.getLoadedImageMd5();
+    }
+
+    @Override
+    public long getLoadedImageTxId() {
+      return impl.getLoadedImageTxId();
+    }
+
+    public void load(File file) throws IOException {
+      Preconditions.checkState(impl == null, "Image already loaded!");
+
+      byte[] magic = new byte[FSImageFormatProtobuf.MAGIC_HEADER.length];
+      FileInputStream is = null;
+      try {
+        is = new FileInputStream(file);
+        if (is.read(magic) == magic.length
+            && Arrays.equals(magic, FSImageFormatProtobuf.MAGIC_HEADER)) {
+          FSImageFormatProtobuf.Loader loader = new FSImageFormatProtobuf.Loader(
+              conf, fsn);
+          impl = loader;
+          is.getChannel().position(0);
+          loader.load(is);
+        } else {
+          Loader loader = new Loader(conf, fsn);
+          impl = loader;
+          loader.load(file);
+        }
+
+      } finally {
+        IOUtils.cleanup(LOG, is);
+      }
+    }
+  }
+
+  /**
+   * Construct a loader to load the image. The concrete loader is chosen
+   * lazily at load time by probing the magic header of the image file.
+   */
+  public static LoaderDelegator newLoader(Configuration conf, FSNamesystem fsn) {
+    return new LoaderDelegator(conf, fsn);
+  }
+
   /**
    * A one-shot class responsible for loading an image. The load() function
    * should be called once, after which the getter methods may be used to retrieve
    * information about the image that was loaded, if loading was successful.
    */
-  public static class Loader {
+  public static class Loader implements AbstractLoader {
     private final Configuration conf;
     /** which namesystem this loader is working for */
     private final FSNamesystem namesystem;
@@ -206,12 +268,14 @@ public class FSImageFormat {
      * Return the MD5 checksum of the image that has been loaded.
      * @throws IllegalStateException if load() has not yet been called.
      */
-    MD5Hash getLoadedImageMd5() {
+    @Override
+    public MD5Hash getLoadedImageMd5() {
       checkLoaded();
       return imgDigest;
     }
 
-    long getLoadedImageTxId() {
+    @Override
+    public long getLoadedImageTxId() {
       checkLoaded();
       return imgTxId;
     }
@@ -234,7 +298,7 @@ public class FSImageFormat {
       }
     }
 
-    void load(File curFile) throws IOException {
+    public void load(File curFile) throws IOException {
       checkNotLoaded();
       assert curFile != null : "curFile is null";
 

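Putting the pieces above together, loading now goes through the delegator rather than instantiating a Loader directly. A minimal sketch, assuming the code sits in the org.apache.hadoop.hdfs.server.namenode package (LoaderDelegator is package-private) and that a Configuration and FSNamesystem are wired up elsewhere:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.MD5Hash;

class LoaderSketch {
  static void loadImage(Configuration conf, FSNamesystem fsn, File imageFile)
      throws IOException {
    FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
    // One-shot: probes the magic bytes, then dispatches to the protobuf
    // or legacy loader. A second load() call fails the checkState guard.
    loader.load(imageFile);
    long txId = loader.getLoadedImageTxId();
    MD5Hash digest = loader.getLoadedImageMd5();
    System.out.println("Loaded image txid=" + txId + " md5=" + digest);
  }
}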
+ 277 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java

@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.FileChannel;
+import java.security.DigestInputStream;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileHeader;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.io.compress.CompressionCodec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.CodedOutputStream;
+
+/**
+ * Utility class to read / write fsimage in protobuf format.
+ */
+final class FSImageFormatProtobuf {
+  private static final Log LOG = LogFactory
+      .getLog(FSImageFormatProtobuf.class);
+
+  static final byte[] MAGIC_HEADER = "HDFSIMG1".getBytes();
+  private static final int FILE_VERSION = 1;
+  private static final int PRE_ALLOCATED_HEADER_SIZE = 1024;
+
+  /**
+   * Supported section names in the fsimage.
+   */
+  private enum SectionName {
+    NS_INFO("NS_INFO");
+
+    private static final SectionName[] values = SectionName.values();
+    private final String name;
+
+    private SectionName(String name) {
+      this.name = name;
+    }
+
+    private static SectionName fromString(String name) {
+      for (SectionName n : values) {
+        if (n.name.equals(name))
+          return n;
+      }
+      return null;
+    }
+  }
+
+  // Buffer size used when reading / writing the fsimage
+  public static final int DEFAULT_BUFFER_SIZE = 16 * 1024 * 1024;
+
+  static final class Loader implements FSImageFormat.AbstractLoader {
+    private final Configuration conf;
+    private final FSNamesystem fsn;
+
+    /** The transaction ID of the last edit represented by the loaded file */
+    private long imgTxId;
+    /** The MD5 sum of the loaded file */
+    private MD5Hash imgDigest;
+
+    Loader(Configuration conf, FSNamesystem fsn) {
+      this.conf = conf;
+      this.fsn = fsn;
+    }
+
+    @Override
+    public MD5Hash getLoadedImageMd5() {
+      return imgDigest;
+    }
+
+    @Override
+    public long getLoadedImageTxId() {
+      return imgTxId;
+    }
+
+    @SuppressWarnings("resource")
+    public void load(FileInputStream fin) throws IOException {
+      FileHeader header = loadHeader(new BufferedInputStream(fin));
+
+      fin.getChannel().position(header.getDataOffset());
+      MessageDigest digester = MD5Hash.getDigester();
+      InputStream in = new DigestInputStream(new BufferedInputStream(fin,
+          DEFAULT_BUFFER_SIZE), digester);
+
+      if (header.hasCodec()) {
+        // Wrap the stream with the codec named in the header; the header
+        // itself is always uncompressed.
+        FSImageCompression compression = FSImageCompression.createCompression(
+            conf, header.getCodec());
+        in = compression.getImageCodec().createInputStream(in);
+      }
+
+      for (FileHeader.Section s : header.getSectionsList()) {
+        String n = s.getName();
+        SectionName sn = SectionName.fromString(n);
+        if (sn == null) {
+          // fromString() returns null for unknown names; guard it so the
+          // switch below cannot throw a NullPointerException.
+          LOG.warn("Unrecognized section " + n);
+          continue;
+        }
+        switch (sn) {
+        case NS_INFO:
+          loadNameSystemSection(in, s);
+          break;
+        default:
+          LOG.warn("Unrecognized section " + n);
+          break;
+        }
+      }
+
+      updateDigestForFileHeader(header, digester);
+
+      imgDigest = new MD5Hash(digester.digest());
+      in.close();
+    }
+
+    private FileHeader loadHeader(InputStream fin) throws IOException {
+      byte[] magic = new byte[MAGIC_HEADER.length];
+      if (fin.read(magic) != magic.length
+          || !Arrays.equals(magic, FSImageFormatProtobuf.MAGIC_HEADER)) {
+        throw new IOException("Unrecognized FSImage");
+      }
+
+      FileHeader header = FileHeader.parseDelimitedFrom(fin);
+      if (header.getOndiskVersion() != FILE_VERSION) {
+        throw new IOException("Unsupported file version "
+            + header.getOndiskVersion());
+      }
+
+      if (!LayoutVersion.supports(Feature.PROTOBUF_FORMAT,
+          header.getLayoutVersion())) {
+        throw new IOException("Unsupported layout version "
+            + header.getLayoutVersion());
+      }
+      return header;
+    }
+
+    private void loadNameSystemSection(InputStream in, FileHeader.Section header)
+        throws IOException {
+      NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
+      fsn.setGenerationStampV1(s.getGenstampV1());
+      fsn.setGenerationStampV2(s.getGenstampV2());
+      fsn.setGenerationStampV1Limit(s.getGenstampV1Limit());
+      fsn.setLastAllocatedBlockId(s.getLastAllocatedBlockId());
+      imgTxId = s.getTransactionId();
+      long offset = header.getLength() - getOndiskTrunkSize(s);
+      Preconditions.checkArgument(offset == 0);
+      in.skip(offset);
+    }
+  }
+
+  static final class Saver {
+    private final SaveNamespaceContext context;
+    private MD5Hash savedDigest;
+
+    Saver(SaveNamespaceContext context) {
+      this.context = context;
+    }
+
+    void save(File file, FSImageCompression compression) throws IOException {
+      FileHeader.Builder b = FileHeader.newBuilder()
+          .setOndiskVersion(FILE_VERSION)
+          .setLayoutVersion(LayoutVersion.getCurrentLayoutVersion())
+          .setDataOffset(PRE_ALLOCATED_HEADER_SIZE);
+      MessageDigest digester = MD5Hash.getDigester();
+      OutputStream out = null;
+      try {
+        FileOutputStream fout = new FileOutputStream(file);
+        FileChannel channel = fout.getChannel();
+
+        channel.position(PRE_ALLOCATED_HEADER_SIZE);
+        out = new DigestOutputStream(new BufferedOutputStream(fout,
+            DEFAULT_BUFFER_SIZE), digester);
+
+        CompressionCodec codec = compression.getImageCodec();
+        if (codec != null) {
+          b.setCodec(codec.getClass().getCanonicalName());
+          out = codec.createOutputStream(out);
+        }
+
+        save(out, b);
+        out.flush();
+        channel.position(0);
+        FileHeader header = b.build();
+        Preconditions.checkState(MAGIC_HEADER.length
+            + getOndiskTrunkSize(header) < PRE_ALLOCATED_HEADER_SIZE,
+            "Insufficient space to write file header");
+        fout.write(MAGIC_HEADER);
+        header.writeDelimitedTo(fout);
+        updateDigestForFileHeader(header, digester);
+        savedDigest = new MD5Hash(digester.digest());
+      } finally {
+        IOUtils.cleanup(LOG, out);
+      }
+    }
+
+    private void save(OutputStream out, FileHeader.Builder headers)
+        throws IOException {
+      final FSNamesystem fsn = context.getSourceNamesystem();
+      FileHeader.Section.Builder sectionHeader = FileHeader.Section
+          .newBuilder().setName(SectionName.NS_INFO.name);
+      NameSystemSection.Builder b = NameSystemSection.newBuilder()
+          .setGenstampV1(fsn.getGenerationStampV1())
+          .setGenstampV1Limit(fsn.getGenerationStampV1Limit())
+          .setGenstampV2(fsn.getGenerationStampV2())
+          .setLastAllocatedBlockId(fsn.getLastAllocatedBlockId())
+          .setTransactionId(context.getTxId());
+
+      // We use the non-locked version of getNamespaceInfo here since
+      // the coordinating thread of saveNamespace already has read-locked
+      // the namespace for us. If we attempt to take another readlock
+      // from the actual saver thread, there's a potential of a
+      // fairness-related deadlock. See the comments on HDFS-2223.
+      b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
+      NameSystemSection s = b.build();
+      s.writeDelimitedTo(out);
+      sectionHeader.setLength(getOndiskTrunkSize(s));
+      headers.addSections(sectionHeader);
+    }
+
+    public MD5Hash getSavedDigest() {
+      return savedDigest;
+    }
+  }
+
+  private static int getOndiskTrunkSize(com.google.protobuf.GeneratedMessage s) {
+    return CodedOutputStream.computeRawVarint32Size(s.getSerializedSize())
+        + s.getSerializedSize();
+  }
+
+  /**
+   * Include the FileHeader when calculating the digest. This is required as the
+   * code does not access the FSImage strictly in sequential order.
+   */
+  private static void updateDigestForFileHeader(FileHeader header,
+      MessageDigest digester) throws IOException {
+    ByteArrayOutputStream o = new ByteArrayOutputStream();
+    o.write(MAGIC_HEADER);
+    header.writeDelimitedTo(o);
+    digester.update(o.toByteArray());
+  }
+
+  private FSImageFormatProtobuf() {
+  }
+
+}
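The subtle part of Saver.save() is its two-pass layout: section data is streamed first, starting at the pre-allocated offset, and the header, whose section lengths are only known afterwards, is written last by seeking back to the start of the file. A minimal sketch of that pattern, with plain byte arrays standing in for the section data and the delimited FileHeader (all names here are illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

public class PreAllocatedHeaderSketch {
  static final byte[] MAGIC = "HDFSIMG1".getBytes();
  static final int PRE_ALLOCATED_HEADER_SIZE = 1024;

  public static void main(String[] args) throws IOException {
    try (FileOutputStream fout = new FileOutputStream("image.tmp")) {
      FileChannel channel = fout.getChannel();

      // Pass 1: skip the reserved header region and write section data,
      // recording each section's length as it is written.
      channel.position(PRE_ALLOCATED_HEADER_SIZE);
      fout.write(new byte[] {42}); // placeholder for section bytes

      // Pass 2: seek back and write magic + header into the reserved gap.
      channel.position(0);
      fout.write(MAGIC);
      fout.write(new byte[] {1}); // placeholder for the delimited header
    }
  }
}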

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java

@@ -27,7 +27,6 @@ import java.util.Map;
 
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
-import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

+ 67 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.server.namenode";
+option java_outer_classname = "FsImageProto";
+
+package hadoop.hdfs.fsimage;
+
+/**
+ * This file defines the on-disk layout of the file system image. The
+ * layout is defined by the following EBNF grammar, in which angle
+ * brackets mark protobuf definitions. (e.g., <FileHeader>)
+ *
+ * FILE := MAGIC <FileHeader> [blank] SECTION*
+ * MAGIC := 'HDFSIMG1'
+ * SECTION := NameSystemSection | ...
+ *
+ * Some notes:
+ *
+ * The codec field in FileHeader describes the compression codec used
+ * for all sections. The FileHeader itself is always uncompressed.
+ *
+ * All protobuf messages are serialized in delimited form: each message
+ * is preceded by a varint recording its serialized size.
+ *
+ **/
+
+message FileHeader {
+  // The on-disk layout version of the file.
+  required uint32 ondiskVersion = 1;
+  // layoutVersion describes which features are available in the
+  // FSImage.
+  required uint32 layoutVersion = 2;
+  optional string codec   = 3;
+  // The offset of the first data section
+  required uint64 dataOffset = 4;
+  message Section {
+    optional string name = 1;
+    optional uint64 length = 2;
+  }
+  repeated Section sections = 5;
+}
+
+message NameSystemSection {
+  optional uint32 namespaceId = 1;
+  optional uint64 genstampV1 = 2;
+  optional uint64 genstampV2 = 3;
+  optional uint64 genstampV1Limit = 4;
+  optional uint64 lastAllocatedBlockId = 5;
+  optional uint64 transactionId = 6;
+}
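Because every message is serialized with writeDelimitedTo, readers can recover message boundaries without any framing beyond the leading varint. A minimal round-trip sketch, assuming the FsImageProto classes generated from this file are on the classpath:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileHeader;

public class DelimitedSketch {
  public static void main(String[] args) throws IOException {
    FileHeader header = FileHeader.newBuilder()
        .setOndiskVersion(1)
        .setLayoutVersion(-50) // negative int, stored as uint32 on the wire
        .setDataOffset(1024)
        .build();

    // writeDelimitedTo prepends a varint holding the serialized size,
    // so the reader knows exactly where the message ends.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    header.writeDelimitedTo(out);

    FileHeader parsed = FileHeader.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));
    System.out.println(parsed.getDataOffset()); // prints 1024
  }
}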

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java

@@ -140,7 +140,7 @@ public class TestFSImageWithSnapshot {
   private File saveFSImageToTempFile() throws IOException {
     SaveNamespaceContext context = new SaveNamespaceContext(fsn, txid,
         new Canceler());
-    FSImageFormat.Saver saver = new FSImageFormat.Saver(context);
+    FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     File imageFile = getImageFile(testDir, txid);
     fsn.readLock();
@@ -154,7 +154,7 @@ public class TestFSImageWithSnapshot {
   
   /** Load the fsimage from a temp file */
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
-    FSImageFormat.Loader loader = new FSImageFormat.Loader(conf, fsn);
+    FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
     fsn.writeLock();
     fsn.getFSDirectory().writeLock();
     try {