@@ -122,6 +122,7 @@ import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -587,6 +588,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private boolean resourceLowSafeMode = false;
   private String nameNodeHostName = null;
 
+  /**
+   * HDFS-14497: Concurrency control when many metaSave requests write
+   * meta to the same output stream after switching to the read lock.
+   */
+  private Object metaSaveLock = new Object();
+
   /**
    * Notify that loading of this FSDirectory is complete, and
    * it is imageLoaded for use
@@ -1757,23 +1764,26 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     String operationName = "metaSave";
     checkSuperuserPrivilege(operationName);
     checkOperation(OperationCategory.READ);
-    writeLock();
+    readLock();
     try {
       checkOperation(OperationCategory.READ);
-      File file = new File(System.getProperty("hadoop.log.dir"), filename);
-      PrintWriter out = new PrintWriter(new BufferedWriter(
-          new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
-      metaSave(out);
-      out.flush();
-      out.close();
+      synchronized(metaSaveLock) {
+        File file = new File(System.getProperty("hadoop.log.dir"), filename);
+        PrintWriter out = new PrintWriter(new BufferedWriter(
+            new OutputStreamWriter(Files.newOutputStream(file.toPath()),
+                Charsets.UTF_8)));
+        metaSave(out);
+        out.flush();
+        out.close();
+      }
     } finally {
-      writeUnlock(operationName);
+      readUnlock(operationName);
     }
     logAuditEvent(true, operationName, null);
   }
 
   private void metaSave(PrintWriter out) {
-    assert hasWriteLock();
+    assert hasReadLock();
     long totalInodes = this.dir.totalInodes();
     long totalBlocks = this.getBlocksTotal();
     out.println(totalInodes + " files and directories, " + totalBlocks
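
A note on the locking pattern above, with a minimal standalone sketch
(illustrative class and field names, not Hadoop code): the namesystem read
lock admits many handler threads at once, so after moving metaSave from
writeLock() to readLock(), two concurrent metaSave calls could write the
same output file simultaneously; the new metaSaveLock mutex serializes those
writers while still letting other read operations proceed.

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class MetaSaveLockDemo {
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();
  // Serializes concurrent metaSave() calls that all hold the read lock.
  private final Object metaSaveLock = new Object();
  // Stand-in for the shared output file in the real patch.
  private final StringWriter sharedSink = new StringWriter();

  public void metaSave(String caller) {
    fsLock.readLock().lock();        // many threads may enter concurrently
    try {
      synchronized (metaSaveLock) {  // but only one writes at a time
        PrintWriter out = new PrintWriter(sharedSink);
        out.println("metaSave output from " + caller);
        out.flush();
      }
    } finally {
      fsLock.readLock().unlock();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    MetaSaveLockDemo demo = new MetaSaveLockDemo();
    Thread t1 = new Thread(() -> demo.metaSave("t1"));
    Thread t2 = new Thread(() -> demo.metaSave("t2"));
    t1.start(); t2.start();
    t1.join(); t2.join();
    System.out.print(demo.sharedSink); // each line intact, never interleaved
  }
}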