Java Code Examples for java.io.DataOutputStream#close()
The following examples show how to use
java.io.DataOutputStream#close() .
These examples are extracted from open source projects.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source Project: ViaRewind File: CompressedNBTType.java License: MIT License | 6 votes |
/**
 * Serializes an NBT compound tag into the buffer: a short length prefix
 * followed by the compressed tag bytes, or -1 when there is no tag.
 *
 * @param buffer destination buffer (receives length prefix + payload)
 * @param nbt    tag to write; null is encoded as length -1
 * @throws Exception if NBT serialization or compression fails
 */
@Override public void write(ByteBuf buffer, CompoundTag nbt) throws Exception {
    if (nbt == null) {
        // -1 tells the reader there is no NBT payload.
        buffer.writeShort(-1);
        return;
    }
    ByteBuf buf = buffer.alloc().buffer();
    byte[] uncompressed;
    try {
        ByteBufOutputStream bytebufStream = new ByteBufOutputStream(buf);
        DataOutputStream dataOutputStream = new DataOutputStream(bytebufStream);
        NBTIO.writeTag((DataOutput) dataOutputStream, nbt);
        dataOutputStream.close();
        uncompressed = new byte[buf.readableBytes()];
        buf.readBytes(uncompressed);
    } finally {
        // FIX: release the scratch buffer even when writeTag throws;
        // previously an exception leaked the allocated ByteBuf.
        buf.release();
    }
    byte[] compressed = compress(uncompressed);
    buffer.writeShort(compressed.length);
    buffer.writeBytes(compressed);
}
Example 2
Source Project: big-c File: LeveldbRMStateStore.java License: Apache License 2.0 | 6 votes |
/**
 * Serializes the delegation-token master key and stores it in leveldb under
 * its RMDT master-key node key.
 *
 * @param masterKey key to persist
 * @throws IOException if serialization or the leveldb write fails
 */
@Override
protected void storeRMDTMasterKeyState(DelegationKey masterKey) throws IOException {
    String dbKey = getRMDTMasterKeyNodeKey(masterKey);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Storing token master key to " + dbKey);
    }
    // Serialize the key into an in-memory buffer first.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (DataOutputStream keyOut = new DataOutputStream(buffer)) {
        masterKey.write(keyOut);
    }
    // Then write the serialized bytes to leveldb, translating DB errors.
    try {
        db.put(bytes(dbKey), buffer.toByteArray());
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Example 3
Source Project: APICloud-Studio File: ParserGenerator.java License: GNU General Public License v3.0 | 6 votes |
static private ByteArrayOutputStream serializeParsingTables(ParsingTables tables, int[] rule_descr, NonTerminal error) throws IOException { ByteArrayOutputStream bytes_stream = new ByteArrayOutputStream(16384); DataOutputStream data_stream = new DataOutputStream(new DeflaterOutputStream(bytes_stream)); tables.writeTo(data_stream); data_stream.writeInt(rule_descr.length); for (int i = 0; i < rule_descr.length; i++) { data_stream.writeInt(rule_descr[i]); } data_stream.writeShort(error.id); data_stream.close(); return bytes_stream; }
Example 4
Source Project: EosCommander File: PRNGFixes.java License: MIT License | 6 votes |
/**
 * Generates a device- and invocation-specific seed to be mixed into the
 * Linux PRNG.
 *
 * @return seed bytes: current millis, nanos, pid, uid, then the device
 *         fingerprint/serial blob
 * @throws SecurityException if seed assembly fails
 */
private static byte[] generateSeed() {
    try {
        ByteArrayOutputStream seedBytes = new ByteArrayOutputStream();
        DataOutputStream seedOut = new DataOutputStream(seedBytes);
        // Time sources make each invocation unique.
        seedOut.writeLong(System.currentTimeMillis());
        seedOut.writeLong(System.nanoTime());
        // Process identity distinguishes concurrent apps.
        seedOut.writeInt(Process.myPid());
        seedOut.writeInt(Process.myUid());
        // Device identity distinguishes physical devices.
        seedOut.write(BUILD_FINGERPRINT_AND_DEVICE_SERIAL);
        seedOut.close();
        return seedBytes.toByteArray();
    } catch (IOException e) {
        throw new SecurityException("Failed to generate seed", e);
    }
}
Example 5
Source Project: incubator-pinot File: AggregationPhaseMapOutputKey.java License: Apache License 2.0 | 6 votes |
/** * Converts AggregationPhaseMapOutputKey to bytes buffer * @return * @throws IOException */ public byte[] toBytes() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); // time dos.writeLong(time); // dimensions size dos.writeInt(dimensionValues.size()); // dimension values for (int i = 0; i < dimensionValues.size(); i++) { Object dimensionValue = dimensionValues.get(i); DimensionType dimensionType = dimensionTypes.get(i); DimensionType.writeDimensionValueToOutputStream(dos, dimensionValue, dimensionType); } baos.close(); dos.close(); return baos.toByteArray(); }
Example 6
Source Project: netbeans File: GoToTypeAction.java License: Apache License 2.0 | 6 votes |
// Stops the self-profiling session (if one is running) and, when a snapshot
// was produced, logs it to the performance logger for slowness analysis.
private synchronized void stop() throws Exception {
    long delta = System.currentTimeMillis() - time;
    // Capture and clear the shared profiler field before doing any work so a
    // concurrent caller cannot stop the same sampler twice.
    Sampler ss = profiler;
    profiler = null;
    if (!profiling) {
        return;
    }
    try {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(out);
        ss.stopAndWriteTo(dos);
        dos.close();
        // A non-empty snapshot means the sampler recorded something worth
        // reporting; attach the raw bytes and elapsed time as log params.
        if (dos.size() > 0) {
            Object[] params = new Object[]{out.toByteArray(), delta, "GoToType" }; //NOI18N
            Logger.getLogger("org.netbeans.ui.performance").log(Level.CONFIG, "Slowness detected", params); //NOI18N
        } else {
            LOGGER.log(Level.WARNING, "no snapshot taken"); // NOI18N
        }
    } catch (Exception ex) {
        // Best effort: a failed snapshot must not break the action itself.
        Exceptions.printStackTrace(ex);
    }
}
Example 7
Source Project: netbeans File: WebServiceLibReferenceHelper.java License: Apache License 2.0 | 6 votes |
/**
 * Copies the jar at {@code srcPath} into {@code destJar}, holding the
 * destination's file lock for the duration of the copy.
 *
 * @param srcPath path of the source jar on disk
 * @param destJar destination file object (locked, overwritten)
 * @throws IOException if reading or writing fails
 */
private static void copyJarFile(String srcPath, FileObject destJar) throws IOException {
    FileLock fileLock = destJar.lock();
    // FIX: try-with-resources — previously the streams were closed only on the
    // success path and leaked if any read/write threw.
    try (DataInputStream in = new DataInputStream(new FileInputStream(new File(srcPath)));
         DataOutputStream out = new DataOutputStream(destJar.getOutputStream(fileLock))) {
        byte[] bytes = new byte[1024];
        int byteCount = in.read(bytes);
        while (byteCount > -1) {
            out.write(bytes, 0, byteCount);
            byteCount = in.read(bytes);
        }
        out.flush();
    } finally {
        fileLock.releaseLock();
    }
}
Example 8
Source Project: big-c File: AggregatedLogFormat.java License: Apache License 2.0 | 5 votes |
/**
 * Appends the application-owner record to the aggregated log: first the
 * owner key, then the user name as a UTF value.
 *
 * @param user application owner to record
 * @throws IOException if either append fails
 */
public void writeApplicationOwner(String user) throws IOException {
    DataOutputStream out = this.writer.prepareAppendKey(-1);
    try {
        APPLICATION_OWNER_KEY.write(out);
    } finally {
        // FIX: close in finally so the key stream isn't leaked when write throws.
        out.close();
    }
    out = this.writer.prepareAppendValue(-1);
    try {
        out.writeUTF(user);
    } finally {
        out.close();
    }
}
Example 9
Source Project: ProRecipes File: Metrics.java License: GNU General Public License v2.0 | 5 votes |
/** * Sends the data to the bStats server. * * @param data The data to send. * @throws Exception If the request failed. */ private static void sendData(JSONObject data) throws Exception { if (data == null) { throw new IllegalArgumentException("Data cannot be null!"); } if (Bukkit.isPrimaryThread()) { throw new IllegalAccessException("This method must not be called from the main thread!"); } HttpsURLConnection connection = (HttpsURLConnection) new URL(URL).openConnection(); // Compress the data to save bandwidth byte[] compressedData = compress(data.toString()); // Add headers connection.setRequestMethod("POST"); connection.addRequestProperty("Accept", "application/json"); connection.addRequestProperty("Connection", "close"); connection.addRequestProperty("Content-Encoding", "gzip"); // We gzip our request connection.addRequestProperty("Content-Length", String.valueOf(compressedData.length)); connection.setRequestProperty("Content-Type", "application/json"); // We send our data in JSON format connection.setRequestProperty("User-Agent", "MC-Server/" + B_STATS_VERSION); // Send data connection.setDoOutput(true); DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream()); outputStream.write(compressedData); outputStream.flush(); outputStream.close(); connection.getInputStream().close(); // We don't care about the response - Just send our data :) }
Example 10
Source Project: hadoop File: TestLease.java License: Apache License 2.0 | 5 votes |
// Verifies the HDFS lease lifecycle: a lease exists on a path exactly while a
// client holds it open for write, and is released when the stream is closed.
@Test
public void testLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        Assert.assertTrue(fs.mkdirs(dir));
        Path a = new Path(dir, "a");
        Path b = new Path(dir, "b");
        // Creating "a" acquires a lease for it only — "b" is still unleased.
        DataOutputStream a_out = fs.create(a);
        a_out.writeBytes("something");
        Assert.assertTrue(hasLease(cluster, a));
        Assert.assertTrue(!hasLease(cluster, b));
        // Creating "b" as well means both paths now carry a lease.
        DataOutputStream b_out = fs.create(b);
        b_out.writeBytes("something");
        Assert.assertTrue(hasLease(cluster, a));
        Assert.assertTrue(hasLease(cluster, b));
        // Closing the writers releases both leases.
        a_out.close();
        b_out.close();
        Assert.assertTrue(!hasLease(cluster, a));
        Assert.assertTrue(!hasLease(cluster, b));
        fs.delete(dir, true);
    } finally {
        if (cluster != null) {cluster.shutdown();}
    }
}
Example 11
Source Project: CodenameOne File: BlackBerryImplementation.java License: GNU General Public License v2.0 | 5 votes |
/**
 * Encodes a record entry as a modified-UTF name followed by a 16-bit key.
 *
 * @param name record name, written via writeUTF (length-prefixed)
 * @param key  record key, written as a short
 * @return the serialized record bytes
 * @throws IOException never for an in-memory buffer, declared for the API
 */
private byte[] toRecord(String name, short key) throws IOException {
    ByteArrayOutputStream recordBytes = new ByteArrayOutputStream();
    DataOutputStream recordOut = new DataOutputStream(recordBytes);
    recordOut.writeUTF(name);
    recordOut.writeShort(key);
    recordOut.close();
    recordBytes.close();
    return recordBytes.toByteArray();
}
Example 12
Source Project: SimplifyReader File: HurlStack.java License: Apache License 2.0 | 5 votes |
/**
 * Writes the request body (if the request has one) to the connection,
 * setting the content-type header and enabling output first.
 *
 * @param connection open HTTP connection to write to
 * @param request    request whose body (possibly null) should be sent
 * @throws IOException       if writing the body fails
 * @throws AuthFailureError  if the request cannot produce its body
 */
private static void addBodyIfExists(HttpURLConnection connection, Request<?> request) throws IOException, AuthFailureError {
    byte[] body = request.getBody();
    if (body != null) {
        connection.setDoOutput(true);
        connection.addRequestProperty(HEADER_CONTENT_TYPE, request.getBodyContentType());
        DataOutputStream out = new DataOutputStream(connection.getOutputStream());
        try {
            out.write(body);
        } finally {
            // FIX: close in finally — the stream was leaked if write threw.
            out.close();
        }
    }
}
Example 13
Source Project: QualityArmory File: Metrics.java License: GNU General Public License v3.0 | 5 votes |
/** * Sends the data to the bStats server. * * @param data The data to send. * @throws Exception If the request failed. */ private static void sendData(JSONObject data) throws Exception { if (data == null) { throw new IllegalArgumentException("Data cannot be null!"); } if (Bukkit.isPrimaryThread()) { throw new IllegalAccessException("This method must not be called from the main thread!"); } HttpsURLConnection connection = (HttpsURLConnection) new URL(URL).openConnection(); // Compress the data to save bandwidth byte[] compressedData = compress(data.toString()); // Add headers connection.setRequestMethod("POST"); connection.addRequestProperty("Accept", "application/json"); connection.addRequestProperty("Connection", "close"); connection.addRequestProperty("Content-Encoding", "gzip"); // We gzip our request connection.addRequestProperty("Content-Length", String.valueOf(compressedData.length)); connection.setRequestProperty("Content-Type", "application/json"); // We send our data in JSON format connection.setRequestProperty("User-Agent", "MC-Server/" + B_STATS_VERSION); // Send data connection.setDoOutput(true); DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream()); outputStream.write(compressedData); outputStream.flush(); outputStream.close(); connection.getInputStream().close(); // We don't care about the response - Just send our data :) }
Example 14
Source Project: shortyz File: UclickDownloader.java License: GNU General Public License v3.0 | 5 votes |
/**
 * Downloads the puzzle for the given date and converts it from uclick XML
 * into Across Lite format.
 *
 * @param date puzzle date
 * @return the converted file, or null if it already exists, the download
 *         failed, or conversion failed
 */
public File download(Date date) {
    File downloadTo = new File(this.downloadDirectory, this.createFileName(date));
    if (downloadTo.exists()) {
        return null;
    }
    File plainText = downloadToTempFile(date);
    if (plainText == null) {
        return null;
    }
    try {
        LOG.log(Level.INFO, "TMP FILE "+plainText.getAbsolutePath());
        boolean retVal;
        // FIX: try-with-resources — streams were leaked when conversion threw.
        try (InputStream is = new FileInputStream(plainText);
             DataOutputStream os = new DataOutputStream(new FileOutputStream(downloadTo))) {
            retVal = UclickXMLIO.convertUclickPuzzle(is, os,
                    "\u00a9 " + (date.getYear() + 1900) + " " + copyright, date);
        }
        plainText.delete();
        if (!retVal) {
            LOG.log(Level.SEVERE, "Unable to convert uclick XML puzzle into Across Lite format.");
            downloadTo.delete();
            downloadTo = null;
        }
    } catch (IOException ioe) {
        LOG.log(Level.SEVERE, "Exception converting uclick XML puzzle into Across Lite format.", ioe);
        downloadTo.delete();
        downloadTo = null;
    }
    return downloadTo;
}
Example 15
Source Project: terracotta-platform File: ProxyMessageCodec.java License: Apache License 2.0 | 5 votes |
// Encodes a proxy entity message as: message-type byte, method-identifier
// byte, then the codec-encoded argument payload.
@Override
public byte[] encodeMessage(ProxyEntityMessage message) throws MessageCodecException {
    try {
        MessageType messageType = message.getType();
        MethodDescriptor method = message.getMethod();
        Byte methodIdentifier = getMethodIdentifier(message);
        Object[] args = message.getArguments();
        // Blank out any argument annotated @ClientId before encoding.
        // NOTE: this mutates the args array held by the message in place —
        // the nulled slots are visible to the caller afterwards.
        final Annotation[][] parameterAnnotations = method.getParameterAnnotations();
        for (int i = 0, parameterAnnotationsLength = parameterAnnotations.length; i < parameterAnnotationsLength; i++) {
            final Annotation[] parameterAnnotation = parameterAnnotations[i];
            for (Annotation annotation : parameterAnnotation) {
                if (annotation.annotationType() == ClientId.class) {
                    args[i] = null;
                }
            }
        }
        ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(byteOut);
        output.writeByte(messageType.ordinal()); // first, message type
        output.writeByte(methodIdentifier); // then method mapping
        output.write(codec.encode(method.getParameterTypes(), args));
        output.close();
        return byteOut.toByteArray();
    } catch (Exception ex) {
        // Any failure (reflection, codec, I/O) is surfaced as a codec error.
        throw new MessageCodecException("Error encoding ProxyEntityMessage", ex);
    }
}
Example 16
Source Project: birt File: PreparedIVDataSourceQuery.java License: Eclipse Public License 1.0 | 4 votes |
/**
 * Saves the PLS-filtered data set back into the report document streams so
 * later viewing sessions can read the reduced data directly.
 *
 * @param eventHandler event handler supplying column bindings for the save
 * @param manager stream manager owning the document output streams
 * @throws DataException if populating or caching the result set fails
 * @throws IOException if writing any document stream fails
 */
private void populatePLSDataSetData( IEventHandler eventHandler,
		StreamManager manager ) throws DataException, IOException
{
	org.eclipse.birt.data.engine.impl.document.ResultIterator docIt;
	// NOTE(review): ResultIterator2 appears to be the summary-query variant
	// (it also takes the group count) — confirm against the document
	// iterator hierarchy.
	if( !queryDefn.isSummaryQuery( ) )
	{
		docIt = new org.eclipse.birt.data.engine.impl.document.ResultIterator( engine.getSession( )
				.getTempDir( ),
				getEngineContext( ),
				null,
				queryDefn.getQueryResultsID( ),
				queryDefn );
	}
	else
	{
		docIt = new org.eclipse.birt.data.engine.impl.document.ResultIterator2( engine.getSession( )
				.getTempDir( ),
				getEngineContext( ),
				null,
				queryDefn.getQueryResultsID( ),
				queryDefn.getGroups( ).size( ),
				queryDefn.isSummaryQuery( ),
				queryDefn );
	}
	// Replays only the target group instances named by the execution hints.
	PLSEnabledDataSetPopulator populator = new PLSEnabledDataSetPopulator( queryDefn,
			queryDefn.getQueryExecutionHints( )
					.getTargetGroupInstances( ),
			docIt );
	ResultClass processedRC = (ResultClass) populateResultClass( populator.getResultClass( ) );
	SmartCache cache = new SmartCache( new CacheRequest( 0,
			new ArrayList( ),
			null,
			eventHandler ),
			new OdiAdapter( populator ),
			processedRC,
			engine.getSession( ) );
	// Drop the stale data set streams before re-writing them below.
	manager.dropStream1( DataEngineContext.DATASET_DATA_STREAM );
	manager.dropStream1( DataEngineContext.DATASET_DATA_LEN_STREAM );
	cleanUpOldRD();
	// Persist the processed result metadata first.
	OutputStream resultClassStream = manager.getOutStream( DataEngineContext.DATASET_META_STREAM,
			StreamManager.ROOT_STREAM,
			StreamManager.SELF_SCOPE );
	processedRC.doSave( resultClassStream,
			new ArrayList( queryDefn.getBindings( ).values( ) ),
			manager.getVersion( ) );
	resultClassStream.close( );
	// Then the data rows and the per-row length index.
	DataOutputStream dataSetDataStream = new DataOutputStream( manager.getOutStream( DataEngineContext.DATASET_DATA_STREAM,
			StreamManager.ROOT_STREAM,
			StreamManager.SELF_SCOPE ) );
	DataOutputStream rowLensStream = new DataOutputStream( manager.getOutStream( DataEngineContext.DATASET_DATA_LEN_STREAM,
			StreamManager.ROOT_STREAM,
			StreamManager.SELF_SCOPE ) );
	cache.doSave( dataSetDataStream,
			rowLensStream,
			null,
			new HashMap(),
			eventHandler.getAllColumnBindings( ),
			manager.getVersion( ),
			null,
			//Row id should keep unchanged even if some data are removed
			//So we have to save the original row id.
			true);
	dataSetDataStream.flush( );
	cache.close( );
	// Finally record the outermost PLS group level plus a flag noting that
	// per-row ids were saved with the data.
	DataOutputStream plsGroupLevelStream = new DataOutputStream( manager.getOutStream( DataEngineContext.PLS_GROUPLEVEL_STREAM,
			StreamManager.ROOT_STREAM,
			StreamManager.SELF_SCOPE ) );
	IOUtil.writeInt( plsGroupLevelStream, PLSUtil.getOutmostPlsGroupLevel( queryDefn ) );
	//Write a flag to indicate that the row id is saved for each row.
	IOUtil.writeBool( plsGroupLevelStream, true );
	plsGroupLevelStream.close( );
}
Example 17
Source Project: rscplus File: Replay.java License: GNU General Public License v3.0 | 4 votes |
// Opens all replay-recording output files for the logged-in user under a
// timestamped directory and marks recording as started. On any failure the
// stream fields are reset to null and recording is not started.
public static void initializeReplayRecording() {
    // No username specified, exit
    if (Client.username_login.length() == 0) return;

    String timeStamp = new SimpleDateFormat("MM-dd-yyyy HH.mm.ss").format(new Date());

    // Per-user directory, then a per-session timestamped subdirectory.
    String recordingDirectory = Settings.Dir.REPLAY + "/" + Client.username_login;
    Util.makeDirectory(recordingDirectory);

    recordingDirectory = recordingDirectory + "/" + timeStamp;
    Util.makeDirectory(recordingDirectory);

    try {
        // Write out version information
        DataOutputStream version =
            new DataOutputStream(
                new BufferedOutputStream(
                    new FileOutputStream(new File(recordingDirectory + "/version.bin"))));
        version.writeInt(Replay.VERSION);
        version.writeInt(Client.version);
        version.close();

        // Network traffic in both directions is gzip-compressed.
        output =
            new DataOutputStream(
                new BufferedOutputStream(
                    new FlushableGZIPOutputStream(
                        new FileOutputStream(new File(recordingDirectory + "/out.bin.gz")))));
        input =
            new DataOutputStream(
                new BufferedOutputStream(
                    new FlushableGZIPOutputStream(
                        new FileOutputStream(new File(recordingDirectory + "/in.bin.gz")))));
        keys =
            new DataOutputStream(
                new BufferedOutputStream(
                    new FileOutputStream(new File(recordingDirectory + "/keys.bin"))));
        // Keyboard/mouse capture is optional and controlled by settings.
        if (Settings.RECORD_KB_MOUSE.get(Settings.currentProfile)) {
            keyboard =
                new DataOutputStream(
                    new BufferedOutputStream(
                        new FlushableGZIPOutputStream(
                            new FileOutputStream(new File(recordingDirectory + "/keyboard.bin.gz")))));
            mouse =
                new DataOutputStream(
                    new BufferedOutputStream(
                        new FlushableGZIPOutputStream(
                            new FileOutputStream(new File(recordingDirectory + "/mouse.bin.gz")))));
            started_record_kb_mouse =
                true; // need this to know whether or not to close the file if the user changes settings
                      // mid-recording
        } else {
            started_record_kb_mouse = false;
        }
        metadata =
            new DataOutputStream(
                new BufferedOutputStream(
                    new FileOutputStream(new File(recordingDirectory + "/metadata.bin"))));
        // Checksums let playback verify stream integrity.
        output_checksum = MessageDigest.getInstance("SHA-256");
        input_checksum = MessageDigest.getInstance("SHA-256");
        Logger.Info("Replay recording started");
    } catch (Exception e) {
        // Reset all stream fields so a partial failure never looks like an
        // active recording.
        output = null;
        input = null;
        keys = null;
        keyboard = null;
        mouse = null;
        Logger.Error("Unable to create replay files");
        return;
    }
    retained_timestamp = TIMESTAMP_EOF;
    retained_bytes = null;
    isRecording = true;
}
Example 18
Source Project: NametagEdit File: Metrics.java License: GNU General Public License v3.0 | 4 votes |
/** * Sends the data to the bStats server. * * @param plugin Any plugin. It's just used to get a logger instance. * @param data The data to send. * @throws Exception If the request failed. */ private static void sendData(Plugin plugin, JSONObject data) throws Exception { if (data == null) { throw new IllegalArgumentException("Data cannot be null!"); } if (Bukkit.isPrimaryThread()) { throw new IllegalAccessException("This method must not be called from the main thread!"); } if (logSentData) { plugin.getLogger().info("Sending data to bStats: " + data.toString()); } HttpsURLConnection connection = (HttpsURLConnection) new URL(URL).openConnection(); // Compress the data to save bandwidth byte[] compressedData = compress(data.toString()); // Add headers connection.setRequestMethod("POST"); connection.addRequestProperty("Accept", "application/json"); connection.addRequestProperty("Connection", "close"); connection.addRequestProperty("Content-Encoding", "gzip"); // We gzip our request connection.addRequestProperty("Content-Length", String.valueOf(compressedData.length)); connection.setRequestProperty("Content-Type", "application/json"); // We send our data in JSON format connection.setRequestProperty("User-Agent", "MC-Server/" + B_STATS_VERSION); // Send data connection.setDoOutput(true); DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream()); outputStream.write(compressedData); outputStream.flush(); outputStream.close(); InputStream inputStream = connection.getInputStream(); BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream)); StringBuilder builder = new StringBuilder(); String line; while ((line = bufferedReader.readLine()) != null) { builder.append(line); } bufferedReader.close(); if (logResponseStatusText) { plugin.getLogger().info("Sent data to bStats and received response: " + builder.toString()); } }
Example 19
Source Project: AtlasForAndroid File: BundleArchiveRevision.java License: MIT License | 4 votes |
/**
 * Persists this revision's location into the "meta" file under the revision
 * directory, guarded by an exclusive file lock.
 *
 * The original body was decompiler output with three duplicated
 * unlock/close paths; this consolidates them while preserving the contract:
 * IOExceptions are rethrown wrapped as "Could not save meta data ...", a
 * failed close is only printed, and the lock is always released.
 *
 * @throws IOException if the metadata cannot be written
 */
void updateMetadata() throws IOException {
    File file = new File(this.revisionDir, "meta");
    if (!file.getParentFile().exists()) {
        file.getParentFile().mkdirs();
    }
    if (!AtlasFileLock.getInstance().LockExclusive(file)) {
        // Could not acquire the lock: report and bail without writing.
        log.error("Failed to get fileLock for " + file.getAbsolutePath());
        AtlasFileLock.getInstance().unLock(file);
        return;
    }
    DataOutputStream dataOutputStream = null;
    try {
        dataOutputStream = new DataOutputStream(new FileOutputStream(file));
        dataOutputStream.writeUTF(this.revisionLocation);
        dataOutputStream.flush();
    } catch (IOException e) {
        throw new IOException("Could not save meta data " + file.getAbsolutePath(), e);
    } finally {
        AtlasFileLock.getInstance().unLock(file);
        if (dataOutputStream != null) {
            try {
                dataOutputStream.close();
            } catch (IOException e) {
                // Best effort: a failed close must not mask the write result.
                e.printStackTrace();
            }
        }
    }
}
Example 20
Source Project: hadoop File: FSImageFormat.java License: Apache License 2.0 | 4 votes |
// Writes a complete fsimage checkpoint to newFile: an uncompressed header
// (layout version, namespace info, block-id state, txid), then the inode
// tree and remaining sections through a possibly-compressed stream. Tracks
// startup progress and records the image's MD5 digest on success.
void save(File newFile, FSImageCompression compression) throws IOException {
    checkNotSaved();

    final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
    final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
    final long numINodes = rootDir.getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getNameSpace();
    String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
    Step step = new Step(StepType.INODES, sdPath);
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    long startTime = monotonicNow();
    //
    // Write out data
    //
    // Everything written through fos is folded into the MD5 digest.
    MessageDigest digester = MD5Hash.getDigester();
    FileOutputStream fout = new FileOutputStream(newFile);
    DigestOutputStream fos = new DigestOutputStream(fout, digester);
    DataOutputStream out = new DataOutputStream(fos);
    try {
        out.writeInt(LAYOUT_VERSION);
        LayoutFlags.write(out);
        // We use the non-locked version of getNamespaceInfo here since
        // the coordinating thread of saveNamespace already has read-locked
        // the namespace for us. If we attempt to take another readlock
        // from the actual saver thread, there's a potential of a
        // fairness-related deadlock. See the comments on HDFS-2223.
        out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
            .getNamespaceID());
        out.writeLong(numINodes);
        out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
        out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
        out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
        out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
        out.writeLong(context.getTxId());
        out.writeLong(sourceNamesystem.dir.getLastInodeId());

        sourceNamesystem.getSnapshotManager().write(out);

        // write compression info and set up compressed stream
        // NOTE: `out` is deliberately re-bound here — everything after this
        // point goes through the (possibly compressed) wrapper stream.
        out = compression.writeHeaderAndWrapStream(fos);
        LOG.info("Saving image file " + newFile + " using " + compression);

        // save the root
        saveINode2Image(rootDir, out, false, referenceMap, counter);
        // save the rest of the nodes
        saveImage(rootDir, out, true, false, counter);
        prog.endStep(Phase.SAVING_CHECKPOINT, step);
        // Now that the step is finished, set counter equal to total to adjust
        // for possible under-counting due to reference inodes.
        prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
        // save files under construction
        // TODO: for HDFS-5428, since we cannot break the compatibility of
        // fsimage, we store part of the under-construction files that are only
        // in snapshots in this "under-construction-file" section. As a
        // temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
        // paths, so that when loading fsimage we do not put them into the lease
        // map. In the future, we can remove this hack when we can bump the
        // layout version.
        sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);

        context.checkCancelled();
        sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
        context.checkCancelled();
        sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
        context.checkCancelled();
        out.flush();
        context.checkCancelled();
        // Force file contents to disk before declaring the checkpoint saved.
        fout.getChannel().force(true);
    } finally {
        out.close();
    }

    saved = true;
    // set md5 of the saved image
    savedDigest = new MD5Hash(digester.digest());

    LOG.info("Image file " + newFile + " of size " + newFile.length()
        + " bytes saved in " + (monotonicNow() - startTime) / 1000
        + " seconds.");
}