org.apache.hadoop.io.MultipleIOException Java Examples
The following examples show how to use org.apache.hadoop.io.MultipleIOException, Hadoop's utility for collecting several IOExceptions and rethrowing them as a single exception. Each example notes the project and source file it comes from.
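All of the examples below share one idiom: attempt every operation in a batch, collect each IOException instead of failing fast, and finally rethrow the collected failures as one exception via MultipleIOException.createIOException(exceptions). Here is a minimal, self-contained sketch of that idiom; the class and resource names are invented for illustration, but the MultipleIOException calls match the API used in the examples below.

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class CloseAllSketch {

  /** Close every resource, collecting failures instead of stopping at the first one. */
  static void closeAll(List<Closeable> resources) throws IOException {
    List<IOException> exceptions = new ArrayList<>();
    for (Closeable resource : resources) {
      try {
        resource.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
    if (!exceptions.isEmpty()) {
      // With a single element this returns that exception as-is; with more,
      // it wraps the whole list in one MultipleIOException.
      throw MultipleIOException.createIOException(exceptions);
    }
  }

  public static void main(String[] args) {
    // Two resources whose close() always fails, to force aggregation.
    List<Closeable> resources = Arrays.asList(
        () -> { throw new IOException("first failure"); },
        () -> { throw new IOException("second failure"); });
    try {
      closeAll(resources);
    } catch (MultipleIOException e) {
      // Unwrap the individual causes, as OfflineMetaRepair does in the last example.
      for (IOException cause : e.getExceptions()) {
        System.err.println("close failed: " + cause.getMessage());
      }
    } catch (IOException e) {
      System.err.println("close failed: " + e.getMessage());
    }
  }
}

Because createIOException hands back the lone exception unwrapped when the list holds exactly one entry, callers should be prepared to catch a plain IOException as well as a MultipleIOException.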
Example #1
Source File: DFSClientCache.java From hadoop with Apache License 2.0
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  ConcurrentMap<String, DFSClient> map = clientCache.asMap();

  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example #2
Source File: BoundedRecoveredEditsOutputSink.java From hbase with Apache License 2.0
@Override
public void append(EntryBuffers.RegionEntryBuffer buffer) throws IOException {
  List<WAL.Entry> entries = buffer.entryBuffer;
  if (entries.isEmpty()) {
    LOG.warn("got an empty buffer, skipping");
    return;
  }
  // The key point is to create a new writer, write the edits, then close the writer.
  RecoveredEditsWriter writer = createRecoveredEditsWriter(buffer.tableName,
    buffer.encodedRegionName, entries.get(0).getKey().getSequenceId());
  if (writer != null) {
    openingWritersNum.incrementAndGet();
    writer.writeRegionEntries(entries);
    regionEditsWrittenMap.compute(Bytes.toString(buffer.encodedRegionName),
      (k, v) -> v == null ? writer.editsWritten : v + writer.editsWritten);
    List<IOException> thrown = new ArrayList<>();
    Path dst = closeRecoveredEditsWriter(writer, thrown);
    splits.add(dst);
    openingWritersNum.decrementAndGet();
    if (!thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
Example #3
Source File: HFileArchiver.java From hbase with Apache License 2.0
/**
 * Just do a simple delete of the given store files.
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. Every file is attempted before the
 *           exception is thrown, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting files without archiving.");
  List<IOException> errors = new ArrayList<>(0);
  for (HStoreFile hsf : compactedFiles) {
    try {
      hsf.deleteStoreFile();
    } catch (IOException e) {
      LOG.error("Failed to delete {}", hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
Example #4
Source File: FileSystem.java From hadoop-gpu with Apache License 2.0
synchronized void closeAll() throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();
  while (!map.isEmpty()) {
    Map.Entry<Key, FileSystem> e = map.entrySet().iterator().next();
    final Key key = e.getKey();
    final FileSystem fs = e.getValue();

    // remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example #5
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
      .when(mockVolume).getVolumeMap(
          anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
    // expected: adding the volume fails with an aggregated exception
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Example #6
Source File: FileSystem.java From hadoop with Apache License 2.0
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    // remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example #7
Source File: DremioFileSystemCache.java From dremio-oss with Apache License 2.0
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
public synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    // remove from cache
    map.remove(key);
    toAutoClose.remove(key);

    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example #8
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
/**
 * Wait till all the recursive tasks for adding replicas to the volume have completed.
 *
 * @param subTaskQueue {@link AddReplicaProcessor} tasks list.
 * @param exceptions exceptions that occurred in sub tasks.
 * @throws IOException thrown if any sub task, or multiple sub tasks, failed.
 */
private void waitForSubTaskToFinish(Queue<RecursiveAction> subTaskQueue,
    List<IOException> exceptions) throws IOException {
  while (!subTaskQueue.isEmpty()) {
    RecursiveAction task = subTaskQueue.poll();
    if (task != null) {
      task.join();
    }
  }
  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example #9
Source File: OfflineMetaRepair.java From hbase-operator-tools with Apache License 2.0
public static void main(String[] args) throws Exception {
  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
  HBaseFsck fsck = new HBaseFsck(conf);

  // Process command-line args.
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    if (cmd.equals("-details")) {
      HBaseFsck.setDisplayFullReport();
    } else if (cmd.equals("-base")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
        printUsageAndExit();
      }
      // update hbase root dir to user-specified base
      i++;
      CommonFSUtils.setRootDir(conf, new Path(args[i]));
      CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
    } else if (cmd.equals("-sidelineDir")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
        printUsageAndExit();
      }
      // set the hbck sideline dir to user-specified one
      i++;
      fsck.setSidelineDir(args[i]);
    } else {
      String str = "Unknown command line option : " + cmd;
      LOG.info(str);
      System.out.println(str);
      printUsageAndExit();
    }
  }
  System.out.println("OfflineMetaRepair command line options: " + String.join(" ", args));

  // Fsck doesn't shutdown and doesn't provide a way to shutdown its
  // threads cleanly, so we do a System.exit.
  boolean success = false;
  try {
    success = fsck.rebuildMeta();
  } catch (MultipleIOException mioes) {
    for (IOException ioe : mioes.getExceptions()) {
      LOG.error("Bailed out due to:", ioe);
    }
  } catch (Exception e) {
    LOG.error("Bailed out due to: ", e);
  } finally {
    System.exit(success ? 0 : 1);
  }
}