Java Code Examples for com.google.common.io.Closer#rethrow()

The following examples show how to use com.google.common.io.Closer#rethrow(). Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
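All of the examples follow one idiom, which predates Java 7's try-with-resources: resources are registered with a Closer, any Throwable thrown from the try block is funneled through closer.rethrow(t), which records it as the primary exception so that a failure in close() does not mask it, and closer.close() runs in the finally block. Below is a minimal sketch of that idiom; the readFirstLine method and its path argument are illustrative placeholders, not taken from any of the projects that follow.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

import com.google.common.io.Closer;

public class CloserSketch {

  // Canonical Closer#rethrow() usage: register resources, catch Throwable,
  // rethrow through the closer, and close in finally.
  static String readFirstLine(String path) throws IOException {
    Closer closer = Closer.create();
    try {
      BufferedReader reader = closer.register(new BufferedReader(new FileReader(path)));
      return reader.readLine();
    } catch (Throwable t) {
      // rethrow() stores t as the primary exception and throws it; its
      // RuntimeException return type lets the compiler treat
      // "throw closer.rethrow(t)" as terminating the block.
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }
}

Catching Throwable rather than IOException is deliberate: rethrow() accepts any Throwable, and catching everything guarantees the Closer sees the primary exception before close() runs.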
Example 1
Source File: Task.java    From incubator-gobblin with Apache License 2.0
private void publishTaskData()
    throws IOException {
  Closer closer = Closer.create();
  try {
    Class<? extends DataPublisher> dataPublisherClass = getTaskPublisherClass();
    SingleTaskDataPublisher publisher =
        closer.register(SingleTaskDataPublisher.getInstance(dataPublisherClass, this.taskState));

    LOG.info("Publishing data from task " + this.taskId);
    publisher.publish(this.taskState);
  } catch (ClassCastException e) {
    LOG.error(String.format("To publish data in task, the publisher class must extend %s",
        SingleTaskDataPublisher.class.getSimpleName()), e);
    this.taskState.setTaskFailureException(e);
    throw closer.rethrow(e);
  } catch (Throwable t) {
    this.taskState.setTaskFailureException(t);
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 2
Source File: JavaWriter.java    From dagger2-sample with Apache License 2.0
public void file(Filer filer, CharSequence name, Iterable<? extends Element> originatingElements)
    throws IOException {
  JavaFileObject sourceFile = filer.createSourceFile(name,
      Iterables.toArray(originatingElements, Element.class));
  Closer closer = Closer.create();
  try {
    write(closer.register(sourceFile.openWriter()));
  } catch (Exception e) {
    try {
      sourceFile.delete();
    } catch (Exception e2) {
      // couldn't delete the file
    }
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}
 
Example 3
Source File: YarnService.java    From incubator-gobblin with Apache License 2.0
private ByteBuffer getSecurityTokens() throws IOException {
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  Closer closer = Closer.create();
  try {
    DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
    credentials.writeTokenStorageToStream(dataOutputBuffer);

    // Remove the AM->RM token so that containers cannot access it
    Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
    while (tokenIterator.hasNext()) {
      Token<?> token = tokenIterator.next();
      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        tokenIterator.remove();
      }
    }

    return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 4
Source File: Log4jConfigHelper.java    From incubator-gobblin with Apache License 2.0
/**
 * Update the log4j configuration.
 *
 * @param targetClass the target class used to get the original log4j configuration file as a resource
 * @param log4jFileName the custom log4j configuration properties file name
 * @throws IOException if there's something wrong with updating the log4j configuration
 */
public static void updateLog4jConfiguration(Class<?> targetClass, String log4jFileName)
    throws IOException {
  final Closer closer = Closer.create();
  try {
    final InputStream inputStream = closer.register(targetClass.getResourceAsStream("/" + log4jFileName));
    final Properties originalProperties = new Properties();
    originalProperties.load(inputStream);

    LogManager.resetConfiguration();
    PropertyConfigurator.configure(originalProperties);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 5
Source File: VFSZipper.java    From scheduling with GNU Affero General Public License v3.0
public static void zip(FileObject root, List<FileObject> files, OutputStream out) throws IOException {
    String basePath = root.getName().getPath();
    Closer closer = Closer.create();
    try {
        ZipOutputStream zos = new ZipOutputStream(out);
        closer.register(zos);
        for (FileObject fileToCopy : files) {
            ZipEntry zipEntry = zipEntry(basePath, fileToCopy);
            zos.putNextEntry(zipEntry);
            copyFileContents(fileToCopy, zos);
            zos.flush();
            zos.closeEntry();
        }
    } catch (IOException e) {
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}
 
Example 6
Source File: Zipper.java    From scheduling with GNU Affero General Public License v3.0
public static void writeZipEntry(ZipEntry zipEntry, InputStream is, ZipOutputStream zos) throws IOException {
    Closer closer = Closer.create();
    closer.register(is);
    try {
        logger.trace("Adding file zip entry: " + zipEntry.toString());
        zos.putNextEntry(zipEntry);
        ByteStreams.copy(is, zos);
        zos.flush();
    } catch (IOException ioe) {
        throw closer.rethrow(ioe);
    } finally {
        closer.close();
    }
}
 
Example 7
Source File: EventUtils.java    From incubator-gobblin with Apache License 2.0
/**
 * Parses a {@link org.apache.gobblin.metrics.GobblinTrackingEvent} from a byte array Avro serialization.
 * @param reuse GobblinTrackingEvent to reuse.
 * @param bytes Input bytes.
 * @param schemaId Expected schemaId.
 * @return GobblinTrackingEvent.
 * @throws java.io.IOException
 */
public synchronized static GobblinTrackingEvent deserializeEventFromAvroSerialization(GobblinTrackingEvent reuse, byte[] bytes, @Nullable String schemaId)
    throws IOException {
  if (!reader.isPresent()) {
    reader = Optional.of(new SpecificDatumReader<>(GobblinTrackingEvent.class));
  }

  Closer closer = Closer.create();

  try {
    DataInputStream inputStream = closer.register(new DataInputStream(new ByteArrayInputStream(bytes)));

    if (schemaId != null) {
      MetricReportUtils.readAndVerifySchemaId(inputStream, schemaId);
    } else {
      MetricReportUtils.readAndVerifySchemaVersion(inputStream);
    }

    // Decode the rest
    Decoder decoder = DecoderFactory.get().binaryDecoder(inputStream, null);
    return reader.get().read(reuse, decoder);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 8
Source File: ClasspathCache.java    From glowroot with Apache License 2.0
private static void loadClassNamesFromDirectoryInsideJarFile(File jarFile,
        String directoryInsideJarFile, Location location,
        Multimap<String, Location> newClassNameLocations) throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(new FileInputStream(jarFile));
        JarInputStream jarIn = closer.register(new JarInputStream(in));
        loadClassNamesFromJarInputStream(jarIn, directoryInsideJarFile, location,
                newClassNameLocations);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
 
Example 9
Source File: MultiWorkUnitTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testSerDe()
    throws IOException {
  Closer closer = Closer.create();
  try {
    ByteArrayOutputStream baos = closer.register(new ByteArrayOutputStream());
    DataOutputStream dos = closer.register(new DataOutputStream(baos));
    this.multiWorkUnit.write(dos);

    ByteArrayInputStream bais = closer.register(new ByteArrayInputStream(baos.toByteArray()));
    DataInputStream dis = closer.register(new DataInputStream(bais));
    MultiWorkUnit copy = new MultiWorkUnit();
    copy.readFields(dis);

    List<WorkUnit> workUnitList = copy.getWorkUnits();
    Assert.assertEquals(workUnitList.size(), 2);

    Assert.assertEquals(workUnitList.get(0).getHighWaterMark(), 1000);
    Assert.assertEquals(workUnitList.get(0).getLowWaterMark(), 0);
    Assert.assertEquals(workUnitList.get(0).getProp("k1"), "v1");

    Assert.assertEquals(workUnitList.get(1).getHighWaterMark(), 2000);
    Assert.assertEquals(workUnitList.get(1).getLowWaterMark(), 1001);
    Assert.assertEquals(workUnitList.get(1).getProp("k2"), "v2");
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 10
Source File: GobblinYarnAppLauncher.java    From incubator-gobblin with Apache License 2.0
private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

  // Pass on the credentials from the hadoop token file if present.
  // The value in the token file takes precedence.
  if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
    Credentials tokenFileCredentials = Credentials.readTokenStorageFile(new File(System.getenv(HADOOP_TOKEN_FILE_LOCATION)),
        new Configuration());
    credentials.addAll(tokenFileCredentials);
  }

  String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
  if (tokenRenewer == null || tokenRenewer.length() == 0) {
    throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
  }

  // For now, only getting tokens for the default file-system.
  Token<?>[] tokens = this.fs.addDelegationTokens(tokenRenewer, credentials);
  if (tokens != null) {
    for (Token<?> token : tokens) {
      LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
    }
  }

  Closer closer = Closer.create();
  try {
    DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
    credentials.writeTokenStorageToStream(dataOutputBuffer);
    ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    containerLaunchContext.setTokens(fsTokens);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 11
Source File: WikipediaExtractor.java    From incubator-gobblin with Apache License 2.0
private JsonElement performHttpQuery(String rootUrl, Map<String, String> query) throws URISyntaxException, IOException {
  if (null == this.httpClient) {
    this.httpClient = createHttpClient();
  }
  HttpUriRequest req = createHttpRequest(rootUrl, query);

  Closer closer = Closer.create();

  StringBuilder sb = new StringBuilder();
  try {
    HttpResponse response = sendHttpRequest(req, this.httpClient);
    if (response instanceof CloseableHttpResponse) {
      closer.register((CloseableHttpResponse)response);
    }
    BufferedReader br = closer.register(
        new BufferedReader(new InputStreamReader(response.getEntity().getContent(),
                                                 ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
    String line;
    while ((line = br.readLine()) != null) {
      sb.append(line + "\n");
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    try {
      closer.close();
    } catch (IOException e) {
      LOG.error("IOException in Closer.close() while performing query " + req + ": " + e, e);
    }
  }

  if (Strings.isNullOrEmpty(sb.toString())) {
    LOG.warn("Received empty response for query: " + req);
    return new JsonObject();
  }

  JsonElement jsonElement = GSON.fromJson(sb.toString(), JsonElement.class);
  return jsonElement;
}
 
Example 12
Source File: DatabaseJobHistoryStoreSchemaManager.java    From incubator-gobblin with Apache License 2.0
@Override
public void run(String[] args) throws Exception {
  if (args.length < 1 || args.length > 2) {
    printUsage();
  }
  Closer closer = Closer.create();
  try {
    CompositeConfiguration config = new CompositeConfiguration();
    config.addConfiguration(new SystemConfiguration());
    if (args.length == 2) {
      config.addConfiguration(new PropertiesConfiguration(args[1]));
    }
    Properties properties = getProperties(config);
    DatabaseJobHistoryStoreSchemaManager schemaManager =
            closer.register(DatabaseJobHistoryStoreSchemaManager.builder(properties).build());
    if (String.CASE_INSENSITIVE_ORDER.compare("migrate", args[0]) == 0) {
      schemaManager.migrate();
    } else if (String.CASE_INSENSITIVE_ORDER.compare("info", args[0]) == 0) {
      schemaManager.info();
    } else {
      printUsage();
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 13
Source File: VFSZipper.java    From scheduling with GNU Affero General Public License v3.0
public static void zip(FileObject file, OutputStream out) throws IOException {
    Closer closer = Closer.create();
    try {
        closer.register(out);
        InputStream in = file.getContent().getInputStream();
        closer.register(in);
        ByteStreams.copy(in, out);
    } catch (IOException ioe) {
        throw closer.rethrow(ioe);
    } finally {
        closer.close();
    }
}
 
Example 14
Source File: VFSZipper.java    From scheduling with GNU Affero General Public License v3.0
public static void unzip(InputStream is, FileObject outfileObj) throws IOException {
    Closer closer = Closer.create();
    try {
        ZipInputStream zis = new ZipInputStream(is);
        closer.register(zis);
        ZipEntry zipEntry = zis.getNextEntry();
        while (zipEntry != null) {
            FileObject entryFile = outfileObj.resolveFile(zipEntry.getName());

            if (zipEntry.isDirectory()) {
                logger.debug("Creating folder " + entryFile.getURL());
                entryFile.createFolder();
            } else {
                if (!entryFile.exists()) {
                    logger.debug("Creating file " + entryFile.getURL());
                    entryFile.createFile();
                } else {
                    logger.debug("Overwriting file " + entryFile.getURL());
                }
                Zipper.ZIP.unzipEntry(zis, entryFile.getContent().getOutputStream());
            }

            zipEntry = zis.getNextEntry();
        }
    } catch (IOException ioe) {
        logger.error("Error when unzipping", ioe);
        throw closer.rethrow(ioe);
    } finally {
        closer.close();
    }
}
 
Example 15
Source File: ClasspathCache.java    From glowroot with Apache License 2.0
@GuardedBy("this")
private void loadClassNamesFromJarFile(File jarFile, Location location,
        Multimap<String, Location> newClassNameLocations) throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(new FileInputStream(jarFile));
        JarInputStream jarIn = closer.register(new JarInputStream(in));
        loadClassNamesFromManifestClassPath(jarIn, jarFile, newClassNameLocations);
        loadClassNamesFromJarInputStream(jarIn, "", location, newClassNameLocations);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
 
Example 16
Source File: FsStateStore.java    From incubator-gobblin with Apache License 2.0
/**
 * See {@link StateStore#put(String, String, T)}.
 *
 * <p>
 *   This implementation does not support putting the state object into an existing store,
 *   as append is not yet supported by Hadoop SequenceFile (HADOOP-7139).
 * </p>
 */
@Override
public void put(String storeName, String tableName, T state) throws IOException {
  String tmpTableName = this.useTmpFileForPut ? TMP_FILE_PREFIX + tableName : tableName;
  Path tmpTablePath = new Path(new Path(this.storeRootDir, storeName), tmpTableName);

  if (!this.fs.exists(tmpTablePath) && !create(storeName, tmpTableName)) {
    throw new IOException("Failed to create a state file for table " + tmpTableName);
  }

  Closer closer = Closer.create();
  try {
    @SuppressWarnings("deprecation")
    SequenceFile.Writer writer = closer.register(SequenceFile.createWriter(this.fs, this.conf, tmpTablePath,
        Text.class, this.stateClass, SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
    writer.append(new Text(Strings.nullToEmpty(state.getId())), state);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }

  if (this.useTmpFileForPut) {
    Path tablePath = new Path(new Path(this.storeRootDir, storeName), tableName);
    renamePath(tmpTablePath, tablePath);
  }
}
 
Example 17
Source File: AvroSchemaFieldRemoverTest.java    From incubator-gobblin with Apache License 2.0
private Schema parseSchema(String schemaFile) throws IOException {
  Closer closer = Closer.create();
  try {
    InputStream in = closer.register(getClass().getResourceAsStream(schemaFile));
    return new Schema.Parser().parse(in);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
Example 18
Source File: FsStateStore.java    From incubator-gobblin with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public T get(String storeName, String tableName, String stateId) throws IOException {
  Path tablePath = new Path(new Path(this.storeRootDir, storeName), tableName);
  if (!this.fs.exists(tablePath)) {
    return null;
  }

  Closer closer = Closer.create();
  try {
    @SuppressWarnings("deprecation")
    GobblinSequenceFileReader reader = closer.register(new GobblinSequenceFileReader(this.fs, tablePath, this.conf));
    try {
      Text key = new Text();
      T state = this.stateClass.newInstance();
      while (reader.next(key)) {
        state = (T)reader.getCurrentValue(state);
        if (key.toString().equals(stateId)) {
          state.setId(stateId);
          return state;
        }
      }
    } catch (Exception e) {
      throw new IOException("failure retrieving state from storeName " + storeName + " tableName " + tableName, e);
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }

  return null;
}
 
Example 19
Source File: NpmSearchIndexFilter.java    From nexus-public with Eclipse Public License 1.0
/**
 * Filters the npm index document with the given predicate.
 */
static Content filter(final Content fullIndex,
                      final Predicate<NestedAttributesMap> predicate) throws IOException
{
  checkNotNull(fullIndex);
  checkNotNull(predicate);
  final Path path = Files.createTempFile("npm-searchIndex-filter", "json");
  final Closer closer = Closer.create();
  try {
    final JsonParser jsonParser = mapper.getFactory().createParser(closer.register(fullIndex.openInputStream()));
    if (jsonParser.nextToken() == JsonToken.START_OBJECT &&
        NpmMetadataUtils.META_UPDATED.equals(jsonParser.nextFieldName())) {
      jsonParser.nextValue(); // skip value
    }
    final JsonGenerator generator = closer.register(
        mapper.getFactory().createGenerator(new BufferedOutputStream(Files.newOutputStream(path)))
    );
    generator.writeStartObject();
    generator.writeNumberField(NpmMetadataUtils.META_UPDATED, System.currentTimeMillis());
    NestedAttributesMap packageRoot;
    while ((packageRoot = getNext(jsonParser)) != null) {
      if (predicate.apply(packageRoot)) {
        generator.writeObjectField(packageRoot.getKey(), packageRoot.backing());
      }
    }
    generator.writeEndObject();
    generator.flush();
  }
  catch (Throwable t) {
    throw closer.rethrow(t);
  }
  finally {
    closer.close();
  }

  return new Content(new StreamPayload(
      new InputStreamSupplier()
      {
        @Nonnull
        @Override
        public InputStream get() throws IOException {
          return new BufferedInputStream(Files.newInputStream(path));
        }
      },
      Files.size(path),
      ContentTypes.APPLICATION_JSON)
  );
}
 
Example 20
Source File: HiveSerDeTest.java    From incubator-gobblin with Apache License 2.0
/**
 * This test uses Avro SerDe to deserialize data from Avro files, and use ORC SerDe
 * to serialize them into ORC files.
 */
@Test(groups = { "gobblin.serde" })
public void testAvroOrcSerDes()
    throws IOException, DataRecordException, DataConversionException, URISyntaxException {
  Properties properties = new Properties();
  properties.load(HiveSerDeTest.class.getClassLoader().getResourceAsStream("serde/serde.properties"));
  SourceState sourceState = new SourceState(new State(properties), ImmutableList.<WorkUnitState> of());
  File schemaFile = new File(HiveSerDeTest.class.getClassLoader().getResource("serde/serde.avsc").toURI());
  sourceState.setProp("avro.schema.url" , schemaFile.getAbsolutePath());

  OldApiWritableFileSource source = new OldApiWritableFileSource();
  List<WorkUnit> workUnits = source.getWorkunits(sourceState);

  Assert.assertEquals(workUnits.size(), 1);

  WorkUnitState wus = new WorkUnitState(workUnits.get(0));
  wus.addAll(sourceState);

  Closer closer = Closer.create();

  HiveWritableHdfsDataWriter writer = null;
  try {
    OldApiWritableFileExtractor extractor = closer.register((OldApiWritableFileExtractor) source.getExtractor(wus));
    HiveSerDeConverter converter = closer.register(new HiveSerDeConverter());
    writer =
        closer.register((HiveWritableHdfsDataWriter) new HiveWritableHdfsDataWriterBuilder<>().withBranches(1)
            .withWriterId("0").writeTo(Destination.of(DestinationType.HDFS, sourceState))
            .withAttemptId("0-0")
            .writeInFormat(WriterOutputFormat.ORC).build());

    Assert.assertTrue(writer.isSpeculativeAttemptSafe());

    converter.init(wus);
    Writable record;

    while ((record = extractor.readRecord(null)) != null) {
      Iterable<Writable> convertedRecordIterable = converter.convertRecordImpl(null, record, wus);
      Assert.assertEquals(Iterators.size(convertedRecordIterable.iterator()), 1);
      writer.write(convertedRecordIterable.iterator().next());
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
    if (writer != null) {
      writer.commit();
    }
    Assert.assertTrue(this.fs.exists(new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
        sourceState.getProp(ConfigurationKeys.WRITER_FILE_NAME))));
    HadoopUtils.deletePath(this.fs, new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)), true);
  }
}