scala.reflect.ClassTag Java Examples

The following examples show how to use scala.reflect.ClassTag from Java. Each example is extracted from an open-source project; the source file, originating project, and license are noted above the code.
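
A ClassTag carries a class's runtime type so that Scala APIs that normally receive it as an implicit parameter (RDD construction, broadcasts, array creation, and so on) can be called from Java, where the tag has to be passed explicitly. Below is a minimal sketch of the two idioms that recur in the examples on this page; the class and helper names are illustrative, not taken from any of the projects.

import scala.Tuple2;
import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;

public final class ClassTagSketch {

    // Obtain a ClassTag for a concrete class through the ClassTag companion object.
    static final ClassTag<String> STRING_TAG = ClassTag$.MODULE$.apply(String.class);

    // Generic helper: the unchecked cast is required for parameterized types such as
    // Tuple2<K, V>, whose type arguments are erased at runtime anyway.
    @SuppressWarnings("unchecked")
    static <K, V> ClassTag<Tuple2<K, V>> tuple2Tag() {
        return (ClassTag<Tuple2<K, V>>) (Object) ClassTag$.MODULE$.apply(Tuple2.class);
    }

    private ClassTagSketch() {
    }
}
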
Example #1
Source File: GraphXGraphGenerator.java    From rya with Apache License 2.0
public Graph<RyaTypeWritable, RyaTypeWritable> createGraph(SparkContext sc, Configuration conf) throws IOException, AccumuloSecurityException{
    StorageLevel storageLvl1 = StorageLevel.MEMORY_ONLY();
    StorageLevel storageLvl2 = StorageLevel.MEMORY_ONLY();
    ClassTag<RyaTypeWritable> RTWTag = ClassTag$.MODULE$.apply(RyaTypeWritable.class);
    RyaTypeWritable rtw = null;
    RDD<Tuple2<Object, RyaTypeWritable>> vertexRDD = getVertexRDD(sc, conf);

    RDD<Tuple2<Object, Edge>> edgeRDD = getEdgeRDD(sc, conf);
    JavaRDD<Tuple2<Object, Edge>> jrddTuple = edgeRDD.toJavaRDD();
    JavaRDD<Edge<RyaTypeWritable>> jrdd = jrddTuple.map(tuple -> tuple._2);

    RDD<Edge<RyaTypeWritable>> goodERDD = JavaRDD.toRDD(jrdd);

    // Graph.apply takes the vertex and edge attribute ClassTags as explicit trailing
    // arguments, since Scala's implicit resolution is not available from Java.
    return Graph.apply(vertexRDD, goodERDD, rtw, storageLvl1, storageLvl2, RTWTag, RTWTag);
}
 
Example #2
Source File: MLContextConversionUtil.java    From systemds with Apache License 2.0
/**
 * Convert a {@code FrameObject} to a {@code RDD<String>} in IJV format.
 *
 * @param frameObject
 *            the {@code FrameObject}
 * @return the {@code FrameObject} converted to a {@code RDD<String>}
 */
public static RDD<String> frameObjectToRDDStringIJV(FrameObject frameObject) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = frameObjectToListStringIJV(frameObject);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example #3
Source File: SparkMaster.java    From GeoTriples with Apache License 2.0
/**
 * Convert the input Dataset into RDF triples and store the results.
 * The conversion takes place per partition, using the mapPartitions Spark transformation.
 * @param mapping_list list of TripleMaps
 */
private void convert_partition(ArrayList<TriplesMap> mapping_list){
    SparkContext sc = SparkContext.getOrCreate();

    Pair<ArrayList<TriplesMap>, List<String>> transformation_info = new Pair<>(mapping_list, Arrays.asList(reader.getHeaders()));
    ClassTag<Pair<ArrayList<TriplesMap>, List<String>>> classTag_pair = scala.reflect.ClassTag$.MODULE$.apply(Pair.class);
    Broadcast<Pair<ArrayList<TriplesMap>, List<String>>> bd_info = sc.broadcast(transformation_info, classTag_pair);

    rowRDD
        .mapPartitions(
        (Iterator<Row> rows_iter) -> {
            ArrayList<TriplesMap> p_mapping_list = bd_info.value().getKey();
            List<String> p_header = bd_info.value().getValue();
            RML_Converter rml_converter = new RML_Converter(p_mapping_list, p_header);
            rml_converter.start();
            rml_converter.registerFunctions();
            Iterator<String> triples = rml_converter.convertPartition(rows_iter);

            rml_converter.stop();
            return triples;
        })
        .saveAsTextFile(outputDir);
}
 
Example #4
Source File: SparkMaster.java    From GeoTriples with Apache License 2.0
/**
 * Convert the input Dataset into RDF triples and store the results.
 * The conversion takes place per row, using the map Spark transformation.
 * @param mapping_list list of TripleMaps
 */
private void convert_row(ArrayList<TriplesMap> mapping_list){

    SparkContext sc = SparkContext.getOrCreate();

    RML_Converter rml_converter = new RML_Converter(mapping_list, Arrays.asList(reader.getHeaders()));
    ClassTag<RML_Converter> classTagRML_Converter = scala.reflect.ClassTag$.MODULE$.apply(RML_Converter.class);
    Broadcast<RML_Converter> bc_converter = sc.broadcast(rml_converter, classTagRML_Converter);

    ClassTag<HashMap<URI, Function>> classTag_hashMap = scala.reflect.ClassTag$.MODULE$.apply(HashMap.class);
    Broadcast<HashMap<URI, Function>> bc_functionsHashMap = sc.broadcast(FunctionFactory.availableFunctions, classTag_hashMap);
    rowRDD
        .map((row) ->  {
            FunctionFactory.availableFunctions = bc_functionsHashMap.value();
            return bc_converter.value().convertRow(row);
        } )
        .saveAsTextFile(outputDir);
}
 
Example #5
Source File: SparkFrontendUtils.java    From incubator-nemo with Apache License 2.0
/**
 * Converts a {@link Function1} to a corresponding {@link Function}.
 * <p>
 * Here, we use the Spark 'JavaSerializer' to facilitate debugging in the future.
 * TODO #205: RDD Closure with Broadcast Variables Serialization Bug
 *
 * @param scalaFunction the scala function to convert.
 * @param <I>           the type of input.
 * @param <O>           the type of output.
 * @return the converted Java function.
 */
public static <I, O> Function<I, O> toJavaFunction(final Function1<I, O> scalaFunction) {
  // This 'JavaSerializer' from Spark produces human-readable NotSerializableException stack traces,
  // which can be useful when addressing this problem.
  // Other toJavaFunction variants can also use this serializer when debugging.
  final ClassTag<Function1<I, O>> classTag = ClassTag$.MODULE$.apply(scalaFunction.getClass());
  final byte[] serializedFunction = new JavaSerializer().newInstance().serialize(scalaFunction, classTag).array();

  return new Function<I, O>() {
    private Function1<I, O> deserializedFunction;

    @Override
    public O call(final I v1) throws Exception {
      if (deserializedFunction == null) {
        // TODO #205: RDD Closure with Broadcast Variables Serialization Bug
        final SerializerInstance js = new JavaSerializer().newInstance();
        deserializedFunction = js.deserialize(ByteBuffer.wrap(serializedFunction), classTag);
      }
      return deserializedFunction.apply(v1);
    }
  };
}
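
A hypothetical usage sketch (nothing below comes from the Nemo code base; the Function1 is built with scala.runtime.AbstractFunction1, which is a convenient way to define Scala functions from Java):

final Function1<Integer, Integer> scalaDouble = new scala.runtime.AbstractFunction1<Integer, Integer>() {
  @Override
  public Integer apply(final Integer x) {
    return x * 2;
  }
};
final Function<Integer, Integer> javaDouble = toJavaFunction(scalaDouble);
// javaDouble.call(21) now evaluates to 42; note that call() declares 'throws Exception'.
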
 
Example #6
Source File: DistinctConverter.java    From spork with Apache License 2.0
@Override
public RDD<Tuple> convert(List<RDD<Tuple>> predecessors,
        PODistinct poDistinct) throws IOException {
    SparkUtil.assertPredecessorSize(predecessors, poDistinct, 1);
    RDD<Tuple> rdd = predecessors.get(0);

    ClassTag<Tuple2<Tuple, Object>> tuple2ClassManifest = SparkUtil
            .<Tuple, Object> getTuple2Manifest();

    RDD<Tuple2<Tuple, Object>> rddPairs = rdd.map(TO_KEY_VALUE_FUNCTION,
            tuple2ClassManifest);
    PairRDDFunctions<Tuple, Object> pairRDDFunctions
      = new PairRDDFunctions<Tuple, Object>(
            rddPairs, SparkUtil.getManifest(Tuple.class),
            SparkUtil.getManifest(Object.class), null);
    int parallelism = SparkUtil.getParallelism(predecessors, poDistinct);
    return pairRDDFunctions.reduceByKey(MERGE_VALUES_FUNCTION, parallelism)
            .map(TO_VALUE_FUNCTION, SparkUtil.getManifest(Tuple.class));
}
 
Example #7
Source File: MLContextConversionUtil.java    From systemds with Apache License 2.0
/**
 * Convert a {@code FrameObject} to a {@code RDD<String>} in CSV format.
 *
 * @param frameObject
 *            the {@code FrameObject}
 * @param delimiter
 *            the delimiter
 * @return the {@code FrameObject} converted to a {@code RDD<String>}
 */
public static RDD<String> frameObjectToRDDStringCSV(FrameObject frameObject, String delimiter) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = frameObjectToListStringCSV(frameObject, delimiter);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example #8
Source File: MLContextConversionUtil.java    From systemds with Apache License 2.0
/**
 * Convert a {@code MatrixObject} to a {@code RDD<String>} in CSV format.
 *
 * @param matrixObject
 *            the {@code MatrixObject}
 * @return the {@code MatrixObject} converted to a {@code RDD<String>}
 */
public static RDD<String> matrixObjectToRDDStringCSV(MatrixObject matrixObject) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = matrixObjectToListStringCSV(matrixObject);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example #9
Source File: MLContextConversionUtil.java    From systemds with Apache License 2.0
/**
 * Convert a {@code MatrixObject} to a {@code RDD<String>} in IJV format.
 *
 * @param matrixObject
 *            the {@code MatrixObject}
 * @return the {@code MatrixObject} converted to a {@code RDD<String>}
 */
public static RDD<String> matrixObjectToRDDStringIJV(MatrixObject matrixObject) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = matrixObjectToListStringIJV(matrixObject);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example #10
Source File: GryoSerializerInstance.java    From tinkerpop with Apache License 2.0
@Override
public <T> T deserialize(final ByteBuffer byteBuffer, final ClassLoader classLoader, final ClassTag<T> classTag) {
    this.input.setBuffer(byteBuffer.array());
    return this.gryoSerializer.getGryoPool().readWithKryo(kryo -> {
        kryo.setClassLoader(classLoader);
        return (T) kryo.readClassAndObject(this.input);
    });
}
 
Example #11
Source File: MizoRDD.java    From mizo with Apache License 2.0
public MizoRDD(SparkContext context, IMizoRDDConfig config, ClassTag<TReturn> classTag) {
    super(context, new ArrayBuffer<>(), classTag);

    if (!Strings.isNullOrEmpty(config.logConfigPath())) {
        PropertyConfigurator.configure(config.logConfigPath());
    }

    this.config = config;
    this.regionsPaths = getRegionsPaths(config.regionDirectoriesPath());
    this.relationTypes = loadRelationTypes(config.titanConfigPath());
}
 
Example #12
Source File: ProcessedOffsetManager.java    From kafka-spark-consumer with Apache License 2.0
public static <T> DStream<Tuple2<String, Iterable<Long>>>  getPartitionOffset(
    DStream<MessageAndMetadata<T>> unionStreams, Properties props) {
  ClassTag<MessageAndMetadata<T>> messageMetaClassTag =
      ScalaUtil.<T>getMessageAndMetadataClassTag();
  JavaDStream<MessageAndMetadata<T>> javaDStream =
      new JavaDStream<MessageAndMetadata<T>>(unionStreams, messageMetaClassTag);
  JavaPairDStream<String, Iterable<Long>> partitonOffset = getPartitionOffset(javaDStream, props);
  return partitonOffset.dstream();
}
 
Example #13
Source File: EncoderHelpers.java    From beam with Apache License 2.0
/**
 * Wrap a Beam coder into a Spark Encoder using Catalyst Expression Encoders (which use Java code
 * generation).
 */
public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
  Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
  ClassTag<T> classTag = ClassTag$.MODULE$.apply(clazz);
  List<Expression> serializers =
      Collections.singletonList(
          new EncodeUsingBeamCoder<>(new BoundReference(0, new ObjectType(clazz), true), coder));

  return new ExpressionEncoder<>(
      SchemaHelpers.binarySchema(),
      false,
      JavaConversions.collectionAsScalaIterable(serializers).toSeq(),
      new DecodeUsingBeamCoder<>(
          new Cast(new GetColumnByOrdinal(0, BinaryType), BinaryType), classTag, coder),
      classTag);
}
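
A minimal usage sketch, not taken from the Beam code base: it assumes Beam's StringUtf8Coder and an active SparkSession named spark.

Encoder<String> stringEncoder = fromBeamCoder(StringUtf8Coder.of());
Dataset<String> words = spark.createDataset(Arrays.asList("a", "b", "c"), stringEncoder);
words.show(); // the Dataset round-trips its rows through the Beam coder
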
 
Example #14
Source File: DefaultMessageMapperFactory.java    From ditto with Eclipse Public License 2.0
@Nullable
private static MessageMapper createAnyMessageMapper(final Class<?> clazz,
        final DynamicAccess dynamicAccess) {
    final ClassTag<MessageMapper> tag = scala.reflect.ClassTag$.MODULE$.apply(MessageMapper.class);
    final Try<MessageMapper> mapperTry = dynamicAccess.createInstanceFor(clazz, List$.MODULE$.empty(), tag);

    if (mapperTry.isFailure()) {
        final Throwable error = mapperTry.failed().get();
        if (error instanceof ClassNotFoundException || error instanceof InstantiationException ||
                error instanceof ClassCastException) {
            return null;
        } else {
            throw new IllegalStateException("There was an unknown error when trying to creating instance for '"
                    + clazz + "'", error);
        }
    }

    return mapperTry.get();
}
 
Example #15
Source File: DefaultMessageMapperFactory.java    From ditto with Eclipse Public License 2.0
private static List<MessageMapperExtension> loadMessageMapperExtensions(final DynamicAccess dynamicAccess) {
    return messageMapperExtensionClasses.stream().map(clazz -> {
        final ClassTag<MessageMapperExtension> tag =
                scala.reflect.ClassTag$.MODULE$.apply(MessageMapperExtension.class);
        return dynamicAccess.createInstanceFor(clazz, List$.MODULE$.empty(), tag).get();
    }).collect(Collectors.toList());
}
 
Example #16
Source File: ProcessedOffsetManager.java    From kafka-spark-consumer with Apache License 2.0
@SuppressWarnings("deprecation")
public static void persists(DStream<Tuple2<String, Iterable<Long>>> partitonOffset, Properties props) {
  ClassTag<Tuple2<String, Iterable<Long>>> tuple2ClassTag =
      ScalaUtil.<String, Iterable<Long>>getTuple2ClassTag();
  JavaDStream<Tuple2<String, Iterable<Long>>> jpartitonOffset =
      new JavaDStream<Tuple2<String, Iterable<Long>>>(partitonOffset, tuple2ClassTag);
  jpartitonOffset.foreachRDD(new VoidFunction<JavaRDD<Tuple2<String, Iterable<Long>>>>() {
    @Override
    public void call(JavaRDD<Tuple2<String, Iterable<Long>>> po) throws Exception {
      List<Tuple2<String, Iterable<Long>>> poList = po.collect();
      doPersists(poList, props);
    }
  });
}
 
Example #17
Source File: RDDUtils.java    From geowave with Apache License 2.0
/**
 * Translate a set of objects in a JavaRDD to a provided type and push to GeoWave
 *
 * @throws IOException
 */
private static void writeToGeoWave(
    final SparkContext sc,
    final Index index,
    final DataStorePluginOptions outputStoreOptions,
    final DataTypeAdapter adapter,
    final JavaRDD<SimpleFeature> inputRDD) throws IOException {

  // setup the configuration and the output format
  final Configuration conf = new org.apache.hadoop.conf.Configuration(sc.hadoopConfiguration());

  GeoWaveOutputFormat.setStoreOptions(conf, outputStoreOptions);
  GeoWaveOutputFormat.addIndex(conf, index);
  GeoWaveOutputFormat.addDataAdapter(conf, adapter);

  // create the job
  final Job job = new Job(conf);
  job.setOutputKeyClass(GeoWaveOutputKey.class);
  job.setOutputValueClass(SimpleFeature.class);
  job.setOutputFormatClass(GeoWaveOutputFormat.class);

  // broadcast string names
  final ClassTag<String> stringTag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
  final Broadcast<String> typeName = sc.broadcast(adapter.getTypeName(), stringTag);
  final Broadcast<String> indexName = sc.broadcast(index.getName(), stringTag);

  // map to a pair containing the output key and the output value
  inputRDD.mapToPair(
      feat -> new Tuple2<>(
          new GeoWaveOutputKey(typeName.value(), indexName.value()),
          feat)).saveAsNewAPIHadoopDataset(job.getConfiguration());
}
 
Example #18
Source File: RDDUtils.java    From geowave with Apache License 2.0
public static void writeRasterToGeoWave(
    final SparkContext sc,
    final Index index,
    final DataStorePluginOptions outputStoreOptions,
    final RasterDataAdapter adapter,
    final JavaRDD<GridCoverage> inputRDD) throws IOException {

  // setup the configuration and the output format
  final Configuration conf = new org.apache.hadoop.conf.Configuration(sc.hadoopConfiguration());

  GeoWaveOutputFormat.setStoreOptions(conf, outputStoreOptions);
  GeoWaveOutputFormat.addIndex(conf, index);
  GeoWaveOutputFormat.addDataAdapter(conf, adapter);

  // create the job
  final Job job = new Job(conf);
  job.setOutputKeyClass(GeoWaveOutputKey.class);
  job.setOutputValueClass(GridCoverage.class);
  job.setOutputFormatClass(GeoWaveOutputFormat.class);

  // broadcast string names
  final ClassTag<String> stringTag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
  final Broadcast<String> typeName = sc.broadcast(adapter.getTypeName(), stringTag);
  final Broadcast<String> indexName = sc.broadcast(index.getName(), stringTag);

  // map to a pair containing the output key and the output value
  inputRDD.mapToPair(
      gridCoverage -> new Tuple2<>(
          new GeoWaveOutputKey(typeName.value(), indexName.value()),
          gridCoverage)).saveAsNewAPIHadoopDataset(job.getConfiguration());
}
 
Example #19
Source File: RDDUtils.java    From geowave with Apache License 2.0
public static Broadcast<? extends NumericIndexStrategy> broadcastIndexStrategy(
    final SparkContext sc,
    final NumericIndexStrategy indexStrategy) {
  final ClassTag<NumericIndexStrategy> indexClassTag =
      scala.reflect.ClassTag$.MODULE$.apply(indexStrategy.getClass());
  final Broadcast<NumericIndexStrategy> broadcastStrategy =
      sc.broadcast(indexStrategy, indexClassTag);
  return broadcastStrategy;
}
 
Example #20
Source File: GryoSerializationStream.java    From tinkerpop with Apache License 2.0
@Override
public <T> SerializationStream writeObject(final T t, final ClassTag<T> classTag) {
    this.gryoSerializer.getGryoPool().writeWithKryo(kryo -> kryo.writeClassAndObject(this.output, t));
    return this;
}
 
Example #21
Source File: SparkUtil.java    From spork with Apache License 2.0
@SuppressWarnings("unchecked")
public static <K, V> ClassTag<Product2<K, V>> getProduct2Manifest() {
    return (ClassTag<Product2<K, V>>) (Object) getManifest(Product2.class);
}
 
Example #22
Source File: SparkUtil.java    From spork with Apache License 2.0
@SuppressWarnings("unchecked")
public static <K, V> ClassTag<Tuple2<K, V>> getTuple2Manifest() {
    return (ClassTag<Tuple2<K, V>>) (Object) getManifest(Tuple2.class);
}
 
Example #23
Source File: SparkUtil.java    From spork with Apache License 2.0
public static <T> ClassTag<T> getManifest(Class<T> clazz) {
    return ClassTag$.MODULE$.apply(clazz);
}
 
Example #24
Source File: ScalaUtil.java    From kafka-spark-consumer with Apache License 2.0
/**
 * Since Scala 2.10, ClassTag replaces ClassManifest.
 */
public static <T> ClassTag<T> getClassTag(Class<T> clazz) {
  return ClassTag$.MODULE$.apply(clazz);
}
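
For example, the resulting tag can be passed to SparkContext.broadcast, which needs an explicit ClassTag when called from Java (the SparkContext sc below is assumed, not part of ScalaUtil):

Broadcast<String> greeting = sc.broadcast("hello", ScalaUtil.getClassTag(String.class));
String value = greeting.value(); // "hello"
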
 
Example #25
Source File: ScalaUtil.java    From kafka-spark-consumer with Apache License 2.0
@SuppressWarnings("unchecked")
public static <E> ClassTag<MessageAndMetadata<E>> getMessageAndMetadataClassTag() {
  return (ClassTag<MessageAndMetadata<E>>)(Object) getClassTag(MessageAndMetadata.class);
}
 
Example #26
Source File: ScalaUtil.java    From kafka-spark-consumer with Apache License 2.0
@SuppressWarnings("unchecked")
public static <K, V> ClassTag<Tuple2<K, V>> getTuple2ClassTag() {
  return (ClassTag<Tuple2<K, V>>)(Object) getClassTag(Tuple2.class);
}