Java Code Examples for org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable

The following examples show how to use org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable. These examples are extracted from open source projects; where available, the source project and file are noted above each example.
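
Before the examples, here is a minimal sketch of the core VertexWritable API that the snippets below lean on: the constructor wraps a Vertex, get() returns the wrapped vertex as a StarGraph.StarVertex, and set(...) swaps in a replacement vertex. This is an illustration assembled from the usage visible in the examples, not code from any of the projects listed; the class name VertexWritableBasics is invented, and the TinkerPop "modern" toy graph merely stands in for any vertex source.

import org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.util.star.StarGraph;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerFactory;

public final class VertexWritableBasics {
    public static void main(final String[] args) {
        // any vertex will do; the "modern" toy graph is used purely for illustration
        final Vertex vertex = TinkerFactory.createModern().vertices().next();

        // wrap the vertex so it can travel through Hadoop/Spark jobs as a Writable value
        final VertexWritable writable = new VertexWritable(vertex);

        // get() exposes the wrapped vertex as a StarGraph.StarVertex
        final StarGraph.StarVertex star = writable.get();
        System.out.println(star.id() + ": " + star.label());

        // set(...) replaces the wrapped vertex, as the graph-filtering readers below do
        writable.set(star);
    }
}

The examples that follow exercise this same wrap/get()/set(...) cycle inside Hadoop record readers and writers and Spark RDD transformations.
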
Example 1
public static <M> JavaPairRDD<Object, VertexWritable> prepareFinalGraphRDD(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD,
        final Set<VertexComputeKey> vertexComputeKeys) {
    // the graphRDD and the viewRDD must have the same partitioner
    Preconditions.checkState(!graphRDD.partitioner().isPresent() || (graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get())));
    final String[] vertexComputeKeysArray = VertexProgramHelper.vertexComputeKeysAsArray(vertexComputeKeys); // the compute keys as an array
    return graphRDD.leftOuterJoin(viewIncomingRDD)
            .mapValues(tuple -> {
                final StarGraph.StarVertex vertex = tuple._1().get();
                vertex.dropVertexProperties(vertexComputeKeysArray); // drop all existing compute keys
                // attach the final computed view to the cached graph
                final List<DetachedVertexProperty<Object>> view = tuple._2().isPresent() ? tuple._2().get().getView() : Collections.emptyList();
                for (final DetachedVertexProperty<Object> property : view) {
                    if (!VertexProgramHelper.isTransientVertexComputeKey(property.key(), vertexComputeKeys))
                        property.attach(Attachable.Method.create(vertex));
                }
                return tuple._1();
            });
}
 
Example 2
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    while (reader.nextKeyValue()) {
        // TODO janusgraph05 integration -- the duplicate() call may be unnecessary
        final TinkerVertex maybeNullTinkerVertex = deserializer.readHadoopVertex(reader.getCurrentKey(), reader.getCurrentValue());
        if (null != maybeNullTinkerVertex) {
            vertex = new VertexWritable(maybeNullTinkerVertex);
            if (graphFilter == null) {
                return true;
            } else {
                final Optional<StarGraph.StarVertex> vertexWritable = vertex.get().applyGraphFilter(graphFilter);
                if (vertexWritable.isPresent()) {
                    vertex.set(vertexWritable.get());
                    return true;
                }
            }
        }
    }
    return false;
}
 
Example 3
Source Project: tinkerpop   Source File: ScriptRecordWriter.java    License: Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        try {
            final Bindings bindings = this.engine.createBindings();
            bindings.put(VERTEX, vertex.get());
            final String line = (String) engine.eval(WRITE_CALL, bindings);
            if (line != null) {
                this.out.write(line.getBytes(UTF8));
                this.out.write(NEWLINE);
            }
        } catch (final ScriptException e) {
            throw new IOException(e.getMessage(), e);
        }
    }
}
 
Example 4
Source Project: tinkerpop   Source File: GraphFilterRecordReader.java    License: Apache License 2.0
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    if (null == this.graphFilter) {
        return this.recordReader.nextKeyValue();
    } else {
        while (true) {
            if (this.recordReader.nextKeyValue()) {
                final VertexWritable vertexWritable = this.recordReader.getCurrentValue();
                final Optional<StarGraph.StarVertex> vertex = vertexWritable.get().applyGraphFilter(this.graphFilter);
                if (vertex.isPresent()) {
                    vertexWritable.set(vertex.get());
                    return true;
                }
            } else {
                return false;
            }
        }
    }
}
 
Example 5
Source Project: tinkerpop   Source File: PersistedOutputRDD.java    License: Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    if (!configuration.getBoolean(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false))
        LOGGER.warn("The SparkContext should be persisted in order for the RDD to persist across jobs. To do so, set " + Constants.GREMLIN_SPARK_PERSIST_CONTEXT + " to true");
    if (!configuration.containsKey(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION))
        throw new IllegalArgumentException("There is no provided " + Constants.GREMLIN_HADOOP_OUTPUT_LOCATION + " to write the persisted RDD to");
    SparkContextStorage.open(configuration).rm(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));  // this might be bad because it unpersists the job RDD
    // determine which storage level to persist the RDD as with MEMORY_ONLY being the default cache()
    final StorageLevel storageLevel = StorageLevel.fromString(configuration.getString(Constants.GREMLIN_SPARK_PERSIST_STORAGE_LEVEL, "MEMORY_ONLY"));
    if (!configuration.getBoolean(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES, true))
        graphRDD.mapValues(vertex -> {
            vertex.get().dropEdges(Direction.BOTH);
            return vertex;
        }).setName(Constants.getGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION))).persist(storageLevel)
                // call action to eager store rdd
                .count();
    else
        graphRDD.setName(Constants.getGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION))).persist(storageLevel)
                // call action to eager store rdd
                .count();
    Spark.refresh(); // necessary to do really fast so the Spark GC doesn't clear out the RDD
}
 
Example 6
Source Project: tinkerpop   Source File: SparkExecutor.java    License: Apache License 2.0
public static <M> JavaPairRDD<Object, VertexWritable> prepareFinalGraphRDD(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD,
        final Set<VertexComputeKey> vertexComputeKeys) {
    // the graphRDD and the viewRDD must have the same partitioner
    if (graphRDD.partitioner().isPresent())
        assert (graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get()));
    final String[] vertexComputeKeysArray = VertexProgramHelper.vertexComputeKeysAsArray(vertexComputeKeys); // the compute keys as an array
    return graphRDD.leftOuterJoin(viewIncomingRDD)
            .mapValues(tuple -> {
                final StarGraph.StarVertex vertex = tuple._1().get();
                vertex.dropVertexProperties(vertexComputeKeysArray); // drop all existing compute keys
                // attach the final computed view to the cached graph
                final List<DetachedVertexProperty<Object>> view = tuple._2().isPresent() ? tuple._2().get().getView() : Collections.emptyList();
                for (final DetachedVertexProperty<Object> property : view) {
                    if (!VertexProgramHelper.isTransientVertexComputeKey(property.key(), vertexComputeKeys))
                        property.attach(Attachable.Method.create(vertex));
                }
                return tuple._1();
            });
}
 
Example 7
public static <K, V> JavaPairRDD<K, V> executeMap(
        final JavaPairRDD<Object, VertexWritable> graphRDD, final MapReduce<K, V, ?, ?, ?> mapReduce,
        final Configuration graphComputerConfiguration) {
    JavaPairRDD<K, V> mapRDD = graphRDD.mapPartitionsToPair(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
        return new MapIterator<>(MapReduce.<MapReduce<K, V, ?, ?, ?>>createMapReduce(HadoopGraph.open(graphComputerConfiguration), graphComputerConfiguration), partitionIterator);
    });
    if (mapReduce.getMapKeySort().isPresent())
        mapRDD = mapRDD.sortByKey(mapReduce.getMapKeySort().get(), true, 1);
    return mapRDD;
}
 
Example 8
Source Project: titan1withtp3.1   Source File: GiraphRecordReader.java    License: Apache License 2.0
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    while (reader.nextKeyValue()) {
        // TODO titan05 integration -- the duplicate() call may be unnecessary
        final TinkerVertex maybeNullTinkerVertex =
                deser.readHadoopVertex(reader.getCurrentKey(), reader.getCurrentValue());
        if (null != maybeNullTinkerVertex) {
            vertex = new VertexWritable(maybeNullTinkerVertex);
            //vertexQuery.filterRelationsOf(vertex); // TODO reimplement vertexquery filtering
            return true;
        }
    }
    return false;
}
 
Example 9
Source Project: tinkerpop   Source File: ConfUtil.java    License: Apache License 2.0
public static InputFormat<NullWritable, VertexWritable> getReaderAsInputFormat(final Configuration hadoopConfiguration) {
    final Class<?> readerClass = hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class);
    try {
        return InputFormat.class.isAssignableFrom(readerClass) ?
                (InputFormat<NullWritable, VertexWritable>) readerClass.newInstance() :
                (InputFormat<NullWritable, VertexWritable>) Class.forName("org.apache.tinkerpop.gremlin.spark.structure.io.InputRDDFormat").newInstance();
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}
 
Example 10
Source Project: tinkerpop   Source File: GryoRecordWriter.java    License: Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        if (this.hasEdges)
            gryoWriter.writeVertex(this.outputStream, vertex.get(), Direction.BOTH);
        else
            gryoWriter.writeVertex(this.outputStream, vertex.get());
    }
}
 
Example 11
Source Project: tinkerpop   Source File: GraphSONRecordWriter.java    License: Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        if (this.hasEdges) {
            graphsonWriter.writeVertex(this.outputStream, vertex.get(), Direction.BOTH);
            this.outputStream.write(NEWLINE);
        } else {
            graphsonWriter.writeVertex(this.outputStream, vertex.get());
            this.outputStream.write(NEWLINE);
        }
    }
}
 
Example 12
Source Project: tinkerpop   Source File: GraphFilterRecordReader.java    License: Apache License 2.0
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    final Configuration configuration = taskAttemptContext.getConfiguration();
    final InputFormat<NullWritable, VertexWritable> inputFormat = ReflectionUtils.newInstance(configuration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class), configuration);
    if (!(inputFormat instanceof GraphFilterAware) && configuration.get(Constants.GREMLIN_HADOOP_GRAPH_FILTER, null) != null)
        this.graphFilter = VertexProgramHelper.deserialize(ConfUtil.makeApacheConfiguration(configuration), Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    this.recordReader = inputFormat.createRecordReader(inputSplit, taskAttemptContext);
    this.recordReader.initialize(inputSplit, taskAttemptContext);
}
 
Example 13
Source Project: tinkerpop   Source File: HadoopMap.java    License: Apache License 2.0
@Override
public void setup(final Mapper<NullWritable, VertexWritable, ObjectWritable, ObjectWritable>.Context context) {
    final Configuration apacheConfiguration = ConfUtil.makeApacheConfiguration(context.getConfiguration());
    KryoShimServiceLoader.applyConfiguration(apacheConfiguration);
    this.mapReduce = MapReduce.createMapReduce(HadoopGraph.open(apacheConfiguration), apacheConfiguration);
    this.mapReduce.workerStart(MapReduce.Stage.MAP);
}
 
Example 14
Source Project: tinkerpop   Source File: GryoSerializer.java    License: Apache License 2.0
private SparkIoRegistry() {
    try {
        super.register(GryoIo.class, Tuple2.class, new Tuple2Serializer());
        super.register(GryoIo.class, Tuple2[].class, null);
        super.register(GryoIo.class, Tuple3.class, new Tuple3Serializer());
        super.register(GryoIo.class, Tuple3[].class, null);
        super.register(GryoIo.class, CompactBuffer.class, new CompactBufferSerializer());
        super.register(GryoIo.class, CompactBuffer[].class, null);
        super.register(GryoIo.class, CompressedMapStatus.class, null);
        super.register(GryoIo.class, BlockManagerId.class, null);
        super.register(GryoIo.class, HighlyCompressedMapStatus.class, new ExternalizableSerializer());  // externalizable implemented so it's okay
        super.register(GryoIo.class, TorrentBroadcast.class, null);
        super.register(GryoIo.class, PythonBroadcast.class, null);
        super.register(GryoIo.class, BoxedUnit.class, null);
        super.register(GryoIo.class, Class.forName("scala.reflect.ClassTag$$anon$1"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.reflect.ManifestFactory$$anon$1"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("org.apache.spark.internal.io.FileCommitProtocol$TaskCommitMessage"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("org.apache.spark.internal.io.FileCommitProtocol$EmptyTaskCommitMessage$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.collection.immutable.Map$EmptyMap$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.collection.immutable.Map"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.None$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.Some$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.Some"), new JavaSerializer());
        super.register(GryoIo.class, WrappedArray.ofRef.class, new WrappedArraySerializer());
        super.register(GryoIo.class, MessagePayload.class, null);
        super.register(GryoIo.class, ViewIncomingPayload.class, null);
        super.register(GryoIo.class, ViewOutgoingPayload.class, null);
        super.register(GryoIo.class, ViewPayload.class, null);
        super.register(GryoIo.class, SerializableConfiguration.class, new JavaSerializer());
        super.register(GryoIo.class, VertexWritable.class, new VertexWritableSerializer());
        super.register(GryoIo.class, ObjectWritable.class, new ObjectWritableSerializer());
    } catch (final ClassNotFoundException e) {
        throw new IllegalStateException(e);
    }
}
 
Example 15
Source Project: tinkerpop   Source File: OutputFormatRDD.java    License: Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    final org.apache.hadoop.conf.Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(configuration);
    final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION);
    if (null != outputLocation) {
        // map back to a <nullwritable,vertexwritable> stream for output
        graphRDD.mapToPair(tuple -> new Tuple2<>(NullWritable.get(), tuple._2()))
                .saveAsNewAPIHadoopFile(Constants.getGraphLocation(outputLocation),
                        NullWritable.class,
                        VertexWritable.class,
                        (Class<OutputFormat<NullWritable, VertexWritable>>) hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, OutputFormat.class), hadoopConfiguration);
    }
}
 
Example 16
Source Project: tinkerpop   Source File: InputFormatRDD.java    License: Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    final org.apache.hadoop.conf.Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(configuration);
    return sparkContext.newAPIHadoopRDD(hadoopConfiguration,
            (Class<InputFormat<NullWritable, VertexWritable>>) hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class),
            NullWritable.class,
            VertexWritable.class)
            .mapToPair(tuple -> new Tuple2<>(tuple._2().get().id(), new VertexWritable(tuple._2().get())));
}
 
Example 17
Source Project: tinkerpop   Source File: PersistedInputRDD.java    License: Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    if (!configuration.containsKey(Constants.GREMLIN_HADOOP_INPUT_LOCATION))
        throw new IllegalArgumentException("There is no provided " + Constants.GREMLIN_HADOOP_INPUT_LOCATION + " to read the persisted RDD from");
    Spark.create(sparkContext.sc());
    final Optional<String> graphLocation = Constants.getSearchGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION), SparkContextStorage.open());
    return graphLocation.isPresent() ? JavaPairRDD.fromJavaRDD((JavaRDD) Spark.getRDD(graphLocation.get()).toJavaRDD()) : JavaPairRDD.fromJavaRDD(sparkContext.emptyRDD());
}
 
Example 18
Source Project: tinkerpop   Source File: SparkExecutor.java    License: Apache License 2.0
public static <K, V> JavaPairRDD<K, V> executeMap(
        final JavaPairRDD<Object, VertexWritable> graphRDD, final MapReduce<K, V, ?, ?, ?> mapReduce,
        final Configuration graphComputerConfiguration) {
    JavaPairRDD<K, V> mapRDD = graphRDD.mapPartitionsToPair(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
        return new MapIterator<>(MapReduce.<MapReduce<K, V, ?, ?, ?>>createMapReduce(HadoopGraph.open(graphComputerConfiguration), graphComputerConfiguration), partitionIterator);
    });
    if (mapReduce.getMapKeySort().isPresent())
        mapRDD = mapRDD.sortByKey(mapReduce.getMapKeySort().get(), true, 1);
    return mapRDD;
}
 
Example 19
Source Project: tinkerpop   Source File: ExampleOutputRDD.java    License: Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    int totalAge = 0;
    final Iterator<VertexWritable> iterator = graphRDD.values().collect().iterator();
    while (iterator.hasNext()) {
        final Vertex vertex = iterator.next().get();
        if (vertex.label().equals("person"))
            totalAge = totalAge + vertex.<Integer>value("age");
    }
    assertEquals(123, totalAge);
}
 
Example 20
Source Project: tinkerpop   Source File: ToyGraphInputRDD.java    License: Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    KryoShimServiceLoader.applyConfiguration(TinkerGraph.open().configuration());
    final List<VertexWritable> vertices;
    if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("modern"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createModern().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("classic"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createClassic().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("crew"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createTheCrew().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("sink"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createKitchenSink().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("grateful")) {
        try {
            final Graph graph = TinkerGraph.open();
            final GraphReader reader = GryoReader.build().mapper(graph.io(GryoIo.build()).mapper().create()).create();
            try (final InputStream stream = GryoResourceAccess.class.getResourceAsStream("grateful-dead-v3d0.kryo")) {
                reader.readGraph(stream, graph);
            }
            vertices = IteratorUtils.list(IteratorUtils.map(graph.vertices(), VertexWritable::new));
        } catch (final IOException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }
    } else
        throw new IllegalArgumentException("No legal toy graph was provided to load: " + configuration.getProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION));

    return sparkContext.parallelize(vertices).mapToPair(vertex -> new Tuple2<>(vertex.get().id(), vertex));
}
 
Example 21
Source Project: tinkerpop   Source File: ExampleInputRDD.java    License: Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    final List<Vertex> list = new ArrayList<>();
    list.add(StarGraph.open().addVertex(T.id, 1L, T.label, "person", "age", 29));
    list.add(StarGraph.open().addVertex(T.id, 2L, T.label, "person", "age", 27));
    list.add(StarGraph.open().addVertex(T.id, 4L, T.label, "person", "age", 32));
    list.add(StarGraph.open().addVertex(T.id, 6L, T.label, "person", "age", 35));
    return sparkContext.parallelize(list).mapToPair(vertex -> new Tuple2<>(vertex.id(), new VertexWritable(vertex)));
}
 
Example 22
public static JavaPairRDD<Object, VertexWritable> applyGraphFilter(final JavaPairRDD<Object, VertexWritable> graphRDD, final GraphFilter graphFilter) {
    return graphRDD.mapPartitionsToPair(partitionIterator -> {
        final GraphFilter gFilter = graphFilter.clone();
        return IteratorUtils.filter(partitionIterator, tuple -> (tuple._2().get().applyGraphFilter(gFilter)).isPresent());
    }, true);
}
 
Example 23
@Override
public RecordReader<NullWritable, VertexWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    return new RecordReaderHadoop(refCounter, inputFormat.createRecordReader(split, context));
}
 
Example 24
@Override
public VertexWritable getCurrentValue() {
    return vertex;
}
 
Example 25
Source Project: titan1withtp3.1   Source File: GiraphRecordReader.java    License: Apache License 2.0
@Override
public VertexWritable getCurrentValue() throws IOException, InterruptedException {
    return vertex;
}
 
Example 26
Source Project: titan1withtp3.1   Source File: GiraphInputFormat.java    License: Apache License 2.0
@Override
public RecordReader<NullWritable, VertexWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    return new GiraphRecordReader(refCounter, inputFormat.createRecordReader(split, context));
}
 
Example 27
Source Project: tinkerpop   Source File: ScriptOutputFormat.java    License: Apache License 2.0
@Override
public RecordWriter<NullWritable, VertexWritable> getRecordWriter(final TaskAttemptContext job) throws IOException, InterruptedException {
    return getRecordWriter(job, getDataOutputStream(job));
}
 
Example 28
Source Project: tinkerpop   Source File: ScriptOutputFormat.java    License: Apache License 2.0
public RecordWriter<NullWritable, VertexWritable> getRecordWriter(final TaskAttemptContext job, final DataOutputStream outputStream) throws IOException, InterruptedException {
    return new ScriptRecordWriter(outputStream, job);
}
 
Example 29
Source Project: tinkerpop   Source File: ScriptInputFormat.java    License: Apache License 2.0
@Override
public RecordReader<NullWritable, VertexWritable> createRecordReader(final InputSplit split, final TaskAttemptContext context) throws IOException, InterruptedException {
    RecordReader<NullWritable, VertexWritable> reader = new ScriptRecordReader();
    reader.initialize(split, context);
    return reader;
}
 
Example 30
Source Project: tinkerpop   Source File: ScriptRecordReader.java    License: Apache License 2.0
@Override
public VertexWritable getCurrentValue() {
    return this.vertexWritable;
}