Java Code Examples for org.elasticsearch.common.io.stream.BytesStreamOutput#bytes()

The following examples show how to use org.elasticsearch.common.io.stream.BytesStreamOutput#bytes(). The source file and originating project are noted above each example.
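
Before the project-specific examples, here is a minimal sketch of the pattern they all share: write values into a BytesStreamOutput, call bytes() to obtain a BytesReference, and read them back through a StreamInput. The stream classes and read/write methods are standard Elasticsearch APIs; the payload values and the class name RoundTripExample are illustrative only.

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;

public class RoundTripExample {
    public static void main(String[] args) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // write a few values into the in-memory, growable buffer
            out.writeString("hello");
            out.writeVInt(42);
            out.writeBoolean(true);

            // bytes() exposes everything written so far as a BytesReference
            BytesReference bytes = out.bytes();

            // read the same values back in the same order
            try (StreamInput in = bytes.streamInput()) {
                String s = in.readString();
                int i = in.readVInt();
                boolean b = in.readBoolean();
                System.out.println(s + " " + i + " " + b);
            }
        }
    }
}
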
Example 1
Source File: DistributedTranslog.java    From Elasticsearch with Apache License 2.0
/**
 * Serializes the given translog operation and appends it to the distributed log.
 *
 * @param operation the translog operation to serialize and write
 * @param txid      counter that supplies the transaction id for the log record
 * @return a tuple of the pending write result and the serialized operation bytes paired with the transaction id used
 * @throws IOException if the operation cannot be serialized or written
 */
public Tuple<Future<DLSN>, Tuple<BytesReference, Long>> writeOperation(Translog.Operation operation, AtomicLong txid) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    try (ReleasableLock lock = writeLock.acquire()) {
        Future<DLSN> writeResult = null;
        out.writeByte(operation.opType().id());
        operation.writeTo(out);
        BytesReference bytes = out.bytes();
        LogRecord logRecord = new LogRecord(txid.incrementAndGet(), bytes.toBytes());
        writeResult = logWriter.write(logRecord);
        sizeInBytes += (20 + logRecord.getPayload().length);
        ++numOperations;
        return new Tuple<Future<DLSN>, Tuple<BytesReference, Long>>(writeResult, new Tuple<BytesReference, Long>(bytes, txid.get()));
    } catch (TransactionIdOutOfOrderException e) {
        throw e;
    } finally {
        out.close();
    }
}
 
Example 2
Source File: Store.java    From crate with Apache License 2.0
/**
 * Marks this store as corrupted. This method writes a {@code corrupted_${uuid}} file containing the given exception
 * message. If a store contains a {@code corrupted_${uuid}} file {@link #isMarkedCorrupted()} will return <code>true</code>.
 */
public void markStoreCorrupted(IOException exception) throws IOException {
    ensureOpen();
    if (!isMarkedCorrupted()) {
        String uuid = CORRUPTED + UUIDs.randomBase64UUID();
        try (IndexOutput output = this.directory().createOutput(uuid, IOContext.DEFAULT)) {
            CodecUtil.writeHeader(output, CODEC, VERSION);
            BytesStreamOutput out = new BytesStreamOutput();
            out.writeException(exception);
            BytesReference bytes = out.bytes();
            output.writeVInt(bytes.length());
            BytesRef ref = bytes.toBytesRef();
            output.writeBytes(ref.bytes, ref.offset, ref.length);
            CodecUtil.writeFooter(output);
        } catch (IOException ex) {
            logger.warn("Can't mark store as corrupted", ex);
        }
        directory().sync(Collections.singleton(uuid));
    }
}
 
Example 3
Source File: Store.java    From Elasticsearch with Apache License 2.0
/**
 * Marks this store as corrupted. This method writes a <tt>corrupted_${uuid}</tt> file containing the given exception
 * message. If a store contains a <tt>corrupted_${uuid}</tt> file {@link #isMarkedCorrupted()} will return <code>true</code>.
 */
public void markStoreCorrupted(IOException exception) throws IOException {
    ensureOpen();
    if (!isMarkedCorrupted()) {
        String uuid = CORRUPTED + Strings.randomBase64UUID();
        try (IndexOutput output = this.directory().createOutput(uuid, IOContext.DEFAULT)) {
            CodecUtil.writeHeader(output, CODEC, VERSION);
            BytesStreamOutput out = new BytesStreamOutput();
            out.writeThrowable(exception);
            BytesReference bytes = out.bytes();
            output.writeVInt(bytes.length());
            output.writeBytes(bytes.array(), bytes.arrayOffset(), bytes.length());
            CodecUtil.writeFooter(output);
        } catch (IOException ex) {
            logger.warn("Can't mark store as corrupted", ex);
        }
        directory().sync(Collections.singleton(uuid));
    }
}
 
Example 4
Source File: NettyTransportChannel.java    From Elasticsearch with Apache License 2.0
@Override
public void sendResponse(Throwable error) throws IOException {
    BytesStreamOutput stream = new BytesStreamOutput();
    stream.skip(NettyHeader.HEADER_SIZE);
    RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error);
    stream.writeThrowable(tx);
    byte status = 0;
    status = TransportStatus.setResponse(status);
    status = TransportStatus.setError(status);

    BytesReference bytes = stream.bytes();
    ChannelBuffer buffer = bytes.toChannelBuffer();
    NettyHeader.writeHeader(buffer, requestId, status, version);
    channel.write(buffer);
    transportServiceAdapter.onResponseSent(requestId, action, error);
}
 
Example 5
Source File: PullFullClusterStateAction.java    From Elasticsearch with Apache License 2.0
@Override
public void messageReceived(PullFullClusterStateRequest request, final TransportChannel channel) throws Exception {
    ClusterStateWithDLSN clusterStateWithDLSN = clusterStateOpLog.getLatestClusterState();
    if (!clusterStateWithDLSN.state().getClusterName().equals(request.clusterName)) {
       throw new java.lang.Exception("master cluster name is [" + clusterStateWithDLSN.state().getClusterName() + "], request cluster name is [" + request.clusterName + "]");
    }
    if (!clusterStateWithDLSN.state().nodes().localNodeMaster()) {
        throw new java.lang.Exception("current node is no longer master node");
    }
    BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
        clusterStateWithDLSN.writeTo(stream);
    }
    BytesReference fullStateBytes = bStream.bytes();
    channel.sendResponse(new org.elasticsearch.transport.BytesTransportResponse(fullStateBytes));
}
 
Example 6
Source File: TermVectorsWriter.java    From Elasticsearch with Apache License 2.0
private BytesReference writeHeader(int numFieldsWritten, boolean getTermStatistics, boolean getFieldStatistics, boolean scores) throws IOException {
    // now, write the information about offset of the terms in the
    // termVectors field
    BytesStreamOutput header = new BytesStreamOutput();
    header.writeString(HEADER);
    header.writeInt(CURRENT_VERSION);
    header.writeBoolean(getTermStatistics);
    header.writeBoolean(getFieldStatistics);
    header.writeBoolean(scores);
    header.writeVInt(numFieldsWritten);
    for (int i = 0; i < fields.size(); i++) {
        header.writeString(fields.get(i));
        header.writeVLong(fieldOffset.get(i).longValue());
    }
    header.close();
    return header.bytes();
}
 
Example 7
Source File: CompressorFactory.java    From Elasticsearch with Apache License 2.0
private static BytesReference uncompress(BytesReference bytes, Compressor compressor) throws IOException {
    StreamInput compressed = compressor.streamInput(bytes.streamInput());
    BytesStreamOutput bStream = new BytesStreamOutput();
    Streams.copy(compressed, bStream);
    compressed.close();
    return bStream.bytes();
}
 
Example 8
Source File: Streams.java    From crate with Apache License 2.0
/**
 * Reads all bytes from the given {@link InputStream} and closes it afterwards.
 */
public static BytesReference readFully(InputStream in) throws IOException {
    try (InputStream inputStream = in) {
        BytesStreamOutput out = new BytesStreamOutput();
        copy(inputStream, out);
        return out.bytes();
    }
}
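
A hedged usage sketch for the helper above (the path, the method name readFirstString, and the readString call are illustrative assumptions, not taken from crate): readFully buffers the whole stream in memory, so the resulting BytesReference can be handed to any StreamInput-based reader.

// Sketch only: buffer a small file fully in memory, then deserialize from it.
// Assumes the stream was originally produced via StreamOutput#writeString.
public static String readFirstString(Path path) throws IOException {
    BytesReference data = Streams.readFully(Files.newInputStream(path));
    try (StreamInput in = data.streamInput()) {
        return in.readString();
    }
}
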
 
Example 9
Source File: CompressorFactory.java    From crate with Apache License 2.0
private static BytesReference uncompress(BytesReference bytes, Compressor compressor) throws IOException {
    StreamInput compressed = compressor.streamInput(bytes.streamInput());
    BytesStreamOutput bStream = new BytesStreamOutput();
    Streams.copy(compressed, bStream);
    compressed.close();
    return bStream.bytes();
}
 
Example 10
Source File: PublicationTransportHandler.java    From crate with Apache License 2.0
public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException {
    final BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(false);
        diff.writeTo(stream);
    }
    return bStream.bytes();
}
 
Example 11
Source File: PublicationTransportHandler.java    From crate with Apache License 2.0
public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException {
    final BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(true);
        clusterState.writeTo(stream);
    }
    return bStream.bytes();
}
 
Example 12
Source File: PublishClusterStateAction.java    From Elasticsearch with Apache License 2.0
public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException {
    BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(false);
        diff.writeTo(stream);
    }
    return bStream.bytes();
}
 
Example 13
Source File: PublishClusterStateAction.java    From Elasticsearch with Apache License 2.0
public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException {
    BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(true);
        clusterState.writeTo(stream);
    }
    return bStream.bytes();
}
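
For context, a hedged sketch of the receiving side implied by Examples 10-13 (the method name and the ClusterState.readFrom call are assumptions built from the same stream APIs, not code from the files above): the compressor is detected from the bytes, and the leading boolean distinguishes a full state (true) from a diff (false).

// Sketch only: reading back a full cluster state serialized as in the examples above.
public static ClusterState deserializeFullClusterState(BytesReference bytes,
                                                       DiscoveryNode localNode,
                                                       Version nodeVersion) throws IOException {
    Compressor compressor = CompressorFactory.compressor(bytes); // null if the bytes are not compressed
    StreamInput raw = bytes.streamInput();
    try (StreamInput in = compressor != null ? compressor.streamInput(raw) : raw) {
        in.setVersion(nodeVersion);
        boolean fullState = in.readBoolean(); // true = full state, false = diff
        assert fullState : "expected a full cluster state";
        return ClusterState.readFrom(in, localNode);
    }
}
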
 
Example 14
Source File: MustacheScriptEngineService.java    From Elasticsearch with Apache License 2.0
@Override
public Object run() {
    BytesStreamOutput result = new BytesStreamOutput();
    try (UTF8StreamWriter writer = utf8StreamWriter().setOutput(result)) {
        ((Mustache) template.compiled()).execute(writer, vars);
    } catch (Exception e) {
        logger.error("Error running " + template, e);
        throw new ScriptException("Error running " + template, e);
    }
    return result.bytes();
}
 
Example 15
Source File: FulltextAnalyzerResolver.java    From Elasticsearch with Apache License 2.0
public static BytesReference encodeSettings(Settings settings) throws IOException {
    BytesStreamOutput bso = new BytesStreamOutput();
    XContentBuilder builder = XContentFactory.jsonBuilder(bso);
    builder.startObject();
    for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
        builder.field(entry.getKey(), entry.getValue());
    }
    builder.endObject();
    builder.flush();
    return bso.bytes();
}
 
Example 16
Source File: PublishClusterStateVersionAction.java    From Elasticsearch with Apache License 2.0
public void publishClusterStateVersionToNode(final long newVersion,
        final DiscoveryNode node, 
        final TimeValue publishTimeout,
        final BlockingClusterStatePublishResponseHandler publishResponseHandler) {
    try {
        BytesStreamOutput bStream = new BytesStreamOutput();
        bStream.writeLong(newVersion);
        clusterService.localNode().writeTo(bStream);
        bStream.writeString(node.getId());
        BytesReference nodeBytes = bStream.bytes();
        TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).withCompress(false).withTimeout(publishTimeout).build();
        transportService.sendRequest(node, PUBLISH_VERSION_ACTION_NAME, new BytesTransportRequest(nodeBytes, node.version()), 
                options, 
                new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
            
            @Override
            public void handleResponse(TransportResponse.Empty response) {
                publishResponseHandler.onResponse(node);
            }
            
            @Override
            public void handleException(TransportException exp) {
                logger.debug("failed to send cluster state to {}, version {}", exp, node, newVersion);
                publishResponseHandler.onFailure(node, exp);
            }
        });
    } catch (Throwable t) {
        logger.warn("error sending cluster state to {}", t, node);
        publishResponseHandler.onFailure(node, t);
    }
}
 
Example 17
Source File: TermVectorsResponse.java    From Elasticsearch with Apache License 2.0
public void setTermVectorsField(BytesStreamOutput output) {
    termVectors = output.bytes();
}
 
Example 18
Source File: ExportCollector.java    From elasticsearch-inout-plugin with Apache License 2.0
@Override
public void collect(int doc) throws IOException {
    fieldsVisitor.reset();
    currentReader.document(doc, fieldsVisitor);

    Map<String, SearchHitField> searchFields = null;
    if (fieldsVisitor.fields() != null) {
        searchFields = new HashMap<String, SearchHitField>(fieldsVisitor.fields().size());
        for (Map.Entry<String, List<Object>> entry : fieldsVisitor.fields().entrySet()) {
            searchFields.put(entry.getKey(), new InternalSearchHitField(entry.getKey(), entry.getValue()));
        }
    }

    DocumentMapper documentMapper = context.mapperService()
            .documentMapper(fieldsVisitor.uid().type());
    Text typeText;
    if (documentMapper == null) {
        typeText = new StringAndBytesText(fieldsVisitor.uid().type());
    } else {
        typeText = documentMapper.typeText();
    }

    InternalSearchHit searchHit = new InternalSearchHit(doc,
            fieldsVisitor.uid().id(), typeText,
            sourceRequested ? fieldsVisitor.source() : null,
            searchFields);

    for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
        FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
        if (fetchSubPhase.hitExecutionNeeded(context)) {
            hitContext.reset(searchHit, arc, doc, context.searcher().getIndexReader(), doc, fieldsVisitor);
            fetchSubPhase.hitExecute(context, hitContext);
        }
    }

    searchHit.shardTarget(context.shardTarget());
    exportFields.hit(searchHit);
    BytesStreamOutput os = new BytesStreamOutput();
    XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(XContentType.JSON), os);
    exportFields.toXContent(builder, ToXContent.EMPTY_PARAMS);
    builder.flush();
    BytesReference bytes = os.bytes();
    out.write(bytes.array(), bytes.arrayOffset(), bytes.length());
    out.write('\n');
    out.flush();
    numExported++;
}