Java Code Examples for org.elasticsearch.common.UUIDs

The following examples show how to use org.elasticsearch.common.UUIDs. They are extracted from open source projects; the source project, source file, and license are noted above each example where available.
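Before the examples, here is a minimal, self-contained sketch of the factory methods that recur below. It is not taken from any of the listed projects; the class name UuidsDemo is made up, and the characterization of base64UUID() as time-based and randomBase64UUID() as securely random reflects common Elasticsearch usage rather than anything stated in the examples.

import java.util.Random;

import org.elasticsearch.common.UUIDs;

public class UuidsDemo {
    public static void main(String[] args) {
        // Time-based, URL-safe base64 id; often used for auto-generated document ids.
        String timeBased = UUIDs.base64UUID();

        // Securely random base64 id; often used for index, cluster, and history UUIDs.
        String randomId = UUIDs.randomBase64UUID();

        // Overload taking an explicit Random, handy for reproducible tests (see Examples 8 and 10).
        String seededId = UUIDs.randomBase64UUID(new Random(42L));

        System.out.println(timeBased + " " + randomId + " " + seededId);
    }
}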
Example 1
Source Project: anomaly-detection   Source File: TestHelpers.java    License: Apache License 2.0
public static ClusterState createIndexBlockedState(String indexName, Settings hackedSettings, String alias) {
    ClusterState blockedClusterState = null;
    IndexMetadata.Builder builder = IndexMetadata.builder(indexName);
    if (alias != null) {
        builder.putAlias(AliasMetadata.builder(alias));
    }
    IndexMetadata indexMetaData = builder
        .settings(
            Settings
                .builder()
                .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(hackedSettings)
        )
        .build();
    Metadata metaData = Metadata.builder().put(indexMetaData, false).build();
    blockedClusterState = ClusterState
        .builder(new ClusterName("test cluster"))
        .metadata(metaData)
        .blocks(ClusterBlocks.builder().addBlocks(indexMetaData))
        .build();
    return blockedClusterState;
}
 
Example 2
public String bulkUpsert(String index, String type, List<Object> jsons) {
    try {
        if (client == null) {
            init();
        }
        BulkRequestBuilder bulkRequest = client.prepareBulk();
        for (Object json : jsons) {
            JSONObject obj = JSON.parseObject(JSON.toJSONString(json));
            // Fall back to a generated time-based id when the document carries no "id" field.
            String id = UUIDs.base64UUID();
            if (obj.containsKey("id")) {
                // Existing id: update the document in place.
                id = obj.getString("id");
                obj.remove("id");
                bulkRequest.add(client.prepareUpdate(index, type, id).setDoc(obj.toJSONString(), XContentType.JSON));
            } else {
                // No id: index a new document under the generated id.
                bulkRequest.add(client.prepareIndex(index, type, id).setSource(obj.toJSONString(), XContentType.JSON));
            }
        }
        BulkResponse result = bulkRequest.execute().get();
        return result.toString();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return null;
}
 
Example 3
Source Project: crate   Source File: RecoveryTarget.java    License: Apache License 2.0
/**
 * Creates a new recovery target object that represents a recovery to the provided shard.
 *
 * @param indexShard                        local shard where we want to recover to
 * @param sourceNode                        source node of the recovery where we recover from
 * @param listener                          called when recovery is completed/failed
 * @param ensureClusterStateVersionCallback callback to ensure that the current node is at least on a cluster state with the provided
 *                                          version; necessary for primary relocation so that new primary knows about all other ongoing
 *                                          replica recoveries when replicating documents (see {@link RecoverySourceHandler})
 */
public RecoveryTarget(final IndexShard indexShard,
               final DiscoveryNode sourceNode,
               final PeerRecoveryTargetService.RecoveryListener listener,
               final LongConsumer ensureClusterStateVersionCallback) {
    super("recovery_status");
    this.cancellableThreads = new CancellableThreads();
    this.recoveryId = ID_GENERATOR.incrementAndGet();
    this.listener = listener;
    this.logger = Loggers.getLogger(getClass(), indexShard.shardId());
    this.indexShard = indexShard;
    this.sourceNode = sourceNode;
    this.shardId = indexShard.shardId();
    this.tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + ".";
    this.store = indexShard.store();
    this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback;
    // make sure the store is not released until we are done.
    store.incRef();
    indexShard.recoveryStats().incCurrentAsTarget();
}
 
Example 4
Source Project: crate   Source File: BlobStoreRepository.java    License: Apache License 2.0
@Override
public String startVerification() {
    try {
        if (isReadOnly()) {
            // It's readonly - so there is not much we can do here to verify it apart from reading the blob store metadata
            latestIndexBlobId();
            return "read-only";
        } else {
            String seed = UUIDs.randomBase64UUID();
            byte[] testBytes = seed.getBytes(StandardCharsets.UTF_8);
            BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
            String blobName = "master.dat";
            BytesArray bytes = new BytesArray(testBytes);
            try (InputStream stream = bytes.streamInput()) {
                testContainer.writeBlobAtomic(blobName, stream, bytes.length(), true);
            }
            return seed;
        }
    } catch (IOException exp) {
        throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp);
    }
}
 
Example 5
Source Project: crate   Source File: Store.java    License: Apache License 2.0
/**
 * Marks this store as corrupted. This method writes a {@code corrupted_${uuid}} file containing the given exception
 * message. If a store contains a {@code corrupted_${uuid}} file {@link #isMarkedCorrupted()} will return <code>true</code>.
 */
public void markStoreCorrupted(IOException exception) throws IOException {
    ensureOpen();
    if (!isMarkedCorrupted()) {
        String uuid = CORRUPTED + UUIDs.randomBase64UUID();
        try (IndexOutput output = this.directory().createOutput(uuid, IOContext.DEFAULT)) {
            CodecUtil.writeHeader(output, CODEC, VERSION);
            BytesStreamOutput out = new BytesStreamOutput();
            out.writeException(exception);
            BytesReference bytes = out.bytes();
            output.writeVInt(bytes.length());
            BytesRef ref = bytes.toBytesRef();
            output.writeBytes(ref.bytes, ref.offset, ref.length);
            CodecUtil.writeFooter(output);
        } catch (IOException ex) {
            logger.warn("Can't mark store as corrupted", ex);
        }
        directory().sync(Collections.singleton(uuid));
    }
}
 
Example 6
Source Project: crate   Source File: TransportCreatePartitionsAction.java    License: Apache License 2.0
private Settings createIndexSettings(ClusterState currentState, List<IndexTemplateMetaData> templates) {
    Settings.Builder indexSettingsBuilder = Settings.builder();
    // apply templates, here, in reverse order, since first ones are better matching
    for (int i = templates.size() - 1; i >= 0; i--) {
        indexSettingsBuilder.put(templates.get(i).settings());
    }
    if (indexSettingsBuilder.get(IndexMetaData.SETTING_VERSION_CREATED) == null) {
        DiscoveryNodes nodes = currentState.nodes();
        final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
        indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_CREATED, createdVersion);
    }
    if (indexSettingsBuilder.get(IndexMetaData.SETTING_CREATION_DATE) == null) {
        indexSettingsBuilder.put(IndexMetaData.SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
    }
    indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID());

    return indexSettingsBuilder.build();
}
 
Example 7
Source Project: crate   Source File: RepositoryDataTests.java    License: Apache License 2.0
public void testInitIndices() {
    final int numSnapshots = randomIntBetween(1, 30);
    final Map<String, SnapshotId> snapshotIds = new HashMap<>(numSnapshots);
    final Map<String, SnapshotState> snapshotStates = new HashMap<>(numSnapshots);
    for (int i = 0; i < numSnapshots; i++) {
        final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
        snapshotIds.put(snapshotId.getUUID(), snapshotId);
        snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values()));
    }
    RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds,
                                                       Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY);
    // test that initializing indices works
    Map<IndexId, Set<SnapshotId>> indices = randomIndices(snapshotIds);
    RepositoryData newRepoData =
        new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices, ShardGenerations.EMPTY);
    List<SnapshotId> expected = new ArrayList<>(repositoryData.getSnapshotIds());
    Collections.sort(expected);
    List<SnapshotId> actual = new ArrayList<>(newRepoData.getSnapshotIds());
    Collections.sort(actual);
    assertEquals(expected, actual);
    for (IndexId indexId : indices.keySet()) {
        assertEquals(indices.get(indexId), newRepoData.getSnapshots(indexId));
    }
}
 
Example 8
Source Project: crate   Source File: RepositoryDataTests.java    License: Apache License 2.0
public static RepositoryData generateRandomRepoData() {
    final int numIndices = randomIntBetween(1, 30);
    final List<IndexId> indices = new ArrayList<>(numIndices);
    for (int i = 0; i < numIndices; i++) {
        indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
    }
    final int numSnapshots = randomIntBetween(1, 30);
    RepositoryData repositoryData = RepositoryData.EMPTY;
    for (int i = 0; i < numSnapshots; i++) {
        final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
        final List<IndexId> someIndices = indices.subList(0, randomIntBetween(1, numIndices));
        final ShardGenerations.Builder builder = ShardGenerations.builder();
        for (IndexId someIndex : someIndices) {
            final int shardCount = randomIntBetween(1, 10);
            for (int j = 0; j < shardCount; ++j) {
                builder.put(someIndex, 0, UUIDs.randomBase64UUID(random()));
            }
        }
        repositoryData = repositoryData.addSnapshot(snapshotId, randomFrom(SnapshotState.values()), builder.build());
    }
    return repositoryData;
}
 
Example 9
Source Project: crate   Source File: RepositoryDataTests.java    License: Apache License 2.0
private static Map<IndexId, Set<SnapshotId>> randomIndices(final Map<String, SnapshotId> snapshotIdsMap) {
    final List<SnapshotId> snapshotIds = new ArrayList<>(snapshotIdsMap.values());
    final int totalSnapshots = snapshotIds.size();
    final int numIndices = randomIntBetween(1, 30);
    final Map<IndexId, Set<SnapshotId>> indices = new HashMap<>(numIndices);
    for (int i = 0; i < numIndices; i++) {
        final IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
        final Set<SnapshotId> indexSnapshots = new LinkedHashSet<>();
        final int numIndicesForSnapshot = randomIntBetween(1, numIndices);
        for (int j = 0; j < numIndicesForSnapshot; j++) {
            indexSnapshots.add(snapshotIds.get(randomIntBetween(0, totalSnapshots - 1)));
        }
        indices.put(indexId, indexSnapshots);
    }
    return indices;
}
 
Example 10
Source Project: crate   Source File: CoordinatorTests.java    License: Apache License 2.0
void applyInitialConfiguration() {
    onNode(() -> {
        final Set<String> nodeIdsWithPlaceholders = new HashSet<>(initialConfiguration.getNodeIds());
        Stream.generate(() -> BOOTSTRAP_PLACEHOLDER_PREFIX + UUIDs.randomBase64UUID(random()))
            .limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2)
            .forEach(nodeIdsWithPlaceholders::add);
        final Set<String> nodeIds = new HashSet<>(
            randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders));
        // initial configuration should not have a place holder for local node
        if (initialConfiguration.getNodeIds().contains(localNode.getId()) && nodeIds.contains(localNode.getId()) == false) {
            nodeIds.remove(nodeIds.iterator().next());
            nodeIds.add(localNode.getId());
        }
        final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(nodeIds);
        try {
            coordinator.setInitialConfiguration(configurationWithPlaceholders);
            logger.info("successfully set initial configuration to {}", configurationWithPlaceholders);
        } catch (CoordinationStateRejectedException e) {
            logger.info(new ParameterizedMessage("failed to set initial configuration to {}",
                configurationWithPlaceholders), e);
        }
    }).run();
}
 
Example 11
Source Project: crate   Source File: MockTransportService.java    License: Apache License 2.0
public static MockTransportService createNewService(Settings settings,
                                                    Transport transport,
                                                    Version version,
                                                    ThreadPool threadPool,
                                                    @Nullable ClusterSettings clusterSettings) {
    return new MockTransportService(
        settings,
        transport,
        threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> new DiscoveryNode(
            Node.NODE_NAME_SETTING.get(settings),
            UUIDs.randomBase64UUID(),
            boundAddress.publishAddress(),
            Node.NODE_ATTRIBUTES.getAsMap(settings),
            DiscoveryNode.getRolesFromSettings(settings),
            version
        ),
        clusterSettings
    );
}
 
Example 12
Source Project: crate   Source File: MockTransportService.java    License: Apache License 2.0
/**
 * Build the service.
 *
 * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings
 *                        updates for {@link TransportSettings#TRACE_LOG_EXCLUDE_SETTING} and {@link TransportSettings#TRACE_LOG_INCLUDE_SETTING}.
 */
public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor,
                            @Nullable ClusterSettings clusterSettings) {
    this(
        settings,
        transport,
        threadPool,
        interceptor,
        (boundAddress) -> DiscoveryNode.createLocal(
            settings,
            boundAddress.publishAddress(),
            settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID())
        ),
        clusterSettings
    );
}
 
Example 13
Source Project: crate   Source File: SerializationTests.java    License: Apache License 2.0
@Test
public void testPutChunkReplicaRequestSerialization() throws Exception {
    UUID transferId = UUID.randomUUID();

    PutChunkReplicaRequest requestOut = new PutChunkReplicaRequest(
        new ShardId("foo", UUIDs.randomBase64UUID(), 1),
        "nodeId",
        transferId,
        0,
        new BytesArray(new byte[]{0x65, 0x66}),
        false
    );
    requestOut.index("foo");
    BytesStreamOutput outputStream = new BytesStreamOutput();
    requestOut.writeTo(outputStream);
    StreamInput inputStream = outputStream.bytes().streamInput();

    PutChunkReplicaRequest requestIn = new PutChunkReplicaRequest(inputStream);

    assertEquals(requestOut.currentPos, requestIn.currentPos);
    assertEquals(requestOut.isLast, requestIn.isLast);
    assertEquals(requestOut.content, requestIn.content);
    assertEquals(requestOut.transferId, requestIn.transferId);
    assertEquals(requestOut.index(), requestIn.index());
}
 
Example 14
private LuceneOrderedDocCollector createOrderedCollector(IndexSearcher searcher, int shardId) {
    CollectorContext collectorContext = new CollectorContext();
    List<LuceneCollectorExpression<?>> expressions = Collections.singletonList(
        new OrderByCollectorExpression(reference, orderBy, o -> o));
    return new LuceneOrderedDocCollector(
        new ShardId("dummy", UUIDs.randomBase64UUID(), shardId),
        searcher,
        new MatchAllDocsQuery(),
        null,
        false,
        5, // batchSize < 10 to have at least one searchMore call.
        RamAccounting.NO_ACCOUNTING,
        collectorContext,
        f -> null,
        new Sort(new SortedNumericSortField(columnName, SortField.Type.LONG, reverseFlags[0])),
        expressions,
        expressions
    );
}
 
Example 15
Source Project: crate   Source File: LuceneOrderedDocCollectorTest.java    License: Apache License 2.0
private LuceneOrderedDocCollector collector(IndexSearcher searcher,
                                            List<LuceneCollectorExpression<?>> columnReferences,
                                            Query query,
                                            @Nullable Float minScore, boolean doDocScores) {
    return new LuceneOrderedDocCollector(
        new ShardId("dummy", UUIDs.base64UUID(), 0),
        searcher,
        query,
        minScore,
        doDocScores,
        2,
        RamAccounting.NO_ACCOUNTING,
        new CollectorContext(),
        f -> null,
        new Sort(SortField.FIELD_SCORE),
        columnReferences,
        columnReferences
    );
}
 
Example 16
Source Project: crate   Source File: ShardDeleteRequestTest.java    License: Apache License 2.0
@Test
public void testStreaming() throws Exception {
    ShardId shardId = new ShardId("test", UUIDs.randomBase64UUID(), 1);
    UUID jobId = UUID.randomUUID();
    ShardDeleteRequest request = new ShardDeleteRequest(shardId, jobId);

    request.add(123, new ShardDeleteRequest.Item("99"));
    request.add(5, new ShardDeleteRequest.Item("42"));

    BytesStreamOutput out = new BytesStreamOutput();
    request.writeTo(out);

    StreamInput in = out.bytes().streamInput();
    ShardDeleteRequest request2 = new ShardDeleteRequest(in);

    assertThat(request, equalTo(request2));
}
 
Example 17
Source Project: crate   Source File: TransportShardDeleteActionTest.java    License: Apache License 2.0
@Before
public void prepare() throws Exception {
    indexUUID = UUIDs.randomBase64UUID();
    IndicesService indicesService = mock(IndicesService.class);
    IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(new Index(TABLE_IDENT.indexNameOrAlias(), indexUUID))).thenReturn(indexService);
    indexShard = mock(IndexShard.class);
    when(indexService.getShard(0)).thenReturn(indexShard);


    transportShardDeleteAction = new TransportShardDeleteAction(
        MockTransportService.createNewService(
            Settings.EMPTY, Version.CURRENT, THREAD_POOL, clusterService.getClusterSettings()),
        mock(IndexNameExpressionResolver.class),
        mock(ClusterService.class),
        indicesService,
        mock(ThreadPool.class),
        mock(ShardStateAction.class),
        mock(SchemaUpdateClient.class)
    );
}
 
Example 18
Source Project: crate   Source File: DecommissionAllocationDeciderTest.java    License: Apache License 2.0
@Before
public void init() throws Exception {
    routingAllocation = mock(RoutingAllocation.class);
    when(routingAllocation.decision(any(Decision.class), anyString(), anyString())).then(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            return invocation.getArguments()[0];
        }
    });

    primaryShard = ShardRouting.newUnassigned(
        new ShardId("t", UUIDs.randomBase64UUID(), 0),
        true,
        RecoverySource.PeerRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "dummy"));
    replicaShard = ShardRouting.newUnassigned(
        new ShardId("t", UUIDs.randomBase64UUID(), 0),
        false,
        RecoverySource.PeerRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "dummy")
    );
    n1 = new RoutingNode("n1", mock(DiscoveryNode.class));
    n2 = new RoutingNode("n2", mock(DiscoveryNode.class));
}
 
Example 19
Source Project: crate   Source File: SysSnapshotsTest.java    License: Apache License 2.0
@Test
public void testUnavailableSnapshotsAreFilteredOut() {
    HashMap<String, SnapshotId> snapshots = new HashMap<>();
    SnapshotId s1 = new SnapshotId("s1", UUIDs.randomBase64UUID());
    SnapshotId s2 = new SnapshotId("s2", UUIDs.randomBase64UUID());
    snapshots.put(s1.getUUID(), s1);
    snapshots.put(s2.getUUID(), s2);
    RepositoryData repositoryData = new RepositoryData(
        1, snapshots, Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY);

    Repository r1 = mock(Repository.class);
    when(r1.getRepositoryData()).thenReturn(repositoryData);
    when(r1.getMetadata()).thenReturn(new RepositoryMetaData("repo1", "fs", Settings.EMPTY));
    when(r1.getSnapshotInfo(eq(s1))).thenThrow(new SnapshotException("repo1", "s1", "Everything is wrong"));
    when(r1.getSnapshotInfo(eq(s2))).thenReturn(new SnapshotInfo(s2, Collections.emptyList(), SnapshotState.SUCCESS));

    SysSnapshots sysSnapshots = new SysSnapshots(() -> Collections.singletonList(r1));
    List<SysSnapshot> currentSnapshots = ImmutableList.copyOf(sysSnapshots.currentSnapshots());
    assertThat(
        currentSnapshots.stream().map(SysSnapshot::name).collect(Collectors.toList()),
        containsInAnyOrder("s1", "s2")
    );
}
 
Example 20
Source Project: crate   Source File: SQLExecutor.java    License: Apache License 2.0
private static IndexMetaData.Builder getIndexMetaData(String indexName,
                                                      Settings settings,
                                                      @Nullable Map<String, Object> mapping,
                                                      Version smallestNodeVersion) throws IOException {
    Settings.Builder builder = Settings.builder()
        .put(settings)
        .put(SETTING_VERSION_CREATED, smallestNodeVersion)
        .put(SETTING_CREATION_DATE, Instant.now().toEpochMilli())
        .put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());

    Settings indexSettings = builder.build();
    IndexMetaData.Builder metaBuilder = IndexMetaData.builder(indexName)
        .settings(indexSettings);
    if (mapping != null) {
        metaBuilder.putMapping(new MappingMetaData(
            Constants.DEFAULT_MAPPING_TYPE,
            mapping));
    }

    return metaBuilder;
}
 
Example 21
Source Project: crate   Source File: QueryTester.java    License: Apache License 2.0
void indexValue(String column, Object value) throws IOException {
    DocumentMapper mapper = indexEnv.mapperService().documentMapperSafe();
    InsertSourceGen sourceGen = InsertSourceGen.of(
        CoordinatorTxnCtx.systemTransactionContext(),
        sqlExecutor.functions(),
        table,
        table.concreteIndices()[0],
        GeneratedColumns.Validation.NONE,
        Collections.singletonList(table.getReference(ColumnIdent.fromPath(column)))
    );
    BytesReference source = sourceGen.generateSourceAndCheckConstraintsAsBytesReference(new Object[]{value});
    SourceToParse sourceToParse = new SourceToParse(
        table.concreteIndices()[0],
        UUIDs.randomBase64UUID(),
        source,
        XContentType.JSON
    );
    ParsedDocument parsedDocument = mapper.parse(sourceToParse);
    indexEnv.writer().addDocuments(parsedDocument.docs());
}
 
Example 22
Source Project: anomaly-detection   Source File: Feature.java    License: Apache License 2.0
/**
 * Parse raw json content into feature instance.
 *
 * @param parser json based content parser
 * @return feature instance
 * @throws IOException IOException if content can't be parsed correctly
 */
public static Feature parse(XContentParser parser) throws IOException {
    String id = UUIDs.base64UUID();
    String name = null;
    Boolean enabled = null;
    AggregationBuilder aggregation = null;

    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        String fieldName = parser.currentName();

        parser.nextToken();
        switch (fieldName) {
            case FEATURE_ID_FIELD:
                id = parser.text();
                break;
            case FEATURE_NAME_FIELD:
                name = parser.text();
                break;
            case FEATURE_ENABLED_FIELD:
                enabled = parser.booleanValue();
                break;
            case AGGREGATION_QUERY:
                ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
                aggregation = ParseUtils.toAggregationBuilder(parser);
                break;
            default:
                break;
        }
    }
    return new Feature(id, name, enabled, aggregation);
}
 
Example 23
public String bulkUpsert(String index, String type, List<Object> jsons) {
    try {
        if (xclient == null) {
            init();
        }
        BulkRequest request = new BulkRequest();
        for (Object json : jsons) {
            JSONObject obj = JSON.parseObject(JSON.toJSONString(json));
            // Use the document's own "id" when present, otherwise a generated time-based id.
            String id = UUIDs.base64UUID();
            if (obj.containsKey("id")) {
                id = obj.getString("id");
                obj.remove("id");
            }
            // Upsert: apply the document as a partial update if the id already exists,
            // or insert it as the full document if it does not.
            request.add(new UpdateRequest(index, type, id)
                .doc(obj.toJSONString(), XContentType.JSON)
                .upsert(obj.toJSONString(), XContentType.JSON));
        }
        BulkResponse result = xclient.bulk(request);
        return result.toString();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return null;
}
 
Example 24
Source Project: Stargraph   Source File: ElasticIndexer.java    License: MIT License
@Override
protected void doIndex(Serializable data, KBId kbId) throws InterruptedException {
    if (bulkProcessor == null) {
        throw new StarGraphException("Back-end is unreachable now.");
    }

    try {
        final String id = UUIDs.base64UUID();
        IndexRequest request = esClient.createIndexRequest(id, true);
        this.indexRequests.put(id, request);
        bulkProcessor.add(request.source(mapper.writeValueAsBytes(data)));
    } catch (JsonProcessingException e) {
        throw new IndexingException(e);
    }
}
 
Example 25
Source Project: crate   Source File: RepositoryData.java    License: Apache License 2.0
/**
 * Resolve the given index names to index ids, creating new index ids for
 * new indices in the repository.
 */
public List<IndexId> resolveNewIndices(final List<String> indicesToResolve) {
    List<IndexId> snapshotIndices = new ArrayList<>();
    for (String index : indicesToResolve) {
        final IndexId indexId;
        if (indices.containsKey(index)) {
            indexId = indices.get(index);
        } else {
            indexId = new IndexId(index, UUIDs.randomBase64UUID());
        }
        snapshotIndices.add(indexId);
    }
    return snapshotIndices;
}
 
Example 26
Source Project: crate   Source File: Translog.java    License: Apache License 2.0
static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId,
                                  ChannelFactory channelFactory, long primaryTerm) throws IOException {
    IOUtils.rm(location);
    Files.createDirectories(location);
    final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
    final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
    Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
    IOUtils.fsync(checkpointFile, false);
    final String translogUUID = UUIDs.randomBase64UUID();
    TranslogWriter writer = TranslogWriter.create(
        shardId,
        translogUUID,
        1,
        location.resolve(getFilename(1)),
        channelFactory,
        new ByteSizeValue(10),
        1,
        initialGlobalCheckpoint,
        () -> {
            throw new UnsupportedOperationException();
        },
        () -> {
            throw new UnsupportedOperationException();
        },
        primaryTerm,
        new TragicExceptionHolder(),
        seqNo -> {
            throw new UnsupportedOperationException();
        }
    );
    writer.close();
    return translogUUID;
}
 
Example 27
Source Project: crate   Source File: Store.java    License: Apache License 2.0
/**
 * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted.
 */
public void createEmpty() throws IOException {
    metadataLock.writeLock().lock();
    try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) {
        final Map<String, String> map = new HashMap<>();
        map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
        map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
        map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
        map.put(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1");
        updateCommitData(writer, map);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
 
Example 28
Source Project: crate   Source File: Store.java    License: Apache License 2.0
/**
 * Marks an existing lucene index with a new history uuid and sets the given maxSeqNo as the local checkpoint
 * as well as the maximum sequence number.
 * This is used to make sure no existing shard will recover from this index using ops based recovery.
 * @see SequenceNumbers#LOCAL_CHECKPOINT_KEY
 * @see SequenceNumbers#MAX_SEQ_NO
 */
public void bootstrapNewHistory(long maxSeqNo) throws IOException {
    metadataLock.writeLock().lock();
    try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
        final Map<String, String> userData = getUserData(writer);
        final Map<String, String> map = new HashMap<>();
        map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
        map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));
        map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo));
        logger.debug("bootstrap a new history_uuid [{}], user_data [{}]", map, userData);
        updateCommitData(writer, map);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
 
Example 29
Source Project: crate   Source File: RecoverySourceHandlerTests.java    License: Apache License 2.0
public StartRecoveryRequest getStartRecoveryRequest() throws IOException {
    Store.MetadataSnapshot metadataSnapshot = randomBoolean() ? Store.MetadataSnapshot.EMPTY :
        new Store.MetadataSnapshot(Collections.emptyMap(),
                                   Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()), randomIntBetween(0, 100));
    return new StartRecoveryRequest(
        shardId,
        null,
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        metadataSnapshot,
        randomBoolean(),
        randomNonNegativeLong(),
        randomBoolean() || metadataSnapshot.getHistoryUUID() == null ?
            SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong());
}
 
Example 30
Source Project: crate   Source File: RecoverySourceHandlerTests.java    License: Apache License 2.0
@Test
public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception {
    IndexShard shard = mock(IndexShard.class);
    when(shard.state()).thenReturn(IndexShardState.STARTED);
    RecoverySourceHandler handler = new RecoverySourceHandler(
        shard, new TestRecoveryTargetHandler(), getStartRecoveryRequest(), between(1, 16), between(1, 4));

    String syncId = UUIDs.randomBase64UUID();
    int numDocs = between(0, 1000);
    long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
    long maxSeqNo = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
    assertTrue(handler.canSkipPhase1(
        newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs),
        newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs)));

    Class<? extends Throwable> expectedError = Assertions.ENABLED ? AssertionError.class : IllegalStateException.class;
    Throwable error = expectThrows(expectedError, () -> {
        long localCheckpointOnTarget = randomValueOtherThan(
            localCheckpoint,
            () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE));
        long maxSeqNoOnTarget = randomValueOtherThan(
            maxSeqNo,
            () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE));
        handler.canSkipPhase1(
            newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs),
            newMetadataSnapshot(syncId, Long.toString(localCheckpointOnTarget), Long.toString(maxSeqNoOnTarget), numDocs));
    });
    assertThat(error.getMessage(), containsString("try to recover [index][1] with sync id but seq_no stats are mismatched:"));
}