Java Code Examples for org.apache.ignite.Ignite#dataStreamer()

The following examples show how to use org.apache.ignite.Ignite#dataStreamer(). They are taken from the Apache Ignite project; the source file for each example is noted above its code.
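Before the examples, here is a minimal, self-contained sketch of the typical Ignite#dataStreamer() usage pattern: obtain the streamer for an existing cache in a try-with-resources block, optionally tune it, add data, and flush. The cache name, entry count, and tuning values below are illustrative assumptions, not taken from any example on this page.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class DataStreamerBasicUsage {
    public static void main(String[] args) {
        // Start (or connect to) an Ignite node with the default configuration.
        try (Ignite ignite = Ignition.start()) {
            // The cache must exist before a streamer can be obtained for it.
            ignite.getOrCreateCache("myCache");

            // The streamer is AutoCloseable; close() flushes any remaining buffered entries.
            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
                // Optional tuning: allow updates of existing keys and raise the per-node buffer size.
                streamer.allowOverwrite(true);
                streamer.perNodeBufferSize(1024);

                for (int i = 0; i < 10_000; i++)
                    streamer.addData(i, "value-" + i);

                // Push any buffered entries to the cache before leaving the block.
                streamer.flush();
            }
        }
    }
}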
Example 1
Source File: IgnitePdsCacheDestroyDuringCheckpointTest.java    From ignite with Apache License 2.0
/** */
private void populateCache(Ignite client) {
    for (int i = 0; i < NUM_CACHES; i++) {
        CacheConfiguration cfg = new CacheConfiguration();
        cfg.setName(NAME_PREFIX + i).setAtomicityMode(CacheAtomicityMode.ATOMIC)
                .setBackups(1).setStatisticsEnabled(true).setManagementEnabled(true);
        client.getOrCreateCache(cfg);

        IgniteDataStreamer<Object, Object> streamer = client.dataStreamer(NAME_PREFIX + i);

        for (int j = 0; j < NUM_ENTRIES_PER_CACHE; j++) {
            String bo = i + "|" + j + "|WHATEVER";
            streamer.addData(j, bo);
        }

        streamer.close();
        log.info("Streamer closed");
    }
}
 
Example 2
Source File: DataStreamerUpdateAfterLoadTest.java    From ignite with Apache License 2.0
/**
 * @param cacheName Cache name.
 * @param key Key.
 * @param allowOverwrite Streamer flag.
 * @return Next key.
 * @throws Exception If failed.
 */
private int testLoadAndUpdate(String cacheName, int key, boolean allowOverwrite) throws Exception {
    for (int loadNode = 0; loadNode < NODES; loadNode++) {
        Ignite loadIgnite = ignite(loadNode);

        for (int updateNode = 0; updateNode < NODES; updateNode++) {
            try (IgniteDataStreamer<Integer, Integer> streamer = loadIgnite.dataStreamer(cacheName)) {
                streamer.allowOverwrite(allowOverwrite);

                streamer.addData(key, key);
            }

            Ignite updateIgnite = ignite(updateNode);

            IgniteCache<Integer, Integer> cache = updateIgnite.cache(cacheName);

            updateIgnite.cache(cacheName).put(key, key + 1);

            checkValue(key, key + 1, cacheName);

            key++;
        }
    }

    return key;
}
 
Example 3
Source File: DataStreamerTimeoutTest.java    From ignite with Apache License 2.0
/**
 * Test timeout on the {@code IgniteDataStreamer.close()} method.
 * @throws Exception If failed.
 */
@Test
public void testTimeoutOnCloseMethod() throws Exception {
    failOn = 1;

    Ignite ignite = startGrid(1);

    boolean thrown = false;

    try (IgniteDataStreamer ldr = ignite.dataStreamer(CACHE_NAME)) {
        ldr.timeout(TIMEOUT);
        ldr.receiver(new TestDataReceiver());
        ldr.perNodeBufferSize(ENTRY_AMOUNT);

        for (int i = 0; i < ENTRY_AMOUNT; i++)
            ldr.addData(i, i);
    }
    catch (CacheException | IgniteDataStreamerTimeoutException ignored) {
        thrown = true;
    }
    finally {
        stopAllGrids();
    }

    assertTrue(thrown);
}
 
Example 4
Source File: CacheLoadingConcurrentGridStartSelfTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
@Test
public void testLoadCacheWithDataStreamer() throws Exception {
    configured = true;

    try {
        IgniteInClosure<Ignite> f = new IgniteInClosure<Ignite>() {
            @Override public void apply(Ignite grid) {
                try (IgniteDataStreamer<Integer, String> dataStreamer = grid.dataStreamer(DEFAULT_CACHE_NAME)) {
                    dataStreamer.allowOverwrite(allowOverwrite);

                    for (int i = 0; i < KEYS_CNT; i++)
                        dataStreamer.addData(i, Integer.toString(i));
                }

                log.info("Data loaded.");
            }
        };

        loadCache(f);
    }
    finally {
        configured = false;
    }
}
 
Example 5
Source File: SqlStatisticsAbstractTest.java    From ignite with Apache License 2.0
/**
 * Start the cache with a test table and test data.
 */
protected IgniteCache createCacheFrom(Ignite node) {
    CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<Integer, String>(DEFAULT_CACHE_NAME)
        .setSqlFunctionClasses(SuspendQuerySqlFunctions.class)
        .setQueryEntities(Collections.singleton(
            new QueryEntity(Integer.class.getName(), String.class.getName())
                .setTableName("TAB")
                .addQueryField("id", Integer.class.getName(), null)
                .addQueryField("name", String.class.getName(), null)
                .setKeyFieldName("id")
                .setValueFieldName("name")
        ));

    IgniteCache<Integer, String> cache = node.createCache(ccfg);

    try (IgniteDataStreamer<Object, Object> ds = node.dataStreamer(DEFAULT_CACHE_NAME)) {
        for (int i = 0; i < TABLE_SIZE; i++)
            ds.addData(i, UUID.randomUUID().toString());
    }

    return cache;
}
 
Example 6
Source File: GridGcTimeoutTest.java    From ignite with Apache License 2.0
/**
 * @param args Args.
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) {
    Ignite g = G.start(U.resolveIgniteUrl(CFG_PATH));

    IgniteDataStreamer<Long, String> ldr = g.dataStreamer("default");

    ldr.perNodeBufferSize(16 * 1024);

    StringBuilder sb = new StringBuilder();

    for (int i = 0; i < VALUE_SIZE - 42; i++)
        sb.append('a');

    String str = sb.toString();
    long cntr = 0;

    while (true) {
        ldr.addData(cntr++, UUID.randomUUID() + str);

        if (cntr % 1000000 == 0)
            X.println("!!! Entries added: " + cntr);
    }
}
 
Example 7
Source File: IgniteCacheTxPreloadNoWriteTest.java    From ignite with Apache License 2.0
/**
 * @param commit {@code True} if commit transaction.
 * @throws Exception If failed.
 */
private void txNoWrite(boolean commit) throws Exception {
    Ignite ignite0 = startGrid(0);

    Affinity<Integer> aff = ignite0.affinity(DEFAULT_CACHE_NAME);

    IgniteCache<Integer, Object> cache0 = ignite0.cache(DEFAULT_CACHE_NAME);

    try (IgniteDataStreamer<Integer, Object> streamer = ignite0.dataStreamer(DEFAULT_CACHE_NAME)) {
        for (int i = 0; i < 1000; i++)
            streamer.addData(i + 10000, new byte[1024]);
    }

    Ignite ignite1 = startGrid(1);

    Integer key = primaryKey(ignite1.cache(DEFAULT_CACHE_NAME));

    // Test scenario: ignite1 is the new primary node, but ignite0 is still the partition owner.
    assertTrue(aff.isPrimary(ignite1.cluster().localNode(), key));

    try (Transaction tx = ignite0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        cache0.get(key);

        if (commit)
            tx.commit();
    }

    GridCacheAdapter cacheAdapter = ((IgniteKernal)ignite(0)).context().cache().internalCache(DEFAULT_CACHE_NAME);

    // Check all transactions are finished.
    assertEquals(0, cacheAdapter.context().tm().idMapSize());

    // Try to start one more node.
    startGrid(2);
}
 
Example 8
Source File: GridDataStreamerImplSelfTest.java    From ignite with Apache License 2.0
/**
 * Tries to populate the cache with binary objects created using the builder.
 *
 * @throws Exception If failed.
 */
@Test
public void testAddBinaryCreatedWithBuilder() throws Exception {
    try {
        binaries = true;

        startGrids(2);

        awaitPartitionMapExchange();

        Ignite g0 = grid(0);

        IgniteDataStreamer<Integer, BinaryObject> dataLdr = g0.dataStreamer(DEFAULT_CACHE_NAME);

        for (int i = 0; i < 500; i++) {
            BinaryObjectBuilder obj = g0.binary().builder("NoExistedClass");

            obj.setField("id", i);
            obj.setField("name", "name = " + i);

            dataLdr.addData(i, obj.build());
        }

        dataLdr.close(false);

        assertEquals(500, g0.cache(DEFAULT_CACHE_NAME).size(CachePeekMode.ALL));
        assertEquals(500, grid(1).cache(DEFAULT_CACHE_NAME).size(CachePeekMode.ALL));
    }
    finally {
        G.stopAll(true);
    }
}
 
Example 9
Source File: DataStreamProcessorSelfTest.java    From ignite with Apache License 2.0
/**
 * Tests that primitive arrays can be passed into the data streamer.
 *
 * @throws Exception If failed.
 */
@Test
public void testPrimitiveArrays() throws Exception {
    try {
        useCache = true;
        mode = PARTITIONED;

        Ignite g1 = startGrid(1);
        startGrid(2); // Reproduced only for several nodes in topology (if marshalling is used).

        afterGridStarted();

        List<Object> arrays = Arrays.<Object>asList(
            new byte[] {1}, new boolean[] {true, false}, new char[] {2, 3}, new short[] {3, 4},
            new int[] {4, 5}, new long[] {5, 6}, new float[] {6, 7}, new double[] {7, 8});

        IgniteDataStreamer<Object, Object> dataLdr = g1.dataStreamer(DEFAULT_CACHE_NAME);

        for (int i = 0, size = arrays.size(); i < 1000; i++) {
            Object arr = arrays.get(i % size);

            dataLdr.addData(i, arr);
            dataLdr.addData(i, fixedClosure(arr));
        }

        dataLdr.close(false);
    }
    finally {
        stopAllGrids();
    }
}
 
Example 10
Source File: RebalanceCancellationTest.java    From ignite with Apache License 2.0
/**
 * Loads several data entries into the specified cache.
 *
 * @param ignite Ignite.
 * @param cacheName Cache name.
 */
private void loadData(Ignite ignite, String cacheName) {
    try (IgniteDataStreamer streamer = ignite.dataStreamer(cacheName)) {
        streamer.allowOverwrite(true);

        for (int i = 0; i < 100; i++)
            streamer.addData(i, System.nanoTime());
    }
}
 
Example 11
Source File: GridIoManagerFileTransmissionSelfTest.java    From ignite with Apache License 2.0
/**
 * @param ignite Ignite instance.
 * @param cacheName Cache name to add data to.
 */
private void addCacheData(Ignite ignite, String cacheName) {
    try (IgniteDataStreamer<Integer, Integer> dataStreamer = ignite.dataStreamer(cacheName)) {
        dataStreamer.allowOverwrite(true);

        for (int i = 0; i < CACHE_SIZE; i++)
            dataStreamer.addData(i, i + cacheName.hashCode());
    }
}
 
Example 12
Source File: CacheBinaryKeyConcurrentQueryTest.java    From ignite with Apache License 2.0
/**
 * @param ignite Node.
 * @param cacheName Cache name.
 */
private void insertData(Ignite ignite, String cacheName) {
    try (IgniteDataStreamer streamer = ignite.dataStreamer(cacheName)) {
        for (int i = 0; i < KEYS; i++)
            streamer.addData(new TestKey(i), new TestValue(i));
    }
}
 
Example 13
Source File: GridCachePartitionedHitsAndMissesSelfTest.java    From ignite with Apache License 2.0
/**
 * Populates cache with data streamer.
 *
 * @param g Grid.
 */
private static void realTimePopulate(final Ignite g) {
    try (IgniteDataStreamer<Integer, Long> ldr = g.dataStreamer(DEFAULT_CACHE_NAME)) {
        // Set the maximum number of parallel operations per node to 1 so cache metrics have correct values.
        ldr.perNodeParallelOperations(1);

        // Receiver which increments a counter on the remote node.
        ldr.receiver(new IncrementingUpdater());

        for (int i = 0; i < CNT; i++)
            ldr.addData(i % (CNT / 2), 1L);
    }
}
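The IncrementingUpdater used as the receiver above is defined elsewhere in that test and is not shown here. As a rough sketch (class and field names are illustrative, not the actual test class), a StreamReceiver that increments the cached value on the receiving node could look like this, assuming a Long-valued cache and an EntryProcessor-based update:

import java.util.Collection;
import java.util.Map;

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteException;
import org.apache.ignite.stream.StreamReceiver;

/** Hypothetical receiver: adds the streamed delta to the current cache value. */
public class IncrementingReceiverSketch implements StreamReceiver<Integer, Long> {
    /** Entry processor that applies the increment to the current entry value. */
    private static final EntryProcessor<Integer, Long, Void> INC = new EntryProcessor<Integer, Long, Void>() {
        @Override public Void process(MutableEntry<Integer, Long> e, Object... args) {
            Long cur = e.getValue();
            Long delta = (Long)args[0];

            e.setValue(cur == null ? delta : cur + delta);

            return null;
        }
    };

    /** {@inheritDoc} */
    @Override public void receive(IgniteCache<Integer, Long> cache, Collection<Map.Entry<Integer, Long>> entries)
        throws IgniteException {
        // The data streamer delivers each batch to the node that is primary for its keys,
        // so these invocations are expected to execute locally.
        for (Map.Entry<Integer, Long> entry : entries)
            cache.invoke(entry.getKey(), INC, entry.getValue());
    }
}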
 
Example 14
Source File: KafkaIgniteStreamerSelfTest.java    From ignite with Apache License 2.0
/**
 * Consumes Kafka stream via Ignite.
 *
 * @param topic Topic name.
 * @param keyValMap Expected key value map.
 * @throws TimeoutException If timed out.
 * @throws InterruptedException If interrupted.
 */
private void consumerStream(String topic, Map<String, String> keyValMap)
    throws TimeoutException, InterruptedException {
    KafkaStreamer<String, String> kafkaStmr = null;

    Ignite ignite = grid();

    try (IgniteDataStreamer<String, String> stmr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        stmr.allowOverwrite(true);
        stmr.autoFlushFrequency(10);

        // Configure Kafka streamer.
        kafkaStmr = new KafkaStreamer<>();

        // Get the cache.
        IgniteCache<String, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

        // Set Ignite instance.
        kafkaStmr.setIgnite(ignite);

        // Set data streamer instance.
        kafkaStmr.setStreamer(stmr);

        // Set the topic.
        kafkaStmr.setTopic(Arrays.asList(topic));

        // Set the number of threads.
        kafkaStmr.setThreads(4);

        // Set the consumer configuration.
        kafkaStmr.setConsumerConfig(
            createDefaultConsumerConfig(embeddedBroker.getBrokerAddress(), "groupX"));

        kafkaStmr.setMultipleTupleExtractor(
            record -> {
                Map<String, String> entries = new HashMap<>();

                try {
                    String key = (String)record.key();
                    String val = (String)record.value();

                    // Convert the message into a number of cache entries with the same key, or a dynamic key derived from the actual message.
                    // For now, the record key is used as the cache entry key and the record value as the cache entry value - for test purposes.
                    entries.put(key, val);
                }
                catch (Exception ex) {
                    fail("Unexpected error." + ex);
                }

                return entries;
            });

        // Start kafka streamer.
        kafkaStmr.start();

        final CountDownLatch latch = new CountDownLatch(CNT);

        IgniteBiPredicate<UUID, CacheEvent> locLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {
            @IgniteInstanceResource
            private Ignite ig;

            @LoggerResource
            private IgniteLogger log;

            /** {@inheritDoc} */
            @Override public boolean apply(UUID uuid, CacheEvent evt) {
                latch.countDown();

                if (log.isInfoEnabled()) {
                    IgniteEx igEx = (IgniteEx)ig;

                    UUID nodeId = igEx.localNode().id();

                    log.info("Recive event=" + evt + ", nodeId=" + nodeId);
                }

                return true;
            }
        };

        ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);

        // Check that all events are processed successfully within 10 seconds.
        assertTrue("Failed to wait latch completion, still wait " + latch.getCount() + " events",
            latch.await(10, TimeUnit.SECONDS));

        for (Map.Entry<String, String> entry : keyValMap.entrySet())
            assertEquals(entry.getValue(), cache.get(entry.getKey()));
    }
    finally {
        if (kafkaStmr != null)
            kafkaStmr.stop();
    }
}
 
Example 15
Source File: IgniteChangeGlobalStateDataStreamerTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
@Test
public void testDeActivateAndActivateDataStreamer() throws Exception {
    Ignite ig1 = primary(0);
    Ignite ig2 = primary(1);
    Ignite ig3 = primary(2);

    Ignite ig1C = primaryClient(0);
    Ignite ig2C = primaryClient(1);
    Ignite ig3C = primaryClient(2);

    assertTrue(ig1.active());
    assertTrue(ig2.active());
    assertTrue(ig3.active());

    assertTrue(ig1C.active());
    assertTrue(ig2C.active());
    assertTrue(ig3C.active());

    String cacheName = "myStreamCache";

    ig2C.getOrCreateCache(cacheName);

    try (IgniteDataStreamer<Integer, String> stmr = ig1.dataStreamer(cacheName)) {
        for (int i = 0; i < 100; i++)
            stmr.addData(i, Integer.toString(i));
    }

    ig2C.active(false);

    assertTrue(!ig1.active());
    assertTrue(!ig2.active());
    assertTrue(!ig3.active());

    assertTrue(!ig1C.active());
    assertTrue(!ig2C.active());
    assertTrue(!ig3C.active());

    boolean fail = false;

    try {
        IgniteDataStreamer<String, String> strm2 = ig2.dataStreamer(cacheName);
    }
    catch (Exception e) {
        fail = true;

        assertTrue(e.getMessage().contains("Can not perform the operation because the cluster is inactive."));
    }

    if (!fail)
        fail("exception was not throw");

    ig3C.active(true);

    assertTrue(ig1.active());
    assertTrue(ig2.active());
    assertTrue(ig3.active());

    assertTrue(ig1C.active());
    assertTrue(ig2C.active());
    assertTrue(ig3C.active());

    try (IgniteDataStreamer<Integer, String> stmr2 = ig2.dataStreamer(cacheName)) {
        for (int i = 100; i < 200; i++)
            stmr2.addData(i, Integer.toString(i));
    }

    IgniteCache<Integer, String> cache = ig3.cache(cacheName);

    for (int i = 0; i < 200; i++)
        assertEquals(String.valueOf(i), cache.get(i));
}
 
Example 16
Source File: IgniteDataStreamerPerformanceTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
private void doTest() throws Exception {
    System.gc();
    System.gc();
    System.gc();

    try {
        useCache = true;

        startGridsMultiThreaded(GRID_CNT);

        useCache = false;

        Ignite ignite = startGrid();

        final IgniteDataStreamer<Integer, String> ldr = ignite.dataStreamer(DEFAULT_CACHE_NAME);

        ldr.perNodeBufferSize(8192);
        ldr.receiver(DataStreamerCacheUpdaters.<Integer, String>batchedSorted());
        ldr.autoFlushFrequency(0);

        final LongAdder cnt = new LongAdder();

        long start = U.currentTimeMillis();

        Thread t = new Thread(new Runnable() {
            @SuppressWarnings("BusyWait")
            @Override public void run() {
                while (true) {
                    try {
                        Thread.sleep(10000);
                    }
                    catch (InterruptedException ignored) {
                        break;
                    }

                    info(">>> Adds/sec: " + cnt.sumThenReset() / 10);
                }
            }
        });

        t.setDaemon(true);

        t.start();

        int threadNum = 2;//Runtime.getRuntime().availableProcessors();

        multithreaded(new Callable<Object>() {
            @SuppressWarnings("InfiniteLoopStatement")
            @Override public Object call() throws Exception {
                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                while (true) {
                    int i = rnd.nextInt(ENTRY_CNT);

                    ldr.addData(i, vals[rnd.nextInt(vals.length)]);

                    cnt.increment();
                }
            }
        }, threadNum, "loader");

        info("Closing loader...");

        ldr.close(false);

        long duration = U.currentTimeMillis() - start;

        info("Finished performance test. Duration: " + duration + "ms.");
    }
    finally {
        stopAllGrids();
    }
}
 
Example 17
Source File: DataStreamProcessorSelfTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
@Test
public void testTryFlush() throws Exception {
    mode = PARTITIONED;

    useCache = true;

    try {
        Ignite g = startGrid();

        afterGridStarted();

        IgniteCache<Integer, Integer> c = g.cache(DEFAULT_CACHE_NAME);

        IgniteDataStreamer<Integer, Integer> ldr = g.dataStreamer(DEFAULT_CACHE_NAME);

        ldr.perNodeBufferSize(10);

        for (int i = 0; i < 9; i++)
            ldr.addData(i, i);

        assertTrue(c.localSize() == 0);

        ldr.tryFlush();

        Thread.sleep(100);

        assertEquals(9, c.size());

        ldr.close(false);
    }
    finally {
        stopAllGrids();
    }
}
 
Example 18
Source File: DataStreamProcessorSelfTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
@Test
public void testFlush() throws Exception {
    mode = PARTITIONED;

    useCache = true;

    try {
        Ignite g = startGrid();

        afterGridStarted();

        final IgniteCache<Integer, Integer> c = g.cache(DEFAULT_CACHE_NAME);

        final IgniteDataStreamer<Integer, Integer> ldr = g.dataStreamer(DEFAULT_CACHE_NAME);

        ldr.perNodeBufferSize(10);

        for (int i = 0; i < 9; i++)
            ldr.addData(i, i);

        assertTrue(c.localSize() == 0);

        multithreaded(new Callable<Void>() {
            @Override public Void call() throws Exception {
                ldr.flush();

                assertEquals(9, c.size());

                return null;
            }
        }, 5, "flush-checker");

        ldr.addData(100, 100);

        ldr.flush();

        assertEquals(10, c.size());

        ldr.addData(200, 200);

        ldr.close(false);

        ldr.future().get();

        assertEquals(11, c.size());
    }
    finally {
        stopAllGrids();
    }
}
 
Example 19
Source File: GridDataStreamerImplSelfTest.java    From ignite with Apache License 2.0
/**
 * Data streamer should correctly load entries from a HashMap in the case of grids with more than one node
 * and with GridOptimizedMarshaller, which requires serializable objects.
 *
 * @throws Exception If failed.
 */
@Test
public void testAddDataFromMap() throws Exception {
    try {
        binaries = false;

        startGrids(2);

        awaitPartitionMapExchange();

        Ignite g0 = grid(0);

        IgniteDataStreamer<Integer, String> dataLdr = g0.dataStreamer(DEFAULT_CACHE_NAME);

        Map<Integer, String> map = U.newHashMap(KEYS_COUNT);

        for (int i = 0; i < KEYS_COUNT; i++)
            map.put(i, String.valueOf(i));

        dataLdr.addData(map);

        dataLdr.close();

        checkDistribution(grid(0));

        checkDistribution(grid(1));

        // Check several random keys in cache.
        Random rnd = new Random();

        IgniteCache<Integer, String> c0 = g0.cache(DEFAULT_CACHE_NAME);

        for (int i = 0; i < 100; i++) {
            Integer k = rnd.nextInt(KEYS_COUNT);

            String v = c0.get(k);

            assertEquals(k.toString(), v);
        }
    }
    finally {
        G.stopAll(true);
    }
}
 
Example 20
Source File: DataStreamProcessorSelfTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If failed.
 */
@Test
public void testFlushTimeout() throws Exception {
    MvccFeatureChecker.skipIfNotSupported(MvccFeatureChecker.Feature.CACHE_EVENTS);

    mode = PARTITIONED;

    useCache = true;

    try {
        Ignite g = startGrid();

        afterGridStarted();

        final CountDownLatch latch = new CountDownLatch(9);

        g.events().localListen(new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                latch.countDown();

                return true;
            }
        }, EVT_CACHE_OBJECT_PUT);

        IgniteCache<Integer, Integer> c = g.cache(DEFAULT_CACHE_NAME);

        assertTrue(c.localSize() == 0);

        IgniteDataStreamer<Integer, Integer> ldr = g.dataStreamer(DEFAULT_CACHE_NAME);

        ldr.perNodeBufferSize(10);
        ldr.autoFlushFrequency(3000);
        ldr.allowOverwrite(true);

        for (int i = 0; i < 9; i++)
            ldr.addData(i, i);

        assertTrue(c.localSize() == 0);

        assertFalse(latch.await(1000, MILLISECONDS));

        assertTrue(c.localSize() == 0);

        assertTrue(latch.await(3000, MILLISECONDS));

        assertEquals(9, c.size());

        ldr.close(false);
    }
    finally {
        stopAllGrids();
    }
}