Java Code Examples for com.gemstone.gemfire.cache.Region#destroyRegion()

The following examples show how to use com.gemstone.gemfire.cache.Region#destroyRegion(). All of the examples are drawn from the gemfirexd-oss project; the source file for each one is noted above its code.
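
Before the individual examples, here is a minimal, self-contained sketch of the typical lifecycle around destroyRegion(): create a region, use it, destroy it, and expect a RegionDestroyedException on any further access. The region name "example" and the REPLICATE shortcut are illustrative assumptions for this sketch, not taken from the examples below.

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.RegionDestroyedException;
import com.gemstone.gemfire.cache.RegionShortcut;

public class DestroyRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // "example" and REPLICATE are illustrative choices for this sketch
    Region<String, String> region =
        cache.createRegionFactory(RegionShortcut.REPLICATE).create("example");
    try {
      region.put("key", "value");
      // destroyRegion() removes the region and all of its entries,
      // distributing the destroy to other caches hosting the region
      region.destroyRegion();
      try {
        region.get("key"); // any further use of the region fails
      } catch (RegionDestroyedException expected) {
        // expected once the region has been destroyed
      }
    } finally {
      cache.close();
    }
  }
}
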
Example 1
Source File: PartitionedRegionSingleNodeOperationsJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void test022GetEntry() throws Exception {
  Region pr = null;
  try {
    pr = PartitionedRegionTestHelper.createPartitionedRegion("testGetEntry", String.valueOf(200), 0);
    final Integer one = new Integer(1);
    pr.put(one, "one" );
    final Region.Entry re = pr.getEntry(one);
    assertFalse(re.isDestroyed());
    assertFalse(re.isLocal());
    assertTrue(((EntrySnapshot)re).wasInitiallyLocal());

    assertEquals("one", re.getValue());
    assertEquals(one, re.getKey());
    // TODO: Finish out the entry operations
    assertNull(pr.getEntry("nuthin"));
  }
  finally {
    if (pr != null) {
      pr.destroyRegion();
    }
  }
}
 
Example 2
Source File: WANTestBase.java    From gemfirexd-oss with Apache License 2.0
public static void destroyRegionAfterMinRegionSize(String regionName, final int min) {
  final Region r = cache.getRegion(Region.SEPARATOR + regionName);
  assertNotNull(r);
  WaitCriterion wc = new WaitCriterion() {
    public boolean done() {
      return destroyFlag;
    }

    public String description() {
      return "Looking for min size of region to be " + min;
    }
  };
  DistributedTestCase.waitForCriterion(wc, 30000, 5, false); 
  r.destroyRegion();
  destroyFlag = false;
}
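
For contrast, a criterion that waits on the region size directly (rather than on the destroyFlag that this test sets elsewhere) might look like the following sketch; it reuses the same WaitCriterion and waitForCriterion API shown above.

  WaitCriterion sizeCriterion = new WaitCriterion() {
    public boolean done() {
      // hypothetical variant: poll the region size instead of a flag
      return r.size() >= min;
    }

    public String description() {
      return "waiting for region size to reach " + min;
    }
  };
  DistributedTestCase.waitForCriterion(sizeCriterion, 30000, 5, false);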
 
Example 3
Source File: CompressionStatsDUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Destroys a region.
 * @param regionName the region to destroy.
 */
private void destroyRegion(String regionName) {
  Region region = getCache().getRegion(regionName);
  assertNotNull(region);
  
  region.destroyRegion();
}
 
Example 4
Source File: HDFSRegionOperationsJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void test160DestroyRegion() {
  Region<Integer, String> r = createRegion(getName());
  for (int i=0; i<100; i++) {
    r.put(i, "value"+i);
  }
  clearBackingCHM(r);
  r.destroyRegion();
  try {
    r.get(3);
    fail("expected exception not thrown");
  } catch (RegionDestroyedException expected) {
    // expected: the region was destroyed above
  }
}
 
Example 5
Source File: LRUEvictionControllerDUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Tests that a capacity controller with the LOCAL_DESTROY eviction action
 * cannot be installed into a replicated region: the requested REPLICATE
 * data policy is downgraded to PRELOADED.
 */
public void testCCMirrored() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setEnableOffHeapMemory(isOffHeapEnabled());
  factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(10));
  factory.setDataPolicy(DataPolicy.REPLICATE);
  
  Region r = createRegion(name, factory.create());
  RegionAttributes ra = r.getAttributes();
  assertEquals(DataPolicy.PRELOADED, ra.getDataPolicy());
  assertEquals(new SubscriptionAttributes(InterestPolicy.ALL),
               ra.getSubscriptionAttributes());
  r.destroyRegion();
}
 
Example 6
Source File: WANTestBase.java    From gemfirexd-oss with Apache License 2.0
public static void addCacheListenerAndDestroyRegion(String regionName) {
  final Region region = cache.getRegion(Region.SEPARATOR + regionName);
  assertNotNull(region);
  CacheListenerAdapter cl = new CacheListenerAdapter() {
    @Override
    public void afterCreate(EntryEvent event) {
      if ((Long) event.getKey() == 99) {
        region.destroyRegion();
      }
    }
  };
  region.getAttributesMutator().addCacheListener(cl);
}
 
Example 7
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void testCorruptHfileBucketFail() throws Exception {
  int port = AvailablePortHelper.getRandomAvailableTCPPort();
  MiniDFSCluster cluster = initMiniCluster(port, 1);
  
  hsf.setHomeDir("Store-1");
  hsf.setNameNodeURL("hdfs://127.0.0.1:" + port);
  HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
  
  // create a corrupt file
  FileSystem fs = store1.getFileSystem();
  for (int i = 0; i < 113; i++) {
    FSDataOutputStream opStream = fs.create(new Path("Store-1/region-1/" + i + "/1-1-1.hop"));
    opStream.writeBytes("Some random corrupt file");
    opStream.close();
  }
    
  // create region with store
  regionfactory.setHDFSStoreName(store1.getName());
  Region<Object, Object> region1 = regionfactory.create("region-1");
  ExpectedException ex = TestUtils.addExpectedException("CorruptHFileException");
  try {
    region1.get("key");
    fail("get should have failed with corrupt file error");
  } catch (HDFSIOException e) {
    // expected
  } finally {
    ex.remove();
  }
  
  region1.destroyRegion();
  store1.destroy();
  cluster.shutdown();
  FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
}
 
Example 8
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void testNewReaderWithNameNodeHA() throws Exception {
  deleteMiniClusterDir();
  int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
  int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
  
  MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
  initClientHAConf(nn1port, nn2port);
  
  HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
  regionfactory.setHDFSStoreName(store1.getName());
  Region<Object, Object> region1 = regionfactory.create("region-1");
  HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
  
  HoplogOrganizer<SortedHoplogPersistedEvent> organizer = doRead(regionManager1);
  organizer.close();
  
  TestUtils.addExpectedException("java.io.EOFException");
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertFalse(nnode2.isStandbyState());
  
  organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
  byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
  byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
  byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
  assertEquals("2-1", organizer.read(keyBytes1).getValue());
  assertEquals("3-3", organizer.read(keyBytes3).getValue());
  assertEquals("1-4", organizer.read(keyBytes4).getValue());
  TestUtils.removeExpectedException("java.io.EOFException");

  region1.destroyRegion();
  store1.destroy();
  cluster.shutdown();
  FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
}
 
Example 9
Source File: MultRegionsClient.java    From gemfirexd-oss with Apache License 2.0
/**
 * Performs a destroy-region operation on the specified region.
 * @param regionName the name of the region on which to perform the destroy
 */
protected void destroyRegion(String regionName) {
  Region region = RegionHelper.getRegion(regionName);
  
  MasterController.sleepForMs(20000); // sleep 20 sec before destroying the region
  synchronized (regionOpsTryLock) {
    regionOpsTryLock = new Boolean(true);
  }

  while (true) {
    synchronized (numOfCurrentPutAllOps) {
      if (numOfCurrentPutAllOps.intValue() == 0) {
        if (region != null) {
          region.destroyRegion();
          Log.getLogWriter().info("Successfully destroyed the region " + regionName);
          synchronized (regionOpsTryLock) {
            regionOpsTryLock = new Boolean(false);
          } // allow putAll ops and other region ops to proceed
          CQUtilBB.getBB().getSharedMap().put(CQUtilBB.PerformedDestroyRegion, new Boolean(true));
          performedDestroyRegion = true;
          MapBB.getBB().getSharedCounters().increment(MapBB.NUM_CLOSE);
          return;
        } else {
          // only perform destroyRegion once in the test
          throw new TestException("region " + regionName + " is not created properly");
        }
      }         
    } // end synchronized numOfCurrentPutAllOps
    MasterController.sleepForMs(5000); // yield for putAll ops to complete
  }
}
 
Example 10
Source File: EventIDVerificationInP2PDUnitTest.java    From gemfirexd-oss with Apache License 2.0
public static void destroyRegion()
{
  try {
    Region r = cache.getRegion("/" + REGION_NAME);
    assertNotNull(r);
    r.destroyRegion();
  }
  catch (Exception ex) {
    fail("test failed due to exception in destroyRegion ", ex);
  }
}
 
Example 11
Source File: RegionWithHDFSBasicDUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Test that doing a destroyRegion removes all data from HDFS.
 */
public void testGlobalDestroyWithQueueData() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  
  final String folderPath = tmpDir + "/testGlobalDestroyWithQueueData";
  final String uniqueName = "testGlobalDestroyWithQueueData";
  int numEntries = 200;
  
  // set a large queue timeout so that data is still in the queue
  final int port0 = createServerRegion(vm0, 7, 31, 40, folderPath,
      uniqueName, 10000, false, true);
  final int port1 = createServerRegion(vm1, 7, 31, 40, folderPath,
      uniqueName, 10000, false, true);
  
  doPuts(vm0, uniqueName, numEntries);
  
  SerializableCallable globalDestroy = new SerializableCallable("destroy") {
    public Object call() throws Exception {
      Region r = getRootRegion(uniqueName);
      r.destroyRegion();
      return null;
    }
  };
  
  vm0.invoke(globalDestroy);
  
  //make sure data is not in HDFS
  verifyNoQOrPR(vm0);
  verifyNoQOrPR(vm1);
  verifyNoHDFSData(vm0, uniqueName);
  verifyNoHDFSData(vm1, uniqueName);
  
  closeCache(vm0);
  closeCache(vm1);
  
  //Restart vm0 and make sure it's still empty
  createServerRegion(vm0, 7, 31, 40, folderPath, 
      uniqueName, 1, false, true);
  createServerRegion(vm1, 7, 31, 40, folderPath, 
      uniqueName, 1, false, true);

  //make sure it's empty
  validateEmpty(vm0, numEntries, uniqueName);
  validateEmpty(vm1, numEntries, uniqueName);
}
 
Example 12
Source File: DiskOfflineCompactionJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void testbug41862() throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testbug41862";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion)r));
  r.create("key1", "value1");
  r.create("key2", "value2"); // to keep this oplog from going empty
  ((LocalRegion)r).getDiskStore().forceRoll();
  r.create("key3", "value3");
  r.remove("key1");
  cache.close();
  ds.disconnect();
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());
  
  int crfsize = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2"); 
  int createsize3 = getSize4Create(extra_byte_num_per_entry, "key3", "value3");
  // 1 tombstone with key
  int tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key1");

  assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(0, null, true), drfFile.length());

  File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
  File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
  
  crfsize += (getRVVSize(1, new int[] {1}, false) - getRVVSize(0, null, false)); // adjust rvv size
  assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf2File.length());
  long originalIfLength = ifFile.length();

  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(1, dsi.getDeadRecordCount());
  assertEquals(3, dsi.getLiveEntryCount());
  assertEquals(false, crfFile.exists());
  assertEquals(false, drfFile.exists());
  assertEquals(false, krfFile.exists());
  
  // offline compaction did not change _2.crf and _2.drf
  assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf2File.length());

  // offline compaction resets the rvv to empty, and each create-entry becomes an update-with-key-entry in _3.crf;
  // since there are no creates, there is no OPLOG_NEW_ENTRY_BASE_REC_SIZE
  crfsize = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] {1}, false);
  int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key3", "value3");
  File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
  File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
  File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
  assertEquals(true, krf3File.exists());
  assertEquals(true, crf3File.exists());
  assertEquals(true, drf3File.exists());
  assertEquals(crfsize + updatesize1, crf3File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf3File.length());
  assertEquals(originalIfLength, ifFile.length());

  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(2, r.size());
  assertEquals("value2", r.get("key2"));
  assertEquals("value3", r.get("key3"));

  // if the test passed, clean up the files
  r.destroyRegion();
  diskStore.destroy();
}
 
Example 13
Source File: PartitionedRegionAsSubRegionDUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void run2() throws CacheException
{
  Cache cache = getCache();
  Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX);
  pr.destroyRegion();
}
 
Example 14
Source File: UpdateVersionJUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Tests UPDATE_VERSION after a create on a PartitionedRegion.
 */
public void testUpdateVersionAfterCreateOnPR() {
  
  Cache cache = new CacheFactory().create();
  Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);

  try {
    region.create("key-1", "value-1");

    Entry entry = region.getEntry("key-1");
    assertTrue(entry instanceof EntrySnapshot);
    RegionEntry regionEntry = ((EntrySnapshot)entry).getRegionEntry();

    VersionStamp stamp = regionEntry.getVersionStamp();

    // Create a duplicate entry version tag from stamp with newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());

    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    long time = System.currentTimeMillis();
    
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);      
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);

    assertTrue(region instanceof PartitionedRegion);
    
    EntryEventImpl event = createNewEvent((PartitionedRegion)region, tag, entry.getKey());
    
    ((PartitionedRegion)region).basicUpdateEntryVersion(event);

    // Verify the new stamp
    entry = region.getEntry("key-1");
    assertTrue(entry instanceof EntrySnapshot);
    regionEntry = ((EntrySnapshot)entry).getRegionEntry();

    stamp = regionEntry.getVersionStamp();
    assertEquals(
        "Time stamp did NOT get updated by UPDATE_VERSION operation on PartitionedRegion",
        time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
 
Example 15
Source File: UpdateVersionJUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Tests UPDATE_VERSION after a create on a LocalRegion.
 */
public void testUpdateVersionAfterCreate() {
  
  Cache cache = new CacheFactory().create();
  Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);

  try {
    region.create("key-1", "value-1");

    Entry entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    RegionEntry regionEntry = ((NonTXEntry)entry).getRegionEntry();

    VersionStamp stamp = regionEntry.getVersionStamp();

    // Create a duplicate entry version tag from stamp with newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());

    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    long time = System.currentTimeMillis() + 1;
    
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);

    assertTrue(region instanceof LocalRegion);
    
    EntryEventImpl event = createNewEvent((LocalRegion)region, tag, entry.getKey());
    
    ((LocalRegion)region).basicUpdateEntryVersion(event);

    // Verify the new stamp
    entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    regionEntry = ((NonTXEntry)entry).getRegionEntry();

    stamp = regionEntry.getVersionStamp();
    assertEquals(
        "Time stamp did NOT get updated by UPDATE_VERSION operation on LocalRegion",
        time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
 
Example 16
Source File: LRUEvictionControllerDUnitTest.java    From gemfirexd-oss with Apache License 2.0
/**
 * Carefully verifies that region operations affect the {@link
 * LRUStatistics} as expected when using the LRU_HEAP algorithm.
 */
public void testRegionOperationsLruHeap() throws CacheException {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setEnableOffHeapMemory(isOffHeapEnabled());
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(new ObjectSizer() {
    public int sizeof(Object o) {
      return 1024;
    }
  }));

  Region region;
  if (usingMain) {
    DistributedSystem system = DistributedSystem.connect(getDistributedSystemProperties());
    Cache cache = CacheFactory.create(system);
    region = cache.createRegion("Test", factory.create());
  } else {
    region = createRegion(name, factory.create());
  }

  InternalResourceManager irm = (InternalResourceManager) getCache().getResourceManager();
  irm.setEvictionHeapPercentage(5.0f);
  irm.addResourceListener(getResourceType(), this);
  
  HeapEvictor evictor = getEvictor();
  evictor.testAbortAfterLoopCount = 0;
  
  int counter = 0;
  while (!this.receivedEvictionEvent) {
    region.put(counter++, new byte[1024]);
  }
  
  int startingSize = region.size();
  
  LRUStatistics lruStats = getLRUStats(region);
  assertNotNull(lruStats);
  long evictionCount = lruStats.getEvictions();

  for (int i = 0; i <= 10; i++) {
    region.put(counter++, new byte[1024]);
    assertEquals(++evictionCount, lruStats.getEvictions());
    assertEquals(startingSize, region.size());
  }
  
  irm.removeResourceListener(this);
  region.destroyRegion();
  evictor.testAbortAfterLoopCount = Integer.MAX_VALUE;
}
 
Example 17
Source File: DiskOfflineCompactionJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void testForceRollTwoEntriesWithUpdates()
    throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testForceRollTwoEntriesWithUpdates";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
  File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
  File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion)r));
  r.put("key0", "value0"); //extra key to keep oplog1 from being empty
  r.put("key1", "value1");
  r.put("key2", "value2");
  diskStore.forceRoll();
  r.put("key1", "update1");
  r.put("key2", "update2");
  cache.close();
  ds.disconnect();
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());
  
  int crfsize = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2"); 
  int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
  int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");

  assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(0, null, true), drfFile.length());
  crfsize = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {1}, false);
  assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf2File.length());
  long originalIfLength = ifFile.length();

  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(2, dsi.getDeadRecordCount());
  assertEquals(3, dsi.getLiveEntryCount());
  assertEquals(false, crfFile.exists());
  assertEquals(false, drfFile.exists());
  assertEquals(false, krfFile.exists());
  // oplog2 contains two updates so it remains unchanged
  assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf2File.length());

  File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
  File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
  File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
  assertEquals(true, krf3File.exists());
  assertEquals(true, crf3File.exists());
  assertEquals(true, drf3File.exists());
  // after offline compaction the rvv is reset, and _3.crf contains only one update-with-key entry (key0)
  crfsize = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {1}, false);
  int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
  assertEquals(crfsize + updatesize0, crf3File.length());
  assertEquals(Oplog.OPLOG_DISK_STORE_REC_SIZE+Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE+getRVVSize(1, new int[] {0}, true), drf3File.length());
  assertEquals(originalIfLength, ifFile.length());

  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(3, r.size());
  assertEquals("value0", r.get("key0"));
  assertEquals("update1", r.get("key1"));
  assertEquals("update2", r.get("key2"));

  // if the test passed, clean up the files
  r.destroyRegion();
  diskStore.destroy();
}