Java Code Examples for org.mockito.internal.util.reflection.Whitebox#getInternalState()

The following examples show how to use org.mockito.internal.util.reflection.Whitebox#getInternalState(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
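Before diving in: Whitebox.getInternalState(Object target, String fieldName) reflectively reads a private field by name and returns it as a plain Object, so callers cast the result to the expected type, as every example below does. Note that the class sits in Mockito's internal package, so it is not part of the supported public API, and later Mockito releases dropped it. Here is a minimal sketch of the pattern; the Counter class and its count field are hypothetical, invented purely for illustration:

import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;

public class Counter {
    private int count = 41; // private state a test may want to inspect
    public void increment() { count++; }
}

@Test
public void readsPrivateStateWithWhitebox() {
    Counter counter = new Counter();
    counter.increment();
    // getInternalState looks the field up by name in the class hierarchy,
    // makes it accessible, and returns its value as Object.
    int count = (Integer) Whitebox.getInternalState(counter, "count");
    Assert.assertEquals(42, count);
}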
Example 1
Source File: TestDFSOutputStream.java    From big-c with Apache License 2.0
/**
 * The computePacketChunkSize() method of DFSOutputStream should set the actual
 * packet size < 64kB. See HDFS-7308 for details.
 */
@Test
public void testComputePacketChunkSize()
    throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  FSDataOutputStream os = fs.create(new Path("/test"));
  DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
      "wrappedStream");

  final int packetSize = 64*1024;
  final int bytesPerChecksum = 512;

  Method method = dos.getClass().getDeclaredMethod("computePacketChunkSize",
      int.class, int.class);
  method.setAccessible(true);
  method.invoke(dos, packetSize, bytesPerChecksum);

  Field field = dos.getClass().getDeclaredField("packetSize");
  field.setAccessible(true);

  Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
  // If PKT_MAX_HEADER_LEN is 257, the actual packet size would exceed 64KB
  // without the fix from HDFS-7308.
  Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
}
 
Example 2
Source File: TestDFSOutputStream.java    From big-c with Apache License 2.0
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test
public void testCloseTwice() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  FSDataOutputStream os = fs.create(new Path("/test"));
  DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
      "wrappedStream");
  @SuppressWarnings("unchecked")
  AtomicReference<IOException> ex = (AtomicReference<IOException>) Whitebox
      .getInternalState(dos, "lastException");
  Assert.assertEquals(null, ex.get());

  dos.close();

  IOException dummy = new IOException("dummy");
  ex.set(dummy);
  try {
    dos.close();
  } catch (IOException e) {
    Assert.assertEquals(dummy, e);
  }
  Assert.assertEquals(null, ex.get());
  dos.close();
}
 
Example 3
Source File: TestDeleteRace.java    From big-c with Apache License 2.0
@Override
public void run() {
  try {
    Thread.sleep(1000);
    LOG.info("Deleting" + path);
    final FSDirectory fsdir = cluster.getNamesystem().dir;
    INode fileINode = fsdir.getINode4Write(path.toString());
    INodeMap inodeMap = (INodeMap) Whitebox.getInternalState(fsdir,
        "inodeMap");

    fs.delete(path, false);
    // after deletion, add the inode back to the inodeMap
    inodeMap.put(fileINode);
    LOG.info("Deleted" + path);
  } catch (Exception e) {
    LOG.info(e);
  }
}
 
Example 4
Source File: WingtipsToZipkinLifecycleListenerTest.java    From wingtips with Apache License 2.0
@DataProvider(value = {
    "true",
    "false"
})
@Test
public void convenience_constructor_sets_fields_as_expected(boolean baseUrlTrailingSlash) throws MalformedURLException {
    // given
    String baseUrlWithoutTrailingSlash = "http://localhost:4242";
    String baseUrl = (baseUrlTrailingSlash)
                     ? baseUrlWithoutTrailingSlash + "/"
                     : baseUrlWithoutTrailingSlash;

    // when
    WingtipsToZipkinLifecycleListener listener = new WingtipsToZipkinLifecycleListener(serviceName, baseUrl);

    // then
    assertThat(listener.serviceName).isEqualTo(serviceName);
    assertThat(listener.zipkinEndpoint.serviceName()).isEqualTo(serviceName);
    assertThat(listener.zipkinSpanConverter).isInstanceOf(WingtipsToZipkinSpanConverterDefaultImpl.class);
    assertThat(listener.zipkinSpanReporter).isInstanceOf(AsyncReporter.class);
    Object spanSender = Whitebox.getInternalState(listener.zipkinSpanReporter, "sender");
    assertThat(spanSender).isInstanceOf(URLConnectionSender.class);
    assertThat(Whitebox.getInternalState(spanSender, "endpoint"))
        .isEqualTo(new URL(baseUrlWithoutTrailingSlash + "/api/v2/spans"));
}
 
Example 5
Source File: CCParticleTest.java    From cocos-ui-libgdx with Apache License 2.0
@Test
@NeedGL
public void shouldParseParticle() throws Exception {
    CocoStudioUIEditor editor = new CocoStudioUIEditor(
        Gdx.files.internal("particle/MainScene.json"), null, null, null, null);

    Group group = editor.createGroup();
    CCParticleActor particleActor = group.findActor("Particle_1");
    Object modeA = Whitebox.getInternalState(particleActor, "modeA");
    Float speedVar = (Float) Whitebox.getInternalState(modeA, "speedVar");
    Float tangentialAccel = (Float) Whitebox.getInternalState(modeA, "tangentialAccel");
    Float tangentialAccelVar = (Float) Whitebox.getInternalState(modeA, "tangentialAccelVar");
    assertThat(speedVar, is(190.79f));
    assertThat(tangentialAccel, is(-92.11f));
    assertThat(tangentialAccelVar, is(65.79f));

    Object modeB = Whitebox.getInternalState(particleActor, "modeB");
    Float startRadius = (Float) Whitebox.getInternalState(modeB, "startRadius");
    Float endRadius = (Float) Whitebox.getInternalState(modeB, "endRadius");
    Float rotatePerSecond = (Float) Whitebox.getInternalState(modeB, "rotatePerSecond");
    assertThat(startRadius, is(0f));
    assertThat(endRadius, is(0f));
    assertThat(rotatePerSecond, is(0f));
}
 
Example 6
Source File: TracerTest.java    From wingtips with Apache License 2.0
@Test
public void make_code_coverage_happy3() {
    Logger tracerClassLogger = (Logger) Whitebox.getInternalState(Tracer.getInstance(), "classLogger");
    Level origLevel = tracerClassLogger.getLevel();
    try {
        // Enable debug logging.
        tracerClassLogger.setLevel(Level.DEBUG);
        // Exercise a span lifecycle to trigger the code branches that only do something if debug logging is on.
        Tracer.getInstance().startRequestWithRootSpan("foo");
        Tracer.getInstance().completeRequestSpan();
    }
    finally {
        tracerClassLogger.setLevel(origLevel);
    }
}
 
Example 7
Source File: LoggerFactoryTest.java    From slf4j-json-logger with Apache License 2.0
@Test
public void testGetLoggerByClass() throws Exception {
  Logger result = LoggerFactory.getLogger(LoggerFactoryTest.class);
  org.slf4j.Logger slf4jLogger = (org.slf4j.Logger) Whitebox.getInternalState(result,
                                                                              "slf4jLogger");

  assertNotNull(slf4jLogger);
  assertEquals(LoggerFactoryTest.class.getName(), slf4jLogger.getName());
}
 
Example 8
Source File: TestPortmap.java    From hadoop with Apache License 2.0
@Test(timeout = 1000)
public void testRegistration() throws IOException, InterruptedException {
  XDR req = new XDR();
  RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
      RpcProgramPortmap.VERSION,
      RpcProgramPortmap.PMAPPROC_SET,
      new CredentialsNone(), new VerifierNone()).write(req);

  PortmapMapping sent = new PortmapMapping(90000, 1,
      PortmapMapping.TRANSPORT_TCP, 1234);
  sent.serialize(req);

  byte[] reqBuf = req.getBytes();
  DatagramSocket s = new DatagramSocket();
  DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
      pm.getUdpServerLoAddress());
  try {
    s.send(p);
  } finally {
    s.close();
  }

  // Give the server a chance to process the request
  Thread.sleep(100);
  boolean found = false;
  @SuppressWarnings("unchecked")
  Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
      .getInternalState(pm.getHandler(), "map");

  for (PortmapMapping m : map.values()) {
    if (m.getPort() == sent.getPort()
        && PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Registration failed", found);
}
 
Example 9
Source File: TestPortmap.java    From big-c with Apache License 2.0
@Test(timeout = 1000)
public void testRegistration() throws IOException, InterruptedException {
  XDR req = new XDR();
  RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
      RpcProgramPortmap.VERSION,
      RpcProgramPortmap.PMAPPROC_SET,
      new CredentialsNone(), new VerifierNone()).write(req);

  PortmapMapping sent = new PortmapMapping(90000, 1,
      PortmapMapping.TRANSPORT_TCP, 1234);
  sent.serialize(req);

  byte[] reqBuf = req.getBytes();
  DatagramSocket s = new DatagramSocket();
  DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
      pm.getUdpServerLoAddress());
  try {
    s.send(p);
  } finally {
    s.close();
  }

  // Give the server a chance to process the request
  Thread.sleep(100);
  boolean found = false;
  @SuppressWarnings("unchecked")
  Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
      .getInternalState(pm.getHandler(), "map");

  for (PortmapMapping m : map.values()) {
    if (m.getPort() == sent.getPort()
        && PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Registration failed", found);
}
 
Example 10
Source File: ClickHouseStatementImplTest.java    From clickhouse-jdbc with Apache License 2.0
/**
 * Polls the named field reflectively until it is non-null or the timeout
 * elapses, letting the test wait for lazily initialized internal state.
 */
private static Object readField(Object object, String fieldName, long timeoutSecs) {
    long start = System.currentTimeMillis();
    Object value;
    do {
        value = Whitebox.getInternalState(object, fieldName);
    } while (value == null && TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis() - start) < timeoutSecs);

    return value;
}
 
Example 11
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test (timeout=300000)
public void testClientRetrySafeMode() throws Exception {
  final Map<Path, Boolean> results = Collections
      .synchronizedMap(new HashMap<Path, Boolean>());
  final Path test = new Path("/test");
  // let nn0 enter safemode
  NameNodeAdapter.enterSafeMode(nn0, false);
  SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
      nn0.getNamesystem(), "safeMode");
  Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
  LOG.info("enter safemode");
  new Thread() {
    @Override
    public void run() {
      try {
        boolean mkdir = fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test, mkdir);
          TestHASafeMode.this.notifyAll();
        }
      } catch (Exception e) {
        LOG.info("Got Exception while calling mkdir", e);
      }
    }
  }.start();
  
  // make sure the client's call has actually been handled by the active NN
  assertFalse("The directory should not be created while NN in safemode",
      fs.exists(test));
  
  Thread.sleep(1000);
  // let nn0 leave safemode
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");
  
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
 
Example 12
Source File: TestPendingInvalidateBlock.java    From big-c with Apache License 2.0
/**
 * Test whether we can delay the deletion of unknown blocks in DataNode's
 * first several block reports.
 */
@Test
public void testPendingDeleteUnknownBlocks() throws Exception {
  final int fileNum = 5; // 5 files
  final Path[] files = new Path[fileNum];
  final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
  // create a group of files, each file contains 1 block
  for (int i = 0; i < fileNum; i++) {
    files[i] = new Path("/file" + i);
    DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
  }
  // wait until all DataNodes have replicas
  waitForReplication();
  for (int i = REPLICATION - 1; i >= 0; i--) {
    dnprops[i] = cluster.stopDataNode(i);
  }
  Thread.sleep(2000);
  // delete 2 files, we still have 3 files remaining so that we can cover
  // every DN storage
  for (int i = 0; i < 2; i++) {
    dfs.delete(files[i], true);
  }

  // restart NameNode
  cluster.restartNameNode(false);
  InvalidateBlocks invalidateBlocks = (InvalidateBlocks) Whitebox
      .getInternalState(cluster.getNamesystem().getBlockManager(),
          "invalidateBlocks");
  InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
  Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
  Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),
      "invalidateBlocks", mockIb);

  Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
  // restart DataNodes
  for (int i = 0; i < REPLICATION; i++) {
    cluster.restartDataNode(dnprops[i], true);
  }
  cluster.waitActive();

  for (int i = 0; i < REPLICATION; i++) {
    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
  }
  Thread.sleep(2000);
  // make sure we have received block reports by checking the total block #
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());

  cluster.restartNameNode(true);
  Thread.sleep(6000);
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}
 
Example 13
Source File: WingtipsWithZipkinSpringBoot2WebfluxConfigurationTest.java    From wingtips with Apache License 2.0
@DataProvider(value = {
    "NULL_DEFAULT_OVERRIDES",
    "NO_OVERRIDES",
    "WITH_REPORTER_OVERRIDE",
    "WITH_CONVERTER_OVERRIDE",
    "WITH_REPORTER_AND_CONVERTER_OVERRIDE"
})
@Test
public void constructor_registers_WingtipsToZipkinLifecycleListener_with_expected_values(
    DefaultOverridesScenario scenario
) throws MalformedURLException {
    // given
    String baseUrl = "http://localhost:4242/" + UUID.randomUUID().toString();
    String serviceName = UUID.randomUUID().toString();
    WingtipsZipkinProperties props = generateProps(false, baseUrl, serviceName);

    // when
    WingtipsWithZipkinSpringBoot2WebfluxConfiguration config =
        new WingtipsWithZipkinSpringBoot2WebfluxConfiguration(props, scenario.defaultOverrides);

    // then
    assertThat(config.wingtipsZipkinProperties).isSameAs(props);

    List<SpanLifecycleListener> listeners = Tracer.getInstance().getSpanLifecycleListeners();
    assertThat(listeners).hasSize(1);
    assertThat(listeners.get(0)).isInstanceOf(WingtipsToZipkinLifecycleListener.class);
    WingtipsToZipkinLifecycleListener listener = (WingtipsToZipkinLifecycleListener) listeners.get(0);

    assertThat(Whitebox.getInternalState(listener, "serviceName")).isEqualTo(serviceName);
    assertThat(Whitebox.getInternalState(listener, "zipkinEndpoint"))
        .isEqualTo(Endpoint.newBuilder().serviceName(serviceName).build());
    assertThat(Whitebox.getInternalState(listener, "zipkinSpanConverter")).isNotNull();

    Object zipkinSpanReporter = Whitebox.getInternalState(listener, "zipkinSpanReporter");
    Object zipkinSpanConverter = Whitebox.getInternalState(listener, "zipkinSpanConverter");

    if (scenario.defaultOverrides != null) {
        assertThat(config.zipkinReporterOverride).isSameAs(scenario.defaultOverrides.zipkinReporter);
        assertThat(config.zipkinSpanConverterOverride).isSameAs(scenario.defaultOverrides.zipkinSpanConverter);

        if (scenario.defaultOverrides.zipkinReporter != null) {
            assertThat(zipkinSpanReporter).isSameAs(scenario.defaultOverrides.zipkinReporter);
        }

        if (scenario.defaultOverrides.zipkinSpanConverter != null) {
            assertThat(zipkinSpanConverter).isSameAs(scenario.defaultOverrides.zipkinSpanConverter);
        }
    }

    if (scenario.defaultOverrides == null || scenario.defaultOverrides.zipkinReporter == null) {
        assertThat(zipkinSpanReporter).isInstanceOf(AsyncReporter.class);
        Object spanSender = Whitebox.getInternalState(zipkinSpanReporter, "sender");
        assertThat(spanSender).isInstanceOf(URLConnectionSender.class);
        assertThat(Whitebox.getInternalState(spanSender, "endpoint"))
            .isEqualTo(new URL(baseUrl + "/api/v2/spans"));

        assertThat(config.zipkinReporterOverride).isNull();
    }
    
    if (scenario.defaultOverrides == null || scenario.defaultOverrides.zipkinSpanConverter == null) {
        assertThat(zipkinSpanConverter).isInstanceOf(WingtipsToZipkinSpanConverterDefaultImpl.class);
        assertThat(config.zipkinSpanConverterOverride).isNull();
    }
}
 
Example 14
Source File: TestHftpDelegationToken.java    From hadoop with Apache License 2.0
/**
 * Test whether HftpFileSystem maintains wire-compatibility for 0.20.203 when
 * obtaining delegation token. See HDFS-5440 for more details.
 */
@Test
public void testTokenCompatibilityFor203() throws IOException,
    URISyntaxException, AuthenticationException {
  Configuration conf = new Configuration();
  HftpFileSystem fs = new HftpFileSystem();

  Token<?> token = new Token<TokenIdentifier>(new byte[0], new byte[0],
      DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text(
          "127.0.0.1:8020"));
  Credentials cred = new Credentials();
  cred.addToken(HftpFileSystem.TOKEN_KIND, token);
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  cred.write(new DataOutputStream(os));

  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(os.toByteArray())).when(conn)
      .getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  final URI uri = new URI("hftp://127.0.0.1:8020");
  fs.initialize(uri, conf);
  fs.connectionFactory = factory;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
      new String[] { "bar" });

  TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(
      fs, SecurityUtil.buildTokenService(uri), HftpFileSystem.TOKEN_KIND);

  tokenAspect.initDelegationToken(ugi);
  tokenAspect.ensureTokenInitialized();

  Assert.assertSame(HftpFileSystem.TOKEN_KIND, fs.getRenewToken().getKind());

  Token<?> tok = (Token<?>) Whitebox.getInternalState(fs, "delegationToken");
  Assert.assertNotSame("Not making a copy of the remote token", token, tok);
  Assert.assertEquals(token.getKind(), tok.getKind());
}
 
Example 15
Source File: TestHostFileManager.java    From hadoop with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testIncludeExcludeLists() throws IOException {
  BlockManager bm = mock(BlockManager.class);
  FSNamesystem fsn = mock(FSNamesystem.class);
  Configuration conf = new Configuration();
  HostFileManager hm = mock(HostFileManager.class);
  HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
  HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();

  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("localhost:12345"));
  includedNodes.add(entry("127.0.0.1:12345"));

  includedNodes.add(entry("127.0.0.2"));
  excludedNodes.add(entry("127.0.0.1:12346"));
  excludedNodes.add(entry("127.0.30.1:12346"));

  Assert.assertEquals(2, includedNodes.size());
  Assert.assertEquals(2, excludedNodes.size());

  doReturn(includedNodes).when(hm).getIncludes();
  doReturn(excludedNodes).when(hm).getExcludes();

  DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
  Whitebox.setInternalState(dm, "hostFileManager", hm);
  Map<String, DatanodeDescriptor> dnMap = (Map<String,
          DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

  // After the de-duplication, there should be only one DN from the included
  // nodes declared as dead.
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.ALL).size());
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
          "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
          "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
          ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
  DFSTestUtil.setDatanodeDead(spam);
  includedNodes.add(entry("127.0.0.3:12345"));
  dnMap.put("uuid-spam", spam);
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.remove("uuid-spam");
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  excludedNodes.add(entry("127.0.0.3"));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
}
 
Example 16
Source File: TestHostFileManager.java    From big-c with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testIncludeExcludeLists() throws IOException {
  BlockManager bm = mock(BlockManager.class);
  FSNamesystem fsn = mock(FSNamesystem.class);
  Configuration conf = new Configuration();
  HostFileManager hm = mock(HostFileManager.class);
  HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
  HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();

  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("localhost:12345"));
  includedNodes.add(entry("127.0.0.1:12345"));

  includedNodes.add(entry("127.0.0.2"));
  excludedNodes.add(entry("127.0.0.1:12346"));
  excludedNodes.add(entry("127.0.30.1:12346"));

  Assert.assertEquals(2, includedNodes.size());
  Assert.assertEquals(2, excludedNodes.size());

  doReturn(includedNodes).when(hm).getIncludes();
  doReturn(excludedNodes).when(hm).getExcludes();

  DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
  Whitebox.setInternalState(dm, "hostFileManager", hm);
  Map<String, DatanodeDescriptor> dnMap = (Map<String,
          DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

  // After the de-duplication, there should be only one DN from the included
  // nodes declared as dead.
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.ALL).size());
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
          "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
          "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
          ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
  DFSTestUtil.setDatanodeDead(spam);
  includedNodes.add(entry("127.0.0.3:12345"));
  dnMap.put("uuid-spam", spam);
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  dnMap.remove("uuid-spam");
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
  excludedNodes.add(entry("127.0.0.3"));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
          .DatanodeReportType.DEAD).size());
}
 
Example 17
Source File: EffectivityConfigSpecTest.java    From eplmp with Eclipse Public License 1.0
@Test
public void filterPartIterationTest(){

    //------------- TEST : No effectivities on part revisions -------------
    //## BEGIN CONFIGURATION
    when(pM1_pR1.getEffectivities()).thenReturn(Collections.EMPTY_SET);
    when(pM1_pR2.getEffectivities()).thenReturn(Collections.EMPTY_SET);
    when(pM1_pR3.getEffectivities()).thenReturn(Collections.EMPTY_SET);
    //## END CONFIGURATION
    PartIteration result = effectivityConfigSpec.filterPartIteration(pM1);

    //## BEGIN VERIFICATION
    Assert.assertNull(result);
    //## END VERIFICATION

    //------------- TEST : Effectivity for only one part revision -------------
    //## BEGIN CONFIGURATION
    when(pM1_pR1.getEffectivities()).thenReturn(Collections.singleton(pM1_eF1));
    when(pM1_pR1.getLastIteration()).thenReturn(pM1_pI1);
    doReturn(true).when(effectivityConfigSpec).isEffective(pM1_eF1);
    //## END CONFIGURATION

    result = effectivityConfigSpec.filterPartIteration(pM1);

    //## BEGIN VERIFICATION
    Assert.assertNotNull(result);
    Assert.assertEquals(pM1_pI1,result);
    //## END VERIFICATION

    //------------- TEST : Effectivity for all part revisions -------------
    //Notice : according to the implementation, only the last iteration of the
    // revision list will be returned, so we only configure the last one.
    //## BEGIN CONFIGURATION
    when(pM1_pR3.getEffectivities()).thenReturn(Collections.singleton(pM1_eF3));
    when(pM1_pR3.getLastIteration()).thenReturn(pM1_pI2);
    doReturn(true).when(effectivityConfigSpec).isEffective(pM1_eF3);
    //## END CONFIGURATION

    result = effectivityConfigSpec.filterPartIteration(pM1);

    //## BEGIN VERIFICATION
    Assert.assertNotNull(result);
    Assert.assertEquals(pM1_pI2,result);
    //## END VERIFICATION

    //## BEGIN VERIFICATION : RETAINED LINK COUNT NUMBER
    HashSet<PartIteration> retained = (HashSet) Whitebox.getInternalState(effectivityConfigSpec, "retainedPartIterations");
    Assert.assertTrue(retained.size() == 2); // both iterations must have been retained
    //## END VERIFICATION :  RETAINED LINK COUNT NUMBER

    //## BEGIN VERIFICATION : VISITED METHOD COUNT NUMBER
    //-> Check that isEffective(PartRevision) was visited for each part revision
    //Notice : doing this validates the browsing of the part revision list
    verify(effectivityConfigSpec,times(2)).isEffective(pM1_pR1);
    verify(effectivityConfigSpec,times(2)).isEffective(pM1_pR2);
    verify(effectivityConfigSpec,times(3)).isEffective(pM1_pR3);
    //## END VERIFICATION : VISITED METHOD COUNT NUMBER
}
 
Example 18
Source File: WingtipsWithZipkinSpringBootConfigurationTest.java    From wingtips with Apache License 2.0
@DataProvider(value = {
    "NULL_DEFAULT_OVERRIDES",
    "NO_OVERRIDES",
    "WITH_REPORTER_OVERRIDE",
    "WITH_CONVERTER_OVERRIDE",
    "WITH_REPORTER_AND_CONVERTER_OVERRIDE"
})
@Test
public void constructor_registers_WingtipsToZipkinLifecycleListener_with_expected_values(
    DefaultOverridesScenario scenario
) throws MalformedURLException {
    // given
    String baseUrl = "http://localhost:4242/" + UUID.randomUUID().toString();
    String serviceName = UUID.randomUUID().toString();
    WingtipsZipkinProperties props = generateProps(false, baseUrl, serviceName);

    // when
    new WingtipsWithZipkinSpringBootConfiguration(props, scenario.defaultOverrides);

    // then
    List<SpanLifecycleListener> listeners = Tracer.getInstance().getSpanLifecycleListeners();
    assertThat(listeners).hasSize(1);
    assertThat(listeners.get(0)).isInstanceOf(WingtipsToZipkinLifecycleListener.class);
    WingtipsToZipkinLifecycleListener listener = (WingtipsToZipkinLifecycleListener) listeners.get(0);

    assertThat(Whitebox.getInternalState(listener, "serviceName")).isEqualTo(serviceName);
    assertThat(Whitebox.getInternalState(listener, "zipkinEndpoint"))
        .isEqualTo(Endpoint.newBuilder().serviceName(serviceName).build());
    assertThat(Whitebox.getInternalState(listener, "zipkinSpanConverter")).isNotNull();

    Object zipkinSpanReporter = Whitebox.getInternalState(listener, "zipkinSpanReporter");
    Object zipkinSpanConverter = Whitebox.getInternalState(listener, "zipkinSpanConverter");

    if (scenario.defaultOverrides != null) {
        if (scenario.defaultOverrides.zipkinReporter != null) {
            assertThat(zipkinSpanReporter).isSameAs(scenario.defaultOverrides.zipkinReporter);
        }

        if (scenario.defaultOverrides.zipkinSpanConverter != null) {
            assertThat(zipkinSpanConverter).isSameAs(scenario.defaultOverrides.zipkinSpanConverter);
        }
    }

    if (scenario.defaultOverrides == null || scenario.defaultOverrides.zipkinReporter == null) {
        assertThat(zipkinSpanReporter).isInstanceOf(AsyncReporter.class);
        Object spanSender = Whitebox.getInternalState(zipkinSpanReporter, "sender");
        assertThat(spanSender).isInstanceOf(URLConnectionSender.class);
        assertThat(Whitebox.getInternalState(spanSender, "endpoint"))
            .isEqualTo(new URL(baseUrl + "/api/v2/spans"));
    }
    
    if (scenario.defaultOverrides == null || scenario.defaultOverrides.zipkinSpanConverter == null) {
        assertThat(zipkinSpanConverter).isInstanceOf(WingtipsToZipkinSpanConverterDefaultImpl.class);
    }
}
 
Example 19
Source File: TestMergeManager.java    From hadoop with Apache License 2.0
@SuppressWarnings({ "unchecked", "deprecation" })
@Test(timeout=10000)
public void testOnDiskMerger() throws IOException, URISyntaxException,
  InterruptedException {
  JobConf jobConf = new JobConf();
  final int SORT_FACTOR = 5;
  jobConf.setInt(MRJobConfig.IO_SORT_FACTOR, SORT_FACTOR);

  MapOutputFile mapOutputFile = new MROutputFiles();
  FileSystem fs = FileSystem.getLocal(jobConf);
  MergeManagerImpl<IntWritable, IntWritable> manager =
    new MergeManagerImpl<IntWritable, IntWritable>(null, jobConf, fs, null
      , null, null, null, null, null, null, null, null, null, mapOutputFile);

  MergeThread<MapOutput<IntWritable, IntWritable>, IntWritable, IntWritable>
    onDiskMerger = (MergeThread<MapOutput<IntWritable, IntWritable>,
      IntWritable, IntWritable>) Whitebox.getInternalState(manager,
        "onDiskMerger");
  int mergeFactor = (Integer) Whitebox.getInternalState(onDiskMerger,
    "mergeFactor");

  // make sure the io.sort.factor is set properly
  assertEquals(mergeFactor, SORT_FACTOR);

  // Stop the onDiskMerger thread so that we can intercept the list of files
  // waiting to be merged.
  onDiskMerger.suspend();

  //Send the list of fake files waiting to be merged
  Random rand = new Random();
  for(int i = 0; i < 2*SORT_FACTOR; ++i) {
    Path path = new Path("somePath");
    CompressAwarePath cap = new CompressAwarePath(path, 1L, rand.nextInt());
    manager.closeOnDiskFile(cap);
  }

  //Check that the files pending to be merged are in sorted order.
  LinkedList<List<CompressAwarePath>> pendingToBeMerged =
    (LinkedList<List<CompressAwarePath>>) Whitebox.getInternalState(
      onDiskMerger, "pendingToBeMerged");
  assertTrue("No inputs were added to list pending to merge",
    pendingToBeMerged.size() > 0);
  for(int i = 0; i < pendingToBeMerged.size(); ++i) {
    List<CompressAwarePath> inputs = pendingToBeMerged.get(i);
    for(int j = 1; j < inputs.size(); ++j) {
      assertTrue("Not enough / too many inputs were going to be merged",
        inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
      assertTrue("Inputs to be merged were not sorted according to size: ",
        inputs.get(j).getCompressedSize()
        >= inputs.get(j-1).getCompressedSize());
    }
  }

}
 
Example 20
Source File: TestMergeManager.java    From big-c with Apache License 2.0
@SuppressWarnings({ "unchecked", "deprecation" })
@Test(timeout=10000)
public void testOnDiskMerger() throws IOException, URISyntaxException,
  InterruptedException {
  JobConf jobConf = new JobConf();
  final int SORT_FACTOR = 5;
  jobConf.setInt(MRJobConfig.IO_SORT_FACTOR, SORT_FACTOR);

  MapOutputFile mapOutputFile = new MROutputFiles();
  FileSystem fs = FileSystem.getLocal(jobConf);
  MergeManagerImpl<IntWritable, IntWritable> manager =
    new MergeManagerImpl<IntWritable, IntWritable>(null, jobConf, fs, null
      , null, null, null, null, null, null, null, null, null, mapOutputFile);

  MergeThread<MapOutput<IntWritable, IntWritable>, IntWritable, IntWritable>
    onDiskMerger = (MergeThread<MapOutput<IntWritable, IntWritable>,
      IntWritable, IntWritable>) Whitebox.getInternalState(manager,
        "onDiskMerger");
  int mergeFactor = (Integer) Whitebox.getInternalState(onDiskMerger,
    "mergeFactor");

  // make sure the io.sort.factor is set properly
  assertEquals(mergeFactor, SORT_FACTOR);

  // Stop the onDiskMerger thread so that we can intercept the list of files
  // waiting to be merged.
  onDiskMerger.suspend();

  //Send the list of fake files waiting to be merged
  Random rand = new Random();
  for(int i = 0; i < 2*SORT_FACTOR; ++i) {
    Path path = new Path("somePath");
    CompressAwarePath cap = new CompressAwarePath(path, 1L, rand.nextInt());
    manager.closeOnDiskFile(cap);
  }

  //Check that the files pending to be merged are in sorted order.
  LinkedList<List<CompressAwarePath>> pendingToBeMerged =
    (LinkedList<List<CompressAwarePath>>) Whitebox.getInternalState(
      onDiskMerger, "pendingToBeMerged");
  assertTrue("No inputs were added to list pending to merge",
    pendingToBeMerged.size() > 0);
  for(int i = 0; i < pendingToBeMerged.size(); ++i) {
    List<CompressAwarePath> inputs = pendingToBeMerged.get(i);
    for(int j = 1; j < inputs.size(); ++j) {
      assertTrue("Not enough / too many inputs were going to be merged",
        inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
      assertTrue("Inputs to be merged were not sorted according to size: ",
        inputs.get(j).getCompressedSize()
        >= inputs.get(j-1).getCompressedSize());
    }
  }

}